aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--application/src/main/java/com/yahoo/application/Application.java14
-rw-r--r--application/src/test/app-packages/model-evaluation/models/onnx/mnist_softmax.onnxbin0 -> 31758 bytes
-rw-r--r--application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/saved_model.pbtxt8830
-rw-r--r--application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/variables/variables.data-00000-of-00001bin0 -> 1066440 bytes
-rw-r--r--application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/variables/variables.indexbin0 -> 308 bytes
-rw-r--r--application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/simple_mnist.py100
-rw-r--r--application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/mnist_sftmax_with_saving.py93
-rw-r--r--application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/saved_model.pbtxt5039
-rw-r--r--application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/variables/variables.data-00000-of-00001bin0 -> 31400 bytes
-rw-r--r--application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/variables/variables.indexbin0 -> 165 bytes
-rw-r--r--application/src/test/app-packages/model-evaluation/models/vespa/constant1asLarge.json7
-rw-r--r--application/src/test/app-packages/model-evaluation/models/vespa/example.model25
-rw-r--r--application/src/test/app-packages/model-evaluation/models/xgboost/xgboost.2.2.json19
-rw-r--r--application/src/test/app-packages/model-evaluation/services.xml3
-rw-r--r--application/src/test/java/com/yahoo/application/container/ContainerDocprocTest.java (renamed from application/src/test/java/com/yahoo/application/container/JDiscContainerDocprocTest.java)2
-rw-r--r--application/src/test/java/com/yahoo/application/container/ContainerModelEvaluationTest.java82
-rw-r--r--application/src/test/java/com/yahoo/application/container/ContainerProcessingTest.java (renamed from application/src/test/java/com/yahoo/application/container/JDiscContainerProcessingTest.java)8
-rw-r--r--application/src/test/java/com/yahoo/application/container/ContainerRequestTest.java (renamed from application/src/test/java/com/yahoo/application/container/JDiscContainerRequestTest.java)12
-rw-r--r--application/src/test/java/com/yahoo/application/container/ContainerSearchTest.java (renamed from application/src/test/java/com/yahoo/application/container/JDiscContainerSearchTest.java)8
-rw-r--r--application/src/test/java/com/yahoo/application/container/ContainerTest.java (renamed from application/src/test/java/com/yahoo/application/container/JDiscTest.java)14
-rw-r--r--application/src/test/java/com/yahoo/application/container/jersey/JerseyTest.java5
-rw-r--r--build_settings.cmake2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java28
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/Endpoint.java3
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java (renamed from configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java)10
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java3
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/TlsSecrets.java30
-rw-r--r--config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java6
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java19
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java13
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/Index.java12
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexSchema.java10
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/IndexOperation.java11
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java24
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java9
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java8
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultPublicConsumer.java32
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultPublicMetrics.java120
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/SystemMetrics.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricsConsumer.java14
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java9
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/TlsSecretsValidator.java17
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java24
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java11
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredDirectSslProvider.java66
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredFilebasedSslProvider.java (renamed from config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredSslProvider.java)6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java35
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerServiceBuilder.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/Content.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java15
-rw-r--r--config-model/src/main/javacc/SDParser.jj4
-rw-r--r--config-model/src/main/resources/schema/content.rnc5
-rw-r--r--config-model/src/test/derived/indexschema/index-info.cfg10
-rw-r--r--config-model/src/test/derived/indexschema/indexschema.cfg50
-rw-r--r--config-model/src/test/derived/indexschema/indexschema.sd4
-rw-r--r--config-model/src/test/derived/indexschema/vsmfields.cfg6
-rw-r--r--config-model/src/test/derived/uri_array/indexschema.cfg16
-rw-r--r--config-model/src/test/derived/uri_wset/indexschema.cfg16
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/HostResourceTest.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java81
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java13
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java33
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/TlsSecretsValidatorTest.java88
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java135
-rwxr-xr-xconfig-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java42
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java47
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java41
-rw-r--r--config-provisioning/abi-spec.json28
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/CertificateNotReadyException.java17
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java11
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ParentHostUnavailableException.java2
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/TransientException.java20
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneList.java9
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java32
-rw-r--r--configdefinitions/src/vespa/indexschema.def4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/GlobalComponentRegistry.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistry.java11
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java15
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java3
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java10
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java3
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java22
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java39
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java1
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TlsSecretsKeys.java86
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistryTest.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/MockSecretStore.java35
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java11
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java19
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java98
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java7
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java51
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java6
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java4
-rw-r--r--container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java77
-rw-r--r--container-accesslogging/src/test/java/com/yahoo/container/logging/CompressWhileDrop.java10
-rw-r--r--container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java5
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/DeprecatedSecretStoreProvider.java34
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/SecretStoreProvider.java23
-rwxr-xr-xcontainer-disc/src/main/sh/vespa-start-container-daemon.sh5
-rw-r--r--container-search/abi-spec.json46
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java1
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/hitfield/AnnotateStringFieldPart.java1
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/hitfield/HitField.java62
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java10
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/NonReducibleCompositeItem.java6
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java8
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/PhraseSegmentItem.java11
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java13
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java11
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java6
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java2
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java20
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/AnyParser.java10
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/parser/CustomParser.java1
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/textualrepresentation/TextualQueryRepresentation.java11
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java3
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/ranking/SoftTimeout.java6
-rw-r--r--container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java44
-rw-r--r--container-search/src/main/java/com/yahoo/search/rendering/Renderer.java4
-rw-r--r--container-search/src/main/java/com/yahoo/search/rendering/SyncDefaultRenderer.java9
-rw-r--r--container-search/src/main/java/com/yahoo/search/result/FeatureData.java31
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/fastsearch/SlimeSummaryTestCase.java57
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/fastsearch/summary.cfg4
-rw-r--r--container-search/src/test/java/com/yahoo/search/query/SoftTimeoutTestCase.java4
-rw-r--r--container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java34
-rw-r--r--container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java2
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificate.java31
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificateProvider.java12
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/CertificateProvider.java14
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/KeyId.java18
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/KeyPairProvider.java14
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/VersionedKeyPair.java28
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/package-info.java5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java3
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java3
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/ArtifactId.java26
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/MavenRepository.java16
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/Metadata.java50
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/package-info.java5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockMavenRepository.java31
-rw-r--r--controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/maven/MetadataTest.java56
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java34
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java55
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java25
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java61
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/AssignedRotation.java61
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointId.java53
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java66
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/proxy/ConfigServerRestExecutorImpl.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java28
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/cost/CostCalculator.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v1/ZoneApiHandler.java13
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v2/ZoneApiHandler.java12
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/MavenRepositoryClient.java64
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java20
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java7
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/package-info.java5
-rw-r--r--controller-server/src/main/resources/configdefinitions/maven-repository.def15
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java8
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java7
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationCertificateMock.java14
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java34
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializerTest.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java7
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/MavenRepositoryClientTest.java22
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java24
-rw-r--r--dist/vespa.spec1
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java7
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStats.java21
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java9
-rw-r--r--docproc/abi-spec.json3
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/AbstractConcreteDocumentFactory.java50
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/Accesses.java9
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/Call.java4
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/CallStack.java107
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/SimpleDocumentProcessor.java3
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/TransientFailureException.java4
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/jdisc/RequestContext.java35
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/MessageFactory.java44
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ProcessingFactory.java5
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ResponseMerger.java3
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/jdisc/metric/NullMetric.java4
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/proxy/ProxyDocument.java13
-rw-r--r--docproc/src/main/java/com/yahoo/docproc/util/JoinerDocumentProcessor.java1
-rw-r--r--docprocs/src/main/java/com/yahoo/docprocs/indexing/DocumentScript.java1
-rw-r--r--docprocs/src/main/java/com/yahoo/docprocs/indexing/FastLogger.java1
-rw-r--r--docprocs/src/main/java/com/yahoo/docprocs/indexing/IndexingProcessor.java9
-rw-r--r--document/src/main/java/com/yahoo/document/datatypes/Struct.java14
-rwxr-xr-xdocumentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/PutDocumentMessage.java35
-rw-r--r--documentgen-test/src/main/java/com/yahoo/vespa/document/NodeImpl.java1
-rw-r--r--documentgen-test/src/main/java/com/yahoo/vespa/document/dom/DocumentImpl.java1
-rw-r--r--documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java55
-rw-r--r--eval/CMakeLists.txt2
-rw-r--r--eval/src/tests/tensor/dense_dimension_combiner/CMakeLists.txt9
-rw-r--r--eval/src/tests/tensor/dense_dimension_combiner/dense_dimension_combiner_test.cpp185
-rw-r--r--eval/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt8
-rw-r--r--eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp32
-rw-r--r--eval/src/vespa/eval/tensor/dense/CMakeLists.txt4
-rw-r--r--eval/src/vespa/eval/tensor/dense/dense_dimension_combiner.cpp91
-rw-r--r--eval/src/vespa/eval/tensor/dense/dense_dimension_combiner.h114
-rw-r--r--eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp63
-rw-r--r--eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.h123
-rw-r--r--eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp69
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java5
-rw-r--r--hosted-api/pom.xml10
-rw-r--r--hosted-api/src/test/java/ai/vespa/hosted/api/MultiPartStreamerTest.java16
-rw-r--r--hosted-api/src/test/java/ai/vespa/hosted/api/SignaturesTest.java14
-rw-r--r--jdisc_http_service/abi-spec.json4
-rw-r--r--jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/impl/ConfiguredSslContextFactoryProvider.java34
-rw-r--r--jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def10
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/IntermediateSession.java17
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java3
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/GenericMetricsHandler.java35
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/ExternalMetrics.java2
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java1
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/GenericMetricsHandlerTest.java133
-rw-r--r--metrics/CMakeLists.txt3
-rw-r--r--metrics/src/tests/metricmanagertest.cpp2
-rw-r--r--model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Softmax.java40
-rw-r--r--model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java2
-rw-r--r--model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java2
-rw-r--r--model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java2
-rw-r--r--model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java2
-rw-r--r--model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/SoftmaxImportTestCase.java29
-rw-r--r--model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java10
-rw-r--r--model-integration/src/test/models/tensorflow/softmax/saved/saved_model.pbtxt1999
-rw-r--r--model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.data-00000-of-00001bin0 -> 72 bytes
-rw-r--r--model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.indexbin0 -> 155 bytes
-rw-r--r--model-integration/src/test/models/tensorflow/softmax/softmax.py29
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java10
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConnectionException.java43
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java20
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java8
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java246
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java10
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java3
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java17
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java42
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java8
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java4
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java8
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java144
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/network/IPVersion.java51
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java22
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java5
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java8
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java2
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java16
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java2
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java2
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java36
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java72
-rw-r--r--node-admin/src/test/resources/docker.stats.json12
-rw-r--r--node-admin/src/test/resources/expected.container.system.metrics.0.txt (renamed from node-admin/src/test/resources/expected.container.system.metrics.txt)2
-rw-r--r--node-admin/src/test/resources/expected.container.system.metrics.1.txt82
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java58
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java22
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java13
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceException.java17
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java40
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java25
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirer.java231
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java38
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java18
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java61
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicy.java22
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicyCache.java28
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicyList.java39
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java25
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java47
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java88
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareChecker.java117
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCount.java103
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java124
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java20
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java19
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java19
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerServiceTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java8
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java9
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java69
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java12
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTest.java176
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTester.java214
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java63
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java60
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java3
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java6
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodesTest.java87
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java41
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java16
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCheckerTest.java229
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCountTest.java121
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java161
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizerTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java16
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java13
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json36
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json34
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/maintenance.json3
-rw-r--r--parent/pom.xml13
-rw-r--r--searchcommon/src/tests/schema/load-save-cfg/indexschema.cfg2
-rw-r--r--searchcommon/src/tests/schema/schema_test.cpp6
-rw-r--r--searchcommon/src/vespa/searchcommon/common/schema.cpp12
-rw-r--r--searchcommon/src/vespa/searchcommon/common/schema.h10
-rw-r--r--searchcommon/src/vespa/searchcommon/common/schemaconfigurer.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/matcher.cpp9
-rw-r--r--searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp31
-rw-r--r--searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h1
-rw-r--r--searchlib/CMakeLists.txt1
-rw-r--r--searchlib/src/tests/diskindex/field_length_scanner/CMakeLists.txt11
-rw-r--r--searchlib/src/tests/diskindex/field_length_scanner/field_length_scanner_test.cpp73
-rw-r--r--searchlib/src/tests/diskindex/fusion/fusion_test.cpp137
-rw-r--r--searchlib/src/tests/memoryindex/compact_words_store/CMakeLists.txt1
-rw-r--r--searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp90
-rw-r--r--searchlib/src/tests/memoryindex/datastore/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp154
-rw-r--r--searchlib/src/tests/memoryindex/datastore/word_store_test.cpp60
-rw-r--r--searchlib/src/tests/memoryindex/document_inverter/CMakeLists.txt1
-rw-r--r--searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp273
-rw-r--r--searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp17
-rw-r--r--searchlib/src/tests/memoryindex/field_index/field_index_test.cpp578
-rw-r--r--searchlib/src/tests/memoryindex/field_index_remover/CMakeLists.txt1
-rw-r--r--searchlib/src/tests/memoryindex/field_index_remover/field_index_remover_test.cpp62
-rw-r--r--searchlib/src/tests/memoryindex/field_inverter/CMakeLists.txt1
-rw-r--r--searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp362
-rw-r--r--searchlib/src/tests/memoryindex/url_field_inverter/CMakeLists.txt1
-rw-r--r--searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp588
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/field_length_scanner.cpp51
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/field_length_scanner.h63
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp79
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fieldreader.h12
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fusion.cpp30
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fusion.h2
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zcposting.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/index/schemautil.cpp14
-rw-r--r--searchlib/src/vespa/searchlib/index/schemautil.h6
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/field_index.cpp51
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/field_index.h10
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/field_index_base.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/field_index_base.h1
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp10
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp201
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h42
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h51
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.cpp28
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.h2
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/iterators.h3
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp10
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java2
-rw-r--r--standalone-container/src/main/java/com/yahoo/container/standalone/StandaloneContainerApplication.java2
-rw-r--r--storage/CMakeLists.txt3
-rw-r--r--storage/src/tests/CMakeLists.txt16
-rw-r--r--storage/src/tests/bucketdb/bucketmanagertest.cpp2
-rw-r--r--storage/src/tests/bucketdb/initializertest.cpp4
-rw-r--r--storage/src/tests/common/CMakeLists.txt1
-rw-r--r--storage/src/tests/common/hostreporter/CMakeLists.txt5
-rw-r--r--storage/src/tests/common/hostreporter/hostinfotest.cpp29
-rw-r--r--storage/src/tests/common/hostreporter/util.cpp9
-rw-r--r--storage/src/tests/common/hostreporter/versionreportertest.cpp31
-rw-r--r--storage/src/tests/common/message_sender_stub.cpp (renamed from storage/src/tests/distributor/messagesenderstub.cpp)21
-rw-r--r--storage/src/tests/common/message_sender_stub.h47
-rw-r--r--storage/src/tests/common/metricstest.cpp15
-rw-r--r--storage/src/tests/common/testhelper.h19
-rw-r--r--storage/src/tests/common/teststorageapp.cpp12
-rw-r--r--storage/src/tests/distributor/CMakeLists.txt31
-rw-r--r--storage/src/tests/distributor/blockingoperationstartertest.cpp58
-rw-r--r--storage/src/tests/distributor/bucketdatabasetest.cpp16
-rw-r--r--storage/src/tests/distributor/bucketdbmetricupdatertest.cpp153
-rw-r--r--storage/src/tests/distributor/bucketdbupdatertest.cpp336
-rw-r--r--storage/src/tests/distributor/bucketgctimecalculatortest.cpp81
-rw-r--r--storage/src/tests/distributor/bucketstateoperationtest.cpp223
-rw-r--r--storage/src/tests/distributor/distributor_host_info_reporter_test.cpp64
-rw-r--r--storage/src/tests/distributor/distributor_message_sender_stub.cpp20
-rw-r--r--storage/src/tests/distributor/distributor_message_sender_stub.h98
-rw-r--r--storage/src/tests/distributor/distributortest.cpp680
-rw-r--r--storage/src/tests/distributor/distributortestutil.cpp6
-rw-r--r--storage/src/tests/distributor/distributortestutil.h12
-rw-r--r--storage/src/tests/distributor/externaloperationhandlertest.cpp344
-rw-r--r--storage/src/tests/distributor/garbagecollectiontest.cpp59
-rw-r--r--storage/src/tests/distributor/getoperationtest.cpp444
-rw-r--r--storage/src/tests/distributor/idealstatemanagertest.cpp174
-rw-r--r--storage/src/tests/distributor/joinbuckettest.cpp65
-rw-r--r--storage/src/tests/distributor/maintenancemocks.h5
-rw-r--r--storage/src/tests/distributor/maintenanceschedulertest.cpp102
-rw-r--r--storage/src/tests/distributor/mergelimitertest.cpp159
-rw-r--r--storage/src/tests/distributor/mergeoperationtest.cpp378
-rw-r--r--storage/src/tests/distributor/messagesenderstub.h68
-rw-r--r--storage/src/tests/distributor/nodeinfotest.cpp67
-rw-r--r--storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp77
-rw-r--r--storage/src/tests/distributor/operation_sequencer_test.cpp40
-rw-r--r--storage/src/tests/distributor/operationtargetresolvertest.cpp281
-rw-r--r--storage/src/tests/distributor/ownership_transfer_safe_time_point_calculator_test.cpp41
-rw-r--r--storage/src/tests/distributor/pendingmessagetrackertest.cpp239
-rw-r--r--storage/src/tests/distributor/persistence_metrics_set_test.cpp56
-rw-r--r--storage/src/tests/distributor/putoperationtest.cpp43
-rw-r--r--storage/src/tests/distributor/removebucketoperationtest.cpp95
-rw-r--r--storage/src/tests/distributor/removelocationtest.cpp71
-rw-r--r--storage/src/tests/distributor/removeoperationtest.cpp162
-rw-r--r--storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp103
-rw-r--r--storage/src/tests/distributor/simplemaintenancescannertest.cpp144
-rw-r--r--storage/src/tests/distributor/splitbuckettest.cpp217
-rw-r--r--storage/src/tests/distributor/statecheckerstest.cpp1153
-rw-r--r--storage/src/tests/distributor/statoperationtest.cpp97
-rw-r--r--storage/src/tests/distributor/statusreporterdelegatetest.cpp44
-rw-r--r--storage/src/tests/distributor/throttlingoperationstartertest.cpp145
-rw-r--r--storage/src/tests/distributor/twophaseupdateoperationtest.cpp834
-rw-r--r--storage/src/tests/distributor/updateoperationtest.cpp140
-rw-r--r--storage/src/tests/distributor/visitoroperationtest.cpp1080
-rw-r--r--storage/src/tests/gtest_runner.cpp8
-rw-r--r--storage/src/tests/persistence/CMakeLists.txt1
-rw-r--r--storage/src/tests/persistence/bucketownershipnotifiertest.cpp2
-rw-r--r--storage/src/tests/persistence/common/filestortestfixture.h1
-rw-r--r--storage/src/tests/persistence/filestorage/filestormanagertest.cpp2
-rw-r--r--storage/src/tests/persistence/mergehandlertest.cpp2
-rw-r--r--storage/src/tests/storageserver/fnet_listener_test.cpp2
-rw-r--r--storage/src/tests/storageserver/service_layer_error_listener_test.cpp2
-rw-r--r--storage/src/tests/testrunner.cpp13
-rw-r--r--storage/src/vespa/storage/config/distributorconfiguration.h8
-rw-r--r--storage/src/vespa/storage/distributor/bucketdbupdater.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/distributor.h16
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.cpp28
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.h14
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h3
-rw-r--r--storageapi/src/tests/CMakeLists.txt17
-rw-r--r--storageapi/src/tests/testrunner.cpp13
-rw-r--r--storageframework/CMakeLists.txt1
-rw-r--r--storageserver/src/tests/testhelper.h2
-rw-r--r--streamingvisitors/CMakeLists.txt3
-rw-r--r--tenant-base/pom.xml15
-rw-r--r--tenant-cd/pom.xml6
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/FunctionalTest.java31
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java15
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java17
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java19
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/UpgradeTest.java23
-rw-r--r--valgrind-suppressions.txt51
-rw-r--r--vdslib/CMakeLists.txt1
-rw-r--r--vdstestlib/CMakeLists.txt3
-rw-r--r--vdstestlib/src/tests/cppunit/.gitignore4
-rw-r--r--vdstestlib/src/tests/cppunit/CMakeLists.txt14
-rw-r--r--vdstestlib/src/tests/cppunit/cppunittest.cpp24
-rw-r--r--vdstestlib/src/tests/cppunit/testrunner.cpp10
-rw-r--r--vdstestlib/src/tests/dirconfig/dirconfigtest.cpp2
-rw-r--r--vdstestlib/src/vespa/vdstestlib/CMakeLists.txt4
-rw-r--r--vdstestlib/src/vespa/vdstestlib/config/.gitignore (renamed from vdstestlib/src/vespa/vdstestlib/cppunit/.gitignore)0
-rw-r--r--vdstestlib/src/vespa/vdstestlib/config/CMakeLists.txt (renamed from vdstestlib/src/vespa/vdstestlib/cppunit/CMakeLists.txt)3
-rw-r--r--vdstestlib/src/vespa/vdstestlib/config/dirconfig.cpp (renamed from vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.cpp)0
-rw-r--r--vdstestlib/src/vespa/vdstestlib/config/dirconfig.h (renamed from vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.h)2
-rw-r--r--vdstestlib/src/vespa/vdstestlib/config/dirconfig.hpp (renamed from vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.hpp)2
-rw-r--r--vdstestlib/src/vespa/vdstestlib/cppunit/cppunittestrunner.cpp157
-rw-r--r--vdstestlib/src/vespa/vdstestlib/cppunit/cppunittestrunner.h58
-rw-r--r--vdstestlib/src/vespa/vdstestlib/cppunit/macros.h164
-rw-r--r--vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java16
-rw-r--r--vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java14
-rw-r--r--vespa-testrunner-components/src/test/resources/pom.xml_system_tests14
-rw-r--r--vespabase/desc.vespa_base_dev2
-rw-r--r--vespajlib/src/main/java/com/yahoo/io/NativeIO.java23
-rw-r--r--vespalib/src/tests/stllike/asciistream_test.cpp35
-rw-r--r--vespalib/src/tests/testkit-testhook/CMakeLists.txt1
-rw-r--r--vespalib/src/tests/testkit-testhook/testkit-testhook_test.cpp11
-rw-r--r--vespalib/src/vespa/vespalib/stllike/asciistream.cpp106
515 files changed, 26602 insertions, 11148 deletions
diff --git a/application/src/main/java/com/yahoo/application/Application.java b/application/src/main/java/com/yahoo/application/Application.java
index fb812ba6107..dffe458c798 100644
--- a/application/src/main/java/com/yahoo/application/Application.java
+++ b/application/src/main/java/com/yahoo/application/Application.java
@@ -1,6 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.application;
+import ai.vespa.rankingexpression.importer.configmodelview.MlModelImporter;
+import ai.vespa.rankingexpression.importer.onnx.OnnxImporter;
+import ai.vespa.rankingexpression.importer.tensorflow.TensorFlowImporter;
+import ai.vespa.rankingexpression.importer.vespa.VespaImporter;
+import ai.vespa.rankingexpression.importer.xgboost.XGBoostImporter;
import com.google.common.annotations.Beta;
import com.yahoo.application.container.JDisc;
import com.yahoo.application.container.impl.StandaloneContainerRunner;
@@ -109,9 +114,13 @@ public final class Application implements AutoCloseable {
private VespaModel createVespaModel() {
try {
+ List<MlModelImporter> modelImporters = List.of(new VespaImporter(),
+ new TensorFlowImporter(),
+ new OnnxImporter(),
+ new XGBoostImporter());
DeployState deployState = new DeployState.Builder()
- .applicationPackage(FilesApplicationPackage.fromFile(path.toFile(),
- /* Include source files */ true))
+ .applicationPackage(FilesApplicationPackage.fromFile(path.toFile(), true))
+ .modelImporters(modelImporters)
.deployLogger((level, s) -> { })
.build();
return new VespaModel(new NullConfigModelRegistry(), deployState);
@@ -133,6 +142,7 @@ public final class Application implements AutoCloseable {
@Override
public void close() {
container.close();
+ IOUtils.recursiveDeleteDir(new File(path.toFile(), "models.generated"));
if (deletePathWhenClosing)
IOUtils.recursiveDeleteDir(path.toFile());
}
diff --git a/application/src/test/app-packages/model-evaluation/models/onnx/mnist_softmax.onnx b/application/src/test/app-packages/model-evaluation/models/onnx/mnist_softmax.onnx
new file mode 100644
index 00000000000..a86019bf53a
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/onnx/mnist_softmax.onnx
Binary files differ
diff --git a/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/saved_model.pbtxt b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/saved_model.pbtxt
new file mode 100644
index 00000000000..5528aa99401
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/saved_model.pbtxt
@@ -0,0 +1,8830 @@
+saved_model_schema_version: 1
+meta_graphs {
+ meta_info_def {
+ stripped_op_list {
+ op {
+ name: "Add"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_STRING
+ }
+ }
+ }
+ }
+ op {
+ name: "AddN"
+ input_arg {
+ name: "inputs"
+ type_attr: "T"
+ number_attr: "N"
+ }
+ output_arg {
+ name: "sum"
+ type_attr: "T"
+ }
+ attr {
+ name: "N"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ type: DT_VARIANT
+ }
+ }
+ }
+ is_aggregate: true
+ is_commutative: true
+ }
+ op {
+ name: "ApplyGradientDescent"
+ input_arg {
+ name: "var"
+ type_attr: "T"
+ is_ref: true
+ }
+ input_arg {
+ name: "alpha"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "delta"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "out"
+ type_attr: "T"
+ is_ref: true
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "use_locking"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ }
+ op {
+ name: "Assign"
+ input_arg {
+ name: "ref"
+ type_attr: "T"
+ is_ref: true
+ }
+ input_arg {
+ name: "value"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output_ref"
+ type_attr: "T"
+ is_ref: true
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "validate_shape"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ attr {
+ name: "use_locking"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ allows_uninitialized_input: true
+ }
+ op {
+ name: "BroadcastGradientArgs"
+ input_arg {
+ name: "s0"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "s1"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "r0"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "r1"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "Cast"
+ input_arg {
+ name: "x"
+ type_attr: "SrcT"
+ }
+ output_arg {
+ name: "y"
+ type_attr: "DstT"
+ }
+ attr {
+ name: "SrcT"
+ type: "type"
+ }
+ attr {
+ name: "DstT"
+ type: "type"
+ }
+ }
+ op {
+ name: "Const"
+ output_arg {
+ name: "output"
+ type_attr: "dtype"
+ }
+ attr {
+ name: "value"
+ type: "tensor"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ }
+ op {
+ name: "ExpandDims"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "dim"
+ type_attr: "Tdim"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "Tdim"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "Fill"
+ input_arg {
+ name: "dims"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "value"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ }
+ op {
+ name: "FloorDiv"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "GreaterEqual"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type: DT_BOOL
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_UINT8
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_HALF
+ }
+ }
+ }
+ }
+ op {
+ name: "Identity"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ }
+ op {
+ name: "InTopKV2"
+ input_arg {
+ name: "predictions"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "targets"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "k"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "precision"
+ type: DT_BOOL
+ }
+ attr {
+ name: "T"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "MatMul"
+ input_arg {
+ name: "a"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "b"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "product"
+ type_attr: "T"
+ }
+ attr {
+ name: "transpose_a"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "transpose_b"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "Maximum"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ is_commutative: true
+ }
+ op {
+ name: "Mean"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "reduction_indices"
+ type_attr: "Tidx"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "keep_dims"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tidx"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "MergeV2Checkpoints"
+ input_arg {
+ name: "checkpoint_prefixes"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "destination_prefix"
+ type: DT_STRING
+ }
+ attr {
+ name: "delete_old_dirs"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ is_stateful: true
+ }
+ op {
+ name: "Mul"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ is_commutative: true
+ }
+ op {
+ name: "NoOp"
+ }
+ op {
+ name: "Pack"
+ input_arg {
+ name: "values"
+ type_attr: "T"
+ number_attr: "N"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "N"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "axis"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ }
+ }
+ op {
+ name: "Placeholder"
+ output_arg {
+ name: "output"
+ type_attr: "dtype"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ attr {
+ name: "shape"
+ type: "shape"
+ default_value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ op {
+ name: "PreventGradient"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "message"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ }
+ op {
+ name: "Prod"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "reduction_indices"
+ type_attr: "Tidx"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "keep_dims"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tidx"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "RealDiv"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "Reshape"
+ input_arg {
+ name: "tensor"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "shape"
+ type_attr: "Tshape"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "Tshape"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "RestoreV2"
+ input_arg {
+ name: "prefix"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensor_names"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shape_and_slices"
+ type: DT_STRING
+ }
+ output_arg {
+ name: "tensors"
+ type_list_attr: "dtypes"
+ }
+ attr {
+ name: "dtypes"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ is_stateful: true
+ }
+ op {
+ name: "SaveV2"
+ input_arg {
+ name: "prefix"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensor_names"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shape_and_slices"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensors"
+ type_list_attr: "dtypes"
+ }
+ attr {
+ name: "dtypes"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ is_stateful: true
+ }
+ op {
+ name: "ScalarSummary"
+ input_arg {
+ name: "tags"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "values"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "summary"
+ type: DT_STRING
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_UINT8
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_HALF
+ }
+ }
+ }
+ }
+ op {
+ name: "Select"
+ input_arg {
+ name: "condition"
+ type: DT_BOOL
+ }
+ input_arg {
+ name: "t"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "e"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ }
+ op {
+ name: "Shape"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "out_type"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "out_type"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "ShardedFilename"
+ input_arg {
+ name: "basename"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shard"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "num_shards"
+ type: DT_INT32
+ }
+ output_arg {
+ name: "filename"
+ type: DT_STRING
+ }
+ }
+ op {
+ name: "SparseSoftmaxCrossEntropyWithLogits"
+ input_arg {
+ name: "features"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "labels"
+ type_attr: "Tlabels"
+ }
+ output_arg {
+ name: "loss"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "backprop"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ attr {
+ name: "Tlabels"
+ type: "type"
+ default_value {
+ type: DT_INT64
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "StringJoin"
+ input_arg {
+ name: "inputs"
+ type: DT_STRING
+ number_attr: "N"
+ }
+ output_arg {
+ name: "output"
+ type: DT_STRING
+ }
+ attr {
+ name: "N"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "separator"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ }
+ op {
+ name: "Sum"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "reduction_indices"
+ type_attr: "Tidx"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "keep_dims"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tidx"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "Tile"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "multiples"
+ type_attr: "Tmultiples"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "Tmultiples"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "TruncatedNormal"
+ input_arg {
+ name: "shape"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "dtype"
+ }
+ attr {
+ name: "seed"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ }
+ attr {
+ name: "seed2"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ is_stateful: true
+ }
+ op {
+ name: "VariableV2"
+ output_arg {
+ name: "ref"
+ type_attr: "dtype"
+ is_ref: true
+ }
+ attr {
+ name: "shape"
+ type: "shape"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ attr {
+ name: "container"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ attr {
+ name: "shared_name"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ is_stateful: true
+ }
+ op {
+ name: "ZerosLike"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ }
+ }
+ tags: "serve"
+ tensorflow_version: "1.4.1"
+ tensorflow_git_version: "v1.4.0-19-ga52c8d9"
+ }
+ graph_def {
+ node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "y"
+ op: "Placeholder"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT64
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/truncated_normal/shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ tensor_content: "\020\003\000\000,\001\000\000"
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/truncated_normal/mean"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/truncated_normal/stddev"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0714285746216774
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/truncated_normal/TruncatedNormal"
+ op: "TruncatedNormal"
+ input: "dnn/hidden1/truncated_normal/shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/truncated_normal/mul"
+ op: "Mul"
+ input: "dnn/hidden1/truncated_normal/TruncatedNormal"
+ input: "dnn/hidden1/truncated_normal/stddev"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/truncated_normal"
+ op: "Add"
+ input: "dnn/hidden1/truncated_normal/mul"
+ input: "dnn/hidden1/truncated_normal/mean"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/weights"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/weights/Assign"
+ op: "Assign"
+ input: "dnn/hidden1/weights"
+ input: "dnn/hidden1/truncated_normal"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden1/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/weights/read"
+ op: "Identity"
+ input: "dnn/hidden1/weights"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden1/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/zeros"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 300
+ }
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/bias"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/bias/Assign"
+ op: "Assign"
+ input: "dnn/hidden1/bias"
+ input: "dnn/hidden1/zeros"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden1/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/bias/read"
+ op: "Identity"
+ input: "dnn/hidden1/bias"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden1/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/MatMul"
+ op: "MatMul"
+ input: "input"
+ input: "dnn/hidden1/weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/add"
+ op: "Add"
+ input: "dnn/hidden1/MatMul"
+ input: "dnn/hidden1/bias/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/mul/x"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.009999999776482582
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/mul"
+ op: "Mul"
+ input: "dnn/hidden1/mul/x"
+ input: "dnn/hidden1/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden1/Maximum"
+ op: "Maximum"
+ input: "dnn/hidden1/mul"
+ input: "dnn/hidden1/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/truncated_normal/shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ tensor_content: ",\001\000\000d\000\000\000"
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/truncated_normal/mean"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/truncated_normal/stddev"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.1154700517654419
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/truncated_normal/TruncatedNormal"
+ op: "TruncatedNormal"
+ input: "dnn/hidden2/truncated_normal/shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/truncated_normal/mul"
+ op: "Mul"
+ input: "dnn/hidden2/truncated_normal/TruncatedNormal"
+ input: "dnn/hidden2/truncated_normal/stddev"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/truncated_normal"
+ op: "Add"
+ input: "dnn/hidden2/truncated_normal/mul"
+ input: "dnn/hidden2/truncated_normal/mean"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/weights"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/weights/Assign"
+ op: "Assign"
+ input: "dnn/hidden2/weights"
+ input: "dnn/hidden2/truncated_normal"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden2/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/weights/read"
+ op: "Identity"
+ input: "dnn/hidden2/weights"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden2/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/zeros"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 100
+ }
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/bias"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/bias/Assign"
+ op: "Assign"
+ input: "dnn/hidden2/bias"
+ input: "dnn/hidden2/zeros"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden2/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/bias/read"
+ op: "Identity"
+ input: "dnn/hidden2/bias"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden2/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/MatMul"
+ op: "MatMul"
+ input: "dnn/hidden1/Maximum"
+ input: "dnn/hidden2/weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/add"
+ op: "Add"
+ input: "dnn/hidden2/MatMul"
+ input: "dnn/hidden2/bias/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ float_val: 0.009999999776482582
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/mul"
+ op: "Mul"
+ input: "dnn/hidden2/Const"
+ input: "dnn/hidden2/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/hidden2/Maximum"
+ op: "Maximum"
+ input: "dnn/hidden2/mul"
+ input: "dnn/hidden2/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/truncated_normal/shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ tensor_content: "d\000\000\000\n\000\000\000"
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/truncated_normal/mean"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/truncated_normal/stddev"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.20000000298023224
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/truncated_normal/TruncatedNormal"
+ op: "TruncatedNormal"
+ input: "dnn/outputs/truncated_normal/shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/truncated_normal/mul"
+ op: "Mul"
+ input: "dnn/outputs/truncated_normal/TruncatedNormal"
+ input: "dnn/outputs/truncated_normal/stddev"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/truncated_normal"
+ op: "Add"
+ input: "dnn/outputs/truncated_normal/mul"
+ input: "dnn/outputs/truncated_normal/mean"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/weights"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/weights/Assign"
+ op: "Assign"
+ input: "dnn/outputs/weights"
+ input: "dnn/outputs/truncated_normal"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/outputs/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/weights/read"
+ op: "Identity"
+ input: "dnn/outputs/weights"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/outputs/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/zeros"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 10
+ }
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/bias"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/bias/Assign"
+ op: "Assign"
+ input: "dnn/outputs/bias"
+ input: "dnn/outputs/zeros"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/outputs/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/bias/read"
+ op: "Identity"
+ input: "dnn/outputs/bias"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/outputs/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/MatMul"
+ op: "MatMul"
+ input: "dnn/hidden2/Maximum"
+ input: "dnn/outputs/weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "dnn/outputs/add"
+ op: "Add"
+ input: "dnn/outputs/MatMul"
+ input: "dnn/outputs/bias/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "loss/SparseSoftmaxCrossEntropyWithLogits/Shape"
+ op: "Shape"
+ input: "y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT64
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"
+ op: "SparseSoftmaxCrossEntropyWithLogits"
+ input: "dnn/outputs/add"
+ input: "y"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tlabels"
+ value {
+ type: DT_INT64
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "loss/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "loss/loss"
+ op: "Mean"
+ input: "loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"
+ input: "loss/Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/Shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 1.0
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/Fill"
+ op: "Fill"
+ input: "train/gradients/Shape"
+ input: "train/gradients/Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Reshape/shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Reshape"
+ op: "Reshape"
+ input: "train/gradients/Fill"
+ input: "train/gradients/loss/loss_grad/Reshape/shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Shape"
+ op: "Shape"
+ input: "loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Tile"
+ op: "Tile"
+ input: "train/gradients/loss/loss_grad/Reshape"
+ input: "train/gradients/loss/loss_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tmultiples"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Shape_1"
+ op: "Shape"
+ input: "loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Shape_2"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Const"
+ op: "Const"
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/loss/loss_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Prod"
+ op: "Prod"
+ input: "train/gradients/loss/loss_grad/Shape_1"
+ input: "train/gradients/loss/loss_grad/Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/loss/loss_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Const_1"
+ op: "Const"
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/loss/loss_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Prod_1"
+ op: "Prod"
+ input: "train/gradients/loss/loss_grad/Shape_2"
+ input: "train/gradients/loss/loss_grad/Const_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/loss/loss_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Maximum/y"
+ op: "Const"
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/loss/loss_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Maximum"
+ op: "Maximum"
+ input: "train/gradients/loss/loss_grad/Prod_1"
+ input: "train/gradients/loss/loss_grad/Maximum/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/loss/loss_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/floordiv"
+ op: "FloorDiv"
+ input: "train/gradients/loss/loss_grad/Prod"
+ input: "train/gradients/loss/loss_grad/Maximum"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/loss/loss_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/Cast"
+ op: "Cast"
+ input: "train/gradients/loss/loss_grad/floordiv"
+ attr {
+ key: "DstT"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "SrcT"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/loss_grad/truediv"
+ op: "RealDiv"
+ input: "train/gradients/loss/loss_grad/Tile"
+ input: "train/gradients/loss/loss_grad/Cast"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/zeros_like"
+ op: "ZerosLike"
+ input: "loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/PreventGradient"
+ op: "PreventGradient"
+ input: "loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "message"
+ value {
+ s: "Currently there is no way to take the second derivative of sparse_softmax_cross_entropy_with_logits due to the fused implementation\'s interaction with tf.gradients()"
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/ExpandDims/dim"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: -1
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/ExpandDims"
+ op: "ExpandDims"
+ input: "train/gradients/loss/loss_grad/truediv"
+ input: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/ExpandDims/dim"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tdim"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/mul"
+ op: "Mul"
+ input: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/ExpandDims"
+ input: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/PreventGradient"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/Shape"
+ op: "Shape"
+ input: "dnn/outputs/MatMul"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/Shape_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 10
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/BroadcastGradientArgs"
+ op: "BroadcastGradientArgs"
+ input: "train/gradients/dnn/outputs/add_grad/Shape"
+ input: "train/gradients/dnn/outputs/add_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/Sum"
+ op: "Sum"
+ input: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/mul"
+ input: "train/gradients/dnn/outputs/add_grad/BroadcastGradientArgs"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/Reshape"
+ op: "Reshape"
+ input: "train/gradients/dnn/outputs/add_grad/Sum"
+ input: "train/gradients/dnn/outputs/add_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/Sum_1"
+ op: "Sum"
+ input: "train/gradients/loss/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits_grad/mul"
+ input: "train/gradients/dnn/outputs/add_grad/BroadcastGradientArgs:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/Reshape_1"
+ op: "Reshape"
+ input: "train/gradients/dnn/outputs/add_grad/Sum_1"
+ input: "train/gradients/dnn/outputs/add_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/outputs/add_grad/Reshape"
+ input: "^train/gradients/dnn/outputs/add_grad/Reshape_1"
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/outputs/add_grad/Reshape"
+ input: "^train/gradients/dnn/outputs/add_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/outputs/add_grad/Reshape"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/add_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/outputs/add_grad/Reshape_1"
+ input: "^train/gradients/dnn/outputs/add_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/outputs/add_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/MatMul_grad/MatMul"
+ op: "MatMul"
+ input: "train/gradients/dnn/outputs/add_grad/tuple/control_dependency"
+ input: "dnn/outputs/weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/MatMul_grad/MatMul_1"
+ op: "MatMul"
+ input: "dnn/hidden2/Maximum"
+ input: "train/gradients/dnn/outputs/add_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/MatMul_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/outputs/MatMul_grad/MatMul"
+ input: "^train/gradients/dnn/outputs/MatMul_grad/MatMul_1"
+ }
+ node {
+ name: "train/gradients/dnn/outputs/MatMul_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/outputs/MatMul_grad/MatMul"
+ input: "^train/gradients/dnn/outputs/MatMul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/outputs/MatMul_grad/MatMul"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/outputs/MatMul_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/outputs/MatMul_grad/MatMul_1"
+ input: "^train/gradients/dnn/outputs/MatMul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/outputs/MatMul_grad/MatMul_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Shape"
+ op: "Shape"
+ input: "dnn/hidden2/mul"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Shape_1"
+ op: "Shape"
+ input: "dnn/hidden2/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Shape_2"
+ op: "Shape"
+ input: "train/gradients/dnn/outputs/MatMul_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/zeros/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/zeros"
+ op: "Fill"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Shape_2"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/zeros/Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/GreaterEqual"
+ op: "GreaterEqual"
+ input: "dnn/hidden2/mul"
+ input: "dnn/hidden2/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/BroadcastGradientArgs"
+ op: "BroadcastGradientArgs"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Shape"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Select"
+ op: "Select"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/GreaterEqual"
+ input: "train/gradients/dnn/outputs/MatMul_grad/tuple/control_dependency"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/zeros"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Select_1"
+ op: "Select"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/GreaterEqual"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/zeros"
+ input: "train/gradients/dnn/outputs/MatMul_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Sum"
+ op: "Sum"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Select"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/BroadcastGradientArgs"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Reshape"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Sum"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Sum_1"
+ op: "Sum"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Select_1"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/BroadcastGradientArgs:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/Reshape_1"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Sum_1"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/hidden2/Maximum_grad/Reshape"
+ input: "^train/gradients/dnn/hidden2/Maximum_grad/Reshape_1"
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Reshape"
+ input: "^train/gradients/dnn/hidden2/Maximum_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/Maximum_grad/Reshape"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/Maximum_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/Reshape_1"
+ input: "^train/gradients/dnn/hidden2/Maximum_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/Maximum_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/Shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/Shape_1"
+ op: "Shape"
+ input: "dnn/hidden2/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/BroadcastGradientArgs"
+ op: "BroadcastGradientArgs"
+ input: "train/gradients/dnn/hidden2/mul_grad/Shape"
+ input: "train/gradients/dnn/hidden2/mul_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/mul"
+ op: "Mul"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/tuple/control_dependency"
+ input: "dnn/hidden2/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/Sum"
+ op: "Sum"
+ input: "train/gradients/dnn/hidden2/mul_grad/mul"
+ input: "train/gradients/dnn/hidden2/mul_grad/BroadcastGradientArgs"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/Reshape"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden2/mul_grad/Sum"
+ input: "train/gradients/dnn/hidden2/mul_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/mul_1"
+ op: "Mul"
+ input: "dnn/hidden2/Const"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/Sum_1"
+ op: "Sum"
+ input: "train/gradients/dnn/hidden2/mul_grad/mul_1"
+ input: "train/gradients/dnn/hidden2/mul_grad/BroadcastGradientArgs:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/Reshape_1"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden2/mul_grad/Sum_1"
+ input: "train/gradients/dnn/hidden2/mul_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/hidden2/mul_grad/Reshape"
+ input: "^train/gradients/dnn/hidden2/mul_grad/Reshape_1"
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden2/mul_grad/Reshape"
+ input: "^train/gradients/dnn/hidden2/mul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/mul_grad/Reshape"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/mul_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden2/mul_grad/Reshape_1"
+ input: "^train/gradients/dnn/hidden2/mul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/mul_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/AddN"
+ op: "AddN"
+ input: "train/gradients/dnn/hidden2/Maximum_grad/tuple/control_dependency_1"
+ input: "train/gradients/dnn/hidden2/mul_grad/tuple/control_dependency_1"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/Maximum_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/Shape"
+ op: "Shape"
+ input: "dnn/hidden2/MatMul"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/Shape_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 100
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/BroadcastGradientArgs"
+ op: "BroadcastGradientArgs"
+ input: "train/gradients/dnn/hidden2/add_grad/Shape"
+ input: "train/gradients/dnn/hidden2/add_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/Sum"
+ op: "Sum"
+ input: "train/gradients/AddN"
+ input: "train/gradients/dnn/hidden2/add_grad/BroadcastGradientArgs"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/Reshape"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden2/add_grad/Sum"
+ input: "train/gradients/dnn/hidden2/add_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/Sum_1"
+ op: "Sum"
+ input: "train/gradients/AddN"
+ input: "train/gradients/dnn/hidden2/add_grad/BroadcastGradientArgs:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/Reshape_1"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden2/add_grad/Sum_1"
+ input: "train/gradients/dnn/hidden2/add_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/hidden2/add_grad/Reshape"
+ input: "^train/gradients/dnn/hidden2/add_grad/Reshape_1"
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden2/add_grad/Reshape"
+ input: "^train/gradients/dnn/hidden2/add_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/add_grad/Reshape"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/add_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden2/add_grad/Reshape_1"
+ input: "^train/gradients/dnn/hidden2/add_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/add_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/MatMul_grad/MatMul"
+ op: "MatMul"
+ input: "train/gradients/dnn/hidden2/add_grad/tuple/control_dependency"
+ input: "dnn/hidden2/weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/MatMul_grad/MatMul_1"
+ op: "MatMul"
+ input: "dnn/hidden1/Maximum"
+ input: "train/gradients/dnn/hidden2/add_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/MatMul_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/hidden2/MatMul_grad/MatMul"
+ input: "^train/gradients/dnn/hidden2/MatMul_grad/MatMul_1"
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/MatMul_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden2/MatMul_grad/MatMul"
+ input: "^train/gradients/dnn/hidden2/MatMul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/MatMul_grad/MatMul"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden2/MatMul_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden2/MatMul_grad/MatMul_1"
+ input: "^train/gradients/dnn/hidden2/MatMul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden2/MatMul_grad/MatMul_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Shape"
+ op: "Shape"
+ input: "dnn/hidden1/mul"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Shape_1"
+ op: "Shape"
+ input: "dnn/hidden1/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Shape_2"
+ op: "Shape"
+ input: "train/gradients/dnn/hidden2/MatMul_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/zeros/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/zeros"
+ op: "Fill"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Shape_2"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/zeros/Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/GreaterEqual"
+ op: "GreaterEqual"
+ input: "dnn/hidden1/mul"
+ input: "dnn/hidden1/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/BroadcastGradientArgs"
+ op: "BroadcastGradientArgs"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Shape"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Select"
+ op: "Select"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/GreaterEqual"
+ input: "train/gradients/dnn/hidden2/MatMul_grad/tuple/control_dependency"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/zeros"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Select_1"
+ op: "Select"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/GreaterEqual"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/zeros"
+ input: "train/gradients/dnn/hidden2/MatMul_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Sum"
+ op: "Sum"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Select"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/BroadcastGradientArgs"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Reshape"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Sum"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Sum_1"
+ op: "Sum"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Select_1"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/BroadcastGradientArgs:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/Reshape_1"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Sum_1"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/hidden1/Maximum_grad/Reshape"
+ input: "^train/gradients/dnn/hidden1/Maximum_grad/Reshape_1"
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Reshape"
+ input: "^train/gradients/dnn/hidden1/Maximum_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/Maximum_grad/Reshape"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/Maximum_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/Reshape_1"
+ input: "^train/gradients/dnn/hidden1/Maximum_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/Maximum_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/Shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/Shape_1"
+ op: "Shape"
+ input: "dnn/hidden1/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/BroadcastGradientArgs"
+ op: "BroadcastGradientArgs"
+ input: "train/gradients/dnn/hidden1/mul_grad/Shape"
+ input: "train/gradients/dnn/hidden1/mul_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/mul"
+ op: "Mul"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/tuple/control_dependency"
+ input: "dnn/hidden1/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/Sum"
+ op: "Sum"
+ input: "train/gradients/dnn/hidden1/mul_grad/mul"
+ input: "train/gradients/dnn/hidden1/mul_grad/BroadcastGradientArgs"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/Reshape"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden1/mul_grad/Sum"
+ input: "train/gradients/dnn/hidden1/mul_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/mul_1"
+ op: "Mul"
+ input: "dnn/hidden1/mul/x"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/Sum_1"
+ op: "Sum"
+ input: "train/gradients/dnn/hidden1/mul_grad/mul_1"
+ input: "train/gradients/dnn/hidden1/mul_grad/BroadcastGradientArgs:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/Reshape_1"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden1/mul_grad/Sum_1"
+ input: "train/gradients/dnn/hidden1/mul_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/hidden1/mul_grad/Reshape"
+ input: "^train/gradients/dnn/hidden1/mul_grad/Reshape_1"
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden1/mul_grad/Reshape"
+ input: "^train/gradients/dnn/hidden1/mul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/mul_grad/Reshape"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/mul_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden1/mul_grad/Reshape_1"
+ input: "^train/gradients/dnn/hidden1/mul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/mul_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/AddN_1"
+ op: "AddN"
+ input: "train/gradients/dnn/hidden1/Maximum_grad/tuple/control_dependency_1"
+ input: "train/gradients/dnn/hidden1/mul_grad/tuple/control_dependency_1"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/Maximum_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/Shape"
+ op: "Shape"
+ input: "dnn/hidden1/MatMul"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/Shape_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 300
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/BroadcastGradientArgs"
+ op: "BroadcastGradientArgs"
+ input: "train/gradients/dnn/hidden1/add_grad/Shape"
+ input: "train/gradients/dnn/hidden1/add_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/Sum"
+ op: "Sum"
+ input: "train/gradients/AddN_1"
+ input: "train/gradients/dnn/hidden1/add_grad/BroadcastGradientArgs"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/Reshape"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden1/add_grad/Sum"
+ input: "train/gradients/dnn/hidden1/add_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/Sum_1"
+ op: "Sum"
+ input: "train/gradients/AddN_1"
+ input: "train/gradients/dnn/hidden1/add_grad/BroadcastGradientArgs:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/Reshape_1"
+ op: "Reshape"
+ input: "train/gradients/dnn/hidden1/add_grad/Sum_1"
+ input: "train/gradients/dnn/hidden1/add_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/hidden1/add_grad/Reshape"
+ input: "^train/gradients/dnn/hidden1/add_grad/Reshape_1"
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden1/add_grad/Reshape"
+ input: "^train/gradients/dnn/hidden1/add_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/add_grad/Reshape"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/add_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden1/add_grad/Reshape_1"
+ input: "^train/gradients/dnn/hidden1/add_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/add_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/MatMul_grad/MatMul"
+ op: "MatMul"
+ input: "train/gradients/dnn/hidden1/add_grad/tuple/control_dependency"
+ input: "dnn/hidden1/weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/MatMul_grad/MatMul_1"
+ op: "MatMul"
+ input: "input"
+ input: "train/gradients/dnn/hidden1/add_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/MatMul_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^train/gradients/dnn/hidden1/MatMul_grad/MatMul"
+ input: "^train/gradients/dnn/hidden1/MatMul_grad/MatMul_1"
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/MatMul_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden1/MatMul_grad/MatMul"
+ input: "^train/gradients/dnn/hidden1/MatMul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/MatMul_grad/MatMul"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/gradients/dnn/hidden1/MatMul_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "train/gradients/dnn/hidden1/MatMul_grad/MatMul_1"
+ input: "^train/gradients/dnn/hidden1/MatMul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@train/gradients/dnn/hidden1/MatMul_grad/MatMul_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "train/GradientDescent/learning_rate"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.009999999776482582
+ }
+ }
+ }
+ }
+ node {
+ name: "train/GradientDescent/update_dnn/hidden1/weights/ApplyGradientDescent"
+ op: "ApplyGradientDescent"
+ input: "dnn/hidden1/weights"
+ input: "train/GradientDescent/learning_rate"
+ input: "train/gradients/dnn/hidden1/MatMul_grad/tuple/control_dependency_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden1/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/GradientDescent/update_dnn/hidden1/bias/ApplyGradientDescent"
+ op: "ApplyGradientDescent"
+ input: "dnn/hidden1/bias"
+ input: "train/GradientDescent/learning_rate"
+ input: "train/gradients/dnn/hidden1/add_grad/tuple/control_dependency_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden1/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/GradientDescent/update_dnn/hidden2/weights/ApplyGradientDescent"
+ op: "ApplyGradientDescent"
+ input: "dnn/hidden2/weights"
+ input: "train/GradientDescent/learning_rate"
+ input: "train/gradients/dnn/hidden2/MatMul_grad/tuple/control_dependency_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden2/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/GradientDescent/update_dnn/hidden2/bias/ApplyGradientDescent"
+ op: "ApplyGradientDescent"
+ input: "dnn/hidden2/bias"
+ input: "train/GradientDescent/learning_rate"
+ input: "train/gradients/dnn/hidden2/add_grad/tuple/control_dependency_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden2/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/GradientDescent/update_dnn/outputs/weights/ApplyGradientDescent"
+ op: "ApplyGradientDescent"
+ input: "dnn/outputs/weights"
+ input: "train/GradientDescent/learning_rate"
+ input: "train/gradients/dnn/outputs/MatMul_grad/tuple/control_dependency_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/outputs/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/GradientDescent/update_dnn/outputs/bias/ApplyGradientDescent"
+ op: "ApplyGradientDescent"
+ input: "dnn/outputs/bias"
+ input: "train/GradientDescent/learning_rate"
+ input: "train/gradients/dnn/outputs/add_grad/tuple/control_dependency_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/outputs/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "train/GradientDescent"
+ op: "NoOp"
+ input: "^train/GradientDescent/update_dnn/hidden1/weights/ApplyGradientDescent"
+ input: "^train/GradientDescent/update_dnn/hidden1/bias/ApplyGradientDescent"
+ input: "^train/GradientDescent/update_dnn/hidden2/weights/ApplyGradientDescent"
+ input: "^train/GradientDescent/update_dnn/hidden2/bias/ApplyGradientDescent"
+ input: "^train/GradientDescent/update_dnn/outputs/weights/ApplyGradientDescent"
+ input: "^train/GradientDescent/update_dnn/outputs/bias/ApplyGradientDescent"
+ }
+ node {
+ name: "eval/in_top_k/InTopKV2/k"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT64
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT64
+ tensor_shape {
+ }
+ int64_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "eval/in_top_k/InTopKV2"
+ op: "InTopKV2"
+ input: "dnn/outputs/add"
+ input: "y"
+ input: "eval/in_top_k/InTopKV2/k"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT64
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "eval/Cast"
+ op: "Cast"
+ input: "eval/in_top_k/InTopKV2"
+ attr {
+ key: "DstT"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "SrcT"
+ value {
+ type: DT_BOOL
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "eval/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "eval/Mean"
+ op: "Mean"
+ input: "eval/Cast"
+ input: "eval/Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "init"
+ op: "NoOp"
+ input: "^dnn/hidden1/weights/Assign"
+ input: "^dnn/hidden1/bias/Assign"
+ input: "^dnn/hidden2/weights/Assign"
+ input: "^dnn/hidden2/bias/Assign"
+ input: "^dnn/outputs/weights/Assign"
+ input: "^dnn/outputs/bias/Assign"
+ }
+ node {
+ name: "Accuracy/tags"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ }
+ string_val: "Accuracy"
+ }
+ }
+ }
+ }
+ node {
+ name: "Accuracy"
+ op: "ScalarSummary"
+ input: "Accuracy/tags"
+ input: "eval/Mean"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ }
+ string_val: "model"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/StringJoin/inputs_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ }
+ string_val: "_temp_af8f34e037d9459f96200d29e33a7078/part"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/StringJoin"
+ op: "StringJoin"
+ input: "save/Const"
+ input: "save/StringJoin/inputs_1"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "separator"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "save/num_shards"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "save/ShardedFilename/shard"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "save/ShardedFilename"
+ op: "ShardedFilename"
+ input: "save/StringJoin"
+ input: "save/ShardedFilename/shard"
+ input: "save/num_shards"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 6
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 6
+ }
+ }
+ string_val: "dnn/hidden1/bias"
+ string_val: "dnn/hidden1/weights"
+ string_val: "dnn/hidden2/bias"
+ string_val: "dnn/hidden2/weights"
+ string_val: "dnn/outputs/bias"
+ string_val: "dnn/outputs/weights"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 6
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 6
+ }
+ }
+ string_val: ""
+ string_val: ""
+ string_val: ""
+ string_val: ""
+ string_val: ""
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2"
+ op: "SaveV2"
+ input: "save/ShardedFilename"
+ input: "save/SaveV2/tensor_names"
+ input: "save/SaveV2/shape_and_slices"
+ input: "dnn/hidden1/bias"
+ input: "dnn/hidden1/weights"
+ input: "dnn/hidden2/bias"
+ input: "dnn/hidden2/weights"
+ input: "dnn/outputs/bias"
+ input: "dnn/outputs/weights"
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ type: DT_FLOAT
+ type: DT_FLOAT
+ type: DT_FLOAT
+ type: DT_FLOAT
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/control_dependency"
+ op: "Identity"
+ input: "save/ShardedFilename"
+ input: "^save/SaveV2"
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@save/ShardedFilename"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/MergeV2Checkpoints/checkpoint_prefixes"
+ op: "Pack"
+ input: "save/ShardedFilename"
+ input: "^save/control_dependency"
+ attr {
+ key: "N"
+ value {
+ i: 1
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "axis"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "save/MergeV2Checkpoints"
+ op: "MergeV2Checkpoints"
+ input: "save/MergeV2Checkpoints/checkpoint_prefixes"
+ input: "save/Const"
+ attr {
+ key: "delete_old_dirs"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/Identity"
+ op: "Identity"
+ input: "save/Const"
+ input: "^save/control_dependency"
+ input: "^save/MergeV2Checkpoints"
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: "dnn/hidden1/bias"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2/tensor_names"
+ input: "save/RestoreV2/shape_and_slices"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign"
+ op: "Assign"
+ input: "dnn/hidden1/bias"
+ input: "save/RestoreV2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden1/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_1/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: "dnn/hidden1/weights"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_1/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_1"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2_1/tensor_names"
+ input: "save/RestoreV2_1/shape_and_slices"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign_1"
+ op: "Assign"
+ input: "dnn/hidden1/weights"
+ input: "save/RestoreV2_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden1/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 300
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_2/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: "dnn/hidden2/bias"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_2/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_2"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2_2/tensor_names"
+ input: "save/RestoreV2_2/shape_and_slices"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign_2"
+ op: "Assign"
+ input: "dnn/hidden2/bias"
+ input: "save/RestoreV2_2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden2/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_3/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: "dnn/hidden2/weights"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_3/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_3"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2_3/tensor_names"
+ input: "save/RestoreV2_3/shape_and_slices"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign_3"
+ op: "Assign"
+ input: "dnn/hidden2/weights"
+ input: "save/RestoreV2_3"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/hidden2/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 300
+ }
+ dim {
+ size: 100
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_4/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: "dnn/outputs/bias"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_4/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_4"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2_4/tensor_names"
+ input: "save/RestoreV2_4/shape_and_slices"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign_4"
+ op: "Assign"
+ input: "dnn/outputs/bias"
+ input: "save/RestoreV2_4"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/outputs/bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_5/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: "dnn/outputs/weights"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_5/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_5"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2_5/tensor_names"
+ input: "save/RestoreV2_5/shape_and_slices"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign_5"
+ op: "Assign"
+ input: "dnn/outputs/weights"
+ input: "save/RestoreV2_5"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@dnn/outputs/weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 100
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/restore_shard"
+ op: "NoOp"
+ input: "^save/Assign"
+ input: "^save/Assign_1"
+ input: "^save/Assign_2"
+ input: "^save/Assign_3"
+ input: "^save/Assign_4"
+ input: "^save/Assign_5"
+ }
+ node {
+ name: "save/restore_all"
+ op: "NoOp"
+ input: "^save/restore_shard"
+ }
+ versions {
+ producer: 24
+ }
+ }
+ saver_def {
+ filename_tensor_name: "save/Const:0"
+ save_tensor_name: "save/Identity:0"
+ restore_op_name: "save/restore_all"
+ max_to_keep: 5
+ sharded: true
+ keep_checkpoint_every_n_hours: 10000.0
+ version: V2
+ }
+ collection_def {
+ key: "summaries"
+ value {
+ node_list {
+ value: "Accuracy:0"
+ }
+ }
+ }
+ collection_def {
+ key: "train_op"
+ value {
+ node_list {
+ value: "train/GradientDescent"
+ }
+ }
+ }
+ collection_def {
+ key: "trainable_variables"
+ value {
+ bytes_list {
+ value: "\n\025dnn/hidden1/weights:0\022\032dnn/hidden1/weights/Assign\032\032dnn/hidden1/weights/read:02\036dnn/hidden1/truncated_normal:0"
+ value: "\n\022dnn/hidden1/bias:0\022\027dnn/hidden1/bias/Assign\032\027dnn/hidden1/bias/read:02\023dnn/hidden1/zeros:0"
+ value: "\n\025dnn/hidden2/weights:0\022\032dnn/hidden2/weights/Assign\032\032dnn/hidden2/weights/read:02\036dnn/hidden2/truncated_normal:0"
+ value: "\n\022dnn/hidden2/bias:0\022\027dnn/hidden2/bias/Assign\032\027dnn/hidden2/bias/read:02\023dnn/hidden2/zeros:0"
+ value: "\n\025dnn/outputs/weights:0\022\032dnn/outputs/weights/Assign\032\032dnn/outputs/weights/read:02\036dnn/outputs/truncated_normal:0"
+ value: "\n\022dnn/outputs/bias:0\022\027dnn/outputs/bias/Assign\032\027dnn/outputs/bias/read:02\023dnn/outputs/zeros:0"
+ }
+ }
+ }
+ collection_def {
+ key: "variables"
+ value {
+ bytes_list {
+ value: "\n\025dnn/hidden1/weights:0\022\032dnn/hidden1/weights/Assign\032\032dnn/hidden1/weights/read:02\036dnn/hidden1/truncated_normal:0"
+ value: "\n\022dnn/hidden1/bias:0\022\027dnn/hidden1/bias/Assign\032\027dnn/hidden1/bias/read:02\023dnn/hidden1/zeros:0"
+ value: "\n\025dnn/hidden2/weights:0\022\032dnn/hidden2/weights/Assign\032\032dnn/hidden2/weights/read:02\036dnn/hidden2/truncated_normal:0"
+ value: "\n\022dnn/hidden2/bias:0\022\027dnn/hidden2/bias/Assign\032\027dnn/hidden2/bias/read:02\023dnn/hidden2/zeros:0"
+ value: "\n\025dnn/outputs/weights:0\022\032dnn/outputs/weights/Assign\032\032dnn/outputs/weights/read:02\036dnn/outputs/truncated_normal:0"
+ value: "\n\022dnn/outputs/bias:0\022\027dnn/outputs/bias/Assign\032\027dnn/outputs/bias/read:02\023dnn/outputs/zeros:0"
+ }
+ }
+ }
+ signature_def {
+ key: "serving_default"
+ value {
+ inputs {
+ key: "x"
+ value {
+ name: "input:0"
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ outputs {
+ key: "y"
+ value {
+ name: "dnn/outputs/add:0"
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ method_name: "tensorflow/serving/predict"
+ }
+ }
+}
diff --git a/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/variables/variables.data-00000-of-00001 b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/variables/variables.data-00000-of-00001
new file mode 100644
index 00000000000..ed4af6c0f8c
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/variables/variables.data-00000-of-00001
Binary files differ
diff --git a/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/variables/variables.index b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/variables/variables.index
new file mode 100644
index 00000000000..c877b02b42a
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/saved/variables/variables.index
Binary files differ
diff --git a/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/simple_mnist.py b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/simple_mnist.py
new file mode 100644
index 00000000000..7494e93fa71
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist/simple_mnist.py
@@ -0,0 +1,100 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+# Common imports
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.examples.tutorials.mnist import input_data
+from datetime import datetime
+
+now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
+root_logdir = "tf_logs"
+logdir = "{}/run-{}/".format(root_logdir, now)
+
+mnist = input_data.read_data_sets("/tmp/data/")
+X_train = mnist.train.images
+X_test = mnist.test.images
+y_train = mnist.train.labels.astype("int")
+y_test = mnist.test.labels.astype("int")
+
+n_inputs = 28*28 # MNIST
+n_hidden1 = 300
+n_hidden2 = 100
+n_hidden3 = 40
+n_outputs = 10
+
+learning_rate = 0.01
+n_epochs = 20
+batch_size = 50
+
+input = tf.placeholder(tf.float32, shape=(None, n_inputs), name="input")
+y = tf.placeholder(tf.int64, shape=(None), name="y")
+
+
+def neuron_layer(X, n_neurons, name, activation=None):
+ with tf.name_scope(name):
+ n_inputs = int(X.get_shape()[1])
+ stddev = 2 / np.sqrt(n_inputs)
+ init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
+ W = tf.Variable(init, name="weights")
+ b = tf.Variable(tf.zeros([n_neurons]), name="bias")
+ Z = tf.matmul(X, W) + b
+ if activation is not None:
+ return activation(Z)
+ else:
+ return Z
+
+
+def leaky_relu(z, name=None):
+ return tf.maximum(0.01 * z, z, name=name)
+
+def leaky_relu_with_small_constant(z, name=None):
+ return tf.maximum(tf.constant(0.01, shape=[1]) * z, z, name=name)
+
+with tf.name_scope("dnn"):
+ hidden1 = neuron_layer(input, n_hidden1, name="hidden1", activation=leaky_relu)
+ hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2", activation=leaky_relu_with_small_constant)
+ logits = neuron_layer(hidden2, n_outputs, name="outputs") #, activation=tf.nn.sigmoid)
+
+with tf.name_scope("loss"):
+ xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
+ loss = tf.reduce_mean(xentropy, name="loss")
+
+with tf.name_scope("train"):
+ optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+ training_op = optimizer.minimize(loss)
+
+with tf.name_scope("eval"):
+ correct = tf.nn.in_top_k(logits, y, 1)
+ accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
+
+init = tf.global_variables_initializer()
+accuracy_summary = tf.summary.scalar('Accuracy', accuracy)
+file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
+
+with tf.Session() as sess:
+ init.run()
+ for epoch in range(n_epochs):
+ for iteration in range(mnist.train.num_examples // batch_size):
+ X_batch, y_batch = mnist.train.next_batch(batch_size)
+ sess.run(training_op, feed_dict={input: X_batch, y: y_batch})
+ acc_train = accuracy.eval(feed_dict={input: X_batch, y: y_batch})
+ acc_val = accuracy.eval(feed_dict={input: mnist.validation.images,
+ y: mnist.validation.labels})
+ print(epoch, "Train accuracy:", acc_train, "Val accuracy:", acc_val)
+
+ # Save summary for tensorboard
+ summary_str = accuracy_summary.eval(feed_dict={input: mnist.validation.images,
+ y: mnist.validation.labels})
+ file_writer.add_summary(summary_str, epoch)
+
+ export_path = "saved"
+ print('Exporting trained model to ', export_path)
+ builder = tf.saved_model.builder.SavedModelBuilder(export_path)
+ signature = tf.saved_model.signature_def_utils.predict_signature_def(inputs = {'x':input}, outputs = {'y':logits})
+ builder.add_meta_graph_and_variables(sess,
+ [tf.saved_model.tag_constants.SERVING],
+ signature_def_map={'serving_default':signature})
+ builder.save(as_text=True)
+
+file_writer.close()
diff --git a/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/mnist_sftmax_with_saving.py b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/mnist_sftmax_with_saving.py
new file mode 100644
index 00000000000..3f4f794d2ac
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/mnist_sftmax_with_saving.py
@@ -0,0 +1,93 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""A very simple MNIST classifier.
+
+See extensive documentation at
+https://www.tensorflow.org/get_started/mnist/beginners
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import sys
+
+from tensorflow.examples.tutorials.mnist import input_data
+
+import tensorflow as tf
+
+FLAGS = None
+
+
+def main(_):
+ # Import data
+ mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
+
+ # Create the model
+ x = tf.placeholder(tf.float32, [None, 784])
+
+ with tf.name_scope("layer"):
+ W = tf.Variable(tf.zeros([784, 10]))
+ b = tf.Variable(tf.zeros([10]))
+ y = tf.matmul(x, W) + b
+
+
+ # Define loss and optimizer
+ y_ = tf.placeholder(tf.float32, [None, 10])
+
+ # The raw formulation of cross-entropy,
+ #
+ # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
+ # reduction_indices=[1]))
+ #
+ # can be numerically unstable.
+ #
+ # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
+ # outputs of 'y', and then average across the batch.
+ cross_entropy = tf.reduce_mean(
+ tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
+ train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
+
+ sess = tf.InteractiveSession()
+ tf.global_variables_initializer().run()
+ # Train
+ for _ in range(1000):
+ batch_xs, batch_ys = mnist.train.next_batch(100)
+ sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
+
+ # Test trained model
+ correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
+ accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+ print(sess.run(accuracy, feed_dict={x: mnist.test.images,
+ y_: mnist.test.labels}))
+
+ # Save the model
+ export_path = "saved"
+ print('Exporting trained model to ', export_path)
+ builder = tf.saved_model.builder.SavedModelBuilder(export_path)
+ signature = tf.saved_model.signature_def_utils.predict_signature_def(inputs = {'x':x}, outputs = {'y':y})
+ builder.add_meta_graph_and_variables(sess,
+ [tf.saved_model.tag_constants.SERVING],
+ signature_def_map={'serving_default':signature})
+ builder.save(as_text=True)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
+ help='Directory for storing input data')
+ FLAGS, unparsed = parser.parse_known_args()
+ tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
diff --git a/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/saved_model.pbtxt b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/saved_model.pbtxt
new file mode 100644
index 00000000000..05b0e4e0f29
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/saved_model.pbtxt
@@ -0,0 +1,5039 @@
+saved_model_schema_version: 1
+meta_graphs {
+ meta_info_def {
+ stripped_op_list {
+ op {
+ name: "Add"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_STRING
+ }
+ }
+ }
+ }
+ op {
+ name: "ApplyGradientDescent"
+ input_arg {
+ name: "var"
+ type_attr: "T"
+ is_ref: true
+ }
+ input_arg {
+ name: "alpha"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "delta"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "out"
+ type_attr: "T"
+ is_ref: true
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "use_locking"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ }
+ op {
+ name: "ArgMax"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "dimension"
+ type_attr: "Tidx"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "output_type"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tidx"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ attr {
+ name: "output_type"
+ type: "type"
+ default_value {
+ type: DT_INT64
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "Assign"
+ input_arg {
+ name: "ref"
+ type_attr: "T"
+ is_ref: true
+ }
+ input_arg {
+ name: "value"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output_ref"
+ type_attr: "T"
+ is_ref: true
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "validate_shape"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ attr {
+ name: "use_locking"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ allows_uninitialized_input: true
+ }
+ op {
+ name: "BroadcastGradientArgs"
+ input_arg {
+ name: "s0"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "s1"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "r0"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "r1"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "Cast"
+ input_arg {
+ name: "x"
+ type_attr: "SrcT"
+ }
+ output_arg {
+ name: "y"
+ type_attr: "DstT"
+ }
+ attr {
+ name: "SrcT"
+ type: "type"
+ }
+ attr {
+ name: "DstT"
+ type: "type"
+ }
+ }
+ op {
+ name: "ConcatV2"
+ input_arg {
+ name: "values"
+ type_attr: "T"
+ number_attr: "N"
+ }
+ input_arg {
+ name: "axis"
+ type_attr: "Tidx"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "N"
+ type: "int"
+ has_minimum: true
+ minimum: 2
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "Tidx"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "Const"
+ output_arg {
+ name: "output"
+ type_attr: "dtype"
+ }
+ attr {
+ name: "value"
+ type: "tensor"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ }
+ op {
+ name: "Equal"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type: DT_BOOL
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_QUINT8
+ type: DT_QINT8
+ type: DT_QINT32
+ type: DT_STRING
+ type: DT_BOOL
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ is_commutative: true
+ }
+ op {
+ name: "ExpandDims"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "dim"
+ type_attr: "Tdim"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "Tdim"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "Fill"
+ input_arg {
+ name: "dims"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "value"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ }
+ op {
+ name: "FloorDiv"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "Identity"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ }
+ op {
+ name: "MatMul"
+ input_arg {
+ name: "a"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "b"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "product"
+ type_attr: "T"
+ }
+ attr {
+ name: "transpose_a"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "transpose_b"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "Maximum"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ is_commutative: true
+ }
+ op {
+ name: "Mean"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "reduction_indices"
+ type_attr: "Tidx"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "keep_dims"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tidx"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "MergeV2Checkpoints"
+ input_arg {
+ name: "checkpoint_prefixes"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "destination_prefix"
+ type: DT_STRING
+ }
+ attr {
+ name: "delete_old_dirs"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ is_stateful: true
+ }
+ op {
+ name: "Mul"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ is_commutative: true
+ }
+ op {
+ name: "NoOp"
+ }
+ op {
+ name: "Pack"
+ input_arg {
+ name: "values"
+ type_attr: "T"
+ number_attr: "N"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "N"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "axis"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ }
+ }
+ op {
+ name: "Placeholder"
+ output_arg {
+ name: "output"
+ type_attr: "dtype"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ attr {
+ name: "shape"
+ type: "shape"
+ default_value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ op {
+ name: "Prod"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "reduction_indices"
+ type_attr: "Tidx"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "keep_dims"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tidx"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "RealDiv"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "Reshape"
+ input_arg {
+ name: "tensor"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "shape"
+ type_attr: "Tshape"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "Tshape"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "RestoreV2"
+ input_arg {
+ name: "prefix"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensor_names"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shape_and_slices"
+ type: DT_STRING
+ }
+ output_arg {
+ name: "tensors"
+ type_list_attr: "dtypes"
+ }
+ attr {
+ name: "dtypes"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ is_stateful: true
+ }
+ op {
+ name: "SaveV2"
+ input_arg {
+ name: "prefix"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensor_names"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shape_and_slices"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensors"
+ type_list_attr: "dtypes"
+ }
+ attr {
+ name: "dtypes"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ is_stateful: true
+ }
+ op {
+ name: "Shape"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "out_type"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "out_type"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "ShardedFilename"
+ input_arg {
+ name: "basename"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shard"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "num_shards"
+ type: DT_INT32
+ }
+ output_arg {
+ name: "filename"
+ type: DT_STRING
+ }
+ }
+ op {
+ name: "Slice"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "begin"
+ type_attr: "Index"
+ }
+ input_arg {
+ name: "size"
+ type_attr: "Index"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "Index"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "SoftmaxCrossEntropyWithLogits"
+ input_arg {
+ name: "features"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "labels"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "loss"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "backprop"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ }
+ op {
+ name: "StringJoin"
+ input_arg {
+ name: "inputs"
+ type: DT_STRING
+ number_attr: "N"
+ }
+ output_arg {
+ name: "output"
+ type: DT_STRING
+ }
+ attr {
+ name: "N"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "separator"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ }
+ op {
+ name: "Sub"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "Sum"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "reduction_indices"
+ type_attr: "Tidx"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "keep_dims"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_HALF
+ }
+ }
+ }
+ attr {
+ name: "Tidx"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "Tile"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "multiples"
+ type_attr: "Tmultiples"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "Tmultiples"
+ type: "type"
+ default_value {
+ type: DT_INT32
+ }
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ }
+ op {
+ name: "VariableV2"
+ output_arg {
+ name: "ref"
+ type_attr: "dtype"
+ is_ref: true
+ }
+ attr {
+ name: "shape"
+ type: "shape"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ attr {
+ name: "container"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ attr {
+ name: "shared_name"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ is_stateful: true
+ }
+ op {
+ name: "ZerosLike"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ }
+ }
+ tags: "serve"
+ tensorflow_version: "1.4.1"
+ tensorflow_git_version: "v1.4.0-19-ga52c8d9"
+ }
+ graph_def {
+ node {
+ name: "Placeholder"
+ op: "Placeholder"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Placeholder_1"
+ op: "Placeholder"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "layer/zeros"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "layer/Variable"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "layer/Variable/Assign"
+ op: "Assign"
+ input: "layer/Variable"
+ input: "layer/zeros"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@layer/Variable"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "layer/Variable/read"
+ op: "Identity"
+ input: "layer/Variable"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@layer/Variable"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "layer/zeros_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: 10
+ }
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "layer/Variable_1"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "layer/Variable_1/Assign"
+ op: "Assign"
+ input: "layer/Variable_1"
+ input: "layer/zeros_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@layer/Variable_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "layer/Variable_1/read"
+ op: "Identity"
+ input: "layer/Variable_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@layer/Variable_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "layer/MatMul"
+ op: "MatMul"
+ input: "Placeholder"
+ input: "layer/Variable/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "layer/add"
+ op: "Add"
+ input: "layer/MatMul"
+ input: "layer/Variable_1/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Rank"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 2
+ }
+ }
+ }
+ }
+ node {
+ name: "Shape"
+ op: "Shape"
+ input: "layer/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "Rank_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 2
+ }
+ }
+ }
+ }
+ node {
+ name: "Shape_1"
+ op: "Shape"
+ input: "layer/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "Sub/y"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "Sub"
+ op: "Sub"
+ input: "Rank_1"
+ input: "Sub/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Slice/begin"
+ op: "Pack"
+ input: "Sub"
+ attr {
+ key: "N"
+ value {
+ i: 1
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "axis"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "Slice/size"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "Slice"
+ op: "Slice"
+ input: "Shape_1"
+ input: "Slice/begin"
+ input: "Slice/size"
+ attr {
+ key: "Index"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "concat/values_0"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: -1
+ }
+ }
+ }
+ }
+ node {
+ name: "concat/axis"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "concat"
+ op: "ConcatV2"
+ input: "concat/values_0"
+ input: "Slice"
+ input: "concat/axis"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Reshape"
+ op: "Reshape"
+ input: "layer/add"
+ input: "concat"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Rank_2"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 2
+ }
+ }
+ }
+ }
+ node {
+ name: "Shape_2"
+ op: "Shape"
+ input: "Placeholder_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "Sub_1/y"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "Sub_1"
+ op: "Sub"
+ input: "Rank_2"
+ input: "Sub_1/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Slice_1/begin"
+ op: "Pack"
+ input: "Sub_1"
+ attr {
+ key: "N"
+ value {
+ i: 1
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "axis"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "Slice_1/size"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "Slice_1"
+ op: "Slice"
+ input: "Shape_2"
+ input: "Slice_1/begin"
+ input: "Slice_1/size"
+ attr {
+ key: "Index"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "concat_1/values_0"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: -1
+ }
+ }
+ }
+ }
+ node {
+ name: "concat_1/axis"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "concat_1"
+ op: "ConcatV2"
+ input: "concat_1/values_0"
+ input: "Slice_1"
+ input: "concat_1/axis"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Reshape_1"
+ op: "Reshape"
+ input: "Placeholder_1"
+ input: "concat_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "SoftmaxCrossEntropyWithLogits"
+ op: "SoftmaxCrossEntropyWithLogits"
+ input: "Reshape"
+ input: "Reshape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Sub_2/y"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "Sub_2"
+ op: "Sub"
+ input: "Rank"
+ input: "Sub_2/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Slice_2/begin"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "Slice_2/size"
+ op: "Pack"
+ input: "Sub_2"
+ attr {
+ key: "N"
+ value {
+ i: 1
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "axis"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "Slice_2"
+ op: "Slice"
+ input: "Shape"
+ input: "Slice_2/begin"
+ input: "Slice_2/size"
+ attr {
+ key: "Index"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Reshape_2"
+ op: "Reshape"
+ input: "SoftmaxCrossEntropyWithLogits"
+ input: "Slice_2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "Mean"
+ op: "Mean"
+ input: "Reshape_2"
+ input: "Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "gradients/Shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 1.0
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Fill"
+ op: "Fill"
+ input: "gradients/Shape"
+ input: "gradients/Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Reshape/shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Reshape"
+ op: "Reshape"
+ input: "gradients/Fill"
+ input: "gradients/Mean_grad/Reshape/shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Shape"
+ op: "Shape"
+ input: "Reshape_2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Tile"
+ op: "Tile"
+ input: "gradients/Mean_grad/Reshape"
+ input: "gradients/Mean_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tmultiples"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Shape_1"
+ op: "Shape"
+ input: "Reshape_2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Shape_2"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Const"
+ op: "Const"
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/Mean_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Prod"
+ op: "Prod"
+ input: "gradients/Mean_grad/Shape_1"
+ input: "gradients/Mean_grad/Const"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/Mean_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Const_1"
+ op: "Const"
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/Mean_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Prod_1"
+ op: "Prod"
+ input: "gradients/Mean_grad/Shape_2"
+ input: "gradients/Mean_grad/Const_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/Mean_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Maximum/y"
+ op: "Const"
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/Mean_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Maximum"
+ op: "Maximum"
+ input: "gradients/Mean_grad/Prod_1"
+ input: "gradients/Mean_grad/Maximum/y"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/Mean_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/floordiv"
+ op: "FloorDiv"
+ input: "gradients/Mean_grad/Prod"
+ input: "gradients/Mean_grad/Maximum"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/Mean_grad/Shape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/Cast"
+ op: "Cast"
+ input: "gradients/Mean_grad/floordiv"
+ attr {
+ key: "DstT"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "SrcT"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Mean_grad/truediv"
+ op: "RealDiv"
+ input: "gradients/Mean_grad/Tile"
+ input: "gradients/Mean_grad/Cast"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Reshape_2_grad/Shape"
+ op: "Shape"
+ input: "SoftmaxCrossEntropyWithLogits"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "gradients/Reshape_2_grad/Reshape"
+ op: "Reshape"
+ input: "gradients/Mean_grad/truediv"
+ input: "gradients/Reshape_2_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/zeros_like"
+ op: "ZerosLike"
+ input: "SoftmaxCrossEntropyWithLogits:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/SoftmaxCrossEntropyWithLogits_grad/ExpandDims/dim"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: -1
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/SoftmaxCrossEntropyWithLogits_grad/ExpandDims"
+ op: "ExpandDims"
+ input: "gradients/Reshape_2_grad/Reshape"
+ input: "gradients/SoftmaxCrossEntropyWithLogits_grad/ExpandDims/dim"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tdim"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/SoftmaxCrossEntropyWithLogits_grad/mul"
+ op: "Mul"
+ input: "gradients/SoftmaxCrossEntropyWithLogits_grad/ExpandDims"
+ input: "SoftmaxCrossEntropyWithLogits:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/Reshape_grad/Shape"
+ op: "Shape"
+ input: "layer/add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "gradients/Reshape_grad/Reshape"
+ op: "Reshape"
+ input: "gradients/SoftmaxCrossEntropyWithLogits_grad/mul"
+ input: "gradients/Reshape_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/Shape"
+ op: "Shape"
+ input: "layer/MatMul"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "out_type"
+ value {
+ type: DT_INT32
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/Shape_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 10
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/BroadcastGradientArgs"
+ op: "BroadcastGradientArgs"
+ input: "gradients/layer/add_grad/Shape"
+ input: "gradients/layer/add_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/Sum"
+ op: "Sum"
+ input: "gradients/Reshape_grad/Reshape"
+ input: "gradients/layer/add_grad/BroadcastGradientArgs"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/Reshape"
+ op: "Reshape"
+ input: "gradients/layer/add_grad/Sum"
+ input: "gradients/layer/add_grad/Shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/Sum_1"
+ op: "Sum"
+ input: "gradients/Reshape_grad/Reshape"
+ input: "gradients/layer/add_grad/BroadcastGradientArgs:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/Reshape_1"
+ op: "Reshape"
+ input: "gradients/layer/add_grad/Sum_1"
+ input: "gradients/layer/add_grad/Shape_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tshape"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^gradients/layer/add_grad/Reshape"
+ input: "^gradients/layer/add_grad/Reshape_1"
+ }
+ node {
+ name: "gradients/layer/add_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "gradients/layer/add_grad/Reshape"
+ input: "^gradients/layer/add_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/layer/add_grad/Reshape"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/add_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "gradients/layer/add_grad/Reshape_1"
+ input: "^gradients/layer/add_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/layer/add_grad/Reshape_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/MatMul_grad/MatMul"
+ op: "MatMul"
+ input: "gradients/layer/add_grad/tuple/control_dependency"
+ input: "layer/Variable/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/MatMul_grad/MatMul_1"
+ op: "MatMul"
+ input: "Placeholder"
+ input: "gradients/layer/add_grad/tuple/control_dependency"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/MatMul_grad/tuple/group_deps"
+ op: "NoOp"
+ input: "^gradients/layer/MatMul_grad/MatMul"
+ input: "^gradients/layer/MatMul_grad/MatMul_1"
+ }
+ node {
+ name: "gradients/layer/MatMul_grad/tuple/control_dependency"
+ op: "Identity"
+ input: "gradients/layer/MatMul_grad/MatMul"
+ input: "^gradients/layer/MatMul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/layer/MatMul_grad/MatMul"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "gradients/layer/MatMul_grad/tuple/control_dependency_1"
+ op: "Identity"
+ input: "gradients/layer/MatMul_grad/MatMul_1"
+ input: "^gradients/layer/MatMul_grad/tuple/group_deps"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@gradients/layer/MatMul_grad/MatMul_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "GradientDescent/learning_rate"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.5
+ }
+ }
+ }
+ }
+ node {
+ name: "GradientDescent/update_layer/Variable/ApplyGradientDescent"
+ op: "ApplyGradientDescent"
+ input: "layer/Variable"
+ input: "GradientDescent/learning_rate"
+ input: "gradients/layer/MatMul_grad/tuple/control_dependency_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@layer/Variable"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "GradientDescent/update_layer/Variable_1/ApplyGradientDescent"
+ op: "ApplyGradientDescent"
+ input: "layer/Variable_1"
+ input: "GradientDescent/learning_rate"
+ input: "gradients/layer/add_grad/tuple/control_dependency_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@layer/Variable_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "GradientDescent"
+ op: "NoOp"
+ input: "^GradientDescent/update_layer/Variable/ApplyGradientDescent"
+ input: "^GradientDescent/update_layer/Variable_1/ApplyGradientDescent"
+ }
+ node {
+ name: "init"
+ op: "NoOp"
+ input: "^layer/Variable/Assign"
+ input: "^layer/Variable_1/Assign"
+ }
+ node {
+ name: "ArgMax/dimension"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "ArgMax"
+ op: "ArgMax"
+ input: "layer/add"
+ input: "ArgMax/dimension"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "output_type"
+ value {
+ type: DT_INT64
+ }
+ }
+ }
+ node {
+ name: "ArgMax_1/dimension"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "ArgMax_1"
+ op: "ArgMax"
+ input: "Placeholder_1"
+ input: "ArgMax_1/dimension"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "output_type"
+ value {
+ type: DT_INT64
+ }
+ }
+ }
+ node {
+ name: "Equal"
+ op: "Equal"
+ input: "ArgMax"
+ input: "ArgMax_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT64
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Cast_1"
+ op: "Cast"
+ input: "Equal"
+ attr {
+ key: "DstT"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "SrcT"
+ value {
+ type: DT_BOOL
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Const_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "Mean_1"
+ op: "Mean"
+ input: "Cast_1"
+ input: "Const_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "Tidx"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "keep_dims"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "save/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ }
+ string_val: "model"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/StringJoin/inputs_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ }
+ string_val: "_temp_65caff16d5244276b9828b0dab21b157/part"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/StringJoin"
+ op: "StringJoin"
+ input: "save/Const"
+ input: "save/StringJoin/inputs_1"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "separator"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "save/num_shards"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "save/ShardedFilename/shard"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "save/ShardedFilename"
+ op: "ShardedFilename"
+ input: "save/StringJoin"
+ input: "save/ShardedFilename/shard"
+ input: "save/num_shards"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ string_val: "layer/Variable"
+ string_val: "layer/Variable_1"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ string_val: ""
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2"
+ op: "SaveV2"
+ input: "save/ShardedFilename"
+ input: "save/SaveV2/tensor_names"
+ input: "save/SaveV2/shape_and_slices"
+ input: "layer/Variable"
+ input: "layer/Variable_1"
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/control_dependency"
+ op: "Identity"
+ input: "save/ShardedFilename"
+ input: "^save/SaveV2"
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@save/ShardedFilename"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/MergeV2Checkpoints/checkpoint_prefixes"
+ op: "Pack"
+ input: "save/ShardedFilename"
+ input: "^save/control_dependency"
+ attr {
+ key: "N"
+ value {
+ i: 1
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "axis"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "save/MergeV2Checkpoints"
+ op: "MergeV2Checkpoints"
+ input: "save/MergeV2Checkpoints/checkpoint_prefixes"
+ input: "save/Const"
+ attr {
+ key: "delete_old_dirs"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/Identity"
+ op: "Identity"
+ input: "save/Const"
+ input: "^save/control_dependency"
+ input: "^save/MergeV2Checkpoints"
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: "layer/Variable"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2/tensor_names"
+ input: "save/RestoreV2/shape_and_slices"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign"
+ op: "Assign"
+ input: "layer/Variable"
+ input: "save/RestoreV2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@layer/Variable"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 784
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_1/tensor_names"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: "layer/Variable_1"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_1/shape_and_slices"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2_1"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2_1/tensor_names"
+ input: "save/RestoreV2_1/shape_and_slices"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign_1"
+ op: "Assign"
+ input: "layer/Variable_1"
+ input: "save/RestoreV2_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@layer/Variable_1"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/restore_shard"
+ op: "NoOp"
+ input: "^save/Assign"
+ input: "^save/Assign_1"
+ }
+ node {
+ name: "save/restore_all"
+ op: "NoOp"
+ input: "^save/restore_shard"
+ }
+ versions {
+ producer: 24
+ }
+ }
+ saver_def {
+ filename_tensor_name: "save/Const:0"
+ save_tensor_name: "save/Identity:0"
+ restore_op_name: "save/restore_all"
+ max_to_keep: 5
+ sharded: true
+ keep_checkpoint_every_n_hours: 10000.0
+ version: V2
+ }
+ collection_def {
+ key: "train_op"
+ value {
+ node_list {
+ value: "GradientDescent"
+ }
+ }
+ }
+ collection_def {
+ key: "trainable_variables"
+ value {
+ bytes_list {
+ value: "\n\020layer/Variable:0\022\025layer/Variable/Assign\032\025layer/Variable/read:02\rlayer/zeros:0"
+ value: "\n\022layer/Variable_1:0\022\027layer/Variable_1/Assign\032\027layer/Variable_1/read:02\017layer/zeros_1:0"
+ }
+ }
+ }
+ collection_def {
+ key: "variables"
+ value {
+ bytes_list {
+ value: "\n\020layer/Variable:0\022\025layer/Variable/Assign\032\025layer/Variable/read:02\rlayer/zeros:0"
+ value: "\n\022layer/Variable_1:0\022\027layer/Variable_1/Assign\032\027layer/Variable_1/read:02\017layer/zeros_1:0"
+ }
+ }
+ }
+ signature_def {
+ key: "serving_default"
+ value {
+ inputs {
+ key: "x"
+ value {
+ name: "Placeholder:0"
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 784
+ }
+ }
+ }
+ }
+ outputs {
+ key: "y"
+ value {
+ name: "layer/add:0"
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 10
+ }
+ }
+ }
+ }
+ method_name: "tensorflow/serving/predict"
+ }
+ }
+}
diff --git a/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/variables/variables.data-00000-of-00001 b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/variables/variables.data-00000-of-00001
new file mode 100644
index 00000000000..826b0280abf
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/variables/variables.data-00000-of-00001
Binary files differ
diff --git a/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/variables/variables.index b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/variables/variables.index
new file mode 100644
index 00000000000..d00fc5b06ed
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/tensorflow/mnist_softmax/saved/variables/variables.index
Binary files differ
diff --git a/application/src/test/app-packages/model-evaluation/models/vespa/constant1asLarge.json b/application/src/test/app-packages/model-evaluation/models/vespa/constant1asLarge.json
new file mode 100644
index 00000000000..d2944d255af
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/vespa/constant1asLarge.json
@@ -0,0 +1,7 @@
+{
+ "cells": [
+ { "address": { "x": "0" }, "value": 0.5 },
+ { "address": { "x": "1" }, "value": 1.5 },
+ { "address": { "x": "2" }, "value": 2.5 }
+ ]
+} \ No newline at end of file
diff --git a/application/src/test/app-packages/model-evaluation/models/vespa/example.model b/application/src/test/app-packages/model-evaluation/models/vespa/example.model
new file mode 100644
index 00000000000..e9725d14923
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/vespa/example.model
@@ -0,0 +1,25 @@
+model example {
+
+ # All inputs that are not scalar (aka 0-dimensional tensor) must be declared
+ input1: tensor(name{}, x[3])
+ input2: tensor(x[3])
+
+ constants {
+ constant1: tensor(x[3]):{{x:0}:0.5, {x:1}:1.5, {x:2}:2.5}
+ constant2: 3.0
+ }
+
+ constant constant1asLarge {
+ type: tensor(x[3])
+ file: constant1asLarge.json
+ }
+
+ function foo1() {
+ expression: max(sum(input1 * input2, name) * constant1, x) * constant2
+ }
+
+ function foo2() {
+ expression: max(sum(input1 * input2, name) * constant(constant1asLarge), x) * constant2
+ }
+
+} \ No newline at end of file
diff --git a/application/src/test/app-packages/model-evaluation/models/xgboost/xgboost.2.2.json b/application/src/test/app-packages/model-evaluation/models/xgboost/xgboost.2.2.json
new file mode 100644
index 00000000000..f8949b47e52
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/models/xgboost/xgboost.2.2.json
@@ -0,0 +1,19 @@
+[
+ { "nodeid": 0, "depth": 0, "split": "f29", "split_condition": -0.1234567, "yes": 1, "no": 2, "missing": 1, "children": [
+ { "nodeid": 1, "depth": 1, "split": "f56", "split_condition": -0.242398, "yes": 3, "no": 4, "missing": 3, "children": [
+ { "nodeid": 3, "leaf": 1.71218 },
+ { "nodeid": 4, "leaf": -1.70044 }
+ ]},
+ { "nodeid": 2, "depth": 1, "split": "f109", "split_condition": 0.8723473, "yes": 5, "no": 6, "missing": 5, "children": [
+ { "nodeid": 5, "leaf": -1.94071 },
+ { "nodeid": 6, "leaf": 1.85965 }
+ ]}
+ ]},
+ { "nodeid": 0, "depth": 0, "split": "f60", "split_condition": -0.482947, "yes": 1, "no": 2, "missing": 1, "children": [
+ { "nodeid": 1, "depth": 1, "split": "f29", "split_condition": -4.2387498, "yes": 3, "no": 4, "missing": 3, "children": [
+ { "nodeid": 3, "leaf": 0.784718 },
+ { "nodeid": 4, "leaf": -0.96853 }
+ ]},
+ { "nodeid": 2, "leaf": -6.23624 }
+ ]}
+] \ No newline at end of file
diff --git a/application/src/test/app-packages/model-evaluation/services.xml b/application/src/test/app-packages/model-evaluation/services.xml
new file mode 100644
index 00000000000..88f9ba14abe
--- /dev/null
+++ b/application/src/test/app-packages/model-evaluation/services.xml
@@ -0,0 +1,3 @@
+<container version="1.0">
+ <model-evaluation/>
+</container>
diff --git a/application/src/test/java/com/yahoo/application/container/JDiscContainerDocprocTest.java b/application/src/test/java/com/yahoo/application/container/ContainerDocprocTest.java
index 2a363916fa3..fddd41e7cc2 100644
--- a/application/src/test/java/com/yahoo/application/container/JDiscContainerDocprocTest.java
+++ b/application/src/test/java/com/yahoo/application/container/ContainerDocprocTest.java
@@ -25,7 +25,7 @@ import static org.junit.Assert.assertTrue;
/**
* @author Einar M R Rosenvinge
*/
-public class JDiscContainerDocprocTest {
+public class ContainerDocprocTest {
private static final String DOCUMENT = "document music {\n"
+ " field title type string { }\n"
diff --git a/application/src/test/java/com/yahoo/application/container/ContainerModelEvaluationTest.java b/application/src/test/java/com/yahoo/application/container/ContainerModelEvaluationTest.java
new file mode 100644
index 00000000000..66a7ae579fa
--- /dev/null
+++ b/application/src/test/java/com/yahoo/application/container/ContainerModelEvaluationTest.java
@@ -0,0 +1,82 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.application.container;
+
+import com.yahoo.application.Application;
+import com.yahoo.application.Networking;
+import com.yahoo.application.container.handler.Request;
+import com.yahoo.application.container.handler.Response;
+import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorType;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.io.File;
+import java.net.URLEncoder;
+import java.nio.charset.CharacterCodingException;
+import java.nio.charset.StandardCharsets;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verify that we can create a JDisc (and hence Application) instance capable of doing model evaluation
+ *
+ * @author bratseth
+ */
+public class ContainerModelEvaluationTest {
+
+ @Test
+ @Ignore // This should ideally work but may not be worth the effort
+ public void testCreateJDiscInstanceWithModelEvaluation() {
+ try (JDisc jdisc =
+ JDisc.fromPath(new File("src/test/app-packages/model-evaluation").toPath(),
+ Networking.disable)) {
+ assertLoadedModels(jdisc);
+ }
+ }
+
+ @Test
+ public void testCreateApplicationInstanceWithModelEvaluation() {
+ try (Application application =
+ Application.fromApplicationPackage(new File("src/test/app-packages/model-evaluation"),
+ Networking.disable)) {
+ assertLoadedModels(application.getJDisc("default"));
+ }
+ }
+ private void assertLoadedModels(JDisc jdisc) {
+ {
+ String expected = "{\"xgboost_xgboost_2_2\":\"http://localhost/model-evaluation/v1/xgboost_xgboost_2_2\",\"onnx_mnist_softmax\":\"http://localhost/model-evaluation/v1/onnx_mnist_softmax\",\"tensorflow_mnist_softmax_saved\":\"http://localhost/model-evaluation/v1/tensorflow_mnist_softmax_saved\",\"tensorflow_mnist_saved\":\"http://localhost/model-evaluation/v1/tensorflow_mnist_saved\",\"vespa_example\":\"http://localhost/model-evaluation/v1/vespa_example\"}";
+ assertResponse("http://localhost/model-evaluation/v1", expected, jdisc);
+ }
+
+ {
+ String expected = "{\"cells\":[{\"address\":{},\"value\":-8.17695}]}";
+ assertResponse("http://localhost/model-evaluation/v1/xgboost_xgboost_2_2/eval", expected, jdisc);
+ }
+
+ {
+ // Note: The specific response value here has not been verified
+ String expected = "{\"cells\":[{\"address\":{\"d0\":\"0\",\"d1\":\"0\"},\"value\":-0.5066885003407351},{\"address\":{\"d0\":\"0\",\"d1\":\"1\"},\"value\":0.3912837743150205},{\"address\":{\"d0\":\"0\",\"d1\":\"2\"},\"value\":-0.12401806321703948},{\"address\":{\"d0\":\"0\",\"d1\":\"3\"},\"value\":-0.7019029168606575},{\"address\":{\"d0\":\"0\",\"d1\":\"4\"},\"value\":0.13120114146441697},{\"address\":{\"d0\":\"0\",\"d1\":\"5\"},\"value\":0.6611923203384626},{\"address\":{\"d0\":\"0\",\"d1\":\"6\"},\"value\":-0.22365810810026446},{\"address\":{\"d0\":\"0\",\"d1\":\"7\"},\"value\":-0.0740018307465809},{\"address\":{\"d0\":\"0\",\"d1\":\"8\"},\"value\":0.056492490256153896},{\"address\":{\"d0\":\"0\",\"d1\":\"9\"},\"value\":-0.18422015072393733}]}";
+ assertResponse("http://localhost/model-evaluation/v1/tensorflow_mnist_saved/serving_default.y/eval?input=" + inputTensor(), expected, jdisc);
+ }
+ }
+
+
+ private void assertResponse(String url, String expectedResponse, JDisc jdisc) {
+ try {
+ Response response = jdisc.handleRequest(new Request(url));
+ assertEquals(expectedResponse, response.getBodyAsString());
+ assertEquals(200, response.getStatus());
+ }
+ catch (CharacterCodingException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private String inputTensor() {
+ Tensor.Builder b = Tensor.Builder.of(TensorType.fromSpec("tensor(d0[],d1[784])"));
+ for (int i = 0; i < 784; i++)
+ b.cell(0.0, 0, i);
+ return URLEncoder.encode(b.build().toString(), StandardCharsets.UTF_8);
+ }
+
+}
diff --git a/application/src/test/java/com/yahoo/application/container/JDiscContainerProcessingTest.java b/application/src/test/java/com/yahoo/application/container/ContainerProcessingTest.java
index 443b938693f..93ca09ac5fc 100644
--- a/application/src/test/java/com/yahoo/application/container/JDiscContainerProcessingTest.java
+++ b/application/src/test/java/com/yahoo/application/container/ContainerProcessingTest.java
@@ -18,7 +18,7 @@ import static org.junit.Assert.assertThat;
/**
* @author Einar M R Rosenvinge
*/
-public class JDiscContainerProcessingTest {
+public class ContainerProcessingTest {
private static String getXML(String chainName, String... processorIds) {
String xml =
@@ -55,17 +55,15 @@ public class JDiscContainerProcessingTest {
@Test
public void requireThatBasicProcessingDoesNotTruncateBigResponse() {
- final int SIZE = 50*1000;
+ int SIZE = 50*1000;
StringBuilder foo = new StringBuilder();
for (int j = 0 ; j < SIZE ; j++) {
foo.append('b');
}
try (JDisc container = getContainerWithRot13()) {
- final int NUM_TIMES = 100;
+ int NUM_TIMES = 100;
for (int i = 0; i < NUM_TIMES; i++) {
-
-
com.yahoo.application.container.handler.Response response =
container.handleRequest(
new com.yahoo.application.container.handler.Request("http://foo/processing/?chain=foo&title=" + foo.toString()));
diff --git a/application/src/test/java/com/yahoo/application/container/JDiscContainerRequestTest.java b/application/src/test/java/com/yahoo/application/container/ContainerRequestTest.java
index 9f5555069cd..8f3e7693bc5 100644
--- a/application/src/test/java/com/yahoo/application/container/JDiscContainerRequestTest.java
+++ b/application/src/test/java/com/yahoo/application/container/ContainerRequestTest.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.assertThat;
/**
* @author Einar M R Rosenvinge
*/
-public class JDiscContainerRequestTest {
+public class ContainerRequestTest {
private static String getXML(String className, String binding) {
return "<container version=\"1.0\">\n" +
@@ -37,7 +37,7 @@ public class JDiscContainerRequestTest {
}
@Test
- public void requireThatRequestBodyWorks() throws InterruptedException, CharacterCodingException {
+ public void requireThatRequestBodyWorks() throws CharacterCodingException {
String DATA = "we have no bananas today";
Request req = new Request("http://banana/echo", DATA.getBytes(Utf8.getCharset()));
@@ -50,7 +50,7 @@ public class JDiscContainerRequestTest {
}
@Test
- public void requireThatCustomRequestHeadersWork() throws InterruptedException {
+ public void requireThatCustomRequestHeadersWork() {
Request req = new Request("http://banana/echo");
req.getHeaders().add("X-Foo", "Bar");
@@ -63,7 +63,7 @@ public class JDiscContainerRequestTest {
}
@Test(expected = WriteException.class)
- public void requireThatRequestHandlerThatThrowsInWriteWorks() throws InterruptedException {
+ public void requireThatRequestHandlerThatThrowsInWriteWorks() {
String DATA = "we have no bananas today";
Request req = new Request("http://banana/throwwrite", DATA.getBytes(Utf8.getCharset()));
@@ -73,9 +73,8 @@ public class JDiscContainerRequestTest {
}
}
-
@Test(expected = DelayedWriteException.class)
- public void requireThatRequestHandlerThatThrowsDelayedInWriteWorks() throws InterruptedException {
+ public void requireThatRequestHandlerThatThrowsDelayedInWriteWorks() {
String DATA = "we have no bananas today";
Request req = new Request("http://banana/delayedthrowwrite", DATA.getBytes(Utf8.getCharset()));
@@ -83,6 +82,7 @@ public class JDiscContainerRequestTest {
Response response = container.handleRequest(req);
req.toString();
}
+
}
}
diff --git a/application/src/test/java/com/yahoo/application/container/JDiscContainerSearchTest.java b/application/src/test/java/com/yahoo/application/container/ContainerSearchTest.java
index b7445d13a17..d133b71b8da 100644
--- a/application/src/test/java/com/yahoo/application/container/JDiscContainerSearchTest.java
+++ b/application/src/test/java/com/yahoo/application/container/ContainerSearchTest.java
@@ -16,7 +16,8 @@ import static org.junit.Assert.assertThat;
* @author gjoranv
* @author ollivir
*/
-public class JDiscContainerSearchTest {
+public class ContainerSearchTest {
+
@Test
public void processing_and_rendering_works() throws Exception {
final String searcherId = AddHitSearcher.class.getName();
@@ -30,7 +31,7 @@ public class JDiscContainerSearchTest {
}
@Test
- public void searching_works() throws Exception {
+ public void searching_works() {
final String searcherId = AddHitSearcher.class.getName();
try (JDisc container = containerWithSearch(searcherId)) {
@@ -52,9 +53,10 @@ public class JDiscContainerSearchTest {
}
@Test(expected = UnsupportedOperationException.class)
- public void retrieving_search_from_container_without_search_is_illegal() throws Exception {
+ public void retrieving_search_from_container_without_search_is_illegal() {
try (JDisc container = JDisc.fromServicesXml("<container version=\"1.0\" />", Networking.disable)) {
container.search(); // throws
}
+
}
}
diff --git a/application/src/test/java/com/yahoo/application/container/JDiscTest.java b/application/src/test/java/com/yahoo/application/container/ContainerTest.java
index 86a96d04848..e44916d2ec4 100644
--- a/application/src/test/java/com/yahoo/application/container/JDiscTest.java
+++ b/application/src/test/java/com/yahoo/application/container/ContainerTest.java
@@ -32,9 +32,10 @@ import static org.junit.Assert.fail;
* @author gjoranv
* @author ollivir
*/
-public class JDiscTest {
+public class ContainerTest {
+
@Test
- public void jdisc_can_be_used_as_top_level_element() throws Exception {
+ public void jdisc_can_be_used_as_top_level_element() {
try (JDisc container = fromServicesXml("<jdisc version=\"1.0\">" + //
"<search />" + //
"</jdisc>", Networking.disable)) {
@@ -43,7 +44,7 @@ public class JDiscTest {
}
@Test
- public void jdisc_id_can_be_set() throws Exception {
+ public void jdisc_id_can_be_set() {
try (JDisc container = fromServicesXml("<jdisc version=\"1.0\" id=\"my-service-id\">" + //
"<search />" + //
"</jdisc>", Networking.disable)) {
@@ -52,7 +53,7 @@ public class JDiscTest {
}
@Test
- public void jdisc_can_be_embedded_in_services_tag() throws Exception {
+ public void jdisc_can_be_embedded_in_services_tag() {
try (JDisc container = fromServicesXml("<services>" + //
"<jdisc version=\"1.0\" id=\"my-service-id\">" + //
"<search />" + //
@@ -77,7 +78,7 @@ public class JDiscTest {
}
@Test
- public void handleRequest_yields_response_from_correct_request_handler() throws Exception {
+ public void handleRequest_yields_response_from_correct_request_handler() {
final String handlerClass = TestHandler.class.getName();
try (JDisc container = fromServicesXml("<container version=\"1.0\">" + //
"<handler id=\"test-handler\" class=\"" + handlerClass + "\">" + //
@@ -94,7 +95,7 @@ public class JDiscTest {
}
@Test
- public void load_searcher_from_bundle() throws Exception {
+ public void load_searcher_from_bundle() {
try (JDisc container = JDisc.fromPath(FileSystems.getDefault().getPath("src/test/app-packages/searcher-app"),
Networking.disable)) {
Result result = container.search().process(ComponentSpecification.fromString("default"),
@@ -175,4 +176,5 @@ public class JDiscTest {
}
throw new RuntimeException("No http server found");
}
+
}
diff --git a/application/src/test/java/com/yahoo/application/container/jersey/JerseyTest.java b/application/src/test/java/com/yahoo/application/container/jersey/JerseyTest.java
index 9c3cd1e612c..89c23fe0001 100644
--- a/application/src/test/java/com/yahoo/application/container/jersey/JerseyTest.java
+++ b/application/src/test/java/com/yahoo/application/container/jersey/JerseyTest.java
@@ -3,11 +3,10 @@ package com.yahoo.application.container.jersey;
import com.yahoo.application.Networking;
import com.yahoo.application.container.JDisc;
-import com.yahoo.application.container.JDiscTest;
+import com.yahoo.application.container.ContainerTest;
import com.yahoo.application.container.jersey.resources.TestResource;
import com.yahoo.application.container.jersey.resources.nestedpackage1.NestedTestResource1;
import com.yahoo.application.container.jersey.resources.nestedpackage2.NestedTestResource2;
-import com.yahoo.container.Container;
import com.yahoo.container.test.jars.jersey.resources.TestResourceBase;
import com.yahoo.osgi.maven.ProjectBundleClassPaths;
import com.yahoo.osgi.maven.ProjectBundleClassPaths.BundleClasspathMapping;
@@ -144,7 +143,7 @@ public class JerseyTest {
"</jdisc>" + //
"</services>", //
Networking.enable)) {
- final int port = JDiscTest.getListenPort();
+ final int port = ContainerTest.getListenPort();
f.accept(path -> {
String p = path.startsWith("/") ? path.substring(1) : path;
CloseableHttpClient client = HttpClientBuilder.create().build();
diff --git a/build_settings.cmake b/build_settings.cmake
index 59c5a66fcd3..7d8ba11f8a1 100644
--- a/build_settings.cmake
+++ b/build_settings.cmake
@@ -117,6 +117,6 @@ else()
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
-# Enable cppunit tests in shared libraries
+# Enable GTest unit tests in shared libraries
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed")
endif()
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
index 33f2f909910..821e98800d3 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
@@ -123,7 +123,7 @@ public class MasterElectionTest extends FleetControllerTest {
log.log(LogLevel.INFO, "STARTING TEST: MasterElectionTest::testMasterElection()");
FleetControllerOptions options = defaultOptions("mycluster");
options.masterZooKeeperCooldownPeriod = 1;
- setUpFleetController(5, true, options);
+ setUpFleetController(5, false, options);
waitForMaster(0);
log.log(LogLevel.INFO, "SHUTTING DOWN FLEET CONTROLLER 0");
fleetControllers.get(0).shutdown();
@@ -227,10 +227,10 @@ public class MasterElectionTest extends FleetControllerTest {
startingTest("MasterElectionTest::testClusterStateVersionIncreasesAcrossMasterElections");
FleetControllerOptions options = defaultOptions("mycluster");
options.masterZooKeeperCooldownPeriod = 1;
- setUpFleetController(5, true, options);
+ setUpFleetController(5, false, options);
// Currently need to have content nodes present for the cluster controller to even bother
// attempting to persisting its cluster state version to ZK.
- setUpVdsNodes(true, new DummyVdsNodeOptions());
+ setUpVdsNodes(false, new DummyVdsNodeOptions());
fleetController = fleetControllers.get(0); // Required to prevent waitForStableSystem from NPE'ing
waitForStableSystem();
waitForMaster(0);
@@ -254,7 +254,7 @@ public class MasterElectionTest extends FleetControllerTest {
// "Magic" port value is in range allocated to module for testing.
zooKeeperServer = ZooKeeperTestServer.createWithFixedPort(18342);
options.masterZooKeeperCooldownPeriod = 100;
- setUpFleetController(2, true, options);
+ setUpFleetController(2, false, options);
waitForMaster(0);
zooKeeperServer.shutdown(true);
@@ -276,7 +276,7 @@ public class MasterElectionTest extends FleetControllerTest {
FleetControllerOptions options = defaultOptions("mycluster");
options.masterZooKeeperCooldownPeriod = 100;
options.zooKeeperServerAddress = "localhost";
- setUpFleetController(5, true, options);
+ setUpFleetController(5, false, options);
waitForMaster(0);
log.log(LogLevel.INFO, "STOPPING ZOOKEEPER SERVER AT " + zooKeeperServer.getAddress());
@@ -310,7 +310,7 @@ public class MasterElectionTest extends FleetControllerTest {
startingTest("MasterElectionTest::testMasterZooKeeperCooldown");
FleetControllerOptions options = defaultOptions("mycluster");
options.masterZooKeeperCooldownPeriod = 3600 * 1000; // An hour
- setUpFleetController(3, true, options);
+ setUpFleetController(3, false, options);
waitForMaster(0);
timer.advanceTime(24 * 3600 * 1000); // A day
waitForCompleteCycle(1);
@@ -351,7 +351,7 @@ public class MasterElectionTest extends FleetControllerTest {
startingTest("MasterElectionTest::testGetMaster");
FleetControllerOptions options = defaultOptions("mycluster");
options.masterZooKeeperCooldownPeriod = 3600 * 1000; // An hour
- setUpFleetController(3, true, options);
+ setUpFleetController(3, false, options);
waitForMaster(0);
supervisor = new Supervisor(new Transport());
@@ -431,7 +431,7 @@ public class MasterElectionTest extends FleetControllerTest {
startingTest("MasterElectionTest::testReconfigure");
FleetControllerOptions options = defaultOptions("mycluster");
options.masterZooKeeperCooldownPeriod = 1;
- setUpFleetController(3, true, options);
+ setUpFleetController(3, false, options);
waitForMaster(0);
FleetControllerOptions newOptions = options.clone();
@@ -460,8 +460,8 @@ public class MasterElectionTest extends FleetControllerTest {
options.minRatioOfStorageNodesUp = 0;
options.minDistributorNodesUp = 0;
options.minStorageNodesUp = 1;
- setUpFleetController(3, true, options);
- setUpVdsNodes(true, new DummyVdsNodeOptions());
+ setUpFleetController(3, false, options);
+ setUpVdsNodes(false, new DummyVdsNodeOptions());
fleetController = fleetControllers.get(0); // Required to prevent waitForStableSystem from NPE'ing
waitForStableSystem();
waitForMaster(0);
@@ -504,8 +504,8 @@ public class MasterElectionTest extends FleetControllerTest {
options.clusterHasGlobalDocumentTypes = true;
options.masterZooKeeperCooldownPeriod = 1;
options.minTimeBeforeFirstSystemStateBroadcast = 100000;
- setUpFleetController(3, true, options);
- setUpVdsNodes(true, new DummyVdsNodeOptions());
+ setUpFleetController(3, false, options);
+ setUpVdsNodes(false, new DummyVdsNodeOptions());
fleetController = fleetControllers.get(0); // Required to prevent waitForStableSystem from NPE'ing
waitForMaster(0);
waitForStableSystem();
@@ -547,8 +547,8 @@ public class MasterElectionTest extends FleetControllerTest {
options.clusterHasGlobalDocumentTypes = false;
options.masterZooKeeperCooldownPeriod = 1;
options.minTimeBeforeFirstSystemStateBroadcast = 100000;
- setUpFleetController(3, true, options);
- setUpVdsNodes(true, new DummyVdsNodeOptions());
+ setUpFleetController(3, false, options);
+ setUpVdsNodes(false, new DummyVdsNodeOptions());
fleetController = fleetControllers.get(0); // Required to prevent waitForStableSystem from NPE'ing
waitForMaster(0);
waitForStableSystem();
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/Endpoint.java b/config-model-api/src/main/java/com/yahoo/config/application/api/Endpoint.java
index 7d31e07dac5..ca8eadd8d1f 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/Endpoint.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/Endpoint.java
@@ -29,6 +29,7 @@ public class Endpoint {
*/
private static final Pattern endpointPattern = Pattern.compile("^[a-z](?:-?[a-z0-9]+)*$");
private static final int endpointMaxLength = 12;
+ private static final String defaultEndpointId = "default";
private final Optional<String> endpointId;
private final String containerId;
@@ -48,7 +49,7 @@ public class Endpoint {
}
public String endpointId() {
- return endpointId.orElse(containerId);
+ return endpointId.orElse(defaultEndpointId);
}
public String containerId() {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java
index b0fd3a81732..5641233606e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java
@@ -1,7 +1,5 @@
// Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.config.server.tenant;
-
-import com.yahoo.vespa.applicationmodel.ClusterId;
+package com.yahoo.config.model.api;
import java.util.List;
import java.util.Objects;
@@ -15,15 +13,15 @@ import java.util.Objects;
*/
public class ContainerEndpoint {
- private final ClusterId clusterId;
+ private final String clusterId;
private final List<String> names;
- public ContainerEndpoint(ClusterId clusterId, List<String> names) {
+ public ContainerEndpoint(String clusterId, List<String> names) {
this.clusterId = Objects.requireNonNull(clusterId);
this.names = List.copyOf(Objects.requireNonNull(names));
}
- public ClusterId clusterId() {
+ public String clusterId() {
return clusterId;
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 514ca2a00f5..9d7ae9759c3 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -50,6 +50,7 @@ public interface ModelContext {
boolean hostedVespa();
Zone zone();
Set<Rotation> rotations();
+ Set<ContainerEndpoint> endpoints();
boolean isBootstrap();
boolean isFirstTimeDeployment();
boolean useDedicatedNodeForLogserver();
@@ -58,6 +59,8 @@ public interface ModelContext {
boolean useAdaptiveDispatch();
// TODO: Remove when 7.61 is the oldest model in use
default boolean enableMetricsProxyContainer() { return false; }
+ // TODO: Remove temporary default implementation
+ default Optional<TlsSecrets> tlsSecrets() { return Optional.empty(); }
}
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/TlsSecrets.java b/config-model-api/src/main/java/com/yahoo/config/model/api/TlsSecrets.java
new file mode 100644
index 00000000000..3cb4cedcbac
--- /dev/null
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/TlsSecrets.java
@@ -0,0 +1,30 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.model.api;
+
+ public class TlsSecrets {
+ public static final TlsSecrets MISSING = new TlsSecrets();
+
+ private final String certificate;
+ private final String key;
+
+ private TlsSecrets() {
+ this(null,null);
+ }
+
+ public TlsSecrets(String certificate, String key) {
+ this.certificate = certificate;
+ this.key = key;
+ }
+
+ public String certificate() {
+ return certificate;
+ }
+
+ public String key() {
+ return key;
+ }
+
+ public boolean isMissing() {
+ return this == MISSING;
+ }
+}
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
index c161b04087f..8120c82e8f4 100644
--- a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
@@ -482,7 +482,7 @@ public class DeploymentSpecTest {
"</deployment>");
assertEquals(
- List.of("foo", "nalle", "quux"),
+ List.of("foo", "nalle", "default"),
spec.endpoints().stream().map(Endpoint::endpointId).collect(Collectors.toList())
);
@@ -507,8 +507,8 @@ public class DeploymentSpecTest {
@Test
public void validEndpoints() {
- assertEquals(List.of("qrs"), endpointIds("<endpoint container-id='qrs'/>"));
- assertEquals(List.of("qrs"), endpointIds("<endpoint id='' container-id='qrs'/>"));
+ assertEquals(List.of("default"), endpointIds("<endpoint container-id='qrs'/>"));
+ assertEquals(List.of("default"), endpointIds("<endpoint id='' container-id='qrs'/>"));
assertEquals(List.of("f"), endpointIds("<endpoint id='f' container-id='qrs'/>"));
assertEquals(List.of("foo"), endpointIds("<endpoint id='foo' container-id='qrs'/>"));
assertEquals(List.of("foo-bar"), endpointIds("<endpoint id='foo-bar' container-id='qrs'/>"));
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
index c19865fafc9..1892c8920a7 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
@@ -11,9 +11,11 @@ import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.config.application.api.UnparsedConfigDefinition;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.HostProvisioner;
import com.yahoo.config.model.api.Model;
import com.yahoo.config.model.api.ModelContext;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.model.api.ValidationParameters;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.application.provider.MockFileRegistry;
@@ -67,6 +69,7 @@ public class DeployState implements ConfigDefinitionStore {
private final ModelContext.Properties properties;
private final Version vespaVersion;
private final Set<Rotation> rotations;
+ private final Set<ContainerEndpoint> endpoints;
private final Zone zone;
private final QueryProfiles queryProfiles;
private final SemanticRules semanticRules;
@@ -96,6 +99,7 @@ public class DeployState implements ConfigDefinitionStore {
Optional<ConfigDefinitionRepo> configDefinitionRepo,
java.util.Optional<Model> previousModel,
Set<Rotation> rotations,
+ Set<ContainerEndpoint> endpoints,
Collection<MlModelImporter> modelImporters,
Zone zone,
QueryProfiles queryProfiles,
@@ -115,6 +119,7 @@ public class DeployState implements ConfigDefinitionStore {
this.permanentApplicationPackage = permanentApplicationPackage;
this.configDefinitionRepo = configDefinitionRepo;
this.rotations = rotations;
+ this.endpoints = Set.copyOf(endpoints);
this.zone = zone;
this.queryProfiles = queryProfiles; // TODO: Remove this by seeing how pagetemplates are propagated
this.semanticRules = semanticRules; // TODO: Remove this by seeing how pagetemplates are propagated
@@ -234,6 +239,10 @@ public class DeployState implements ConfigDefinitionStore {
return this.rotations; // todo: consider returning a copy or immutable view
}
+ public Set<ContainerEndpoint> getEndpoints() {
+ return endpoints;
+ }
+
/** Returns the zone in which this is currently running */
public Zone zone() { return zone; }
@@ -248,6 +257,8 @@ public class DeployState implements ConfigDefinitionStore {
public Instant now() { return now; }
+ public Optional<TlsSecrets> tlsSecrets() { return properties.tlsSecrets(); }
+
public static class Builder {
private ApplicationPackage applicationPackage = MockApplicationPackage.createEmpty();
@@ -260,10 +271,12 @@ public class DeployState implements ConfigDefinitionStore {
private Optional<ConfigDefinitionRepo> configDefinitionRepo = Optional.empty();
private Optional<Model> previousModel = Optional.empty();
private Set<Rotation> rotations = new HashSet<>();
+ private Set<ContainerEndpoint> endpoints = Set.of();
private Collection<MlModelImporter> modelImporters = Collections.emptyList();
private Zone zone = Zone.defaultZone();
private Instant now = Instant.now();
private Version wantedNodeVespaVersion = Vtag.currentVersion;
+ private Optional<TlsSecrets> tlsSecrets = Optional.empty();
public Builder applicationPackage(ApplicationPackage applicationPackage) {
this.applicationPackage = applicationPackage;
@@ -315,6 +328,11 @@ public class DeployState implements ConfigDefinitionStore {
return this;
}
+ public Builder endpoints(Set<ContainerEndpoint> endpoints) {
+ this.endpoints = endpoints;
+ return this;
+ }
+
public Builder modelImporters(Collection<MlModelImporter> modelImporters) {
this.modelImporters = modelImporters;
return this;
@@ -356,6 +374,7 @@ public class DeployState implements ConfigDefinitionStore {
configDefinitionRepo,
previousModel,
rotations,
+ endpoints,
modelImporters,
zone,
queryProfiles,
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 0a54dd6790d..d974db73547 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -3,7 +3,9 @@ package com.yahoo.config.model.deploy;
import com.google.common.collect.ImmutableList;
import com.yahoo.config.model.api.ConfigServerSpec;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.ModelContext;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.Rotation;
@@ -12,6 +14,7 @@ import com.yahoo.config.provision.Zone;
import java.net.URI;
import java.util.Collections;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
/**
@@ -31,12 +34,14 @@ public class TestProperties implements ModelContext.Properties {
private boolean hostedVespa = false;
private Zone zone;
private Set<Rotation> rotations;
+ private Set<ContainerEndpoint> endpoints = Collections.emptySet();
private boolean isBootstrap = false;
private boolean isFirstTimeDeployment = false;
private boolean useDedicatedNodeForLogserver = false;
private boolean useFdispatchByDefault = true;
private boolean dispatchWithProtobuf = true;
private boolean useAdaptiveDispatch = false;
+ private Optional<TlsSecrets> tlsSecrets = Optional.empty();
@Override public boolean multitenant() { return multitenant; }
@@ -48,12 +53,15 @@ public class TestProperties implements ModelContext.Properties {
@Override public boolean hostedVespa() { return hostedVespa; }
@Override public Zone zone() { return zone; }
@Override public Set<Rotation> rotations() { return rotations; }
+ @Override public Set<ContainerEndpoint> endpoints() { return endpoints; }
+
@Override public boolean isBootstrap() { return isBootstrap; }
@Override public boolean isFirstTimeDeployment() { return isFirstTimeDeployment; }
@Override public boolean useAdaptiveDispatch() { return useAdaptiveDispatch; }
@Override public boolean useDedicatedNodeForLogserver() { return useDedicatedNodeForLogserver; }
@Override public boolean useFdispatchByDefault() { return useFdispatchByDefault; }
@Override public boolean dispatchWithProtobuf() { return dispatchWithProtobuf; }
+ @Override public Optional<TlsSecrets> tlsSecrets() { return tlsSecrets; }
public TestProperties setApplicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
@@ -86,6 +94,11 @@ public class TestProperties implements ModelContext.Properties {
}
+ public TestProperties setTlsSecrets(Optional<TlsSecrets> tlsSecrets) {
+ this.tlsSecrets = tlsSecrets;
+ return this;
+ }
+
public static class Spec implements ConfigServerSpec {
private final String hostName;
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Index.java b/config-model/src/main/java/com/yahoo/searchdefinition/Index.java
index d7e9e0da081..0ea3f5c24a3 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/Index.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/Index.java
@@ -56,8 +56,8 @@ public class Index implements Cloneable, Serializable {
/** The boolean index definition, if set */
private BooleanIndexDefinition boolIndex;
- // TODO: Remove when experimental posting list format is made default
- private boolean experimentalPostingListFormat = false;
+ /** Whether the posting lists of this index field should have interleaved features (num occs, field length) in document id stream. */
+ private boolean interleavedFeatures = false;
public Index(String name) {
this(name, false);
@@ -184,12 +184,12 @@ public class Index implements Cloneable, Serializable {
boolIndex = def;
}
- public void setExperimentalPostingListFormat(boolean value) {
- experimentalPostingListFormat = value;
+ public void setInterleavedFeatures(boolean value) {
+ interleavedFeatures = value;
}
- public boolean useExperimentalPostingListFormat() {
- return experimentalPostingListFormat;
+ public boolean useInterleavedFeatures() {
+ return interleavedFeatures;
}
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexSchema.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexSchema.java
index 6f6e97a0876..60b8ee78c7b 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexSchema.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexSchema.java
@@ -114,7 +114,7 @@ public class IndexSchema extends Derived implements IndexschemaConfig.Producer {
.prefix(f.hasPrefix())
.phrases(f.hasPhrases())
.positions(f.hasPositions())
- .experimentalpostinglistformat(f.useExperimentalPostingListFormat());
+ .interleavedfeatures(f.useInterleavedFeatures());
if (!f.getCollectionType().equals("SINGLE")) {
ifB.collectiontype(IndexschemaConfig.Indexfield.Collectiontype.Enum.valueOf(f.getCollectionType()));
}
@@ -175,8 +175,8 @@ public class IndexSchema extends Derived implements IndexschemaConfig.Producer {
private boolean phrases = false; // TODO dead, but keep a while to ensure config compatibility?
private boolean positions = true;// TODO dead, but keep a while to ensure config compatibility?
private BooleanIndexDefinition boolIndex = null;
- // TODO: Remove when experimental posting list format is made default
- private boolean experimentalPostingListFormat = false;
+ // Whether the posting lists of this index field should have interleaved features (num occs, field length) in document id stream.
+ private boolean interleavedFeatures = false;
public IndexField(String name, Index.Type type, DataType sdFieldType) {
this.name = name;
@@ -186,7 +186,7 @@ public class IndexSchema extends Derived implements IndexschemaConfig.Producer {
public void setIndexSettings(com.yahoo.searchdefinition.Index index) {
if (type.equals(Index.Type.TEXT)) {
prefix = index.isPrefix();
- experimentalPostingListFormat = index.useExperimentalPostingListFormat();
+ interleavedFeatures = index.useInterleavedFeatures();
}
sdType = index.getType();
boolIndex = index.getBooleanIndexDefiniton();
@@ -209,7 +209,7 @@ public class IndexSchema extends Derived implements IndexschemaConfig.Producer {
public boolean hasPrefix() { return prefix; }
public boolean hasPhrases() { return phrases; }
public boolean hasPositions() { return positions; }
- public boolean useExperimentalPostingListFormat() { return experimentalPostingListFormat; }
+ public boolean useInterleavedFeatures() { return interleavedFeatures; }
public BooleanIndexDefinition getBooleanIndexDefinition() {
return boolIndex;
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/IndexOperation.java b/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/IndexOperation.java
index 459bb247e5f..39f543c7db3 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/IndexOperation.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/fieldoperation/IndexOperation.java
@@ -29,8 +29,7 @@ public class IndexOperation implements FieldOperation {
private OptionalLong lowerBound = OptionalLong.empty();
private OptionalLong upperBound = OptionalLong.empty();
private OptionalDouble densePostingListThreshold = OptionalDouble.empty();
- // TODO: Remove when experimental posting list format is made default
- private Optional<Boolean> experimentalPostingListFormat = Optional.empty();
+ private Optional<Boolean> enableBm25 = Optional.empty();
public String getIndexName() {
return indexName;
@@ -89,8 +88,8 @@ public class IndexOperation implements FieldOperation {
index.setBooleanIndexDefiniton(
new BooleanIndexDefinition(arity, lowerBound, upperBound, densePostingListThreshold));
}
- if (experimentalPostingListFormat.isPresent()) {
- index.setExperimentalPostingListFormat(experimentalPostingListFormat.get());
+ if (enableBm25.isPresent()) {
+ index.setInterleavedFeatures(enableBm25.get());
}
}
@@ -117,8 +116,8 @@ public class IndexOperation implements FieldOperation {
public void setDensePostingListThreshold(double densePostingListThreshold) {
this.densePostingListThreshold = OptionalDouble.of(densePostingListThreshold);
}
- public void setExperimentalPostingListFormat(boolean value) {
- experimentalPostingListFormat = Optional.of(value);
+ public void setEnableBm25(boolean value) {
+ enableBm25 = Optional.of(value);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
index 9e0bbc395df..0dde5c99d4a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
@@ -56,7 +56,8 @@ public class HostSystem extends AbstractConfigProducer<Host> {
}
if (! hostname.contains(".")) {
deployLogger.log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " +
- "since it is not a canonical hostname");
+ "since it is not a canonical hostname." +
+ "Disregard this warning when testing in a Docker container.");
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
index 4a187c9b9bf..f36cc3b195c 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
@@ -161,18 +161,19 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
this.applicationPackage = deployState.getApplicationPackage();
root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this);
- HostSystem hostSystem = root.getHostSystem();
+ createGlobalRankProfiles(deployState.getDeployLogger(), deployState.getImportedModels(),
+ deployState.rankProfileRegistry(), deployState.getQueryProfiles());
+ this.rankProfileList = new RankProfileList(null, // null search -> global
+ rankingConstants,
+ AttributeFields.empty,
+ deployState.rankProfileRegistry(),
+ deployState.getQueryProfiles().getRegistry(),
+ deployState.getImportedModels());
+
- if (complete) { // create a a completed, frozen model
- createGlobalRankProfiles(deployState.getDeployLogger(), deployState.getImportedModels(),
- deployState.rankProfileRegistry(), deployState.getQueryProfiles());
- this.rankProfileList = new RankProfileList(null, // null search -> global
- rankingConstants,
- AttributeFields.empty,
- deployState.rankProfileRegistry(),
- deployState.getQueryProfiles().getRegistry(),
- deployState.getImportedModels());
+ HostSystem hostSystem = root.getHostSystem();
+ if (complete) { // create a a completed, frozen model
configModelRepo.readConfigModels(deployState, this, builder, root, configModelRegistry);
addServiceClusters(deployState, builder);
setupRouting(deployState);
@@ -189,7 +190,6 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
else { // create a model with no services instantiated and the given file distributor
this.allocatedHosts = AllocatedHosts.withHosts(hostSystem.getHostSpecs());
this.fileDistributor = fileDistributor;
- this.rankProfileList = RankProfileList.empty;
}
}
@@ -621,7 +621,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
}
/**
- * @return this root's model repository
+ * Returns this root's model repository
*/
public ConfigModelRepo configModelRepo() {
return configModelRepo;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
index af6400023cc..ca5c188ddb1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
@@ -20,6 +20,7 @@ import com.yahoo.config.model.api.ValidationParameters;
import com.yahoo.config.model.application.provider.ApplicationPackageXmlFilesValidator;
import com.yahoo.config.model.builder.xml.ConfigModelBuilder;
import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.provision.TransientException;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.VespaVersion;
import com.yahoo.vespa.model.application.validation.Validation;
@@ -47,7 +48,7 @@ public class VespaModelFactory implements ModelFactory {
private final Clock clock;
private final Version version;
- /** Creates a factory for vespa models for this version of the source */
+ /** Creates a factory for Vespa models for this version of the source */
@Inject
public VespaModelFactory(ComponentRegistry<ConfigModelPlugin> pluginRegistry,
ComponentRegistry<MlModelImporter> modelImporters,
@@ -62,6 +63,7 @@ public class VespaModelFactory implements ModelFactory {
this.configModelRegistry = new MapConfigModelRegistry(modelBuilders);
this.modelImporters = modelImporters.allComponents();
this.zone = zone;
+
this.clock = Clock.systemUTC();
}
@@ -139,6 +141,7 @@ public class VespaModelFactory implements ModelFactory {
.vespaVersion(version())
.modelHostProvisioner(createHostProvisioner(modelContext))
.rotations(modelContext.properties().rotations())
+ .endpoints(modelContext.properties().endpoints())
.modelImporters(modelImporters)
.zone(zone)
.now(clock.instant())
@@ -165,7 +168,7 @@ public class VespaModelFactory implements ModelFactory {
private List<ConfigChangeAction> validateModel(VespaModel model, DeployState deployState, ValidationParameters validationParameters) {
try {
return Validation.validate(model, validationParameters, deployState);
- } catch (IllegalArgumentException e) {
+ } catch (IllegalArgumentException | TransientException e) {
rethrowUnlessIgnoreErrors(e, validationParameters.ignoreValidationErrors());
} catch (Exception e) {
throw new RuntimeException(e);
@@ -173,7 +176,7 @@ public class VespaModelFactory implements ModelFactory {
return new ArrayList<>();
}
- private static void rethrowUnlessIgnoreErrors(IllegalArgumentException e, boolean ignoreValidationErrors) {
+ private static void rethrowUnlessIgnoreErrors(RuntimeException e, boolean ignoreValidationErrors) {
if (!ignoreValidationErrors) {
throw e;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java
index 29d1b557c49..b2ab7e19f85 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java
@@ -66,7 +66,7 @@ class ConsumersConfigGenerator {
return original != null ? newMetric.addDimensionsFrom(original) : newMetric;
}
- private static Consumer.Builder toConsumerBuilder(MetricsConsumer consumer) {
+ static Consumer.Builder toConsumerBuilder(MetricsConsumer consumer) {
Consumer.Builder builder = new Consumer.Builder().name(consumer.getId());
consumer.getMetrics().values().forEach(metric -> builder.metric(toConsumerMetricBuilder(metric)));
return builder;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
index 5a41696c6f2..f290911c6bd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
@@ -41,12 +41,14 @@ import java.util.logging.Logger;
import static com.yahoo.vespa.model.admin.metricsproxy.ConsumersConfigGenerator.addMetrics;
import static com.yahoo.vespa.model.admin.metricsproxy.ConsumersConfigGenerator.generateConsumers;
+import static com.yahoo.vespa.model.admin.metricsproxy.ConsumersConfigGenerator.toConsumerBuilder;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.APPLICATION;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.APPLICATION_ID;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.INSTANCE;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.LEGACY_APPLICATION;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.TENANT;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.ZONE;
+import static com.yahoo.vespa.model.admin.monitoring.DefaultPublicConsumer.getDefaultPublicConsumer;
import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.getVespaMetricsConsumer;
import static com.yahoo.vespa.model.admin.monitoring.MetricSet.emptyMetricSet;
import static com.yahoo.vespa.model.container.xml.BundleMapper.JarSuffix.JAR_WITH_DEPS;
@@ -128,8 +130,10 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC
@Override
public void getConfig(ConsumersConfig.Builder builder) {
- var amendedDefaultConsumer = addMetrics(getVespaMetricsConsumer(), getAdditionalDefaultMetrics().getMetrics());
- builder.consumer.addAll(generateConsumers(amendedDefaultConsumer, getUserMetricsConsumers()));
+ var amendedVespaConsumer = addMetrics(getVespaMetricsConsumer(), getAdditionalDefaultMetrics().getMetrics());
+ builder.consumer.addAll(generateConsumers(amendedVespaConsumer, getUserMetricsConsumers()));
+
+ if (! isHostedVespa()) builder.consumer.add(toConsumerBuilder(getDefaultPublicConsumer()));
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultPublicConsumer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultPublicConsumer.java
new file mode 100644
index 00000000000..fbd1c7455dd
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultPublicConsumer.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+ */
+
+package com.yahoo.vespa.model.admin.monitoring;
+
+import ai.vespa.metricsproxy.http.GenericMetricsHandler;
+import com.google.common.collect.ImmutableList;
+
+import static com.yahoo.vespa.model.admin.monitoring.DefaultPublicMetrics.defaultPublicMetricSet;
+import static com.yahoo.vespa.model.admin.monitoring.DefaultVespaMetrics.defaultVespaMetricSet;
+import static com.yahoo.vespa.model.admin.monitoring.SystemMetrics.systemMetricSet;
+import static java.util.Collections.emptyList;
+
+/**
+ * @author gjoranv
+ */
+public class DefaultPublicConsumer {
+
+ public static final String DEFAULT_PUBLIC_CONSUMER_ID = GenericMetricsHandler.DEFAULT_PUBLIC_CONSUMER_ID.id;
+
+ private static final MetricSet publicConsumerMetrics = new MetricSet("public-consumer-metrics",
+ emptyList(),
+ ImmutableList.of(defaultPublicMetricSet,
+ defaultVespaMetricSet,
+ systemMetricSet));
+
+ public static MetricsConsumer getDefaultPublicConsumer() {
+ return new MetricsConsumer(DEFAULT_PUBLIC_CONSUMER_ID, publicConsumerMetrics);
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultPublicMetrics.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultPublicMetrics.java
new file mode 100644
index 00000000000..cec7f796302
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultPublicMetrics.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+ */
+
+package com.yahoo.vespa.model.admin.monitoring;
+
+
+import com.google.common.collect.ImmutableSet;
+
+import java.util.LinkedHashSet;
+import java.util.Set;
+
+import static java.util.Collections.emptyList;
+
+/**
+ * TODO: Add content metrics.
+ *
+ * @author gjoranv
+ */
+public class DefaultPublicMetrics {
+
+ public static MetricSet defaultPublicMetricSet = createMetricSet();
+
+ private static MetricSet createMetricSet() {
+ return new MetricSet("public",
+ getAllMetrics(),
+ emptyList());
+ }
+
+ private static Set<Metric> getAllMetrics() {
+ return ImmutableSet.<Metric>builder()
+ .addAll(getContentMetrics())
+ .addAll(getStorageMetrics())
+ .addAll(getContainerMetrics())
+ .addAll(getQrserverMetrics())
+ .build();
+ }
+
+ private static Set<Metric> getContainerMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ metrics.add(new Metric("http.status.1xx.rate"));
+ metrics.add(new Metric("http.status.2xx.rate"));
+ metrics.add(new Metric("http.status.3xx.rate"));
+ metrics.add(new Metric("http.status.4xx.rate"));
+ metrics.add(new Metric("http.status.5xx.rate"));
+ metrics.add(new Metric("jdisc.gc.ms.average"));
+ metrics.add(new Metric("mem.heap.free.average"));
+
+ return metrics;
+ }
+
+ private static Set<Metric> getQrserverMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ metrics.add(new Metric("queries.rate"));
+ metrics.add(new Metric("query_latency.average"));
+ metrics.add(new Metric("query_latency.95percentile"));
+ metrics.add(new Metric("query_latency.99percentile"));
+ metrics.add(new Metric("hits_per_query.average"));
+ metrics.add(new Metric("totalhits_per_query.average"));
+ metrics.add(new Metric("degraded_queries.rate"));
+ metrics.add(new Metric("failed_queries.rate"));
+ metrics.add(new Metric("serverActiveThreads.average"));
+
+ return metrics;
+ }
+
+ private static Set<Metric> getContentMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ metrics.add(new Metric("content.proton.docsum.docs.rate"));
+ metrics.add(new Metric("content.proton.docsum.latency.average"));
+
+ metrics.add(new Metric("content.proton.transport.query.count.rate"));
+ metrics.add(new Metric("content.proton.transport.query.latency.average"));
+
+ metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
+ metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
+ metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
+ metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
+ metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
+
+ metrics.add(new Metric("content.proton.documentdb.job.total.average"));
+ metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
+ metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
+ metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
+ metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
+
+ metrics.add(new Metric("content.proton.resource_usage.disk.average"));
+ metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
+ metrics.add(new Metric("content.proton.resource_usage.memory.average"));
+
+ metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
+ metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
+ metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
+
+ metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
+ metrics.add(new Metric("content.proton.documentdb.matching.docs_reranked.rate"));
+ metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
+ metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
+
+ return metrics;
+ }
+
+ private static Set<Metric> getStorageMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate"));
+ metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate"));
+ metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate"));
+ metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate"));
+ metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate"));
+
+ return metrics;
+ }
+
+ private DefaultPublicMetrics() { }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/SystemMetrics.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/SystemMetrics.java
index e857ce6da33..cce2891df76 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/SystemMetrics.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/SystemMetrics.java
@@ -8,10 +8,11 @@ import java.util.Set;
/**
* @author gjoranv
*/
-@SuppressWarnings("UnusedDeclaration") // Used by model amenders
public class SystemMetrics {
public static final String CPU_UTIL = "cpu.util";
public static final String CPU_SYS_UTIL = "cpu.sys.util";
+ public static final String CPU_THROTTLED_TIME = "cpu.throttled_time.rate";
+ public static final String CPU_THROTTLED_CPU_TIME = "cpu.throttled_cpu_time.rate";
public static final String CPU_VCPUS = "cpu.vcpus";
public static final String DISK_LIMIT = "disk.limit";
public static final String DISK_USED = "disk.used";
@@ -28,6 +29,8 @@ public class SystemMetrics {
Set<Metric> dockerNodeMetrics =
ImmutableSet.of(new Metric(CPU_UTIL),
new Metric(CPU_SYS_UTIL),
+ new Metric(CPU_THROTTLED_TIME),
+ new Metric(CPU_THROTTLED_CPU_TIME),
new Metric(CPU_VCPUS),
new Metric(DISK_LIMIT),
new Metric(DISK_USED),
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 0749df8ddf2..ca29e348094 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -13,7 +13,6 @@ import static java.util.Collections.singleton;
*
* @author gjoranv
*/
-@SuppressWarnings("UnusedDeclaration") // Used by model amenders
public class VespaMetricSet {
public static final MetricSet vespaMetricSet = new MetricSet("vespa",
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricsConsumer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricsConsumer.java
index 81e9cfcd6a0..9f3bfdc8ae8 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricsConsumer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricsConsumer.java
@@ -10,7 +10,7 @@ import static com.yahoo.vespa.model.admin.monitoring.VespaMetricSet.vespaMetricS
import static java.util.Collections.emptyList;
/**
- * This class sets up the 'Vespa' metrics consumer.
+ * This class sets up the 'Vespa' metrics consumer, which is mainly used for Yamas in hosted Vespa.
*
* @author trygve
* @author gjoranv
@@ -19,14 +19,14 @@ public class VespaMetricsConsumer {
public static final String VESPA_CONSUMER_ID = VespaMetrics.VESPA_CONSUMER_ID.id;
- private static final MetricSet defaultConsumerMetrics = new MetricSet("vespa-consumer-metrics",
- emptyList(),
- ImmutableList.of(vespaMetricSet,
- systemMetricSet,
- networkMetricSet));
+ private static final MetricSet vespaConsumerMetrics = new MetricSet("vespa-consumer-metrics",
+ emptyList(),
+ ImmutableList.of(vespaMetricSet,
+ systemMetricSet,
+ networkMetricSet));
public static MetricsConsumer getVespaMetricsConsumer() {
- return new MetricsConsumer(VESPA_CONSUMER_ID, defaultConsumerMetrics);
+ return new MetricsConsumer(VESPA_CONSUMER_ID, vespaConsumerMetrics);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java
index 4638353f39f..694108d4bb1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java
@@ -7,6 +7,7 @@ import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
+import static com.yahoo.vespa.model.admin.monitoring.DefaultPublicMetrics.defaultPublicMetricSet;
import static com.yahoo.vespa.model.admin.monitoring.NetworkMetrics.networkMetricSet;
import static com.yahoo.vespa.model.admin.monitoring.SystemMetrics.systemMetricSet;
import static com.yahoo.vespa.model.admin.monitoring.DefaultVespaMetrics.defaultVespaMetricSet;
@@ -20,6 +21,7 @@ import static com.yahoo.vespa.model.admin.monitoring.VespaMetricSet.vespaMetricS
public class PredefinedMetricSets {
public static final Map<String, MetricSet> predefinedMetricSets = toMapById(
+ defaultPublicMetricSet,
defaultVespaMetricSet,
vespaMetricSet,
systemMetricSet,
@@ -28,8 +30,11 @@ public class PredefinedMetricSets {
private static Map<String, MetricSet> toMapById(MetricSet... metricSets) {
Map<String, MetricSet> availableMetricSets = new LinkedHashMap<>();
- for (MetricSet metricSet : metricSets)
- availableMetricSets.put(metricSet.getId(), metricSet);
+ for (MetricSet metricSet : metricSets) {
+ var existing = availableMetricSets.put(metricSet.getId(), metricSet);
+ if (existing != null)
+ throw new IllegalArgumentException("There are two predefined metric sets with id " + existing.getId());
+ }
return Collections.unmodifiableMap(availableMetricSets);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java
index 0ad0d57c1c3..b13fa4917e4 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java
@@ -14,6 +14,7 @@ import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
+import static com.yahoo.vespa.model.admin.monitoring.DefaultPublicConsumer.DEFAULT_PUBLIC_CONSUMER_ID;
import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.VESPA_CONSUMER_ID;
import static com.yahoo.vespa.model.admin.monitoring.DefaultVespaMetrics.defaultVespaMetricSet;
import static com.yahoo.vespa.model.admin.monitoring.SystemMetrics.systemMetricSet;
@@ -77,6 +78,10 @@ public class MetricsBuilder {
private void throwIfIllegalConsumerId(Metrics metrics, String consumerId) {
if (consumerId.equalsIgnoreCase(VESPA_CONSUMER_ID) && applicationType != ApplicationType.HOSTED_INFRASTRUCTURE)
throw new IllegalArgumentException("'Vespa' is not allowed as metrics consumer id (case is ignored.)");
+
+ if (consumerId.equalsIgnoreCase(DEFAULT_PUBLIC_CONSUMER_ID))
+ throw new IllegalArgumentException("'" + DEFAULT_PUBLIC_CONSUMER_ID + "' is not allowed as metrics consumer id (case is ignored.)");
+
if (metrics.hasConsumerIgnoreCase(consumerId))
throw new IllegalArgumentException("'" + consumerId + "' is used as id for two metrics consumers (case is ignored.)");
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/TlsSecretsValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/TlsSecretsValidator.java
new file mode 100644
index 00000000000..1018099cf05
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/TlsSecretsValidator.java
@@ -0,0 +1,17 @@
+package com.yahoo.vespa.model.application.validation;
+
+import com.yahoo.config.model.api.TlsSecrets;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.provision.CertificateNotReadyException;
+import com.yahoo.vespa.model.VespaModel;
+
+public class TlsSecretsValidator extends Validator {
+
+ /** This check is delayed until validation to allow node provisioning to complete while we are waiting for cert */
+ @Override
+ public void validate(VespaModel model, DeployState deployState) {
+ if (deployState.tlsSecrets().isPresent() && deployState.tlsSecrets().get() == TlsSecrets.MISSING) {
+ throw new CertificateNotReadyException("TLS enabled, but could not retrieve certificate yet");
+ }
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
index e44acf61466..042c7cc867c 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
@@ -56,6 +56,7 @@ public class Validation {
new DeploymentFileValidator().validate(model, deployState);
new RankingConstantsValidator().validate(model, deployState);
new SecretStoreValidator().validate(model, deployState);
+ new TlsSecretsValidator().validate(model, deployState);
List<ConfigChangeAction> result = Collections.emptyList();
if (deployState.getProperties().isFirstTimeDeployment()) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java
index b381168838f..48f7fa3c1a2 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainer.java
@@ -1,8 +1,15 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.model.api.container.ContainerServiceType;
import com.yahoo.config.model.producer.AbstractConfigProducer;
+import com.yahoo.vespa.model.container.http.ConnectorFactory;
+import com.yahoo.vespa.model.container.http.Http;
+import com.yahoo.vespa.model.container.http.JettyHttpServer;
+import com.yahoo.vespa.model.container.http.ssl.ConfiguredDirectSslProvider;
+
+import java.util.Optional;
/**
* A container that is typically used by container clusters set up from the user application.
@@ -15,14 +22,23 @@ public final class ApplicationContainer extends Container {
private final boolean isHostedVespa;
-
- public ApplicationContainer(AbstractConfigProducer parent, String name, int index, boolean isHostedVespa) {
- this(parent, name, false, index, isHostedVespa);
+ public ApplicationContainer(AbstractConfigProducer parent, String name, int index, boolean isHostedVespa, Optional<TlsSecrets> tlsSecrets) {
+ this(parent, name, false, index, isHostedVespa, tlsSecrets);
}
- public ApplicationContainer(AbstractConfigProducer parent, String name, boolean retired, int index, boolean isHostedVespa) {
+ public ApplicationContainer(AbstractConfigProducer parent, String name, boolean retired, int index, boolean isHostedVespa, Optional<TlsSecrets> tlsSecrets) {
super(parent, name, retired, index);
this.isHostedVespa = isHostedVespa;
+
+ if (isHostedVespa && tlsSecrets.isPresent()) {
+ String connectorName = "tls4443";
+
+ JettyHttpServer server = Optional.ofNullable(getHttp())
+ .map(Http::getHttpServer)
+ .orElse(getDefaultHttpServer());
+ server.addConnector(new ConnectorFactory(connectorName, 4443,
+ new ConfiguredDirectSslProvider(server.getComponentId().getName(), tlsSecrets.get().key(), tlsSecrets.get().certificate(), null, null)));
+ }
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
index 9cbaa5f91af..e9db64f8e4b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.model.container;
import com.yahoo.component.ComponentId;
import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.ComponentInfo;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.container.BundlesConfig;
@@ -22,6 +23,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.Map;
+import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -45,13 +47,18 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
private ContainerModelEvaluation modelEvaluation;
+ private Optional<TlsSecrets> tlsSecrets;
+
public ApplicationContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) {
super(parent, subId, name, deployState);
+
+ this.tlsSecrets = deployState.tlsSecrets();
restApiGroup = new ConfigProducerGroup<>(this, "rest-api");
servletGroup = new ConfigProducerGroup<>(this, "servlet");
addSimpleComponent(DEFAULT_LINGUISTICS_PROVIDER);
addSimpleComponent("com.yahoo.container.jdisc.SecretStoreProvider");
+ addSimpleComponent("com.yahoo.container.jdisc.DeprecatedSecretStoreProvider");
addSimpleComponent("com.yahoo.container.jdisc.CertificateStoreProvider");
}
@@ -139,4 +146,8 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
if (modelEvaluation != null) modelEvaluation.getConfig(builder);
}
+ public Optional<TlsSecrets> getTlsSecrets() {
+ return tlsSecrets;
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredDirectSslProvider.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredDirectSslProvider.java
new file mode 100644
index 00000000000..28dba3331d3
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredDirectSslProvider.java
@@ -0,0 +1,66 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.container.http.ssl;
+
+import com.yahoo.component.ComponentId;
+import com.yahoo.container.bundle.BundleInstantiationSpecification;
+import com.yahoo.jdisc.http.ConnectorConfig;
+import com.yahoo.jdisc.http.ssl.impl.ConfiguredSslContextFactoryProvider;
+import com.yahoo.osgi.provider.model.ComponentModel;
+import com.yahoo.vespa.model.container.component.SimpleComponent;
+
+import java.util.Optional;
+
+import static com.yahoo.component.ComponentSpecification.fromString;
+
+/**
+ * Configure SSL with PEM encoded certificate/key strings
+ *
+ * @author mortent
+ * @author andreer
+ */
+public class ConfiguredDirectSslProvider extends SimpleComponent implements ConnectorConfig.Producer {
+ public static final String COMPONENT_ID_PREFIX = "configured-ssl-provider@";
+ public static final String COMPONENT_CLASS = ConfiguredSslContextFactoryProvider.class.getName();
+ public static final String COMPONENT_BUNDLE = "jdisc_http_service";
+
+ private final String privateKey;
+ private final String certificate;
+ private final String caCertificatePath;
+ private final ConnectorConfig.Ssl.ClientAuth.Enum clientAuthentication;
+
+ public ConfiguredDirectSslProvider(String servername, String privateKey, String certificate, String caCertificatePath, String clientAuthentication) {
+ super(new ComponentModel(
+ new BundleInstantiationSpecification(new ComponentId(COMPONENT_ID_PREFIX+servername),
+ fromString(COMPONENT_CLASS),
+ fromString(COMPONENT_BUNDLE))));
+ this.privateKey = privateKey;
+ this.certificate = certificate;
+ this.caCertificatePath = caCertificatePath;
+ this.clientAuthentication = mapToConfigEnum(clientAuthentication);
+ }
+
+ @Override
+ public void getConfig(ConnectorConfig.Builder builder) {
+ builder.ssl.enabled(true);
+ builder.ssl.privateKey(privateKey);
+ builder.ssl.certificate(certificate);
+ builder.ssl.caCertificateFile(Optional.ofNullable(caCertificatePath).orElse(""));
+ builder.ssl.clientAuth(clientAuthentication);
+ }
+
+ public SimpleComponent getComponent() {
+ return new SimpleComponent(new ComponentModel(getComponentId().stringValue(), COMPONENT_CLASS, COMPONENT_BUNDLE));
+ }
+
+ private static ConnectorConfig.Ssl.ClientAuth.Enum mapToConfigEnum(String clientAuthValue) {
+ if ("disabled".equals(clientAuthValue)) {
+ return ConnectorConfig.Ssl.ClientAuth.Enum.DISABLED;
+ } else if ("want".equals(clientAuthValue)) {
+ return ConnectorConfig.Ssl.ClientAuth.Enum.WANT_AUTH;
+ } else if ("need".equals(clientAuthValue)) {
+ return ConnectorConfig.Ssl.ClientAuth.Enum.NEED_AUTH;
+ } else {
+ return ConnectorConfig.Ssl.ClientAuth.Enum.DISABLED;
+ }
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredSslProvider.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredFilebasedSslProvider.java
index 3c36933c030..4f84a01ff94 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredSslProvider.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/ssl/ConfiguredFilebasedSslProvider.java
@@ -13,9 +13,11 @@ import java.util.Optional;
import static com.yahoo.component.ComponentSpecification.fromString;
/**
+ * Configure SSL using file references
+ *
* @author mortent
*/
-public class ConfiguredSslProvider extends SimpleComponent implements ConnectorConfig.Producer {
+public class ConfiguredFilebasedSslProvider extends SimpleComponent implements ConnectorConfig.Producer {
public static final String COMPONENT_ID_PREFIX = "configured-ssl-provider@";
public static final String COMPONENT_CLASS = ConfiguredSslContextFactoryProvider.class.getName();
public static final String COMPONENT_BUNDLE = "jdisc_http_service";
@@ -25,7 +27,7 @@ public class ConfiguredSslProvider extends SimpleComponent implements ConnectorC
private final String caCertificatePath;
private final ConnectorConfig.Ssl.ClientAuth.Enum clientAuthentication;
- public ConfiguredSslProvider(String servername, String privateKeyPath, String certificatePath, String caCertificatePath, String clientAuthentication) {
+ public ConfiguredFilebasedSslProvider(String servername, String privateKeyPath, String certificatePath, String caCertificatePath, String clientAuthentication) {
super(new ComponentModel(
new BundleInstantiationSpecification(new ComponentId(COMPONENT_ID_PREFIX+servername),
fromString(COMPONENT_CLASS),
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java
index 23865eb9bdd..1b457b1250a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/http/xml/JettyConnectorBuilder.java
@@ -9,7 +9,7 @@ import com.yahoo.vespa.model.builder.xml.dom.VespaDomBuilder;
import com.yahoo.vespa.model.container.component.SimpleComponent;
import com.yahoo.vespa.model.container.http.ConnectorFactory;
import com.yahoo.vespa.model.container.http.ssl.CustomSslProvider;
-import com.yahoo.vespa.model.container.http.ssl.ConfiguredSslProvider;
+import com.yahoo.vespa.model.container.http.ssl.ConfiguredFilebasedSslProvider;
import com.yahoo.vespa.model.container.http.ssl.DefaultSslProvider;
import org.w3c.dom.Element;
@@ -39,7 +39,7 @@ public class JettyConnectorBuilder extends VespaDomBuilder.DomConfigProducerBuil
String certificateFile = XML.getValue(XML.getChild(sslConfigurator, "certificate-file"));
Optional<String> caCertificateFile = XmlHelper.getOptionalChildValue(sslConfigurator, "ca-certificates-file");
Optional<String> clientAuthentication = XmlHelper.getOptionalChildValue(sslConfigurator, "client-authentication");
- return new ConfiguredSslProvider(
+ return new ConfiguredFilebasedSslProvider(
serverName,
privateKeyFile,
certificateFile,
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 642f882f3ed..57e0b969929 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -9,6 +9,7 @@ import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.model.ConfigModelContext;
import com.yahoo.config.model.api.ConfigServerSpec;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.application.provider.IncludeDirs;
import com.yahoo.config.model.builder.xml.ConfigModelBuilder;
import com.yahoo.config.model.builder.xml.ConfigModelId;
@@ -72,6 +73,7 @@ import org.w3c.dom.Node;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -212,13 +214,13 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
context.getDeployState().getProperties().athenzDnsSuffix(),
context.getDeployState().zone(),
deploymentSpec);
- addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), deploymentSpec);
+ addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), context.getDeployState().getEndpoints(), deploymentSpec);
});
}
- private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<Rotation> rotations, DeploymentSpec spec) {
+ private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<Rotation> rotations, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
- setRotations(container, rotations, spec.globalServiceId(), cluster.getName());
+ setRotations(container, rotations, endpoints, spec.globalServiceId(), cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
@@ -229,13 +231,30 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
declaredZone.active());
}
- private void setRotations(Container container, Set<Rotation> rotations, Optional<String> globalServiceId, String containerClusterName) {
+ private void setRotations(Container container,
+ Set<Rotation> rotations,
+ Set<ContainerEndpoint> endpoints,
+ Optional<String> globalServiceId,
+ String containerClusterName) {
+ final Set<String> rotationsProperty = new HashSet<>();
+ // Add the legacy rotations to the list of available rotations. Using the same test
+ // as was used before to mirror the old business logic for global-service-id.
if ( ! rotations.isEmpty() && globalServiceId.isPresent()) {
if (containerClusterName.equals(globalServiceId.get())) {
- container.setProp("rotations", rotations.stream().map(Rotation::getId).collect(Collectors.joining(",")));
+ rotations.stream().map(Rotation::getId).forEach(rotationsProperty::add);
}
}
+
+ // For ContainerEndpoints this is more straight-forward, just add all that are present
+ endpoints.stream()
+ .filter(endpoint -> endpoint.clusterId().equals(containerClusterName))
+ .flatMap(endpoint -> endpoint.names().stream())
+ .forEach(rotationsProperty::add);
+
+ // Build the comma delimited list of endpoints this container should be known as.
+ // Confusingly called 'rotations' for legacy reasons.
+ container.setProp("rotations", String.join(",", rotationsProperty));
}
private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) {
@@ -412,7 +431,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
}
private void addStandaloneNode(ApplicationContainerCluster cluster) {
- ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa());
+ ApplicationContainer container = new ApplicationContainer(cluster, "standalone", cluster.getContainers().size(), cluster.isHostedVespa(), cluster.getTlsSecrets());
cluster.addContainers(Collections.singleton(container));
}
@@ -478,7 +497,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
Element nodesElement = XML.getChild(containerElement, "nodes");
Element rotationsElement = XML.getChild(containerElement, "rotations");
if (nodesElement == null) { // default single node on localhost
- ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa());
+ ApplicationContainer node = new ApplicationContainer(cluster, "container.0", 0, cluster.isHostedVespa(), cluster.getTlsSecrets());
HostResource host = allocateSingleNodeHost(cluster, log, containerElement, context);
node.setHostResource(host);
node.initService(context.getDeployLogger());
@@ -667,7 +686,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
List<ApplicationContainer> nodes = new ArrayList<>();
for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
String id = "container." + entry.getValue().index();
- ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa());
+ ApplicationContainer container = new ApplicationContainer(cluster, id, entry.getValue().retired(), entry.getValue().index(), cluster.isHostedVespa(), cluster.getTlsSecrets());
container.setHostResource(entry.getKey());
container.initService(deployLogger);
nodes.add(container);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerServiceBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerServiceBuilder.java
index fd0797d6098..46271d3c0a2 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerServiceBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerServiceBuilder.java
@@ -22,7 +22,7 @@ public class ContainerServiceBuilder extends VespaDomBuilder.DomConfigProducerBu
@Override
protected ApplicationContainer doBuild(DeployState deployState, AbstractConfigProducer parent, Element nodeElem) {
- return new ApplicationContainer(parent, id, index, deployState.isHosted());
+ return new ApplicationContainer(parent, id, index, deployState.isHosted(), deployState.tlsSecrets());
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
index 74caf2d8026..8eda707be99 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
@@ -324,7 +324,7 @@ public class Content extends ConfigModel {
if (!processedHosts.contains(host)) {
String containerName = String.valueOf(searchNode.getDistributionKey());
ApplicationContainer docprocService = new ApplicationContainer(indexingCluster, containerName, index,
- modelContext.getDeployState().isHosted());
+ modelContext.getDeployState().isHosted(), modelContext.getDeployState().tlsSecrets());
index++;
docprocService.useDynamicPorts();
docprocService.setHostResource(host);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java b/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
index a60c523b9f5..b0fe2877386 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
@@ -62,7 +62,8 @@ public class Tuning extends AbstractConfigProducer implements PartitionsConfig.P
public enum IoType {
NORMAL("NORMAL"),
DIRECTIO("DIRECTIO"),
- MMAP("MMAP");
+ MMAP("MMAP"),
+ POPULATE("POPULATE");
public final String name;
@@ -183,6 +184,11 @@ public class Tuning extends AbstractConfigProducer implements PartitionsConfig.P
if (read != null) {
builder.indexing.read.io(ProtonConfig.Indexing.Read.Io.Enum.valueOf(read.name));
}
+ if (search != null) {
+ if (search.equals(IoType.POPULATE)) {
+ builder.search.mmap.options.add(ProtonConfig.Search.Mmap.Options.POPULATE);
+ }
+ }
}
}
public static class Warmup implements ProtonConfig.Producer {
@@ -242,7 +248,12 @@ public class Tuning extends AbstractConfigProducer implements PartitionsConfig.P
builder.write.io(ProtonConfig.Summary.Write.Io.Enum.valueOf(write.name));
}
if (read != null) {
- builder.read.io(ProtonConfig.Summary.Read.Io.Enum.valueOf(read.name));
+ if (read.equals(IoType.POPULATE)) {
+ builder.read.io(ProtonConfig.Summary.Read.Io.MMAP);
+ builder.read.mmap.options.add(ProtonConfig.Summary.Read.Mmap.Options.POPULATE);
+ } else {
+ builder.read.io(ProtonConfig.Summary.Read.Io.Enum.valueOf(read.name));
+ }
}
}
}
diff --git a/config-model/src/main/javacc/SDParser.jj b/config-model/src/main/javacc/SDParser.jj
index 571ad452b01..6dde12f0fac 100644
--- a/config-model/src/main/javacc/SDParser.jj
+++ b/config-model/src/main/javacc/SDParser.jj
@@ -331,7 +331,7 @@ TOKEN :
| < LOWERBOUND: "lower-bound" >
| < UPPERBOUND: "upper-bound" >
| < DENSEPOSTINGLISTTHRESHOLD: "dense-posting-list-threshold" >
-| < EXPERIMENTALPOSTINGLISTFORMAT: "experimental-posting-list-format" >
+| < ENABLE_BM25: "enable-bm25" >
| < SUMMARYFEATURES_SL: "summary-features" (" ")* ":" (~["}","\n"])* ("\n")? >
| < SUMMARYFEATURES_ML: "summary-features" (<SEARCHLIB_SKIP>)? "{" (~["}"])* "}" >
| < RANKFEATURES_SL: "rank-features" (" ")* ":" (~["}","\n"])* ("\n")? >
@@ -1782,7 +1782,7 @@ Object indexBody(IndexOperation index) :
| <LOWERBOUND> <COLON> num = consumeLong() { index.setLowerBound(num); }
| <UPPERBOUND> <COLON> num = consumeLong() { index.setUpperBound(num); }
| <DENSEPOSTINGLISTTHRESHOLD> <COLON> threshold = consumeFloat() { index.setDensePostingListThreshold(threshold); }
- | <EXPERIMENTALPOSTINGLISTFORMAT> { index.setExperimentalPostingListFormat(true); }
+ | <ENABLE_BM25> { index.setEnableBm25(true); }
)
{ return null; }
}
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index 0686708a8a1..15ebd03d9e2 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -319,7 +319,7 @@ Tuning = element tuning {
element io {
element write { TuningIoOptionsLight }? &
element read { TuningIoOptionsLight }? &
- element search { TuningIoOptionsFull }?
+ element search { TuningIoOptionsSearch }?
}? &
element warmup {
element time { xsd:double { minInclusive = "0.0" } }? &
@@ -369,7 +369,8 @@ Tuning = element tuning {
}
TuningIoOptionsLight = string "normal" | string "directio"
-TuningIoOptionsFull = string "normal" | string "directio" | string "mmap" | string "mlock"
+TuningIoOptionsFull = string "normal" | string "directio" | string "mmap" | string "mlock" | string "populate"
+TuningIoOptionsSearch = string "mmap" | string "mlock" | string "populate"
TuningCompression = element compression {
element type { string "none" | string "lz4" | string "zstd" }? &
diff --git a/config-model/src/test/derived/indexschema/index-info.cfg b/config-model/src/test/derived/indexschema/index-info.cfg
index 46c2c3fc307..a83ec45c5e9 100644
--- a/config-model/src/test/derived/indexschema/index-info.cfg
+++ b/config-model/src/test/derived/indexschema/index-info.cfg
@@ -133,15 +133,15 @@ indexinfo[].command[].indexname "exact2"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "exact2"
indexinfo[].command[].command "exact @@"
-indexinfo[].command[].indexname "experimental"
+indexinfo[].command[].indexname "bm25_field"
indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "experimental"
+indexinfo[].command[].indexname "bm25_field"
indexinfo[].command[].command "lowercase"
-indexinfo[].command[].indexname "experimental"
+indexinfo[].command[].indexname "bm25_field"
indexinfo[].command[].command "stem:BEST"
-indexinfo[].command[].indexname "experimental"
+indexinfo[].command[].indexname "bm25_field"
indexinfo[].command[].command "normalize"
-indexinfo[].command[].indexname "experimental"
+indexinfo[].command[].indexname "bm25_field"
indexinfo[].command[].command "plain-tokens"
indexinfo[].command[].indexname "ia"
indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/indexschema/indexschema.cfg b/config-model/src/test/derived/indexschema/indexschema.cfg
index 612af087b0c..e8d064723da 100644
--- a/config-model/src/test/derived/indexschema/indexschema.cfg
+++ b/config-model/src/test/derived/indexschema/indexschema.cfg
@@ -5,7 +5,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sb"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -13,7 +13,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sc"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -21,7 +21,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sd"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -29,7 +29,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sf"
indexfield[].datatype STRING
indexfield[].collectiontype ARRAY
@@ -37,7 +37,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sg"
indexfield[].datatype STRING
indexfield[].collectiontype WEIGHTEDSET
@@ -45,7 +45,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sh"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -53,7 +53,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "si"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -61,7 +61,7 @@ indexfield[].prefix true
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "exact1"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -69,7 +69,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "exact2"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -77,15 +77,15 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
-indexfield[].name "experimental"
+indexfield[].interleavedfeatures false
+indexfield[].name "bm25_field"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat true
+indexfield[].interleavedfeatures true
indexfield[].name "nostemstring1"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -93,7 +93,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "nostemstring2"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -101,7 +101,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "nostemstring3"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -109,7 +109,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "nostemstring4"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -117,7 +117,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "fs9"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -125,7 +125,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sd_literal"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -133,7 +133,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sh.fragment"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -141,7 +141,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sh.host"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -149,7 +149,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sh.hostname"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -157,7 +157,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sh.path"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -165,7 +165,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sh.port"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -173,7 +173,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sh.query"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -181,7 +181,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "sh.scheme"
indexfield[].datatype STRING
indexfield[].collectiontype SINGLE
@@ -189,7 +189,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
fieldset[].name "fs9"
fieldset[].field[].name "se"
fieldset[].name "fs1"
diff --git a/config-model/src/test/derived/indexschema/indexschema.sd b/config-model/src/test/derived/indexschema/indexschema.sd
index 44956f30e9e..49f0f7dfca6 100644
--- a/config-model/src/test/derived/indexschema/indexschema.sd
+++ b/config-model/src/test/derived/indexschema/indexschema.sd
@@ -56,9 +56,9 @@ search indexschema {
exact
}
}
- field experimental type string {
+ field bm25_field type string {
indexing: index
- index: experimental-posting-list-format
+ index: enable-bm25
}
# integer fields
diff --git a/config-model/src/test/derived/indexschema/vsmfields.cfg b/config-model/src/test/derived/indexschema/vsmfields.cfg
index 30ed67f61b7..9dcffd30313 100644
--- a/config-model/src/test/derived/indexschema/vsmfields.cfg
+++ b/config-model/src/test/derived/indexschema/vsmfields.cfg
@@ -55,7 +55,7 @@ fieldspec[].searchmethod AUTOUTF8
fieldspec[].arg1 "exact"
fieldspec[].maxlength 1048576
fieldspec[].fieldtype INDEX
-fieldspec[].name "experimental"
+fieldspec[].name "bm25_field"
fieldspec[].searchmethod AUTOUTF8
fieldspec[].arg1 ""
fieldspec[].maxlength 1048576
@@ -138,8 +138,8 @@ documenttype[].index[].name "exact1"
documenttype[].index[].field[].name "exact1"
documenttype[].index[].name "exact2"
documenttype[].index[].field[].name "exact2"
-documenttype[].index[].name "experimental"
-documenttype[].index[].field[].name "experimental"
+documenttype[].index[].name "bm25_field"
+documenttype[].index[].field[].name "bm25_field"
documenttype[].index[].name "ia"
documenttype[].index[].field[].name "ia"
documenttype[].index[].name "ib"
diff --git a/config-model/src/test/derived/uri_array/indexschema.cfg b/config-model/src/test/derived/uri_array/indexschema.cfg
index 1a556daf558..1d0012b89be 100644
--- a/config-model/src/test/derived/uri_array/indexschema.cfg
+++ b/config-model/src/test/derived/uri_array/indexschema.cfg
@@ -5,7 +5,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.fragment"
indexfield[].datatype STRING
indexfield[].collectiontype ARRAY
@@ -13,7 +13,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.host"
indexfield[].datatype STRING
indexfield[].collectiontype ARRAY
@@ -21,7 +21,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.hostname"
indexfield[].datatype STRING
indexfield[].collectiontype ARRAY
@@ -29,7 +29,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.path"
indexfield[].datatype STRING
indexfield[].collectiontype ARRAY
@@ -37,7 +37,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.port"
indexfield[].datatype STRING
indexfield[].collectiontype ARRAY
@@ -45,7 +45,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.query"
indexfield[].datatype STRING
indexfield[].collectiontype ARRAY
@@ -53,7 +53,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.scheme"
indexfield[].datatype STRING
indexfield[].collectiontype ARRAY
@@ -61,4 +61,4 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
diff --git a/config-model/src/test/derived/uri_wset/indexschema.cfg b/config-model/src/test/derived/uri_wset/indexschema.cfg
index 7fe7f7a4941..897f7004d48 100644
--- a/config-model/src/test/derived/uri_wset/indexschema.cfg
+++ b/config-model/src/test/derived/uri_wset/indexschema.cfg
@@ -5,7 +5,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.fragment"
indexfield[].datatype STRING
indexfield[].collectiontype WEIGHTEDSET
@@ -13,7 +13,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.host"
indexfield[].datatype STRING
indexfield[].collectiontype WEIGHTEDSET
@@ -21,7 +21,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.hostname"
indexfield[].datatype STRING
indexfield[].collectiontype WEIGHTEDSET
@@ -29,7 +29,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.path"
indexfield[].datatype STRING
indexfield[].collectiontype WEIGHTEDSET
@@ -37,7 +37,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.port"
indexfield[].datatype STRING
indexfield[].collectiontype WEIGHTEDSET
@@ -45,7 +45,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.query"
indexfield[].datatype STRING
indexfield[].collectiontype WEIGHTEDSET
@@ -53,7 +53,7 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
indexfield[].name "my_uri.scheme"
indexfield[].datatype STRING
indexfield[].collectiontype WEIGHTEDSET
@@ -61,4 +61,4 @@ indexfield[].prefix false
indexfield[].phrases false
indexfield[].positions true
indexfield[].averageelementlen 512
-indexfield[].experimentalpostinglistformat false
+indexfield[].interleavedfeatures false
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/HostResourceTest.java b/config-model/src/test/java/com/yahoo/vespa/model/HostResourceTest.java
index 2d116dde472..3e9bb6d0615 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/HostResourceTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/HostResourceTest.java
@@ -9,12 +9,10 @@ import com.yahoo.config.provision.ClusterSpec;
import org.junit.Test;
import java.util.Arrays;
-import java.util.Collections;
import static com.yahoo.config.provision.ClusterSpec.Type.admin;
import static com.yahoo.config.provision.ClusterSpec.Type.container;
import static com.yahoo.config.provision.ClusterSpec.Type.content;
-import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;
@@ -92,7 +90,7 @@ public class HostResourceTest {
}
private static ClusterSpec clusterSpec(ClusterSpec.Type type, String id) {
- return ClusterSpec.from(type, ClusterSpec.Id.from(id), ClusterSpec.Group.from(0), Version.fromString("6.42"), false, Collections.emptySet());
+ return ClusterSpec.from(type, ClusterSpec.Id.from(id), ClusterSpec.Group.from(0), Version.fromString("6.42"), false);
}
private HostResource mockHostResource(MockRoot root) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java b/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
index aaec4426746..febccaca3a5 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
@@ -109,7 +109,7 @@ public class VespaModelFactoryTest {
ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.admin,
new ClusterSpec.Id(routingClusterName),
ClusterSpec.Group.from(0),
- Version.fromString("6.42"), false, Collections.emptySet()),
+ Version.fromString("6.42"), false),
0));
}
@@ -120,7 +120,7 @@ public class VespaModelFactoryTest {
ClusterMembership.from(ClusterSpec.from(ClusterSpec.Type.container,
new ClusterSpec.Id(routingClusterName),
ClusterSpec.Group.from(0),
- Version.fromString("6.42"), false, Collections.emptySet()),
+ Version.fromString("6.42"), false),
0)));
}
};
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
index ff38a184eec..48c3e9fdda9 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
@@ -25,14 +25,17 @@ import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.C
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.MY_APPLICATION;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.MY_INSTANCE;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.MY_TENANT;
+import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.hosted;
+import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.self_hosted;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.checkMetric;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.consumersConfigFromModel;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.consumersConfigFromXml;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getApplicationDimensionsConfig;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getCustomConsumer;
-import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getHostedModel;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getModel;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getQrStartConfig;
+import static com.yahoo.vespa.model.admin.monitoring.DefaultPublicConsumer.DEFAULT_PUBLIC_CONSUMER_ID;
+import static com.yahoo.vespa.model.admin.monitoring.DefaultPublicMetrics.defaultPublicMetricSet;
import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.VESPA_CONSUMER_ID;
import static com.yahoo.vespa.model.admin.monitoring.DefaultVespaMetrics.defaultVespaMetricSet;
import static com.yahoo.vespa.model.admin.monitoring.NetworkMetrics.networkMetricSet;
@@ -54,14 +57,14 @@ public class MetricsProxyContainerClusterTest {
private static int numVespaMetrics = vespaMetricSet.getMetrics().size();
private static int numSystemMetrics = systemMetricSet.getMetrics().size();
private static int numNetworkMetrics = networkMetricSet.getMetrics().size();
- private static int numMetricsForDefaultConsumer = numVespaMetrics + numSystemMetrics + numNetworkMetrics;
+ private static int numMetricsForVespaConsumer = numVespaMetrics + numSystemMetrics + numNetworkMetrics;
@Rule
public ExpectedException thrown = ExpectedException.none();
@Test
public void metrics_proxy_bundle_is_included_in_bundles_config() {
- VespaModel model = getModel(servicesWithAdminOnly());
+ VespaModel model = getModel(servicesWithAdminOnly(), self_hosted);
var builder = new BundlesConfig.Builder();
model.getConfig(builder, CLUSTER_CONFIG_ID);
BundlesConfig config = builder.build();
@@ -71,7 +74,7 @@ public class MetricsProxyContainerClusterTest {
@Test
public void cluster_is_prepared_so_that_application_metadata_config_is_produced() {
- VespaModel model = getModel(servicesWithAdminOnly());
+ VespaModel model = getModel(servicesWithAdminOnly(), self_hosted);
var builder = new ApplicationMetadataConfig.Builder();
model.getConfig(builder, CLUSTER_CONFIG_ID);
ApplicationMetadataConfig config = builder.build();
@@ -82,26 +85,44 @@ public class MetricsProxyContainerClusterTest {
@Test
public void verbose_gc_logging_is_disabled() {
- VespaModel model = getModel(servicesWithAdminOnly());
+ VespaModel model = getModel(servicesWithAdminOnly(), self_hosted);
QrStartConfig config = getQrStartConfig(model);
assertFalse(config.jvm().verbosegc());
}
+
+ @Test
+ public void default_public_consumer_is_set_up_for_self_hosted() {
+ ConsumersConfig config = consumersConfigFromXml(servicesWithAdminOnly(), self_hosted);
+ assertEquals(2, config.consumer().size());
+ assertEquals(config.consumer(1).name(), DEFAULT_PUBLIC_CONSUMER_ID);
+
+ int numMetricsForPublicDefaultConsumer = defaultPublicMetricSet.getMetrics().size() + numDefaultVespaMetrics + numSystemMetrics;
+ assertEquals(numMetricsForPublicDefaultConsumer, config.consumer(1).metric().size());
+ }
+
@Test
- public void default_consumer_is_always_present_and_has_all_vespa_metrics_and_all_system_metrics() {
- ConsumersConfig config = consumersConfigFromXml(servicesWithAdminOnly());
+ public void default_public_consumer_is_not_set_up_for_hosted() {
+ ConsumersConfig config = consumersConfigFromXml(servicesWithAdminOnly(), hosted);
+ assertEquals(1, config.consumer().size());
assertEquals(config.consumer(0).name(), VESPA_CONSUMER_ID);
- assertEquals(numMetricsForDefaultConsumer, config.consumer(0).metric().size());
}
@Test
- public void default_consumer_can_be_amended_via_admin_object() {
- VespaModel model = getModel(servicesWithAdminOnly());
+ public void vespa_consumer_is_always_present_and_has_all_vespa_metrics_and_all_system_metrics() {
+ ConsumersConfig config = consumersConfigFromXml(servicesWithAdminOnly(), self_hosted);
+ assertEquals(config.consumer(0).name(), VESPA_CONSUMER_ID);
+ assertEquals(numMetricsForVespaConsumer, config.consumer(0).metric().size());
+ }
+
+ @Test
+ public void vespa_consumer_can_be_amended_via_admin_object() {
+ VespaModel model = getModel(servicesWithAdminOnly(), self_hosted);
var additionalMetric = new Metric("additional-metric");
model.getAdmin().setAdditionalDefaultMetrics(new MetricSet("amender-metrics", singleton(additionalMetric)));
ConsumersConfig config = consumersConfigFromModel(model);
- assertEquals(numMetricsForDefaultConsumer + 1, config.consumer(0).metric().size());
+ assertEquals(numMetricsForVespaConsumer + 1, config.consumer(0).metric().size());
ConsumersConfig.Consumer vespaConsumer = config.consumer(0);
assertTrue("Did not contain additional metric", checkMetric(vespaConsumer, additionalMetric));
@@ -109,19 +130,28 @@ public class MetricsProxyContainerClusterTest {
@Test
public void vespa_is_a_reserved_consumer_id() {
+ assertReservedConsumerId("Vespa");
+ }
+
+ @Test
+ public void default_is_a_reserved_consumer_id() {
+ assertReservedConsumerId("default");
+ }
+
+ private void assertReservedConsumerId(String consumerId) {
String services = String.join("\n",
- "<services>",
- " <admin version='2.0'>",
- " <adminserver hostalias='node1'/>",
- " <metrics>",
- " <consumer id='vespa'/>",
- " </metrics>",
- " </admin>",
- "</services>"
+ "<services>",
+ " <admin version='2.0'>",
+ " <adminserver hostalias='node1'/>",
+ " <metrics>",
+ " <consumer id='" + consumerId + "'/>",
+ " </metrics>",
+ " </admin>",
+ "</services>"
);
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("'Vespa' is not allowed as metrics consumer id");
- consumersConfigFromXml(services);
+ thrown.expectMessage("'" + consumerId + "' is not allowed as metrics consumer id");
+ consumersConfigFromXml(services, self_hosted);
}
@Test
@@ -138,12 +168,13 @@ public class MetricsProxyContainerClusterTest {
" </admin>",
"</services>"
);
- ConsumersConfig config = consumersConfigFromXml(services);
+ VespaModel hostedModel = getModel(services, hosted);
+ ConsumersConfig config = consumersConfigFromModel(hostedModel);
assertEquals(1, config.consumer().size());
// All default metrics are retained
ConsumersConfig.Consumer vespaConsumer = config.consumer(0);
- assertEquals(numMetricsForDefaultConsumer + 1, vespaConsumer.metric().size());
+ assertEquals(numMetricsForVespaConsumer + 1, vespaConsumer.metric().size());
Metric customMetric1 = new Metric("custom.metric1");
assertTrue("Did not contain metric: " + customMetric1, checkMetric(vespaConsumer, customMetric1));
@@ -164,7 +195,7 @@ public class MetricsProxyContainerClusterTest {
);
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("'a' is used as id for two metrics consumers");
- consumersConfigFromXml(services);
+ consumersConfigFromXml(services, self_hosted);
}
@Test
@@ -216,7 +247,7 @@ public class MetricsProxyContainerClusterTest {
@Test
public void hosted_application_propagates_application_dimensions() {
- VespaModel hostedModel = getHostedModel(servicesWithAdminOnly());
+ VespaModel hostedModel = getModel(servicesWithAdminOnly(), hosted);
ApplicationDimensionsConfig config = getApplicationDimensionsConfig(hostedModel);
assertEquals(zoneString(Zone.defaultZone()), config.dimensions(AppDimensionNames.ZONE));
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
index f755871ac4b..5d3bcd58f3d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
@@ -11,7 +11,8 @@ import org.junit.Test;
import static com.yahoo.config.model.api.container.ContainerServiceType.METRICS_PROXY_CONTAINER;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.CLUSTER_CONFIG_ID;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.CONTAINER_CONFIG_ID;
-import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getHostedModel;
+import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.hosted;
+import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.self_hosted;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getModel;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getNodeDimensionsConfig;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getRpcConnectorConfig;
@@ -49,7 +50,7 @@ public class MetricsProxyContainerTest {
@Test
public void metrics_proxy_requires_less_memory_than_other_containers() {
- VespaModel model = getModel(servicesWithContent());
+ VespaModel model = getModel(servicesWithContent(), self_hosted);
MetricsProxyContainer container = (MetricsProxyContainer)model.id2producer().get(CONTAINER_CONFIG_ID);
assertThat(container.getStartupCommand(), containsString("-Xms32m"));
assertThat(container.getStartupCommand(), containsString("-Xmx512m"));
@@ -57,7 +58,7 @@ public class MetricsProxyContainerTest {
@Test
public void http_server_is_running_on_expected_port() {
- VespaModel model = getModel(servicesWithContent());
+ VespaModel model = getModel(servicesWithContent(), self_hosted);
MetricsProxyContainer container = (MetricsProxyContainer)model.id2producer().get(CONTAINER_CONFIG_ID);
assertEquals(19092, container.getSearchPort());
assertEquals(19092, container.getHealthPort());
@@ -69,7 +70,7 @@ public class MetricsProxyContainerTest {
@Test
public void metrics_rpc_server_is_running_on_expected_port() {
- VespaModel model = getModel(servicesWithContent());
+ VespaModel model = getModel(servicesWithContent(), self_hosted);
MetricsProxyContainer container = (MetricsProxyContainer)model.id2producer().get(CONTAINER_CONFIG_ID);
int offset = container.metricsRpcPortOffset();
@@ -85,7 +86,7 @@ public class MetricsProxyContainerTest {
@Test
public void admin_rpc_server_is_running() {
- VespaModel model = getModel(servicesWithContent());
+ VespaModel model = getModel(servicesWithContent(), self_hosted);
MetricsProxyContainer container = (MetricsProxyContainer)model.id2producer().get(CONTAINER_CONFIG_ID);
int offset = container.metricsRpcPortOffset() - 1;
@@ -99,7 +100,7 @@ public class MetricsProxyContainerTest {
@Test
public void hosted_application_propagates_node_dimensions() {
String services = servicesWithContent();
- VespaModel hostedModel = getHostedModel(services);
+ VespaModel hostedModel = getModel(services, hosted);
assertEquals(1, hostedModel.getHosts().size());
String configId = CLUSTER_CONFIG_ID + "/" + hostedModel.getHosts().iterator().next().getHostname();
NodeDimensionsConfig config = getNodeDimensionsConfig(hostedModel, configId);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
index 59b7110e96e..ad6a7de935b 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
@@ -16,6 +16,9 @@ import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.admin.monitoring.Metric;
import com.yahoo.vespa.model.test.VespaModelTester;
+import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.hosted;
+import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.self_hosted;
+import static com.yahoo.vespa.model.admin.monitoring.DefaultPublicConsumer.DEFAULT_PUBLIC_CONSUMER_ID;
import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.VESPA_CONSUMER_ID;
import static org.junit.Assert.assertEquals;
@@ -33,20 +36,17 @@ class MetricsProxyModelTester {
// Used for all configs that are produced by the container, not the cluster.
static final String CONTAINER_CONFIG_ID = CLUSTER_CONFIG_ID + "/localhost";
- static VespaModel getModel(String servicesXml) {
- var numberOfHosts = 1;
- var tester = new VespaModelTester();
- tester.addHosts(numberOfHosts);
- tester.setHosted(false);
- return tester.createModel(servicesXml, true);
+ enum TestMode {
+ self_hosted,
+ hosted
}
- static VespaModel getHostedModel(String servicesXml) {
- var numberOfHosts = 2;
+ static VespaModel getModel(String servicesXml, TestMode testMode) {
+ var numberOfHosts = testMode == hosted ? 2 : 1;
var tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
- tester.setHosted(true);
- tester.setApplicationId(MY_TENANT, MY_APPLICATION, MY_INSTANCE);
+ tester.setHosted(testMode == hosted);
+ if (testMode == hosted) tester.setApplicationId(MY_TENANT, MY_APPLICATION, MY_INSTANCE);
return tester.createModel(servicesXml, true);
}
@@ -59,17 +59,16 @@ class MetricsProxyModelTester {
}
static ConsumersConfig.Consumer getCustomConsumer(String servicesXml) {
- ConsumersConfig config = consumersConfigFromXml(servicesXml);
- assertEquals(2, config.consumer().size());
+ ConsumersConfig config = consumersConfigFromXml(servicesXml, self_hosted);
for (ConsumersConfig.Consumer consumer : config.consumer()) {
- if (! consumer.name().equals(VESPA_CONSUMER_ID))
+ if (! consumer.name().equals(VESPA_CONSUMER_ID) && ! consumer.name().equals(DEFAULT_PUBLIC_CONSUMER_ID))
return consumer;
}
- throw new RuntimeException("Two consumers with the reserved id - this cannot happen.");
+ throw new RuntimeException("Custom consumer not found!");
}
- static ConsumersConfig consumersConfigFromXml(String servicesXml) {
- return consumersConfigFromModel(getModel(servicesXml));
+ static ConsumersConfig consumersConfigFromXml(String servicesXml, TestMode testMode) {
+ return consumersConfigFromModel(getModel(servicesXml, testMode));
}
static ConsumersConfig consumersConfigFromModel(VespaModel model) {
@@ -89,7 +88,7 @@ class MetricsProxyModelTester {
}
static VespaServicesConfig getVespaServicesConfig(String servicesXml) {
- VespaModel model = getModel(servicesXml);
+ VespaModel model = getModel(servicesXml, self_hosted);
return new VespaServicesConfig((VespaServicesConfig.Builder) model.getConfig(new VespaServicesConfig.Builder(), CONTAINER_CONFIG_ID));
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/TlsSecretsValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/TlsSecretsValidatorTest.java
new file mode 100644
index 00000000000..cdb4ce955e2
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/TlsSecretsValidatorTest.java
@@ -0,0 +1,88 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation;
+
+import com.yahoo.config.application.api.ApplicationPackage;
+import com.yahoo.config.model.NullConfigModelRegistry;
+import com.yahoo.config.model.api.TlsSecrets;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.model.deploy.TestProperties;
+import com.yahoo.config.model.test.MockApplicationPackage;
+import com.yahoo.config.provision.CertificateNotReadyException;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.model.VespaModel;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.util.Optional;
+
+import static com.yahoo.config.model.test.TestUtil.joinLines;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author andreer
+ */
+public class TlsSecretsValidatorTest {
+ @Rule
+ public final ExpectedException exceptionRule = ExpectedException.none();
+
+ private static String servicesXml() {
+ return joinLines("<services version='1.0'>",
+ " <container id='default' version='1.0'>",
+ " </container>",
+ "</services>");
+ }
+
+ private static String deploymentXml() {
+ return joinLines("<deployment version='1.0' >",
+ " <prod />",
+ "</deployment>");
+ }
+
+ @Test
+ public void missing_certificate_fails_validation() throws Exception {
+ DeployState deployState = deployState(servicesXml(), deploymentXml(), Optional.of(TlsSecrets.MISSING));
+ VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
+
+ exceptionRule.expect(CertificateNotReadyException.class);
+ exceptionRule.expectMessage("TLS enabled, but could not retrieve certificate yet");
+
+ new TlsSecretsValidator().validate(model, deployState);
+ }
+
+ @Test
+ public void validation_succeeds_with_certificate() throws Exception {
+ DeployState deployState = deployState(servicesXml(), deploymentXml(), Optional.of(new TlsSecrets("cert", "key")));
+ VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
+
+ new TlsSecretsValidator().validate(model, deployState);
+ }
+
+ @Test
+ public void validation_succeeds_without_certificate() throws Exception {
+ DeployState deployState = deployState(servicesXml(), deploymentXml(), Optional.empty());
+ VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
+
+ new TlsSecretsValidator().validate(model, deployState);
+ }
+
+ private static DeployState deployState(String servicesXml, String deploymentXml, Optional<TlsSecrets> tlsSecrets) {
+ ApplicationPackage app = new MockApplicationPackage.Builder()
+ .withServices(servicesXml)
+ .withDeploymentSpec(deploymentXml)
+ .build();
+ DeployState.Builder builder = new DeployState.Builder()
+ .applicationPackage(app)
+ .zone(new Zone(Environment.prod, RegionName.from("foo")))
+ .properties(
+ new TestProperties()
+ .setHostedVespa(true)
+ .setTlsSecrets(tlsSecrets));
+ final DeployState deployState = builder.build();
+
+ assertTrue("Test must emulate a hosted deployment.", deployState.isHosted());
+ return deployState;
+ }
+}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java
index e97fbfc65a5..b39e503f837 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java
@@ -2,21 +2,17 @@
package com.yahoo.vespa.model.builder.xml.dom;
import com.yahoo.collections.CollectionUtil;
-import com.yahoo.config.ConfigInstance;
import com.yahoo.vespa.config.search.core.ProtonConfig;
import com.yahoo.config.model.builder.xml.test.DomBuilderTest;
-import com.yahoo.text.StringUtilities;
import com.yahoo.vespa.model.search.Tuning;
import org.junit.Test;
import org.w3c.dom.Element;
import java.util.Arrays;
-import static org.hamcrest.Matchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
/**
@@ -43,10 +39,10 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
return b.build(root.getDeployState(), root, xml);
}
- String getProtonCfg(Tuning tuning) {
+ private ProtonConfig getProtonCfg(Tuning tuning) {
ProtonConfig.Builder pb = new ProtonConfig.Builder();
tuning.getConfig(pb);
- return StringUtilities.implode(ConfigInstance.serialize(new ProtonConfig(pb)).toArray(new String[0]), "\n");
+ return new ProtonConfig(pb);
}
@Test
@@ -85,10 +81,10 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
"</requestthreads>"));
assertEquals(123, t.searchNode.threads.numSearchThreads.longValue());
assertEquals(456, t.searchNode.threads.numSummaryThreads.longValue());
- String cfg = getProtonCfg(t);
- assertThat(cfg, containsString("numsearcherthreads 123"));
- assertThat(cfg, containsString("numthreadspersearch 34"));
- assertThat(cfg, containsString("numsummarythreads 456"));
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.numsearcherthreads(), 123);
+ assertEquals(cfg.numthreadspersearch(), 34);
+ assertEquals(cfg.numsummarythreads(), 456);
}
@Test
@@ -112,22 +108,22 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
"</conservative>",
"</native>","</flushstrategy>"));
assertEquals(900, t.searchNode.strategy.totalMaxMemoryGain.longValue());
- assertEquals(8.7, t.searchNode.strategy.totalDiskBloatFactor.doubleValue(), DELTA);
+ assertEquals(8.7, t.searchNode.strategy.totalDiskBloatFactor, DELTA);
assertEquals(600, t.searchNode.strategy.componentMaxMemoryGain.longValue());
- assertEquals(5.4, t.searchNode.strategy.componentDiskBloatFactor.doubleValue(), DELTA);
- assertEquals(300, t.searchNode.strategy.componentMaxage.doubleValue(), DELTA);
+ assertEquals(5.4, t.searchNode.strategy.componentDiskBloatFactor, DELTA);
+ assertEquals(300, t.searchNode.strategy.componentMaxage, DELTA);
assertEquals(1024, t.searchNode.strategy.transactionLogMaxSize.longValue());
- assertEquals(0.6, t.searchNode.strategy.conservativeMemoryLimitFactor.doubleValue(), DELTA);
- assertEquals(0.7, t.searchNode.strategy.conservativeDiskLimitFactor.doubleValue(), DELTA);
- String cfg = getProtonCfg(t);
- assertThat(cfg, containsString("flush.memory.maxmemory 900"));
- assertThat(cfg, containsString("flush.memory.diskbloatfactor 8.7"));
- assertThat(cfg, containsString("flush.memory.each.maxmemory 600"));
- assertThat(cfg, containsString("flush.memory.each.diskbloatfactor 5.4"));
- assertThat(cfg, containsString("flush.memory.maxage.time 300"));
- assertThat(cfg, containsString("flush.memory.maxtlssize 1024"));
- assertThat(cfg, containsString("flush.memory.conservative.memorylimitfactor 0.6"));
- assertThat(cfg, containsString("flush.memory.conservative.disklimitfactor 0.7"));
+ assertEquals(0.6, t.searchNode.strategy.conservativeMemoryLimitFactor, DELTA);
+ assertEquals(0.7, t.searchNode.strategy.conservativeDiskLimitFactor, DELTA);
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.flush().memory().maxmemory(), 900);
+ assertEquals(cfg.flush().memory().diskbloatfactor(), 8.7, DELTA);
+ assertEquals(cfg.flush().memory().each().maxmemory(), 600);
+ assertEquals(cfg.flush().memory().each().diskbloatfactor(), 5.4, DELTA);
+ assertEquals(cfg.flush().memory().maxage().time(), 300, DELTA);
+ assertEquals(cfg.flush().memory().maxtlssize(), 1024);
+ assertEquals(cfg.flush().memory().conservative().memorylimitfactor(), 0.6, DELTA);
+ assertEquals(cfg.flush().memory().conservative().disklimitfactor(), 0.7, DELTA);
}
@Test
@@ -157,14 +153,30 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
assertEquals(Tuning.SearchNode.IoType.MMAP, t.searchNode.index.io.search);
assertEquals(178, t.searchNode.index.warmup.time, DELTA);
assertTrue(t.searchNode.index.warmup.unpack);
- String cfg = getProtonCfg(t);
- assertThat(cfg, containsString("indexing.write.io DIRECTIO"));
- assertThat(cfg, containsString("indexing.read.io NORMAL"));
- assertThat(cfg, containsString("index.warmup.time 178"));
- assertThat(cfg, containsString("index.warmup.unpack true"));
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.indexing().write().io(), ProtonConfig.Indexing.Write.Io.DIRECTIO);
+ assertEquals(cfg.indexing().read().io(), ProtonConfig.Indexing.Read.Io.NORMAL);
+ assertEquals(cfg.index().warmup().time(), 178, DELTA);
+ assertTrue(cfg.index().warmup().unpack());
}
@Test
+ public void requireThatWeCanPopulateIndex() {
+ Tuning t = createTuning(parseXml("<index>", "<io>",
+ "<search>populate</search>",
+ "</io>",
+ "</index>"));
+ assertEquals(Tuning.SearchNode.IoType.POPULATE, t.searchNode.index.io.search);
+
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.indexing().write().io(), ProtonConfig.Indexing.Write.Io.DIRECTIO);
+ assertEquals(cfg.indexing().read().io(), ProtonConfig.Indexing.Read.Io.DIRECTIO);
+ assertEquals(cfg.search().mmap().options().size(), 1);
+ assertEquals(cfg.search().mmap().options().get(0), ProtonConfig.Search.Mmap.Options.POPULATE);
+ }
+
+
+ @Test
public void requireThatWeCanParseRemovedDBTag() {
Tuning t = createTuning(parseXml("<removed-db>", "<prune>",
"<age>19388</age>",
@@ -172,9 +184,9 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
"</prune>", "</removed-db>"));
assertEquals(19388, t.searchNode.removedDB.prune.age, DELTA);
assertEquals(193, t.searchNode.removedDB.prune.interval, DELTA);
- String cfg = getProtonCfg(t);
- assertThat(cfg, containsString("pruneremoveddocumentsinterval 193"));
- assertThat(cfg, containsString("pruneremoveddocumentsage 19388"));
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.pruneremoveddocumentsinterval(), 193, DELTA);
+ assertEquals(cfg.pruneremoveddocumentsage(), 19388, DELTA);
}
@Test
@@ -183,8 +195,8 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
"<write>directio</write>",
"</io>", "</attribute>"));
assertEquals(Tuning.SearchNode.IoType.DIRECTIO, t.searchNode.attribute.io.write);
- String cfg = getProtonCfg(t);
- assertThat(cfg, containsString("attribute.write.io DIRECTIO"));
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.attribute().write().io(), ProtonConfig.Attribute.Write.Io.DIRECTIO);
}
@Test
@@ -220,7 +232,7 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
assertEquals(Tuning.SearchNode.IoType.DIRECTIO, t.searchNode.summary.io.write);
assertEquals(Tuning.SearchNode.IoType.DIRECTIO, t.searchNode.summary.io.read);
assertEquals(128, t.searchNode.summary.store.cache.maxSize.longValue());
- assertEquals(30.7, t.searchNode.summary.store.cache.maxSizePercent.doubleValue(), DELTA);
+ assertEquals(30.7, t.searchNode.summary.store.cache.maxSizePercent, DELTA);
assertEquals(Tuning.SearchNode.Summary.Store.Compression.Type.NONE,
t.searchNode.summary.store.cache.compression.type);
assertEquals(3, t.searchNode.summary.store.cache.compression.level.intValue());
@@ -230,18 +242,18 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
assertEquals(Tuning.SearchNode.Summary.Store.Compression.Type.LZ4,
t.searchNode.summary.store.logStore.chunk.compression.type);
assertEquals(5, t.searchNode.summary.store.logStore.chunk.compression.level.intValue());
- String cfg = getProtonCfg(t);
- assertThat(cfg, containsString("summary.write.io DIRECTIO"));
- assertThat(cfg, containsString("summary.read.io DIRECTIO"));
- assertThat(cfg, containsString("summary.cache.maxbytes 128"));
- assertThat(cfg, containsString("summary.cache.initialentries 64"));
- assertThat(cfg, containsString("summary.cache.compression.type NONE"));
- assertThat(cfg, containsString("summary.cache.compression.level 3"));
- assertThat(cfg, containsString("summary.log.maxfilesize 512"));
- assertThat(cfg, containsString("summary.log.minfilesizefactor 0.3"));
- assertThat(cfg, containsString("summary.log.chunk.maxbytes 256"));
- assertThat(cfg, containsString("summary.log.chunk.compression.type LZ4"));
- assertThat(cfg, containsString("summary.log.chunk.compression.level 5"));
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.summary().write().io(), ProtonConfig.Summary.Write.Io.DIRECTIO);
+ assertEquals(cfg.summary().read().io(), ProtonConfig.Summary.Read.Io.DIRECTIO);
+ assertEquals(cfg.summary().cache().maxbytes(), 128);
+ assertEquals(cfg.summary().cache().initialentries(), 64);
+ assertEquals(cfg.summary().cache().compression().type(), ProtonConfig.Summary.Cache.Compression.Type.NONE);
+ assertEquals(cfg.summary().cache().compression().level(), 3);
+ assertEquals(cfg.summary().log().maxfilesize(), 512);
+ assertEquals(cfg.summary().log().minfilesizefactor(), 0.3, DELTA);
+ assertEquals(cfg.summary().log().chunk().maxbytes(), 256);
+ assertEquals(cfg.summary().log().chunk().compression().type(), ProtonConfig.Summary.Log.Chunk.Compression.Type.LZ4);
+ assertEquals(cfg.summary().log().chunk().compression().level(), 5);
}
@Test
@@ -255,10 +267,25 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
"</summary>"));
assertNull(t.searchNode.summary.store.cache.maxSize);
- assertEquals(30.7, t.searchNode.summary.store.cache.maxSizePercent.doubleValue(),DELTA);
+ assertEquals(30.7, t.searchNode.summary.store.cache.maxSizePercent,DELTA);
+
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.summary().cache().maxbytes(), -30);
+ }
+
+ @Test
+ public void requireThatWeCanPopulateSummary() {
+ Tuning t = createTuning(parseXml("<summary>",
+ "<io>",
+ "<read>populate</read>",
+ "</io>",
+ "</summary>"));
+
+ assertEquals(Tuning.SearchNode.IoType.POPULATE, t.searchNode.summary.io.read);
- String cfg = getProtonCfg(t);
- assertThat(cfg, containsString("summary.cache.maxbytes -30"));
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(ProtonConfig.Summary.Read.Io.MMAP, cfg.summary().read().io());
+ assertEquals(ProtonConfig.Summary.Read.Mmap.Options.POPULATE, cfg.summary().read().mmap().options().get(0));
}
@@ -268,8 +295,8 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
"<threads>7</threads>",
"</initialize>"));
assertEquals(7, t.searchNode.initialize.threads.intValue());
- String cfg = getProtonCfg(t);
- assertThat(cfg, containsString("initialize.threads 7"));
+ ProtonConfig cfg = getProtonCfg(t);
+ assertEquals(cfg.initialize().threads(), 7);
}
@Test
@@ -277,8 +304,8 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
Tuning t = createTuning(parseXml("<feeding>",
"<concurrency>0.7</concurrency>",
"</feeding>"));
- assertEquals(0.7, t.searchNode.feeding.concurrency.doubleValue(), DELTA);
- assertThat(getProtonCfg(t), containsString("feeding.concurrency 0.35"));
+ assertEquals(0.7, t.searchNode.feeding.concurrency, DELTA);
+ assertEquals(getProtonCfg(t).feeding().concurrency(), 0.35, DELTA);
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
index ba7fbef439c..ac85a958ed5 100755
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/ContainerClusterTest.java
@@ -5,6 +5,7 @@ import com.yahoo.cloud.config.ClusterInfoConfig;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.cloud.config.RoutingProviderConfig;
import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.config.model.test.MockRoot;
@@ -13,6 +14,7 @@ import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.container.handler.ThreadpoolConfig;
+import com.yahoo.jdisc.http.ConnectorConfig;
import com.yahoo.search.config.QrStartConfig;
import com.yahoo.vespa.model.Host;
import com.yahoo.vespa.model.HostResource;
@@ -20,15 +22,22 @@ import com.yahoo.vespa.model.admin.clustercontroller.ClusterControllerContainer;
import com.yahoo.vespa.model.admin.clustercontroller.ClusterControllerContainerCluster;
import com.yahoo.vespa.model.container.component.Component;
import com.yahoo.vespa.model.container.docproc.ContainerDocproc;
+import com.yahoo.vespa.model.container.http.ConnectorFactory;
import com.yahoo.vespa.model.container.search.ContainerSearch;
import com.yahoo.vespa.model.container.search.searchchain.SearchChains;
+import org.hamcrest.Matchers;
import org.junit.Test;
import java.util.Collection;
import java.util.Iterator;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
/**
* @author Simon Thoresen Hult
@@ -210,9 +219,40 @@ public class ContainerClusterTest {
assertEquals(0, cluster.getAllComponents().stream().map(c -> c.getClassId().getName()).filter(c -> c.equals("com.yahoo.jdisc.http.filter.security.RoutingConfigProvider")).count());
}
+ @Test
+ public void requireThatProvidingTlsSecretOpensPort4443() {
+ DeployState state = new DeployState.Builder().properties(new TestProperties().setHostedVespa(true).setTlsSecrets(Optional.of(new TlsSecrets("CERT", "KEY")))).build();
+ MockRoot root = new MockRoot("foo", state);
+ ApplicationContainerCluster cluster = new ApplicationContainerCluster(root, "container0", "container1", state);
+
+ addContainer(state.getDeployLogger(), cluster, "c1", "host-c1");
+ Optional<ApplicationContainer> container = cluster.getContainers().stream().findFirst();
+ assertTrue(container.isPresent());
+
+ var httpServer = (container.get().getHttp() == null) ? container.get().getDefaultHttpServer() : container.get().getHttp().getHttpServer();
+
+ // Verify that there are two connectors
+ List<ConnectorFactory> connectorFactories = httpServer.getConnectorFactories();
+ assertEquals(2, connectorFactories.size());
+ List<Integer> ports = connectorFactories.stream()
+ .map(ConnectorFactory::getListenPort)
+ .collect(Collectors.toList());
+ assertThat(ports, Matchers.containsInAnyOrder(8080, 4443));
+
+ ConnectorFactory tlsPort = connectorFactories.stream().filter(connectorFactory -> connectorFactory.getListenPort() == 4443).findFirst().orElseThrow();
+
+ ConnectorConfig.Builder builder = new ConnectorConfig.Builder();
+ tlsPort.getConfig(builder);
+
+ ConnectorConfig connectorConfig = new ConnectorConfig(builder);
+ assertTrue(connectorConfig.ssl().enabled());
+ assertEquals("CERT", connectorConfig.ssl().certificate());
+ assertEquals("KEY", connectorConfig.ssl().privateKey());
+ assertEquals(4443, connectorConfig.listenPort());
+ }
private static void addContainer(DeployLogger deployLogger, ApplicationContainerCluster cluster, String name, String hostName) {
- ApplicationContainer container = new ApplicationContainer(cluster, name, 0, cluster.isHostedVespa());
+ ApplicationContainer container = new ApplicationContainer(cluster, name, 0, cluster.isHostedVespa(), cluster.getTlsSecrets());
container.setHostResource(new HostResource(new Host(null, hostName)));
container.initService(deployLogger);
cluster.addContainer(container);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java
index c7816c23119..f787453dfb6 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java
@@ -6,6 +6,7 @@ import com.yahoo.component.ComponentId;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.NullConfigModelRegistry;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.builder.xml.test.DomBuilderTest;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
@@ -33,6 +34,7 @@ import com.yahoo.vespa.model.AbstractService;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.container.Container;
import com.yahoo.vespa.model.container.ContainerCluster;
+import com.yahoo.vespa.model.container.ContainerModel;
import com.yahoo.vespa.model.container.SecretStore;
import com.yahoo.vespa.model.container.component.Component;
import com.yahoo.vespa.model.content.utils.ContentClusterUtils;
@@ -45,8 +47,11 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.logging.Level;
+import java.util.stream.Collectors;
+import static com.yahoo.config.model.test.TestUtil.joinLines;
import static com.yahoo.test.LinePatternMatcher.containsLineWithPattern;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
import static org.hamcrest.CoreMatchers.is;
@@ -611,6 +616,48 @@ public class ContainerModelBuilderTest extends ContainerModelBuilderTestBase {
}
@Test
+ public void endpoints_are_added_to_containers() throws IOException, SAXException {
+ final var servicesXml = joinLines("",
+ "<container id='comics-search' version='1.0'>",
+ " <nodes>",
+ " <node hostalias='host1' />",
+ " </nodes>",
+ "</container>"
+ );
+
+ final var deploymentXml = joinLines("",
+ "<deployment version='1.0'>",
+ " <prod />",
+ "</deployment>"
+ );
+
+ final var applicationPackage = new MockApplicationPackage.Builder()
+ .withServices(servicesXml)
+ .withDeploymentSpec(deploymentXml)
+ .build();
+
+ final var deployState = new DeployState.Builder()
+ .applicationPackage(applicationPackage)
+ .zone(new Zone(Environment.prod, RegionName.from("us-east-1")))
+ .endpoints(Set.of(new ContainerEndpoint("comics-search", List.of("nalle", "balle"))))
+ .properties(new TestProperties().setHostedVespa(true))
+ .build();
+
+ final var model = new VespaModel(new NullConfigModelRegistry(), deployState);
+ final var containers = model.getContainerClusters().values().stream()
+ .flatMap(cluster -> cluster.getContainers().stream())
+ .collect(Collectors.toList());
+
+ assertFalse("Missing container objects based on configuration", containers.isEmpty());
+
+ containers.forEach(container -> {
+ final var rotations = container.getServicePropertyString("rotations").split(",");
+ final var rotationsSet = Set.of(rotations);
+ assertEquals(Set.of("balle", "nalle"), rotationsSet);
+ });
+ }
+
+ @Test
public void singlenode_servicespec_is_used_with_hosted_vespa() throws IOException, SAXException {
String servicesXml = "<container id='default' version='1.0' />";
ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java
index 03e115f0608..880cccf02e4 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JettyContainerModelBuilderTest.java
@@ -1,16 +1,19 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container.xml;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.model.builder.xml.test.DomBuilderTest;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.container.ComponentsConfig;
import com.yahoo.container.jdisc.FilterBindingsProvider;
import com.yahoo.jdisc.http.ConnectorConfig;
-import com.yahoo.vespa.model.container.ContainerCluster;
import com.yahoo.vespa.model.container.ApplicationContainerCluster;
+import com.yahoo.vespa.model.container.ContainerCluster;
import com.yahoo.vespa.model.container.component.SimpleComponent;
import com.yahoo.vespa.model.container.http.ConnectorFactory;
import com.yahoo.vespa.model.container.http.JettyHttpServer;
-import com.yahoo.vespa.model.container.http.ssl.ConfiguredSslProvider;
+import com.yahoo.vespa.model.container.http.ssl.ConfiguredFilebasedSslProvider;
import org.junit.Test;
import org.w3c.dom.Element;
@@ -21,6 +24,7 @@ import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.nullValue;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
@@ -174,7 +178,7 @@ public class JettyContainerModelBuilderTest extends ContainerModelBuilderTestBas
ContainerCluster cluster = (ContainerCluster) root.getChildren().get("default");
List<ConnectorFactory> connectorFactories = cluster.getChildrenByTypeRecursive(ConnectorFactory.class);
- connectorFactories.forEach(connectorFactory -> assertChildComponentExists(connectorFactory, ConfiguredSslProvider.COMPONENT_CLASS));
+ connectorFactories.forEach(connectorFactory -> assertChildComponentExists(connectorFactory, ConfiguredFilebasedSslProvider.COMPONENT_CLASS));
}
@Test
@@ -222,6 +226,37 @@ public class JettyContainerModelBuilderTest extends ContainerModelBuilderTestBas
assertTrue(sslProvider.ssl().enabled());
}
+ @Test
+ public void verify_that_container_setup_additional_tls4443(){
+ Element clusterElem = DomBuilderTest.parse(
+ "<jdisc id='default' version='1.0' jetty='true'>",
+ " <http>",
+ " <server port='9000' id='ssl'>",
+ " <ssl>",
+ " <private-key-file>/foo/key</private-key-file>",
+ " <certificate-file>/foo/cert</certificate-file>",
+ " </ssl>",
+ " </server>",
+ " </http>",
+ nodesXml,
+ "",
+ "</jdisc>");
+
+ DeployState deployState = new DeployState.Builder().properties(new TestProperties().setHostedVespa(true).setTlsSecrets(Optional.of(new TlsSecrets("CERT", "KEY")))).build();
+ createModel(root, deployState, null, clusterElem);
+ ConnectorConfig sslProvider = root.getConfig(ConnectorConfig.class, "default/http/jdisc-jetty/ssl");
+ assertTrue(sslProvider.ssl().enabled());
+ assertEquals("", sslProvider.ssl().certificate());
+ assertEquals("", sslProvider.ssl().privateKey());
+
+ ConnectorConfig providedTls = root.getConfig(ConnectorConfig.class, "default/http/jdisc-jetty/tls4443");
+ assertTrue(providedTls.ssl().enabled());
+ assertEquals("CERT", providedTls.ssl().certificate());
+ assertEquals("KEY", providedTls.ssl().privateKey());
+ assertEquals(4443, providedTls.listenPort());
+
+ }
+
private static void assertChildComponentExists(ConnectorFactory connectorFactory, String className) {
Optional<SimpleComponent> simpleComponent = connectorFactory.getChildren().values().stream()
.map(z -> (SimpleComponent) z)
diff --git a/config-provisioning/abi-spec.json b/config-provisioning/abi-spec.json
index cf3f2d35bd7..a9b45fe6bb4 100644
--- a/config-provisioning/abi-spec.json
+++ b/config-provisioning/abi-spec.json
@@ -149,6 +149,17 @@
],
"fields": []
},
+ "com.yahoo.config.provision.CertificateNotReadyException": {
+ "superClass": "com.yahoo.config.provision.TransientException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)"
+ ],
+ "fields": []
+ },
"com.yahoo.config.provision.CloudName": {
"superClass": "java.lang.Object",
"interfaces": [
@@ -263,9 +274,7 @@
"public com.yahoo.config.provision.ClusterSpec with(java.util.Optional)",
"public com.yahoo.config.provision.ClusterSpec exclusive(boolean)",
"public static com.yahoo.config.provision.ClusterSpec request(com.yahoo.config.provision.ClusterSpec$Type, com.yahoo.config.provision.ClusterSpec$Id, com.yahoo.component.Version, boolean)",
- "public static com.yahoo.config.provision.ClusterSpec request(com.yahoo.config.provision.ClusterSpec$Type, com.yahoo.config.provision.ClusterSpec$Id, com.yahoo.component.Version, boolean, java.util.Set)",
"public static com.yahoo.config.provision.ClusterSpec from(com.yahoo.config.provision.ClusterSpec$Type, com.yahoo.config.provision.ClusterSpec$Id, com.yahoo.config.provision.ClusterSpec$Group, com.yahoo.component.Version, boolean)",
- "public static com.yahoo.config.provision.ClusterSpec from(com.yahoo.config.provision.ClusterSpec$Type, com.yahoo.config.provision.ClusterSpec$Id, com.yahoo.config.provision.ClusterSpec$Group, com.yahoo.component.Version, boolean, java.util.Set)",
"public java.lang.String toString()",
"public int hashCode()",
"public boolean equals(java.lang.Object)",
@@ -669,7 +678,7 @@
"fields": []
},
"com.yahoo.config.provision.ParentHostUnavailableException": {
- "superClass": "java.lang.RuntimeException",
+ "superClass": "com.yahoo.config.provision.TransientException",
"interfaces": [],
"attributes": [
"public"
@@ -809,6 +818,19 @@
],
"fields": []
},
+ "com.yahoo.config.provision.TransientException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(java.lang.String, java.lang.Throwable)"
+ ],
+ "fields": []
+ },
"com.yahoo.config.provision.Zone": {
"superClass": "java.lang.Object",
"interfaces": [],
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/CertificateNotReadyException.java b/config-provisioning/src/main/java/com/yahoo/config/provision/CertificateNotReadyException.java
new file mode 100644
index 00000000000..0d88a7aa435
--- /dev/null
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/CertificateNotReadyException.java
@@ -0,0 +1,17 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.provision;
+
+/**
+ * Exception thrown when trying to validate an application which is configured
+ * with a certificate that is not yet retrievable
+ *
+ * @author andreer
+ *
+ */
+public class CertificateNotReadyException extends TransientException {
+
+ public CertificateNotReadyException(String message) {
+ super(message);
+ }
+
+}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
index 8ed56b98705..496874e978b 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
@@ -5,7 +5,6 @@ import com.yahoo.component.Version;
import java.util.Objects;
import java.util.Optional;
-import java.util.Set;
/**
* A specification of a cluster - or group in a grouped cluster - to be run on a set of hosts.
@@ -62,20 +61,10 @@ public final class ClusterSpec {
return new ClusterSpec(type, id, Optional.empty(), vespaVersion, exclusive);
}
- // TODO: Remove after June 2019
- public static ClusterSpec request(Type type, Id id, Version vespaVersion, boolean exclusive, Set<RotationName> rotations) {
- return new ClusterSpec(type, id, Optional.empty(), vespaVersion, exclusive);
- }
-
public static ClusterSpec from(Type type, Id id, Group groupId, Version vespaVersion, boolean exclusive) {
return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion, exclusive);
}
- // TODO: Remove after June 2019
- public static ClusterSpec from(Type type, Id id, Group groupId, Version vespaVersion, boolean exclusive, Set<RotationName> rotations) {
- return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion, exclusive);
- }
-
@Override
public String toString() {
return type + " " + id + " " + groupId.map(group -> group + " ").orElse("") + vespaVersion;
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ParentHostUnavailableException.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ParentHostUnavailableException.java
index 357897df898..f90b67c1525 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ParentHostUnavailableException.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ParentHostUnavailableException.java
@@ -9,7 +9,7 @@ package com.yahoo.config.provision;
* @author freva
*
*/
-public class ParentHostUnavailableException extends RuntimeException {
+public class ParentHostUnavailableException extends TransientException {
public ParentHostUnavailableException(String message) {
super(message);
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/TransientException.java b/config-provisioning/src/main/java/com/yahoo/config/provision/TransientException.java
new file mode 100644
index 00000000000..bae7407587e
--- /dev/null
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/TransientException.java
@@ -0,0 +1,20 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.provision;
+
+/**
+ * A provisioning exception that is considered transient. Exceptions that we expect to recover from after a short
+ * duration should extend this. Throwing a subclass of this indicates that the operation can safely be retried.
+ *
+ * @author mpolden
+ */
+public abstract class TransientException extends RuntimeException {
+
+ public TransientException(String message) {
+ super(message);
+ }
+
+ public TransientException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneList.java b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneList.java
index 5f3f2e10898..776f925c424 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneList.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneList.java
@@ -1,10 +1,13 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.provision.zone;
+import com.google.common.collect.ImmutableList;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.Zone;
import java.util.List;
+import java.util.stream.Collectors;
/**
* Provides filters for and access to a list of ZoneIds.
@@ -32,7 +35,9 @@ public interface ZoneList extends ZoneFilter {
/** Returns the ZoneApi of all zones in this list. */
List<? extends ZoneApi> zones();
- /** Returns the id of all zones in this list as — you guessed it — a list. */
- List<ZoneId> ids();
+ /** Returns the ZoneIds of all zones in this list. */
+ default List<ZoneId> ids() {
+ return zones().stream().map(ZoneApi::getId).collect(Collectors.toList());
+ }
}
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
index 5eee55a1886..3a36afcfdce 100644
--- a/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
@@ -5,8 +5,6 @@ import com.yahoo.component.Version;
import com.yahoo.component.Vtag;
import org.junit.Test;
-import java.util.Collections;
-
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -18,7 +16,7 @@ public class ClusterMembershipTest {
@Test
public void testContainerServiceInstance() {
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("id1"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("id1"), Version.fromString("6.42"), false);
assertContainerService(ClusterMembership.from(cluster, 3));
}
@@ -35,38 +33,18 @@ public class ClusterMembershipTest {
assertFalse(instance.retired());
assertTrue(instance.cluster().isExclusive());
}
-
- // TODO: Remove after June 2019. This ensures stale rotation data is handled
- {
- ClusterMembership instance = ClusterMembership.from("container/id1/4/37/rotation1,rotation2", Vtag.currentVersion);
- assertFalse(instance.retired());
- assertFalse(instance.cluster().isExclusive());
- }
-
- {
- ClusterMembership instance = ClusterMembership.from("container/id1/4/37/exclusive/rotation1,rotation2", Vtag.currentVersion);
- assertFalse(instance.retired());
- assertTrue(instance.cluster().isExclusive());
- }
-
- {
- ClusterMembership instance = ClusterMembership.from("container/id1/4/37/exclusive/retired/rotation1,rotation2", Vtag.currentVersion);
- assertTrue(instance.retired());
- assertTrue(instance.cluster().isExclusive());
- }
- // end TODO
}
@Test
public void testServiceInstance() {
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("id1"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("id1"), Version.fromString("6.42"), false);
assertContentService(ClusterMembership.from(cluster, 37));
}
@Test
public void testServiceInstanceWithGroup() {
ClusterSpec cluster = ClusterSpec.from(ClusterSpec.Type.content, ClusterSpec.Id.from("id1"),
- ClusterSpec.Group.from(4), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec.Group.from(4), Version.fromString("6.42"), false);
assertContentServiceWithGroup(ClusterMembership.from(cluster, 37));
}
@@ -77,14 +55,14 @@ public class ClusterMembershipTest {
@Test
public void testServiceInstanceWithRetire() {
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("id1"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("id1"), Version.fromString("6.42"), false);
assertContentServiceWithRetire(ClusterMembership.retiredFrom(cluster, 37));
}
@Test
public void testServiceInstanceWithGroupAndRetire() {
ClusterSpec cluster = ClusterSpec.from(ClusterSpec.Type.content, ClusterSpec.Id.from("id1"),
- ClusterSpec.Group.from(4), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec.Group.from(4), Version.fromString("6.42"), false);
assertContentServiceWithGroupAndRetire(ClusterMembership.retiredFrom(cluster, 37));
}
diff --git a/configdefinitions/src/vespa/indexschema.def b/configdefinitions/src/vespa/indexschema.def
index c0c9f175837..245570ef7b5 100644
--- a/configdefinitions/src/vespa/indexschema.def
+++ b/configdefinitions/src/vespa/indexschema.def
@@ -16,8 +16,8 @@ indexfield[].phrases bool default=false
indexfield[].positions bool default=true
## Average element length
indexfield[].averageelementlen int default=512
-## Whether we should use a new experimental posting list format for this field.
-indexfield[].experimentalpostinglistformat bool default=false
+## Whether the index field should use posting lists with interleaved features or not.
+indexfield[].interleavedfeatures bool default=false
## The name of the field collection (aka logical view).
fieldset[].name string
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
index cd0519ff115..d490b1b49e9 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
@@ -7,11 +7,13 @@ import com.yahoo.component.AbstractComponent;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Deployment;
+import com.yahoo.config.provision.TransientException;
import com.yahoo.container.handler.VipStatus;
import com.yahoo.container.jdisc.state.StateMonitor;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.config.server.rpc.RpcServer;
import com.yahoo.vespa.config.server.version.VersionState;
+import com.yahoo.yolean.Exceptions;
import java.time.Duration;
import java.time.Instant;
@@ -241,10 +243,13 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
}
for (Map.Entry<ApplicationId, Future<?>> f : futures.entrySet()) {
+ ApplicationId app = f.getKey();
try {
f.getValue().get();
+ } catch (TransientException e) {
+ log.log(LogLevel.INFO, "Redeploying " + app +
+ " failed with transient error, will retry after bootstrap: " + Exceptions.toMessageString(e));
} catch (ExecutionException e) {
- ApplicationId app = f.getKey();
log.log(LogLevel.WARNING, "Redeploying " + app + " failed, will retry", e);
failedDeployments.add(app);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/GlobalComponentRegistry.java b/configserver/src/main/java/com/yahoo/vespa/config/server/GlobalComponentRegistry.java
index d420c3f21fe..1eb18773898 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/GlobalComponentRegistry.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/GlobalComponentRegistry.java
@@ -7,6 +7,7 @@ import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.provision.Provisioner;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.vespa.config.server.application.PermanentApplicationPackage;
import com.yahoo.vespa.config.server.host.HostRegistries;
import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
@@ -46,4 +47,5 @@ public interface GlobalComponentRegistry {
StripedExecutor<TenantName> getZkWatcherExecutor();
FlagSource getFlagSource();
ExecutorService getZkCacheExecutor();
+ SecretStore getSecretStore();
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistry.java b/configserver/src/main/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistry.java
index ff76afd1c98..9badd19009f 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistry.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistry.java
@@ -9,6 +9,7 @@ import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.provision.Provisioner;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.vespa.config.server.application.PermanentApplicationPackage;
import com.yahoo.vespa.config.server.host.HostRegistries;
import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
@@ -48,6 +49,7 @@ public class InjectedGlobalComponentRegistry implements GlobalComponentRegistry
private final Zone zone;
private final ConfigServerDB configServerDB;
private final FlagSource flagSource;
+ private final SecretStore secretStore;
private final StripedExecutor<TenantName> zkWatcherExecutor;
private final ExecutorService zkCacheExecutor;
@@ -67,7 +69,8 @@ public class InjectedGlobalComponentRegistry implements GlobalComponentRegistry
HostProvisionerProvider hostProvisionerProvider,
Zone zone,
ConfigServerDB configServerDB,
- FlagSource flagSource) {
+ FlagSource flagSource,
+ SecretStore secretStore) {
this.curator = curator;
this.configCurator = configCurator;
this.metrics = metrics;
@@ -82,6 +85,7 @@ public class InjectedGlobalComponentRegistry implements GlobalComponentRegistry
this.zone = zone;
this.configServerDB = configServerDB;
this.flagSource = flagSource;
+ this.secretStore = secretStore;
this.zkWatcherExecutor = new StripedExecutor<>();
this.zkCacheExecutor = Executors.newFixedThreadPool(1, ThreadFactoryFactory.getThreadFactory(TenantRepository.class.getName()));
}
@@ -137,4 +141,9 @@ public class InjectedGlobalComponentRegistry implements GlobalComponentRegistry
public ExecutorService getZkCacheExecutor() {
return zkCacheExecutor;
}
+
+ @Override
+ public SecretStore getSecretStore() {
+ return secretStore;
+ }
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index fc6667087c6..d875385d14d 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -7,9 +7,11 @@ import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.model.api.ConfigServerSpec;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.HostProvisioner;
import com.yahoo.config.model.api.Model;
import com.yahoo.config.model.api.ModelContext;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.Rotation;
@@ -126,12 +128,14 @@ public class ModelContextImpl implements ModelContext {
private final boolean hostedVespa;
private final Zone zone;
private final Set<Rotation> rotations;
+ private final Set<ContainerEndpoint> endpoints;
private final boolean isBootstrap;
private final boolean isFirstTimeDeployment;
private final boolean useDedicatedNodeForLogserver;
private final boolean useFdispatchByDefault;
private final boolean useAdaptiveDispatch;
private final boolean dispatchWithProtobuf;
+ private final Optional<TlsSecrets> tlsSecrets;
public Properties(ApplicationId applicationId,
boolean multitenantFromConfig,
@@ -142,9 +146,11 @@ public class ModelContextImpl implements ModelContext {
boolean hostedVespa,
Zone zone,
Set<Rotation> rotations,
+ Set<ContainerEndpoint> endpoints,
boolean isBootstrap,
boolean isFirstTimeDeployment,
- FlagSource flagSource) {
+ FlagSource flagSource,
+ Optional<TlsSecrets> tlsSecrets) {
this.applicationId = applicationId;
this.multitenant = multitenantFromConfig || hostedVespa || Boolean.getBoolean("multitenant");
this.configServerSpecs = configServerSpecs;
@@ -154,6 +160,7 @@ public class ModelContextImpl implements ModelContext {
this.hostedVespa = hostedVespa;
this.zone = zone;
this.rotations = rotations;
+ this.endpoints = endpoints;
this.isBootstrap = isBootstrap;
this.isFirstTimeDeployment = isFirstTimeDeployment;
this.useDedicatedNodeForLogserver = Flags.USE_DEDICATED_NODE_FOR_LOGSERVER.bindTo(flagSource)
@@ -164,6 +171,7 @@ public class ModelContextImpl implements ModelContext {
.with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value();
this.useAdaptiveDispatch = Flags.USE_ADAPTIVE_DISPATCH.bindTo(flagSource)
.with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value();
+ this.tlsSecrets = tlsSecrets;
}
@Override
@@ -198,6 +206,9 @@ public class ModelContextImpl implements ModelContext {
public Set<Rotation> rotations() { return rotations; }
@Override
+ public Set<ContainerEndpoint> endpoints() { return endpoints; }
+
+ @Override
public boolean isBootstrap() { return isBootstrap; }
@Override
@@ -215,6 +226,8 @@ public class ModelContextImpl implements ModelContext {
@Override
public boolean useAdaptiveDispatch() { return useAdaptiveDispatch; }
+ @Override
+ public Optional<TlsSecrets> tlsSecrets() { return tlsSecrets; }
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
index a29891ae764..3d2ecd4a2ca 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpErrorResponse.java
@@ -48,7 +48,8 @@ public class HttpErrorResponse extends HttpResponse {
OUT_OF_CAPACITY,
REQUEST_TIMEOUT,
UNKNOWN_VESPA_VERSION,
- PARENT_HOST_NOT_READY
+ PARENT_HOST_NOT_READY,
+ CERTIFICATE_NOT_READY
}
public static HttpErrorResponse notFoundError(String msg) {
@@ -95,6 +96,10 @@ public class HttpErrorResponse extends HttpResponse {
return new HttpErrorResponse(CONFLICT, errorCodes.PARENT_HOST_NOT_READY.name(), msg);
}
+ public static HttpErrorResponse certificateNotReady(String msg) {
+ return new HttpErrorResponse(CONFLICT, errorCodes.CERTIFICATE_NOT_READY.name(), msg);
+ }
+
@Override
public void render(OutputStream stream) throws IOException {
new JsonFormat(true).encode(stream, slime);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
index cd2052653ed..20ee77be9fe 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.server.http;
import com.yahoo.config.provision.ApplicationLockException;
+import com.yahoo.config.provision.CertificateNotReadyException;
import com.yahoo.config.provision.ParentHostUnavailableException;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
@@ -64,6 +65,8 @@ public class HttpHandler extends LoggingRequestHandler {
return HttpErrorResponse.applicationLockFailure(getMessage(e, request));
} catch (ParentHostUnavailableException e) {
return HttpErrorResponse.parentHostNotReady(getMessage(e, request));
+ } catch (CertificateNotReadyException e) {
+ return HttpErrorResponse.certificateNotReady(getMessage(e, request));
} catch (Exception e) {
log.log(LogLevel.WARNING, "Unexpected exception handling a config server request", e);
return HttpErrorResponse.internalServerError(getMessage(e, request));
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
index 6351a93e6e6..94cd30de28b 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.modelfactory;
+import com.google.common.collect.ImmutableSet;
import com.yahoo.component.Version;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
@@ -12,6 +13,7 @@ import com.yahoo.config.provision.AllocatedHosts;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.config.server.ConfigServerSpec;
import com.yahoo.vespa.config.server.GlobalComponentRegistry;
@@ -24,8 +26,10 @@ import com.yahoo.vespa.config.server.monitoring.Metrics;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
import com.yahoo.vespa.config.server.session.SessionZooKeeperClient;
import com.yahoo.vespa.config.server.session.SilentDeployLogger;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache;
import com.yahoo.vespa.config.server.tenant.Rotations;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
+import com.yahoo.vespa.config.server.tenant.TlsSecretsKeys;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.flags.FlagSource;
@@ -53,6 +57,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
private final Curator curator;
private final DeployLogger logger;
private final FlagSource flagSource;
+ private final SecretStore secretStore;
public ActivatedModelsBuilder(TenantName tenant,
long appGeneration,
@@ -71,6 +76,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
this.curator = globalComponentRegistry.getCurator();
this.logger = new SilentDeployLogger();
this.flagSource = globalComponentRegistry.getFlagSource();
+ this.secretStore = globalComponentRegistry.getSecretStore();
}
@Override
@@ -127,9 +133,11 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
configserverConfig.hostedVespa(),
zone(),
new Rotations(curator, TenantRepository.getTenantPath(tenant)).readRotationsFromZooKeeper(applicationId),
+ ImmutableSet.copyOf(new ContainerEndpointsCache(TenantRepository.getTenantPath(tenant), curator).read(applicationId)),
false, // We may be bootstrapping, but we only know and care during prepare
false, // Always false, assume no one uses it when activating
- flagSource);
+ flagSource,
+ new TlsSecretsKeys(curator, TenantRepository.getTenantPath(tenant), secretStore).readTlsSecretsKeyFromZookeeper(applicationId));
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
index 07c06f22497..34dcefe05bd 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
@@ -12,6 +12,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.TransientException;
import com.yahoo.config.provision.Zone;
import com.yahoo.lang.SettableOptional;
import com.yahoo.log.LogLevel;
@@ -111,7 +112,7 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> {
break;
buildLatestModelForThisMajor = false; // We have successfully built latest model version, do it only for this major
}
- catch (OutOfCapacityException | ApplicationLockException e) {
+ catch (OutOfCapacityException | ApplicationLockException | TransientException e) {
// Don't wrap this exception, and don't try to load other model versions as this is (most likely)
// caused by the state of the system, not the model version/application combination
throw e;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
index 4cabf39edcc..5bf70c55f9e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
@@ -10,7 +10,7 @@ import com.yahoo.slime.Slime;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.config.server.TimeoutBudget;
import com.yahoo.vespa.config.server.http.SessionHandler;
-import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.vespa.config.server.tenant.ContainerEndpointSerializer;
import java.time.Clock;
@@ -35,6 +35,7 @@ public final class PrepareParams {
static final String VESPA_VERSION_PARAM_NAME = "vespaVersion";
static final String ROTATIONS_PARAM_NAME = "rotations";
static final String CONTAINER_ENDPOINTS_PARAM_NAME = "containerEndpoints";
+ static final String TLS_SECRETS_KEY_NAME_PARAM_NAME = "tlsSecretsKeyName";
private final ApplicationId applicationId;
private final TimeoutBudget timeoutBudget;
@@ -45,10 +46,11 @@ public final class PrepareParams {
private final Optional<Version> vespaVersion;
private final Set<Rotation> rotations;
private final List<ContainerEndpoint> containerEndpoints;
+ private final Optional<String> tlsSecretsKeyName;
private PrepareParams(ApplicationId applicationId, TimeoutBudget timeoutBudget, boolean ignoreValidationErrors,
- boolean dryRun, boolean verbose, boolean isBootstrap, Optional<Version> vespaVersion,
- Set<Rotation> rotations, List<ContainerEndpoint> containerEndpoints) {
+ boolean dryRun, boolean verbose, boolean isBootstrap, Optional<Version> vespaVersion, Set<Rotation> rotations,
+ List<ContainerEndpoint> containerEndpoints, Optional<String> tlsSecretsKeyName) {
this.timeoutBudget = timeoutBudget;
this.applicationId = applicationId;
this.ignoreValidationErrors = ignoreValidationErrors;
@@ -61,6 +63,7 @@ public final class PrepareParams {
if ((rotations != null && !rotations.isEmpty()) && !containerEndpoints.isEmpty()) {
throw new IllegalArgumentException("Cannot set both rotations and containerEndpoints");
}
+ this.tlsSecretsKeyName = tlsSecretsKeyName;
}
public static class Builder {
@@ -74,6 +77,7 @@ public final class PrepareParams {
private Optional<Version> vespaVersion = Optional.empty();
private Set<Rotation> rotations;
private List<ContainerEndpoint> containerEndpoints = List.of();
+ private Optional<String> tlsSecretsKeyName = Optional.empty();
public Builder() { }
@@ -136,12 +140,18 @@ public final class PrepareParams {
if (serialized == null) return this;
Slime slime = SlimeUtils.jsonToSlime(serialized);
containerEndpoints = ContainerEndpointSerializer.endpointListFromSlime(slime);
+ return this;
+ }
+
+ public Builder tlsSecretsKeyName(String tlsSecretsKeyName) {
+ this.tlsSecretsKeyName = Optional.ofNullable(tlsSecretsKeyName)
+ .filter(s -> ! s.isEmpty());
return this;
}
public PrepareParams build() {
return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
- verbose, isBootstrap, vespaVersion, rotations, containerEndpoints);
+ verbose, isBootstrap, vespaVersion, rotations, containerEndpoints, tlsSecretsKeyName);
}
}
@@ -155,6 +165,7 @@ public final class PrepareParams {
.vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
.rotations(request.getProperty(ROTATIONS_PARAM_NAME))
.containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
+ .tlsSecretsKeyName(request.getProperty(TLS_SECRETS_KEY_NAME_PARAM_NAME))
.build();
}
@@ -212,4 +223,7 @@ public final class PrepareParams {
return timeoutBudget;
}
+ public Optional<String> tlsSecretsKeyName() {
+ return tlsSecretsKeyName;
+ }
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index 7af61a6efc1..54c96c0461d 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.server.session;
import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
import com.google.inject.Inject;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.component.Version;
@@ -12,15 +13,16 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.model.api.ModelContext;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.provision.AllocatedHosts;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.Rotation;
import com.yahoo.config.provision.Zone;
+import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.lang.SettableOptional;
import com.yahoo.log.LogLevel;
import com.yahoo.path.Path;
-import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.config.server.ConfigServerSpec;
import com.yahoo.vespa.config.server.application.ApplicationSet;
import com.yahoo.vespa.config.server.application.PermanentApplicationPackage;
@@ -31,9 +33,10 @@ import com.yahoo.vespa.config.server.http.InvalidApplicationException;
import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
import com.yahoo.vespa.config.server.modelfactory.PreparedModelsBuilder;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
-import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache;
import com.yahoo.vespa.config.server.tenant.Rotations;
+import com.yahoo.vespa.config.server.tenant.TlsSecretsKeys;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.flags.FlagSource;
import org.xml.sax.SAXException;
@@ -43,6 +46,7 @@ import javax.xml.transform.TransformerException;
import java.io.IOException;
import java.net.URI;
import java.time.Instant;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -68,6 +72,7 @@ public class SessionPreparer {
private final Curator curator;
private final Zone zone;
private final FlagSource flagSource;
+ private final SecretStore secretStore;
@Inject
public SessionPreparer(ModelFactoryRegistry modelFactoryRegistry,
@@ -78,7 +83,8 @@ public class SessionPreparer {
ConfigDefinitionRepo configDefinitionRepo,
Curator curator,
Zone zone,
- FlagSource flagSource) {
+ FlagSource flagSource,
+ SecretStore secretStore) {
this.modelFactoryRegistry = modelFactoryRegistry;
this.fileDistributionFactory = fileDistributionFactory;
this.hostProvisionerProvider = hostProvisionerProvider;
@@ -88,6 +94,7 @@ public class SessionPreparer {
this.curator = curator;
this.zone = zone;
this.flagSource = flagSource;
+ this.secretStore = secretStore;
}
/**
@@ -111,6 +118,7 @@ public class SessionPreparer {
if ( ! params.isDryRun()) {
preparation.writeStateZK();
preparation.writeRotZK();
+ preparation.writeTlsZK();
var globalServiceId = context.getApplicationPackage().getDeployment()
.map(DeploymentSpec::fromXml)
.flatMap(DeploymentSpec::globalServiceId);
@@ -142,7 +150,10 @@ public class SessionPreparer {
final Rotations rotations; // TODO: Remove this once we have migrated fully to container endpoints
final ContainerEndpointsCache containerEndpoints;
final Set<Rotation> rotationsSet;
+ final Set<ContainerEndpoint> endpointsSet;
final ModelContext.Properties properties;
+ private final TlsSecretsKeys tlsSecretsKeys;
+ private final Optional<TlsSecrets> tlsSecrets;
private ApplicationPackage applicationPackage;
private List<PreparedModelsBuilder.PreparedModelResult> modelResultList;
@@ -163,6 +174,10 @@ public class SessionPreparer {
this.rotations = new Rotations(curator, tenantPath);
this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator);
this.rotationsSet = getRotations(params.rotations());
+ this.tlsSecretsKeys = new TlsSecretsKeys(curator, tenantPath, secretStore);
+ this.tlsSecrets = tlsSecretsKeys.getTlsSecrets(params.tlsSecretsKeyName(), applicationId);
+ this.endpointsSet = getEndpoints(params.containerEndpoints());
+
this.properties = new ModelContextImpl.Properties(params.getApplicationId(),
configserverConfig.multitenant(),
ConfigServerSpec.fromConfig(configserverConfig),
@@ -172,9 +187,11 @@ public class SessionPreparer {
configserverConfig.hostedVespa(),
zone,
rotationsSet,
+ endpointsSet,
params.isBootstrap(),
! currentActiveApplicationSet.isPresent(),
- context.getFlagSource());
+ context.getFlagSource(),
+ tlsSecrets);
this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry,
permanentApplicationPackage,
configDefinitionRepo,
@@ -234,6 +251,11 @@ public class SessionPreparer {
checkTimeout("write rotations to zookeeper");
}
+ void writeTlsZK() {
+ tlsSecretsKeys.writeTlsSecretsKeyToZooKeeper(applicationId, params.tlsSecretsKeyName().orElse(null));
+ checkTimeout("write tlsSecretsKey to zookeeper");
+ }
+
void writeContainerEndpointsZK(Optional<String> globalServiceId) {
if (!params.containerEndpoints().isEmpty()) { // Use endpoints from parameter when explicitly given
containerEndpoints.write(applicationId, params.containerEndpoints());
@@ -266,10 +288,17 @@ public class SessionPreparer {
return rotations;
}
+ private Set<ContainerEndpoint> getEndpoints(List<ContainerEndpoint> endpoints) {
+ if (endpoints == null || endpoints.isEmpty()) {
+ endpoints = this.containerEndpoints.read(applicationId);
+ }
+ return ImmutableSet.copyOf(endpoints);
+ }
+
}
private static List<ContainerEndpoint> toContainerEndpoints(String globalServceId, Set<Rotation> rotations) {
- return List.of(new ContainerEndpoint(new ClusterId(globalServceId),
+ return List.of(new ContainerEndpoint(globalServceId,
rotations.stream()
.map(Rotation::getId)
.collect(Collectors.toUnmodifiableList())));
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
index 91f9e3c8eed..4ffce8a697e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
@@ -1,11 +1,11 @@
// Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.tenant;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
-import com.yahoo.vespa.applicationmodel.ClusterId;
import java.util.ArrayList;
import java.util.List;
@@ -49,7 +49,7 @@ public class ContainerEndpointSerializer {
names.add(containerName);
});
- return new ContainerEndpoint(new ClusterId(clusterId), names);
+ return new ContainerEndpoint(clusterId, names);
}
public static List<ContainerEndpoint> endpointListFromSlime(Slime slime) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java
index 7e29f9abc1d..9bce1224d96 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java
@@ -1,6 +1,7 @@
// Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.tenant;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.path.Path;
import com.yahoo.vespa.config.SlimeUtils;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TlsSecretsKeys.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TlsSecretsKeys.java
new file mode 100644
index 00000000000..eaa4916d8fc
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TlsSecretsKeys.java
@@ -0,0 +1,86 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.tenant;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.yahoo.config.model.api.TlsSecrets;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.container.jdisc.secretstore.SecretStore;
+import com.yahoo.path.Path;
+import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.curator.transaction.CuratorOperations;
+import com.yahoo.vespa.curator.transaction.CuratorTransaction;
+
+import java.util.Optional;
+
+/**
+ * TLS Secret keys for applications (used to retrieve actual certificate/key from secret store). Persisted in ZooKeeper.
+ *
+ * @author andreer
+ */
+public class TlsSecretsKeys {
+
+ private final Path path;
+ private final SecretStore secretStore;
+ private final Curator curator;
+
+ public TlsSecretsKeys(Curator curator, Path tenantPath, SecretStore secretStore) {
+ this.curator = curator;
+ this.path = tenantPath.append("tlsSecretsKeys/");
+ this.secretStore = secretStore;
+ }
+
+ public Optional<TlsSecrets> readTlsSecretsKeyFromZookeeper(ApplicationId application) {
+ try {
+ Optional<byte[]> data = curator.getData(tlsSecretsKeyOf(application));
+ if (data.isEmpty() || data.get().length == 0) return Optional.empty();
+ String tlsSecretsKey = new ObjectMapper().readValue(data.get(), new TypeReference<String>() {});
+ return readFromSecretStore(Optional.ofNullable(tlsSecretsKey));
+ } catch (Exception e) {
+ throw new RuntimeException("Error reading TLS secret key of " + application, e);
+ }
+ }
+
+ public void writeTlsSecretsKeyToZooKeeper(ApplicationId application, String tlsSecretsKey) {
+ if (tlsSecretsKey == null) return;
+ try {
+ byte[] data = new ObjectMapper().writeValueAsBytes(tlsSecretsKey);
+ curator.set(tlsSecretsKeyOf(application), data);
+ } catch (Exception e) {
+ throw new RuntimeException("Could not write TLS secret key of " + application, e);
+ }
+ }
+
+ public Optional<TlsSecrets> getTlsSecrets(Optional<String> secretKeyname, ApplicationId applicationId) {
+ if (secretKeyname == null || secretKeyname.isEmpty()) {
+ return readTlsSecretsKeyFromZookeeper(applicationId);
+ }
+ return readFromSecretStore(secretKeyname);
+ }
+
+ private Optional<TlsSecrets> readFromSecretStore(Optional<String> secretKeyname) {
+ if(secretKeyname.isEmpty()) return Optional.empty();
+ TlsSecrets tlsSecretParameters = TlsSecrets.MISSING;
+ try {
+ String cert = secretStore.getSecret(secretKeyname.get() + "-cert");
+ String key = secretStore.getSecret(secretKeyname.get() + "-key");
+ tlsSecretParameters = new TlsSecrets(cert, key);
+ } catch (RuntimeException e) {
+ // Assume not ready yet
+// log.log(LogLevel.DEBUG, "Could not fetch certificate/key with prefix: " + secretKeyname.get(), e);
+ }
+ return Optional.of(tlsSecretParameters);
+ }
+
+ /** Returns a transaction which deletes these tls secrets key if they exist */
+ public CuratorTransaction delete(ApplicationId application) {
+ if (!curator.exists(tlsSecretsKeyOf(application))) return CuratorTransaction.empty(curator);
+ return CuratorTransaction.from(CuratorOperations.delete(tlsSecretsKeyOf(application).getAbsolute()), curator);
+ }
+
+ /** Returns the path storing the tls secrets key for an application */
+ private Path tlsSecretsKeyOf(ApplicationId application) {
+ return path.append(application.serializedForm());
+ }
+
+}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistryTest.java
index 9b113cae715..e4ff8702ff1 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/InjectedGlobalComponentRegistryTest.java
@@ -78,7 +78,7 @@ public class InjectedGlobalComponentRegistryTest {
globalComponentRegistry =
new InjectedGlobalComponentRegistry(curator, configCurator, metrics, modelFactoryRegistry, sessionPreparer, rpcServer, configserverConfig,
generationCounter, defRepo, permanentApplicationPackage, hostRegistries, hostProvisionerProvider, zone,
- new ConfigServerDB(configserverConfig), new InMemoryFlagSource());
+ new ConfigServerDB(configserverConfig), new InMemoryFlagSource(), new MockSecretStore());
}
@Test
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/MockSecretStore.java b/configserver/src/test/java/com/yahoo/vespa/config/server/MockSecretStore.java
new file mode 100644
index 00000000000..8a77b53875e
--- /dev/null
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/MockSecretStore.java
@@ -0,0 +1,35 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server;
+
+import com.yahoo.container.jdisc.secretstore.SecretStore;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class MockSecretStore implements SecretStore {
+ Map<String, String> secrets = new HashMap<>();
+
+ @Override
+ public String getSecret(String key) {
+ if(secrets.containsKey(key))
+ return secrets.get(key);
+ throw new RuntimeException("Key not found: " + key);
+ }
+
+ @Override
+ public String getSecret(String key, int version) {
+ return getSecret(key);
+ }
+
+ public void put(String key, String value) {
+ secrets.put(key, value);
+ }
+
+ public void remove(String key) {
+ secrets.remove(key);
+ }
+
+ public void clear() {
+ secrets.clear();
+ }
+}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
index 23326474371..860bbdc134c 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.server;
import com.yahoo.component.Version;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.application.provider.MockFileRegistry;
@@ -14,6 +15,7 @@ import com.yahoo.vespa.flags.InMemoryFlagSource;
import org.junit.Test;
import java.util.Collections;
+import java.util.List;
import java.util.Optional;
import java.util.Set;
@@ -33,6 +35,10 @@ public class ModelContextImplTest {
final Rotation rotation = new Rotation("this.is.a.mock.rotation");
final Set<Rotation> rotations = Collections.singleton(rotation);
+
+ final ContainerEndpoint endpoint = new ContainerEndpoint("foo", List.of("a", "b"));
+ final Set<ContainerEndpoint> endpoints = Collections.singleton(endpoint);
+
final InMemoryFlagSource flagSource = new InMemoryFlagSource();
ModelContext context = new ModelContextImpl(
@@ -53,9 +59,11 @@ public class ModelContextImplTest {
false,
Zone.defaultZone(),
rotations,
+ endpoints,
false,
false,
- flagSource),
+ flagSource,
+ null),
Optional.empty(),
new Version(6),
new Version(6));
@@ -71,6 +79,7 @@ public class ModelContextImplTest {
assertNotNull(context.properties().zone());
assertFalse(context.properties().hostedVespa());
assertThat(context.properties().rotations(), equalTo(rotations));
+ assertThat(context.properties().endpoints(), equalTo(endpoints));
assertThat(context.properties().isFirstTimeDeployment(), equalTo(false));
assertThat(context.properties().useDedicatedNodeForLogserver(), equalTo(true));
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java b/configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java
index 62685734a47..a304f74858b 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java
@@ -5,12 +5,12 @@ import com.google.common.io.Files;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.concurrent.InThreadExecutorService;
import com.yahoo.concurrent.StripedExecutor;
-import com.yahoo.concurrent.ThreadFactoryFactory;
import com.yahoo.config.model.NullConfigModelRegistry;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.provision.Provisioner;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.vespa.config.server.application.PermanentApplicationPackage;
import com.yahoo.vespa.config.server.host.HostRegistries;
import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
@@ -21,7 +21,6 @@ import com.yahoo.vespa.config.server.session.MockFileDistributionFactory;
import com.yahoo.vespa.config.server.session.SessionPreparer;
import com.yahoo.vespa.config.server.tenant.MockTenantListener;
import com.yahoo.vespa.config.server.tenant.TenantListener;
-import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.tenant.TenantRequestHandlerTest;
import com.yahoo.vespa.config.server.zookeeper.ConfigCurator;
import com.yahoo.vespa.curator.Curator;
@@ -34,7 +33,6 @@ import java.time.Clock;
import java.util.Collections;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
/**
@@ -60,6 +58,7 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
private final ConfigServerDB configServerDB;
private final StripedExecutor<TenantName> zkWatcherExecutor;
private final ExecutorService zkCacheExecutor;
+ private final SecretStore secretStore;
private TestComponentRegistry(Curator curator, ConfigCurator configCurator, Metrics metrics,
ModelFactoryRegistry modelFactoryRegistry,
@@ -73,7 +72,8 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
ReloadListener reloadListener,
TenantListener tenantListener,
Zone zone,
- Clock clock) {
+ Clock clock,
+ SecretStore secretStore) {
this.curator = curator;
this.configCurator = configCurator;
this.metrics = metrics;
@@ -92,6 +92,7 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
this.configServerDB = new ConfigServerDB(configserverConfig);
this.zkWatcherExecutor = new StripedExecutor<>(new InThreadExecutorService());
this.zkCacheExecutor = new InThreadExecutorService();
+ this.secretStore = secretStore;
}
public static class Builder {
@@ -161,14 +162,15 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
.orElse(new MockFileDistributionFactory(configserverConfig));
HostProvisionerProvider hostProvisionerProvider = hostProvisioner.
map(HostProvisionerProvider::withProvisioner).orElseGet(HostProvisionerProvider::empty);
+ SecretStore secretStore = new MockSecretStore();
SessionPreparer sessionPreparer = new SessionPreparer(modelFactoryRegistry, fileDistributionFactory,
hostProvisionerProvider, permApp,
configserverConfig, defRepo, curator,
- zone, new InMemoryFlagSource());
+ zone, new InMemoryFlagSource(), secretStore);
return new TestComponentRegistry(curator, ConfigCurator.create(curator), metrics, modelFactoryRegistry,
permApp, fileDistributionFactory, hostRegistries, configserverConfig,
sessionPreparer, hostProvisioner, defRepo, reloadListener, tenantListener,
- zone, clock);
+ zone, clock, secretStore);
}
}
@@ -220,6 +222,11 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
return zkCacheExecutor;
}
+ @Override
+ public SecretStore getSecretStore() {
+ return secretStore;
+ }
+
public FileDistributionFactory getFileDistributionFactory() { return fileDistributionFactory; }
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java
index 395c1ecb80b..1f99f59eb8e 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java
@@ -5,6 +5,7 @@ import com.yahoo.cloud.config.LbServicesConfig;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.NullConfigModelRegistry;
import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.Model;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
@@ -20,11 +21,14 @@ import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.model.VespaModel;
import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
import org.xml.sax.SAXException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
@@ -33,20 +37,34 @@ import java.util.Random;
import java.util.Set;
import static com.yahoo.config.model.api.container.ContainerServiceType.QRSERVER;
-import static org.hamcrest.Matchers.is;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.*;
+import static org.junit.Assume.assumeFalse;
+import static org.junit.Assume.assumeTrue;
/**
* @author Ulf Lilleengen
*/
+@RunWith(Parameterized.class)
public class LbServicesProducerTest {
private static final String rotation1 = "rotation-1";
private static final String rotation2 = "rotation-2";
private static final String rotationString = rotation1 + "," + rotation2;
private static final Set<Rotation> rotations = Collections.singleton(new Rotation(rotationString));
+ private static final Set<ContainerEndpoint> endpoints = Set.of(
+ new ContainerEndpoint("mydisc", List.of("rotation-1", "rotation-2"))
+ );
private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
+ private final boolean useGlobalServiceId;
+
+ @Parameterized.Parameters
+ public static Object[] useGlobalServiceId() {
+ return new Object[] { true, false };
+ }
+
+ public LbServicesProducerTest(boolean useGlobalServiceId) {
+ this.useGlobalServiceId = useGlobalServiceId;
+ }
@Test
public void testDeterministicGetConfig() throws IOException, SAXException {
@@ -123,20 +141,40 @@ public class LbServicesProducerTest {
@Test
public void testConfigAliasesWithRotations() throws IOException, SAXException {
+ assumeTrue(useGlobalServiceId);
+
Map<TenantName, Set<ApplicationInfo>> testModel = createTestModel(new DeployState.Builder()
.rotations(rotations)
.properties(new TestProperties().setHostedVespa(true)));
RegionName regionName = RegionName.from("us-east-1");
- LbServicesConfig conf = getLbServicesConfig(new Zone(Environment.prod, regionName), testModel);
- final LbServicesConfig.Tenants.Applications.Hosts.Services services = conf.tenants("foo").applications("foo:prod:" + regionName.value() + ":default").hosts("foo.foo.yahoo.com").services(QRSERVER.serviceName);
- assertThat(services.servicealiases().size(), is(1));
- assertThat(services.endpointaliases().size(), is(4));
- assertThat(services.servicealiases(0), is("service1"));
- assertThat(services.endpointaliases(0), is("foo1.bar1.com"));
- assertThat(services.endpointaliases(1), is("foo2.bar2.com"));
- assertThat(services.endpointaliases(2), is(rotation1));
- assertThat(services.endpointaliases(3), is(rotation2));
+ var services = getLbServicesConfig(new Zone(Environment.prod, regionName), testModel)
+ .tenants("foo")
+ .applications("foo:prod:" + regionName.value() + ":default")
+ .hosts("foo.foo.yahoo.com")
+ .services(QRSERVER.serviceName);
+
+ assertThat(services.servicealiases(), contains("service1"));
+ assertThat("Missing rotations in list: " + services.endpointaliases(), services.endpointaliases(), containsInAnyOrder("foo1.bar1.com", "foo2.bar2.com", rotation1, rotation2));
+ }
+
+ @Test
+ public void testConfigAliasesWithEndpoints() throws IOException, SAXException {
+ assumeFalse(useGlobalServiceId);
+
+ Map<TenantName, Set<ApplicationInfo>> testModel = createTestModel(new DeployState.Builder()
+ .endpoints(endpoints)
+ .properties(new TestProperties().setHostedVespa(true)));
+ RegionName regionName = RegionName.from("us-east-1");
+
+ var services = getLbServicesConfig(new Zone(Environment.prod, regionName), testModel)
+ .tenants("foo")
+ .applications("foo:prod:" + regionName.value() + ":default")
+ .hosts("foo.foo.yahoo.com")
+ .services(QRSERVER.serviceName);
+
+ assertThat(services.servicealiases(), contains("service1"));
+ assertThat("Missing endpoints in list: " + services.endpointaliases(), services.endpointaliases(), containsInAnyOrder("foo1.bar1.com", "foo2.bar2.com", rotation1, rotation2));
}
private Map<TenantName, Set<ApplicationInfo>> randomizeApplications(Map<TenantName, Set<ApplicationInfo>> testModel, int seed) {
@@ -195,14 +233,32 @@ public class LbServicesProducerTest {
" <search/>" +
"</jdisc>" +
"</services>";
- String deploymentInfo ="<?xml version='1.0' encoding='UTF-8'?>" +
- "<deployment version='1.0'>" +
- " <test />" +
- " <prod global-service-id='mydisc'>" +
- " <region active='true'>us-east-1</region>" +
- " <region active='false'>us-east-2</region>" +
- " </prod>" +
- "</deployment>";
+
+ String deploymentInfo;
+
+ if (useGlobalServiceId) {
+ deploymentInfo ="<?xml version='1.0' encoding='UTF-8'?>" +
+ "<deployment version='1.0'>" +
+ " <test />" +
+ " <prod global-service-id='mydisc'>" +
+ " <region active='true'>us-east-1</region>" +
+ " <region active='false'>us-east-2</region>" +
+ " </prod>" +
+ "</deployment>";
+ } else {
+ deploymentInfo ="<?xml version='1.0' encoding='UTF-8'?>" +
+ "<deployment version='1.0'>" +
+ " <test />" +
+ " <prod>" +
+ " <region active='true'>us-east-1</region>" +
+ " <region active='false'>us-east-2</region>" +
+ " </prod>" +
+ " <endpoints>" +
+ " <endpoint container-id='mydisc' />" +
+ " </endpoints>" +
+ "</deployment>";
+ }
+
return new MockApplicationPackage.Builder().withHosts(hosts).withServices(services).withDeploymentSpec(deploymentInfo).build();
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
index 6eba85af37e..f5fd6053b07 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
@@ -6,8 +6,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Rotation;
import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
-import com.yahoo.vespa.applicationmodel.ClusterId;
-import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
+import com.yahoo.config.model.api.ContainerEndpoint;
import org.junit.Test;
import java.net.URLEncoder;
@@ -84,10 +83,10 @@ public class PrepareParamsTest {
@Test
public void testCorrectParsingWithContainerEndpoints() {
- var endpoints = List.of(new ContainerEndpoint(new ClusterId("qrs1"),
+ var endpoints = List.of(new ContainerEndpoint("qrs1",
List.of("c1.example.com",
"c2.example.com")),
- new ContainerEndpoint(new ClusterId("qrs2"),
+ new ContainerEndpoint("qrs2",
List.of("c3.example.com",
"c4.example.com")));
var param = "[\n" +
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
index 6b2810af66c..651dde375ee 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
@@ -4,10 +4,12 @@ package com.yahoo.vespa.config.server.session;
import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.ModelContext;
+import com.yahoo.config.model.api.TlsSecrets;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.application.provider.FilesApplicationPackage;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.CertificateNotReadyException;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.Rotation;
import com.yahoo.config.provision.TenantName;
@@ -15,8 +17,8 @@ import com.yahoo.io.IOUtils;
import com.yahoo.log.LogLevel;
import com.yahoo.path.Path;
import com.yahoo.slime.Slime;
-import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.config.server.MockReloadHandler;
+import com.yahoo.vespa.config.server.MockSecretStore;
import com.yahoo.vespa.config.server.TestComponentRegistry;
import com.yahoo.vespa.config.server.TimeoutBudgetTest;
import com.yahoo.vespa.config.server.application.PermanentApplicationPackage;
@@ -27,9 +29,10 @@ import com.yahoo.vespa.config.server.http.InvalidApplicationException;
import com.yahoo.vespa.config.server.model.TestModelFactory;
import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
-import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache;
import com.yahoo.vespa.config.server.tenant.Rotations;
+import com.yahoo.vespa.config.server.tenant.TlsSecretsKeys;
import com.yahoo.vespa.config.server.zookeeper.ConfigCurator;
import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
@@ -72,7 +75,7 @@ public class SessionPreparerTest {
private SessionPreparer preparer;
private TestComponentRegistry componentRegistry;
private MockFileDistributionFactory fileDistributionFactory;
-
+ private MockSecretStore secretStore = new MockSecretStore();
@Rule
public TemporaryFolder folder = new TemporaryFolder();
@@ -107,7 +110,8 @@ public class SessionPreparerTest {
componentRegistry.getStaticConfigDefinitionRepo(),
curator,
componentRegistry.getZone(),
- flagSource);
+ flagSource,
+ secretStore);
}
@Test(expected = InvalidApplicationException.class)
@@ -218,7 +222,7 @@ public class SessionPreparerTest {
var params = new PrepareParams.Builder().applicationId(applicationId).rotations(rotations).build();
prepare(new File("src/test/resources/deploy/hosted-app"), params);
- var expected = List.of(new ContainerEndpoint(new ClusterId("qrs"),
+ var expected = List.of(new ContainerEndpoint("qrs",
List.of("app1.tenant1.global.vespa.example.com",
"rotation-042.vespa.global.routing")));
assertEquals(expected, readContainerEndpoints(applicationId));
@@ -248,15 +252,48 @@ public class SessionPreparerTest {
.build();
prepare(new File("src/test/resources/deploy/hosted-app"), params);
- var expected = List.of(new ContainerEndpoint(new ClusterId("foo"),
+ var expected = List.of(new ContainerEndpoint("foo",
List.of("foo.app1.tenant1.global.vespa.example.com",
"rotation-042.vespa.global.routing")),
- new ContainerEndpoint(new ClusterId("bar"),
+ new ContainerEndpoint("bar",
List.of("bar.app1.tenant1.global.vespa.example.com",
"rotation-043.vespa.global.routing")));
assertEquals(expected, readContainerEndpoints(applicationId));
}
+ @Test
+ public void require_that_tlssecretkey_is_written() throws IOException {
+ var tlskey = "vespa.tlskeys.tenant1--app1";
+ var applicationId = applicationId("test");
+ var params = new PrepareParams.Builder().applicationId(applicationId).tlsSecretsKeyName(tlskey).build();
+ secretStore.put(tlskey+"-cert", "CERT");
+ secretStore.put(tlskey+"-key", "KEY");
+ prepare(new File("src/test/resources/deploy/hosted-app"), params);
+
+ // Read from zk and verify cert and key are available
+ Optional<TlsSecrets> tlsSecrets = new TlsSecretsKeys(curator, tenantPath, secretStore).readTlsSecretsKeyFromZookeeper(applicationId);
+ assertTrue(tlsSecrets.isPresent());
+ assertEquals("KEY", tlsSecrets.get().key());
+ assertEquals("CERT", tlsSecrets.get().certificate());
+ }
+
+ @Test(expected = CertificateNotReadyException.class)
+ public void require_that_tlssecretkey_is_missing_when_not_in_secretstore() throws IOException {
+ var tlskey = "vespa.tlskeys.tenant1--app1";
+ var applicationId = applicationId("test");
+ var params = new PrepareParams.Builder().applicationId(applicationId).tlsSecretsKeyName(tlskey).build();
+ prepare(new File("src/test/resources/deploy/hosted-app"), params);
+ }
+
+ @Test(expected = CertificateNotReadyException.class)
+ public void require_that_tlssecretkey_is_missing_when_certificate_not_in_secretstore() throws IOException {
+ var tlskey = "vespa.tlskeys.tenant1--app1";
+ var applicationId = applicationId("test");
+ var params = new PrepareParams.Builder().applicationId(applicationId).tlsSecretsKeyName(tlskey).build();
+ secretStore.put(tlskey+"-key", "KEY");
+ prepare(new File("src/test/resources/deploy/hosted-app"), params);
+ }
+
private void prepare(File app) throws IOException {
prepare(app, new PrepareParams.Builder().build());
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java
index 95f6c7718e2..b2ad0af8f9a 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionTest.java
@@ -21,7 +21,7 @@ public class SessionTest {
public boolean isPrepared = false;
public MockSessionPreparer() {
- super(null, null, null, null, null, null, new MockCurator(), null, null);
+ super(null, null, null, null, null, null, new MockCurator(), null, null, null);
}
@Override
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java
index aac0b6d1a16..053a3f7a15d 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java
@@ -1,7 +1,7 @@
package com.yahoo.vespa.config.server.tenant;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.slime.Slime;
-import com.yahoo.vespa.applicationmodel.ClusterId;
import org.junit.Test;
import java.util.List;
@@ -30,7 +30,7 @@ public class ContainerEndpointSerializerTest {
@Test
public void writeReadSingleEndpoint() {
- final var endpoint = new ContainerEndpoint(new ClusterId("foo"), List.of("a", "b"));
+ final var endpoint = new ContainerEndpoint("foo", List.of("a", "b"));
final var serialized = new Slime();
ContainerEndpointSerializer.endpointToSlime(serialized.setObject(), endpoint);
final var deserialized = ContainerEndpointSerializer.endpointFromSlime(serialized.get());
@@ -40,7 +40,7 @@ public class ContainerEndpointSerializerTest {
@Test
public void writeReadEndpoints() {
- final var endpoints = List.of(new ContainerEndpoint(new ClusterId("foo"), List.of("a", "b")));
+ final var endpoints = List.of(new ContainerEndpoint("foo", List.of("a", "b")));
final var serialized = ContainerEndpointSerializer.endpointListToSlime(endpoints);
final var deserialized = ContainerEndpointSerializer.endpointListFromSlime(serialized);
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java
index 3598b6e63c3..4400b424d1b 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java
@@ -1,9 +1,9 @@
// Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.tenant;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.path.Path;
-import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.curator.mock.MockCurator;
import org.junit.Test;
@@ -17,7 +17,7 @@ public class ContainerEndpointsCacheTest {
public void readWriteFromCache() {
final var cache = new ContainerEndpointsCache(Path.createRoot(), new MockCurator());
final var endpoints = List.of(
- new ContainerEndpoint(new ClusterId("the-cluster-1"), List.of("a", "b", "c"))
+ new ContainerEndpoint("the-cluster-1", List.of("a", "b", "c"))
);
cache.write(ApplicationId.defaultId(), endpoints);
diff --git a/container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java b/container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java
index fb21626cc41..82c89276319 100644
--- a/container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java
+++ b/container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java
@@ -2,10 +2,10 @@
package com.yahoo.container.logging;
import com.yahoo.concurrent.ThreadFactoryFactory;
-import com.yahoo.container.core.AccessLogConfig;
import com.yahoo.io.NativeIO;
import com.yahoo.log.LogFileDb;
import com.yahoo.system.ProcessExecuter;
+import com.yahoo.yolean.Exceptions;
import java.io.File;
import java.io.FileInputStream;
@@ -22,8 +22,6 @@ import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
import java.util.logging.StreamHandler;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
import java.util.zip.GZIPOutputStream;
/**
@@ -41,15 +39,15 @@ public class LogFileHandler extends StreamHandler {
private final boolean compressOnRotation;
private long[] rotationTimes = {0}; //default to one log per day, at midnight
private String filePattern = "./log.%T"; // default to current directory, ms time stamp
- private long lastRotationTime = -1; // absolute time (millis since epoch) of current file start
- private int numberOfRecords = -1;
private long nextRotationTime = 0;
- private OutputStream currentOutputStream = null;
+ private FileOutputStream currentOutputStream = null;
private String fileName;
private String symlinkName = null;
private ArrayBlockingQueue<LogRecord> logQueue = new ArrayBlockingQueue<>(100000);
private LogRecord rotateCmd = new LogRecord(Level.SEVERE, "rotateNow");
private ExecutorService executor = Executors.newCachedThreadPool(ThreadFactoryFactory.getDaemonThreadFactory("logfilehandler.compression"));
+ private final NativeIO nativeIO = new NativeIO();
+ private long lastDropPosition = 0;
static private class LogThread extends Thread {
LogFileHandler logFileHandler;
@@ -122,7 +120,21 @@ public class LogFileHandler extends StreamHandler {
}
}
- private void internalPublish(LogRecord r) throws InterruptedException {
+ @Override
+ public synchronized void flush() {
+ super.flush();
+ try {
+ if (currentOutputStream != null) {
+ long newPos = currentOutputStream.getChannel().position();
+ nativeIO.dropPartialFileFromCache(currentOutputStream.getFD(), lastDropPosition, newPos, true);
+ lastDropPosition = newPos;
+ }
+ } catch (IOException e) {
+ logger.warning("Failed dropping from cache : " + Exceptions.toMessageString(e));
+ }
+ }
+
+ private void internalPublish(LogRecord r) {
// first check to see if new file needed.
// if so, use this.internalRotateNow() to do it
@@ -133,8 +145,6 @@ public class LogFileHandler extends StreamHandler {
if (now > nextRotationTime || currentOutputStream == null) {
internalRotateNow();
}
- // count records, and publish
- numberOfRecords++;
super.publish(r);
}
@@ -177,9 +187,9 @@ public class LogFileHandler extends StreamHandler {
}
long nowTod = timeOfDayMillis(now);
long next = 0;
- for (int i = 0; i<rotationTimes.length; i++) {
- if (nowTod < rotationTimes[i]) {
- next = rotationTimes[i]-nowTod + now;
+ for (long rotationTime : rotationTimes) {
+ if (nowTod < rotationTime) {
+ next = rotationTime-nowTod + now;
break;
}
}
@@ -197,7 +207,7 @@ public class LogFileHandler extends StreamHandler {
} catch (InterruptedException e) {
}
}
- super.flush();
+ flush();
}
private void checkAndCreateDir(String pathname) {
@@ -220,14 +230,14 @@ public class LogFileHandler extends StreamHandler {
// Throw InterruptedException upwards rather than relying on isInterrupted to stop the thread as
// isInterrupted() returns false after interruption in p.waitFor
- private void internalRotateNow() throws InterruptedException {
+ private void internalRotateNow() {
// figure out new file name, then
// use super.setOutputStream to switch to a new file
String oldFileName = fileName;
long now = System.currentTimeMillis();
fileName = LogFormatter.insertDate(filePattern, now);
- super.flush();
+ flush();
super.close();
try {
@@ -235,6 +245,7 @@ public class LogFileHandler extends StreamHandler {
FileOutputStream os = new FileOutputStream(fileName, true); // append mode, for safety
super.setOutputStream(os);
currentOutputStream = os;
+ lastDropPosition = 0;
LogFileDb.nowLoggingTo(fileName);
}
catch (IOException e) {
@@ -243,8 +254,6 @@ public class LogFileHandler extends StreamHandler {
createSymlinkToCurrentFile();
- numberOfRecords = 0;
- lastRotationTime = now;
nextRotationTime = 0; //figure it out later (lazy evaluation)
if ((oldFileName != null)) {
File oldFile = new File(oldFileName);
@@ -252,28 +261,30 @@ public class LogFileHandler extends StreamHandler {
if (compressOnRotation) {
executor.execute(() -> runCompression(oldFile));
} else {
- NativeIO nativeIO = new NativeIO();
nativeIO.dropFileFromCache(oldFile);
}
}
}
}
- private void runCompression(File oldFile) {
+
+ static void runCompression(File oldFile) {
File gzippedFile = new File(oldFile.getPath() + ".gz");
try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000);
FileInputStream inputStream = new FileInputStream(oldFile))
{
- byte [] buffer = new byte[0x100000];
+ byte [] buffer = new byte[0x400000]; // 4M buffer
+ long totalBytesRead = 0;
+ NativeIO nativeIO = new NativeIO();
for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
compressor.write(buffer, 0, read);
+ nativeIO.dropPartialFileFromCache(inputStream.getFD(), totalBytesRead, read, false);
+ totalBytesRead += read;
}
compressor.finish();
compressor.flush();
- NativeIO nativeIO = new NativeIO();
- nativeIO.dropFileFromCache(oldFile); // Drop from cache in case somebody else has a reference to it preventing from dying quickly.
oldFile.delete();
nativeIO.dropFileFromCache(gzippedFile);
} catch (IOException e) {
@@ -307,28 +318,6 @@ public class LogFileHandler extends StreamHandler {
}
/**
- * Name the current file to "name.n" where n
- * 1+ the largest integer in existing file names
- */
- private void moveCurrentFile() {
- File file=new File(fileName);
- if ( ! file.exists()) return; // no current file
- File dir=file.getParentFile();
- Pattern logFilePattern=Pattern.compile(".*\\.(\\d+)");
- long largestN=0;
- for (File existingFile : dir.listFiles()) {
- Matcher matcher=logFilePattern.matcher(existingFile.getName());
- if (!matcher.matches()) continue;
- long thisN=Long.parseLong(matcher.group(1));
- if (thisN>largestN)
- largestN=thisN;
- }
- File newFn = new File(dir, file.getName() + "." + (largestN + 1));
- LogFileDb.nowLoggingTo(newFn.getAbsolutePath());
- file.renameTo(newFn);
- }
-
- /**
* Calculate rotation times array, given times in minutes, as "0 60 ..."
*
*/
diff --git a/container-accesslogging/src/test/java/com/yahoo/container/logging/CompressWhileDrop.java b/container-accesslogging/src/test/java/com/yahoo/container/logging/CompressWhileDrop.java
new file mode 100644
index 00000000000..3ac1e00b1a0
--- /dev/null
+++ b/container-accesslogging/src/test/java/com/yahoo/container/logging/CompressWhileDrop.java
@@ -0,0 +1,10 @@
+package com.yahoo.container.logging;
+
+import java.io.File;
+
+public class CompressWhileDrop {
+ public static void main(String [] args) {
+ System.out.println("Start compressing file " + args[0]);
+ LogFileHandler.runCompression(new File(args[0]));
+ }
+}
diff --git a/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java b/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java
index 63cce619333..a18d1eee0fc 100644
--- a/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java
+++ b/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java
@@ -19,7 +19,10 @@ import com.yahoo.processing.response.Streamed;
import java.io.IOException;
import java.io.OutputStream;
-import java.util.*;
+import java.util.ArrayDeque;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/DeprecatedSecretStoreProvider.java b/container-disc/src/main/java/com/yahoo/container/jdisc/DeprecatedSecretStoreProvider.java
new file mode 100644
index 00000000000..0f47bfe2eb1
--- /dev/null
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/DeprecatedSecretStoreProvider.java
@@ -0,0 +1,34 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.container.jdisc;
+
+import com.yahoo.container.di.componentgraph.Provider;
+
+/**
+ * An secret store provider which provides a factory which throws exception on
+ * invocation - as no secret store is currently provided by default.
+ * The purpose of this is to provide a secret store for injection in the case where
+ * no secret store component is provided.
+ *
+ * @author bratseth
+ */
+@SuppressWarnings({"deprecation", "unused"})
+public class DeprecatedSecretStoreProvider implements Provider<com.yahoo.jdisc.http.SecretStore> {
+
+ private static final ThrowingSecretStore instance = new ThrowingSecretStore();
+
+ @Override
+ public com.yahoo.jdisc.http.SecretStore get() { return instance; }
+
+ @Override
+ public void deconstruct() { }
+
+ private static final class ThrowingSecretStore implements com.yahoo.jdisc.http.SecretStore {
+
+ @Override
+ public String getSecret(String key) {
+ throw new UnsupportedOperationException("A secret store is not available");
+ }
+
+ }
+
+}
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/SecretStoreProvider.java b/container-disc/src/main/java/com/yahoo/container/jdisc/SecretStoreProvider.java
index d966e66f502..6012fbe394c 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/SecretStoreProvider.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/SecretStoreProvider.java
@@ -1,34 +1,29 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.container.jdisc;
+import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.container.di.componentgraph.Provider;
-/**
- * An secret store provider which provides a factory which throws exception on
- * invocation - as no secret store is currently provided by default.
- * The purpose of this is to provide a secret store for injection in the case where
- * no secret store component is provided.
- *
- * @author bratseth
- */
-@SuppressWarnings({"deprecation", "unused"})
-public class SecretStoreProvider implements Provider<com.yahoo.jdisc.http.SecretStore> {
+public class SecretStoreProvider implements Provider<SecretStore> {
private static final ThrowingSecretStore instance = new ThrowingSecretStore();
@Override
- public com.yahoo.jdisc.http.SecretStore get() { return instance; }
+ public SecretStore get() { return instance; }
@Override
public void deconstruct() { }
- private static final class ThrowingSecretStore implements com.yahoo.jdisc.http.SecretStore {
+ private static final class ThrowingSecretStore implements SecretStore {
@Override
public String getSecret(String key) {
throw new UnsupportedOperationException("A secret store is not available");
}
+ @Override
+ public String getSecret(String key, int version) {
+ throw new UnsupportedOperationException("A secret store is not available");
+ }
}
-
}
diff --git a/container-disc/src/main/sh/vespa-start-container-daemon.sh b/container-disc/src/main/sh/vespa-start-container-daemon.sh
index 65826e650c4..53b1e54dda5 100755
--- a/container-disc/src/main/sh/vespa-start-container-daemon.sh
+++ b/container-disc/src/main/sh/vespa-start-container-daemon.sh
@@ -71,6 +71,11 @@ configure_memory() {
available="$VESPA_TOTAL_MEMORY_MB"
else
available=`free -m | grep Mem | tr -s ' ' | cut -f2 -d' '`
+ if hash cgget 2>/dev/null; then
+ available_cgroup_bytes=$(cgget -nv -r memory.limit_in_bytes /)
+ available_cgroup=$((available_cgroup_bytes >> 20))
+ available=$((available > available_cgroup ? available_cgroup : available))
+ fi
fi
jvm_heapsize=$((available * jvm_heapSizeAsPercentageOfPhysicalMemory / 100))
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index 06713d14d88..e69303c2d01 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -436,6 +436,7 @@
"public void lock()",
"public boolean isLocked()",
"public int getTermCount()",
+ "public java.util.Optional extractSingleChild()",
"public bridge synthetic com.yahoo.prelude.query.Item clone()",
"public bridge synthetic java.lang.Object clone()"
],
@@ -854,7 +855,8 @@
"abstract"
],
"methods": [
- "public void <init>()"
+ "public void <init>()",
+ "public java.util.Optional extractSingleChild()"
],
"fields": []
},
@@ -944,6 +946,7 @@
"public void addItem(com.yahoo.prelude.query.Item)",
"public void addItem(int, com.yahoo.prelude.query.Item)",
"public com.yahoo.prelude.query.Item setItem(int, com.yahoo.prelude.query.Item)",
+ "public java.util.Optional extractSingleChild()",
"public com.yahoo.prelude.query.WordItem getWordItem(int)",
"public com.yahoo.prelude.query.BlockItem getBlockItem(int)",
"protected void encodeThis(java.nio.ByteBuffer)",
@@ -974,6 +977,7 @@
"public void setIndexName(java.lang.String)",
"public void setWeight(int)",
"public void addItem(com.yahoo.prelude.query.Item)",
+ "public java.util.Optional extractSingleChild()",
"public com.yahoo.prelude.query.WordItem getWordItem(int)",
"protected void encodeThis(java.nio.ByteBuffer)",
"public int encode(java.nio.ByteBuffer)",
@@ -1239,6 +1243,7 @@
"protected void appendHeadingString(java.lang.StringBuilder)",
"protected void appendBodyString(java.lang.StringBuilder)",
"protected void adding(com.yahoo.prelude.query.Item)",
+ "public java.util.Optional extractSingleChild()",
"public com.yahoo.prelude.query.Item$ItemType getItemType()",
"public java.lang.String getName()",
"public java.lang.String getFieldName()"
@@ -6805,6 +6810,25 @@
],
"fields": []
},
+ "com.yahoo.search.rendering.JsonRenderer$FieldConsumer": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.search.result.Hit$RawUtf8Consumer"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.fasterxml.jackson.core.JsonGenerator, boolean)",
+ "public void accept(java.lang.String, java.lang.Object)",
+ "public void accept(java.lang.String, byte[], int, int)",
+ "protected boolean shouldRender(java.lang.String, java.lang.Object)",
+ "protected boolean shouldRenderUtf8Value(java.lang.String, int)",
+ "protected void renderFieldContents(java.lang.Object)",
+ "public bridge synthetic void accept(java.lang.Object, java.lang.Object)"
+ ],
+ "fields": []
+ },
"com.yahoo.search.rendering.JsonRenderer": {
"superClass": "com.yahoo.processing.rendering.AsynchronousSectionedRenderer",
"interfaces": [],
@@ -6817,13 +6841,25 @@
"protected static com.fasterxml.jackson.databind.ObjectMapper createJsonCodec()",
"public void init()",
"public void beginResponse(java.io.OutputStream)",
+ "protected void renderTrace(com.yahoo.processing.execution.Execution$Trace)",
"public void beginList(com.yahoo.processing.response.DataList)",
"protected void moreChildren()",
+ "protected void renderHitGroupHead(com.yahoo.search.result.HitGroup)",
+ "protected void renderErrors(java.util.Set)",
+ "protected void renderCoverage()",
+ "protected void renderHit(com.yahoo.search.result.Hit)",
+ "protected boolean shouldRender(com.yahoo.search.result.Hit)",
+ "protected void renderHitContents(com.yahoo.search.result.Hit)",
+ "protected void renderAllFields(com.yahoo.search.result.Hit)",
+ "protected void renderContinuations(java.util.Map)",
+ "protected void renderGroupMetadata(com.yahoo.search.grouping.result.GroupId)",
+ "protected void renderTotalHitCount(com.yahoo.search.result.Hit)",
"public void data(com.yahoo.processing.response.Data)",
"public void endList(com.yahoo.processing.response.DataList)",
"public void endResponse()",
"public java.lang.String getEncoding()",
- "public java.lang.String getMimeType()"
+ "public java.lang.String getMimeType()",
+ "protected com.yahoo.search.rendering.JsonRenderer$FieldConsumer createFieldConsumer(com.fasterxml.jackson.core.JsonGenerator, boolean)"
],
"fields": []
},
@@ -7146,12 +7182,14 @@
"methods": [
"public void <init>(com.yahoo.data.access.Inspector)",
"public com.yahoo.data.access.Inspector inspect()",
- "public java.lang.String toString()",
"public java.lang.String toJson()",
"public java.lang.StringBuilder writeJson(java.lang.StringBuilder)",
"public java.lang.Double getDouble(java.lang.String)",
"public com.yahoo.tensor.Tensor getTensor(java.lang.String)",
- "public java.util.Set featureNames()"
+ "public java.util.Set featureNames()",
+ "public java.lang.String toString()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)"
],
"fields": []
},
diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
index 3f45e8e8f00..5130cf7ff34 100644
--- a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
@@ -145,7 +145,6 @@ public class ClusterSearcher extends Searcher {
documentDbConfig);
addBackendSearcher(searcher);
} else {
- System.out.println("Dispatchers: " + searchClusterConfig.dispatcher().size());
for (int dispatcherIndex = 0; dispatcherIndex < searchClusterConfig.dispatcher().size(); dispatcherIndex++) {
try {
if ( ! isRemote(searchClusterConfig.dispatcher(dispatcherIndex).host())) {
diff --git a/container-search/src/main/java/com/yahoo/prelude/hitfield/AnnotateStringFieldPart.java b/container-search/src/main/java/com/yahoo/prelude/hitfield/AnnotateStringFieldPart.java
index f1b91ab5b72..e61d2ad8af5 100644
--- a/container-search/src/main/java/com/yahoo/prelude/hitfield/AnnotateStringFieldPart.java
+++ b/container-search/src/main/java/com/yahoo/prelude/hitfield/AnnotateStringFieldPart.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.prelude.hitfield;
-/** TODO: Class header! */
public class AnnotateStringFieldPart implements FieldPart {
public static final char RAW_ANNOTATE_BEGIN_CHAR = '\uFFF9';
diff --git a/container-search/src/main/java/com/yahoo/prelude/hitfield/HitField.java b/container-search/src/main/java/com/yahoo/prelude/hitfield/HitField.java
index fd8648b758a..e81a6653b08 100644
--- a/container-search/src/main/java/com/yahoo/prelude/hitfield/HitField.java
+++ b/container-search/src/main/java/com/yahoo/prelude/hitfield/HitField.java
@@ -29,65 +29,48 @@ public class HitField {
private Object original;
- /**
- * @param f The field name
- * @param c The field content
- */
- public HitField(String f, String c) {
- this(f, c, c.indexOf(JuniperSearcher.RAW_HIGHLIGHT_CHAR) > -1);
+ public HitField(String fieldName, String content) {
+ this(fieldName, content, content.indexOf(JuniperSearcher.RAW_HIGHLIGHT_CHAR) > -1);
}
- /**
- * @param f The field name
- * @param c The field content
- */
- public HitField(String f, XMLString c) {
- this(f, c, c.toString().indexOf(JuniperSearcher.RAW_HIGHLIGHT_CHAR) > -1);
+ public HitField(String fieldName, XMLString content) {
+ this(fieldName, content, content.toString().indexOf(JuniperSearcher.RAW_HIGHLIGHT_CHAR) > -1);
}
- /**
- * @param f The field name
- * @param c The field content
- * @param cjk true if the content is CJK text
- */
- public HitField(String f, String c, boolean cjk) {
- this(f, c, cjk, false);
+ public HitField(String fieldName, String content, boolean cjk) {
+ this(fieldName, content, cjk, false);
}
/**
- * @param f The field name
- * @param c The field content
+ * Creates a hit field
+ *
* @param cjk true if the content is CJK text
*/
- public HitField(String f, XMLString c, boolean cjk) {
- this(f, c.toString(), cjk, true);
+ public HitField(String fieldName, XMLString content, boolean cjk) {
+ this(fieldName, content.toString(), cjk, true);
}
/**
- * @param f The field name
- * @param c The field content
+ * Creates a hit field
+ *
+ * @param fieldname The field name
+ * @param content The field content
* @param cjk true if the content is CJK text
* @param xmlProperty true if this should not quote XML syntax
*/
- public HitField(String f, String c, boolean cjk, boolean xmlProperty) {
- name = f;
- rawContent = c;
- content = null;
+ public HitField(String fieldname, String content, boolean cjk, boolean xmlProperty) {
+ name = fieldname;
+ rawContent = content;
+ this.content = null;
isCJK = cjk;
this.xmlProperty = xmlProperty;
}
- /**
- * @return the name of this field
- */
public String getName() {
return name;
}
- /**
- * @return the raw/original content of this field
- */
public String getRawContent() {
return rawContent;
}
@@ -243,18 +226,13 @@ public class HitField {
}
return tokenizedContent;
}
- /**
- * Return an iterator for the tokens, delimiters and markup elements
- * of the field.
- */
+ /** Return an iterator for the tokens, delimiters and markup elements of the field. */
public ListIterator<FieldPart> listIterator() {
return new FieldIterator(ensureTokenized(),
this);
}
- /**
- * Return an iterator for the tokens in the field
- */
+ /** Return an iterator over the tokens of this field */
public ListIterator<FieldPart> tokenIterator() {
return new TokenFieldIterator(ensureTokenized(),
this);
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java b/container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java
index 907eabe60ce..64f759dcf9c 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java
@@ -13,6 +13,7 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
+import java.util.Optional;
/**
@@ -379,4 +380,13 @@ public abstract class CompositeItem extends Item {
return terms;
}
+ /**
+ * Will return its single child if itself can safely be omitted.
+ *
+ * @return a valid Item or empty Optional if it can not be done
+ */
+ public Optional<Item> extractSingleChild() {
+ return getItemCount() == 1 ? Optional.of(getItem(0)) : Optional.empty();
+ }
+
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/NonReducibleCompositeItem.java b/container-search/src/main/java/com/yahoo/prelude/query/NonReducibleCompositeItem.java
index 84aa177369a..97d724953ea 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/NonReducibleCompositeItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/NonReducibleCompositeItem.java
@@ -1,6 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.prelude.query;
+import java.util.Optional;
+
/**
* A composite item which specifies semantics which are not maintained
* if an instance with a single child is replaced by the single child.
@@ -12,4 +14,8 @@ package com.yahoo.prelude.query;
* @author bratseth
*/
public abstract class NonReducibleCompositeItem extends CompositeItem {
+ @Override
+ public Optional<Item> extractSingleChild() {
+ return Optional.empty();
+ }
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java b/container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java
index 26da5eec7eb..4de0af1f408 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java
@@ -5,6 +5,7 @@ import com.yahoo.prelude.query.textualrepresentation.Discloser;
import java.nio.ByteBuffer;
import java.util.Iterator;
+import java.util.Optional;
/**
* A term which contains a phrase - a collection of word terms
@@ -127,6 +128,13 @@ public class PhraseItem extends CompositeIndexedItem {
}
}
+ @Override
+ public Optional<Item> extractSingleChild() {
+ Optional<Item> extracted = super.extractSingleChild();
+ extracted.ifPresent(e -> e.setWeight(this.getWeight()));
+ return extracted;
+ }
+
private void addIndexedItem(IndexedItem word) {
word.setIndexName(this.getIndexName());
super.addItem((Item) word);
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/PhraseSegmentItem.java b/container-search/src/main/java/com/yahoo/prelude/query/PhraseSegmentItem.java
index a19a6e53963..53a57a968f5 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/PhraseSegmentItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/PhraseSegmentItem.java
@@ -5,6 +5,7 @@ import com.yahoo.prelude.query.textualrepresentation.Discloser;
import java.nio.ByteBuffer;
import java.util.Iterator;
+import java.util.Optional;
/**
@@ -55,10 +56,12 @@ public class PhraseSegmentItem extends IndexedSegmentItem {
super(rawWord, current, isFromQuery, stemmed, substring);
}
+ @Override
public ItemType getItemType() {
return ItemType.PHRASE;
}
+ @Override
public String getName() {
return "SPHRASE";
}
@@ -87,6 +90,7 @@ public class PhraseSegmentItem extends IndexedSegmentItem {
*
* @throws IllegalArgumentException if the given item is not a WordItem or PhraseItem
*/
+ @Override
public void addItem(Item item) {
if (item instanceof WordItem) {
addWordItem((WordItem) item);
@@ -95,6 +99,13 @@ public class PhraseSegmentItem extends IndexedSegmentItem {
}
}
+ @Override
+ public Optional<Item> extractSingleChild() {
+ Optional<Item> extracted = super.extractSingleChild();
+ extracted.ifPresent(e -> e.setWeight(this.getWeight()));
+ return extracted;
+ }
+
private void addWordItem(WordItem word) {
word.setIndexName(this.getIndexName());
super.addItem(word);
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java b/container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java
index 31e69e5b7cd..88bae76b26d 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/QueryCanonicalizer.java
@@ -4,7 +4,10 @@ package com.yahoo.prelude.query;
import com.yahoo.search.Query;
import com.yahoo.search.query.QueryTree;
-import java.util.*;
+import java.util.HashSet;
+import java.util.ListIterator;
+import java.util.Optional;
+import java.util.Set;
/**
* Query normalizer and sanity checker.
@@ -82,15 +85,11 @@ public class QueryCanonicalizer {
if (composite.getItemCount() == 0)
parentIterator.remove();
- if (composite.getItemCount() == 1 && ! (composite instanceof NonReducibleCompositeItem)) {
- if (composite instanceof PhraseItem || composite instanceof PhraseSegmentItem)
- composite.getItem(0).setWeight(composite.getWeight());
- parentIterator.set(composite.getItem(0));
- }
+ composite.extractSingleChild().ifPresent(extractedChild -> parentIterator.set(extractedChild));
return CanonicalizationResult.success();
}
-
+
private static void collapseLevels(CompositeItem composite) {
if (composite instanceof RankItem || composite instanceof NotItem) {
collapseLevels(composite, composite.getItemIterator()); // collapse the first item only
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java b/container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java
index aa446140da0..d2c19339298 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/SameElementItem.java
@@ -6,6 +6,7 @@ import com.yahoo.protect.Validator;
import java.nio.ByteBuffer;
import java.util.Iterator;
+import java.util.Optional;
/**
* This represents a query where all terms are required to match in the same element id.
@@ -54,6 +55,16 @@ public class SameElementItem extends NonReducibleCompositeItem {
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
Validator.ensureNonEmpty("Query term", asTerm.getIndexedString());
}
+
+ @Override
+ public Optional<Item> extractSingleChild() {
+ if (getItemCount() == 1) {
+ WordItem child = (WordItem) getItem(0);
+ child.setIndexName(getFieldName() + "." + child.getIndexName());
+ return Optional.of(child);
+ }
+ return Optional.empty();
+ }
@Override
public ItemType getItemType() {
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java
index 1923fdbc50d..8297a566a72 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/AbstractParser.java
@@ -94,10 +94,10 @@ public abstract class AbstractParser implements CustomParser {
}
/**
- * <p>Creates a new instance of this class, storing the given {@link ParserEnvironment} for parse-time access to the
- * environment.</p>
+ * Creates a new instance of this class, storing the given {@link ParserEnvironment} for parse-time access to the
+ * environment.
*
- * @param environment The environment settings to attach to the Parser.
+ * @param environment the environment settings to attach to the Parser
*/
protected AbstractParser(ParserEnvironment environment) {
this.environment = ParserEnvironment.fromParserEnvironment(environment);
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java
index 244d895f357..e3d1b280a5a 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/AdvancedParser.java
@@ -11,7 +11,7 @@ import static com.yahoo.prelude.query.parser.Token.Kind.NUMBER;
* Parser for queries of type advanced.
*
* @author Steinar Knutsen
- * @deprecated since 5.11, YQL+ should be used for formal queries
+ * @deprecated YQL should be used for formal queries
*/
@Deprecated // DO NOT REMOVE (we'll keep this around longer)
public class AdvancedParser extends StructuredParser {
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java
index 72ee4ae2c12..d9b969757c2 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java
@@ -1,7 +1,15 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.prelude.query.parser;
-import com.yahoo.prelude.query.*;
+import com.yahoo.prelude.query.AndItem;
+import com.yahoo.prelude.query.IntItem;
+import com.yahoo.prelude.query.Item;
+import com.yahoo.prelude.query.NotItem;
+import com.yahoo.prelude.query.NullItem;
+import com.yahoo.prelude.query.OrItem;
+import com.yahoo.prelude.query.PhraseItem;
+import com.yahoo.prelude.query.QueryCanonicalizer;
+import com.yahoo.prelude.query.RankItem;
import com.yahoo.search.query.QueryTree;
import com.yahoo.search.query.parser.ParserEnvironment;
@@ -86,14 +94,14 @@ public class AllParser extends SimpleParser {
return and;
}
- protected OrItem addOr(Item item,OrItem or) {
+ protected OrItem addOr(Item item, OrItem or) {
if (or == null)
or = new OrItem();
or.addItem(item);
return or;
}
- protected NotItem addNot(Item item,NotItem not) {
+ protected NotItem addNot(Item item, NotItem not) {
if (not == null)
not = new NotItem();
not.addNegativeItem(item);
@@ -129,9 +137,9 @@ public class AllParser extends SimpleParser {
// Interpret -N as a positive item matching a negative number (by backtracking out of this)
// but not if there is an explicit index (such as -a:b)
// but interpret --N as a negative item matching a negative number
- if ( item instanceof IntItem &&
- ((IntItem)item).getIndexName().isEmpty() &&
- ! ((IntItem)item).getNumber().startsWith(("-")))
+ if (item instanceof IntItem &&
+ ((IntItem)item).getIndexName().isEmpty() &&
+ ! ((IntItem)item).getNumber().startsWith(("-")))
item = null;
return item;
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/AnyParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/AnyParser.java
index 9cb9f36b8a6..dd836e9c8e1 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/AnyParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/AnyParser.java
@@ -3,7 +3,15 @@ package com.yahoo.prelude.query.parser;
import com.yahoo.language.Language;
import com.yahoo.prelude.IndexFacts;
-import com.yahoo.prelude.query.*;
+import com.yahoo.prelude.query.AndItem;
+import com.yahoo.prelude.query.BlockItem;
+import com.yahoo.prelude.query.CompositeItem;
+import com.yahoo.prelude.query.Item;
+import com.yahoo.prelude.query.NotItem;
+import com.yahoo.prelude.query.OrItem;
+import com.yahoo.prelude.query.PhraseItem;
+import com.yahoo.prelude.query.RankItem;
+import com.yahoo.prelude.query.TermItem;
import com.yahoo.search.query.parser.ParserEnvironment;
import java.util.Collections;
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/CustomParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/CustomParser.java
index e57e06f6b12..91ccb4a5cca 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/CustomParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/CustomParser.java
@@ -12,7 +12,6 @@ import java.util.Set;
/**
* @author Simon Thoresen Hult
- * @since 5.1.4
*/
public interface CustomParser extends Parser {
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/textualrepresentation/TextualQueryRepresentation.java b/container-search/src/main/java/com/yahoo/prelude/query/textualrepresentation/TextualQueryRepresentation.java
index e299ccb5674..858a85aeaf4 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/textualrepresentation/TextualQueryRepresentation.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/textualrepresentation/TextualQueryRepresentation.java
@@ -4,7 +4,13 @@ package com.yahoo.prelude.query.textualrepresentation;
import com.yahoo.prelude.query.Item;
import java.lang.reflect.Array;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.IdentityHashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
import java.util.regex.Pattern;
/**
@@ -21,6 +27,7 @@ public class TextualQueryRepresentation {
/** Creates the textual representation for a single Item. */
private class ItemDiscloser implements Discloser {
+
private final Item item;
final Map<String, Object> properties = new TreeMap<>();
@@ -57,7 +64,7 @@ public class TextualQueryRepresentation {
StringBuilder builder = new StringBuilder();
builder.append(name);
- if (!properties.isEmpty() || itemReferences.get(item) != null) {
+ if ( ! properties.isEmpty() || itemReferences.get(item) != null) {
builder.append('[');
addPropertiesString(builder);
builder.append(']');
diff --git a/container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java b/container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java
index 2d941681f2a..84c793a6df1 100644
--- a/container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java
+++ b/container-search/src/main/java/com/yahoo/prelude/querytransform/QueryRewrite.java
@@ -6,7 +6,6 @@ import com.yahoo.prelude.query.CompositeItem;
import com.yahoo.prelude.query.EquivItem;
import com.yahoo.prelude.query.Item;
import com.yahoo.prelude.query.NearItem;
-import com.yahoo.prelude.query.NonReducibleCompositeItem;
import com.yahoo.prelude.query.NotItem;
import com.yahoo.prelude.query.NullItem;
import com.yahoo.prelude.query.OrItem;
@@ -215,7 +214,7 @@ public class QueryRewrite {
parent.setItem(i, newChild);
}
}
- return ((numChildren == 1) && !(parent instanceof NonReducibleCompositeItem)) ? parent.getItem(0) : item;
+ return parent.extractSingleChild().orElse(item);
}
private static Item rewriteSddocname(Item item) {
diff --git a/container-search/src/main/java/com/yahoo/search/query/ranking/SoftTimeout.java b/container-search/src/main/java/com/yahoo/search/query/ranking/SoftTimeout.java
index 1bca3df4d77..0d47ef77ce5 100644
--- a/container-search/src/main/java/com/yahoo/search/query/ranking/SoftTimeout.java
+++ b/container-search/src/main/java/com/yahoo/search/query/ranking/SoftTimeout.java
@@ -45,7 +45,11 @@ public class SoftTimeout implements Cloneable {
this.enabled = enable;
}
- public Boolean getEnable() { return enabled; }
+ /** Returns whether softtimeout is enabled. Defauyt is true. */
+ public Boolean getEnable() {
+ if (enabled == null) return Boolean.TRUE;
+ return enabled;
+ }
/** Override the adaptive factor determined on the content nodes */
public void setFactor(double factor) {
diff --git a/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java b/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
index 2f241f9c7a3..54dfbfe1a85 100644
--- a/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
+++ b/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
@@ -39,6 +39,7 @@ import com.yahoo.search.result.Coverage;
import com.yahoo.search.result.DefaultErrorHit;
import com.yahoo.search.result.ErrorHit;
import com.yahoo.search.result.ErrorMessage;
+import com.yahoo.search.result.FeatureData;
import com.yahoo.search.result.Hit;
import com.yahoo.search.result.HitGroup;
import com.yahoo.search.result.NanNumber;
@@ -343,7 +344,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
return q != null && q.properties().getBoolean(DEBUG_RENDERING_KEY, false);
}
- private void renderTrace(Trace trace) throws IOException {
+ protected void renderTrace(Trace trace) throws IOException {
if (!trace.traceNode().children().iterator().hasNext()) return;
if (getResult().getQuery().getTraceLevel() == 0) return;
@@ -386,7 +387,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
}
- private void renderHitGroupHead(HitGroup hitGroup) throws IOException {
+ protected void renderHitGroupHead(HitGroup hitGroup) throws IOException {
generator.writeStartObject();
renderHitContents(hitGroup);
@@ -400,7 +401,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
// the framework will invoke begin methods as needed from here
}
- private void renderErrors(Set<ErrorMessage> errors) throws IOException {
+ protected void renderErrors(Set<ErrorMessage> errors) throws IOException {
if (errors.isEmpty()) return;
generator.writeArrayFieldStart(ERRORS);
@@ -432,7 +433,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
- private void renderCoverage() throws IOException {
+ protected void renderCoverage() throws IOException {
Coverage c = getResult().getCoverage(false);
if (c == null) return;
@@ -454,7 +455,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator.writeEndObject();
}
- private void renderHit(Hit hit) throws IOException {
+ protected void renderHit(Hit hit) throws IOException {
if (!shouldRender(hit)) return;
childrenArray();
@@ -463,11 +464,11 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator.writeEndObject();
}
- private boolean shouldRender(Hit hit) {
+ protected boolean shouldRender(Hit hit) {
return ! (hit instanceof DefaultErrorHit);
}
- private void renderHitContents(Hit hit) throws IOException {
+ protected void renderHitContents(Hit hit) throws IOException {
String id = hit.getDisplayId();
if (id != null)
generator.writeStringField(ID, id);
@@ -491,7 +492,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
renderAllFields(hit);
}
- private void renderAllFields(Hit hit) throws IOException {
+ protected void renderAllFields(Hit hit) throws IOException {
fieldConsumer.startHitFields();
renderTotalHitCount(hit);
renderStandardFields(hit);
@@ -527,7 +528,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator.writeStringField(LABEL, a.getLabel());
}
- private void renderContinuations(Map<String, Continuation> continuations) throws IOException {
+ protected void renderContinuations(Map<String, Continuation> continuations) throws IOException {
if (continuations.isEmpty()) return;
generator.writeObjectFieldStart(CONTINUATION);
@@ -537,7 +538,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator.writeEndObject();
}
- private void renderGroupMetadata(GroupId id) throws IOException {
+ protected void renderGroupMetadata(GroupId id) throws IOException {
if (!(id instanceof ValueGroupId || id instanceof BucketGroupId)) return;
if (id instanceof ValueGroupId) {
@@ -564,7 +565,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
return (id instanceof RawBucketId ? Arrays.toString(((RawBucketId) id).getTo()) : id.getTo()).toString();
}
- private void renderTotalHitCount(Hit hit) throws IOException {
+ protected void renderTotalHitCount(Hit hit) throws IOException {
if ( ! (getRecursionLevel() == 1 && hit instanceof HitGroup)) return;
fieldConsumer.ensureFieldsField();
@@ -651,7 +652,11 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
private void setGenerator(JsonGenerator generator, boolean debugRendering) {
this.generator = generator;
- this.fieldConsumer = generator == null ? null : new FieldConsumer(generator, debugRendering);
+ this.fieldConsumer = generator == null ? null : createFieldConsumer(generator, debugRendering);
+ }
+
+ protected FieldConsumer createFieldConsumer(JsonGenerator generator, boolean debugRendering) {
+ return new FieldConsumer(generator, debugRendering);
}
/**
@@ -666,7 +671,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
* This instance is reused for all hits of a Result since we are in a single-threaded context
* and want to limit object creation.
*/
- private static class FieldConsumer implements Hit.RawUtf8Consumer {
+ public static class FieldConsumer implements Hit.RawUtf8Consumer {
private final JsonGenerator generator;
private final boolean debugRendering;
@@ -728,7 +733,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
}
- private boolean shouldRender(String name, Object value) {
+ protected boolean shouldRender(String name, Object value) {
if (debugRendering) return true;
if (name.startsWith(VESPA_HIDDEN_FIELD_PREFIX)) return false;
if (value instanceof CharSequence && ((CharSequence) value).length() == 0) return false;
@@ -738,7 +743,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
return true;
}
- private boolean shouldRenderUtf8Value(String name, int length) {
+ protected boolean shouldRenderUtf8Value(String name, int length) {
if (debugRendering) return true;
if (name.startsWith(VESPA_HIDDEN_FIELD_PREFIX)) return false;
if (length == 0) return false;
@@ -780,8 +785,8 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator.writeRawValue(intermediate.toString());
}
- private void renderFieldContents(Object field) throws IOException {
- if (field instanceof Inspectable) {
+ protected void renderFieldContents(Object field) throws IOException {
+ if (field instanceof Inspectable && ! (field instanceof FeatureData)) {
renderInspector(((Inspectable)field).inspect());
} else {
renderFieldContentsDirect(field);
@@ -799,6 +804,8 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator.writeTree((TreeNode) field);
} else if (field instanceof Tensor) {
renderTensor(Optional.of((Tensor)field));
+ } else if (field instanceof FeatureData) {
+ generator.writeRawValue(((FeatureData)field).toJson());
} else if (field instanceof Inspectable) {
renderInspectorDirect(((Inspectable)field).inspect());
} else if (field instanceof JsonProducer) {
@@ -811,8 +818,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
// the null below is the field which has already been written
((FieldValue) field).serialize(null, new JsonWriter(generator));
} else if (field instanceof JSONArray || field instanceof JSONObject) {
- // org.json returns null if the object would not result in
- // syntactically correct JSON
+ // org.json returns null if the object would not result in syntactically correct JSON
String s = field.toString();
if (s == null) {
generator.writeNull();
diff --git a/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java b/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java
index c9b890e64f5..a5b51e60861 100644
--- a/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java
+++ b/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java
@@ -40,7 +40,7 @@ abstract public class Renderer extends com.yahoo.processing.rendering.Renderer<R
public final ListenableFuture<Boolean> render(OutputStream stream, Result response, Execution execution, Request request) {
Writer writer = null;
try {
- writer = createWriter(stream,response);
+ writer = createWriter(stream, response);
render(writer, response);
}
catch (IOException e) {
@@ -50,7 +50,7 @@ abstract public class Renderer extends com.yahoo.processing.rendering.Renderer<R
if (writer !=null)
try { writer.close(); } catch (IOException e2) {};
}
- SettableFuture<Boolean> completed=SettableFuture.create();
+ SettableFuture<Boolean> completed = SettableFuture.create();
completed.set(true);
return completed;
}
diff --git a/container-search/src/main/java/com/yahoo/search/rendering/SyncDefaultRenderer.java b/container-search/src/main/java/com/yahoo/search/rendering/SyncDefaultRenderer.java
index 9a716e4b18b..9d0e110a6dd 100644
--- a/container-search/src/main/java/com/yahoo/search/rendering/SyncDefaultRenderer.java
+++ b/container-search/src/main/java/com/yahoo/search/rendering/SyncDefaultRenderer.java
@@ -12,7 +12,14 @@ import com.yahoo.search.Query;
import com.yahoo.search.Result;
import com.yahoo.search.grouping.result.HitRenderer;
import com.yahoo.search.query.context.QueryContext;
-import com.yahoo.search.result.*;
+import com.yahoo.search.result.Coverage;
+import com.yahoo.search.result.DefaultErrorHit;
+import com.yahoo.search.result.ErrorHit;
+import com.yahoo.search.result.ErrorMessage;
+import com.yahoo.search.result.Hit;
+import com.yahoo.search.result.HitGroup;
+import com.yahoo.search.result.Relevance;
+import com.yahoo.search.result.StructuredData;
import com.yahoo.text.Utf8String;
import com.yahoo.text.XML;
import com.yahoo.text.XMLWriter;
diff --git a/container-search/src/main/java/com/yahoo/search/result/FeatureData.java b/container-search/src/main/java/com/yahoo/search/result/FeatureData.java
index 7e5d6b12f30..1fd8f6e7e17 100644
--- a/container-search/src/main/java/com/yahoo/search/result/FeatureData.java
+++ b/container-search/src/main/java/com/yahoo/search/result/FeatureData.java
@@ -18,7 +18,7 @@ import java.util.Set;
/**
* A wrapper for structured data representing feature values: A map of floats and tensors.
- * This class is not thread safe even when it is only consumed.
+ * This class is immutable but not thread safe.
*/
public class FeatureData implements Inspectable, JsonProducer {
@@ -26,6 +26,8 @@ public class FeatureData implements Inspectable, JsonProducer {
private Set<String> featureNames = null;
+ private String jsonForm = null;
+
public FeatureData(Inspector value) {
this.value = value;
}
@@ -39,14 +41,11 @@ public class FeatureData implements Inspectable, JsonProducer {
public Inspector inspect() { return value; }
@Override
- public String toString() {
- if (value.type() == Type.EMPTY) return "";
- return toJson();
- }
-
- @Override
public String toJson() {
- return writeJson(new StringBuilder()).toString();
+ if (jsonForm != null) return jsonForm;
+
+ jsonForm = writeJson(new StringBuilder()).toString();
+ return jsonForm;
}
@Override
@@ -95,6 +94,22 @@ public class FeatureData implements Inspectable, JsonProducer {
return featureNames;
}
+ @Override
+ public String toString() {
+ if (value.type() == Type.EMPTY) return "";
+ return toJson();
+ }
+
+ @Override
+ public int hashCode() { return toJson().hashCode(); }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == this) return true;
+ if ( ! (other instanceof FeatureData)) return false;
+ return ((FeatureData)other).toJson().equals(this.toJson());
+ }
+
/** A JSON encoder which encodes DATA as a tensor */
private static class Encoder extends JsonRender.StringEncoder {
diff --git a/container-search/src/test/java/com/yahoo/prelude/fastsearch/SlimeSummaryTestCase.java b/container-search/src/test/java/com/yahoo/prelude/fastsearch/SlimeSummaryTestCase.java
index ad4d0cf82e5..e6cc3ac9e54 100644
--- a/container-search/src/test/java/com/yahoo/prelude/fastsearch/SlimeSummaryTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/fastsearch/SlimeSummaryTestCase.java
@@ -3,9 +3,11 @@ package com.yahoo.prelude.fastsearch;
import com.google.common.collect.ImmutableSet;
import com.yahoo.config.subscription.ConfigGetter;
+import com.yahoo.data.access.slime.SlimeAdapter;
import com.yahoo.prelude.hitfield.RawData;
import com.yahoo.prelude.hitfield.XMLString;
import com.yahoo.prelude.hitfield.JSONString;
+import com.yahoo.search.result.FeatureData;
import com.yahoo.search.result.Hit;
import com.yahoo.search.result.NanNumber;
import com.yahoo.search.result.StructuredData;
@@ -17,9 +19,11 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
import com.yahoo.slime.BinaryFormat;
import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.serialization.TypedBinaryFormat;
@@ -62,6 +66,7 @@ public class SlimeSummaryTestCase {
assertNull(hit.getField("jsonstring_field"));
assertNull(hit.getField("tensor_field1"));
assertNull(hit.getField("tensor_field2"));
+ assertNull(hit.getField("summaryfeatures"));
}
@Test
@@ -75,7 +80,7 @@ public class SlimeSummaryTestCase {
@Test
public void testDecoding() {
Tensor tensor1 = Tensor.from("tensor(x{},y{}):{{x:foo,y:bar}:0.1}");
- Tensor tensor2 = Tensor.from("tensor(x[],y[1]):{{x:0,y:0}:-0.3}");
+ Tensor tensor2 = Tensor.from("tensor(x[1],y[1]):{{x:0,y:0}:-0.3}");
DocsumDefinitionSet docsum = createDocsumDefinitionSet(summary_cf);
FastHit hit = new FastHit();
assertNull(docsum.lazyDecode("default", fullSummary(tensor1, tensor2), hit));
@@ -111,6 +116,12 @@ public class SlimeSummaryTestCase {
}
assertEquals(tensor1, hit.getField("tensor_field1"));
assertEquals(tensor2, hit.getField("tensor_field2"));
+ FeatureData featureData = (FeatureData)hit.getField("summaryfeatures");
+ assertEquals("double_feature,tensor1_feature,tensor2_feature",
+ featureData.featureNames().stream().sorted().collect(Collectors.joining(",")));
+ assertEquals(0.5, featureData.getDouble("double_feature"), 0.00000001);
+ assertEquals(tensor1, featureData.getTensor("tensor1_feature"));
+ assertEquals(tensor2, featureData.getTensor("tensor2_feature"));
}
@Test
@@ -238,7 +249,9 @@ public class SlimeSummaryTestCase {
assertFields(expected, hit);
// --- Add full summary
- assertNull(fullDocsum.lazyDecode("default", fullishSummary(), hit));
+ Tensor tensor1 = Tensor.from("tensor(x{},y{}):{{x:foo,y:bar}:0.1}");
+ Tensor tensor2 = Tensor.from("tensor(x[1],y[1]):{{x:0,y:0}:-0.3}");
+ assertNull(fullDocsum.lazyDecode("default", fullishSummary(tensor1, tensor2), hit));
expected.put("integer_field", 4);
expected.put("short_field", (short)2);
expected.put("byte_field", (byte)1);
@@ -247,7 +260,15 @@ public class SlimeSummaryTestCase {
expected.put("int64_field", 8L);
expected.put("string_field", "string_value");
expected.put("longstring_field", "longstring_value");
- assertFields(expected, hit);
+ expected.put("tensor_field1", tensor1);
+ expected.put("tensor_field2", tensor2);
+
+ Slime slime = new Slime();
+ Cursor summaryFeatures = slime.setObject();
+ summaryFeatures.setDouble("double_feature", 0.5);
+ summaryFeatures.setData("tensor1_feature", TypedBinaryFormat.encode(tensor1));
+ summaryFeatures.setData("tensor2_feature", TypedBinaryFormat.encode(tensor2));
+ expected.put("summaryfeatures", new FeatureData(new SlimeAdapter(slime.get())));
hit.removeField("string_field");
hit.removeField("integer_field");
@@ -272,7 +293,7 @@ public class SlimeSummaryTestCase {
fail("Multiple callbacks for " + name);
traversed.put(name, value);
});
- assertEquals(expected, traversed);
+ assertEqualMaps(expected, traversed);
// raw utf8 field traverser
Map<String, Object> traversedUtf8 = new HashMap<>();
hit.forEachFieldAsRaw(new Utf8FieldTraverser(traversedUtf8));
@@ -288,7 +309,7 @@ public class SlimeSummaryTestCase {
// fieldKeys
assertEquals(expected.keySet(), hit.fieldKeys());
// fields
- assertEquals(expected, hit.fields());
+ assertEqualMaps(expected, hit.fields());
// fieldIterator
int fieldIteratorFieldCount = 0;
for (Iterator<Map.Entry<String, Object>> i = hit.fieldIterator(); i.hasNext(); ) {
@@ -302,6 +323,15 @@ public class SlimeSummaryTestCase {
assertEquals(field.getValue(), hit.getField(field.getKey()));
}
+ private void assertEqualMaps(Map<String, Object> expected, Map<String, Object> actual) {
+ assertEquals("Map sizes", expected.size(), actual.size());
+ assertEquals("Keys", expected.keySet(), actual.keySet());
+ for (var expectedEntry : expected.entrySet()) {
+ assertEquals("Key '" + expectedEntry.getKey() + "'",
+ expectedEntry.getValue(), actual.get(expectedEntry.getKey()));
+ }
+ }
+
private byte[] emptySummary() {
Slime slime = new Slime();
slime.setObject();
@@ -339,7 +369,7 @@ public class SlimeSummaryTestCase {
return encode((slime));
}
- private byte[] fullishSummary() {
+ private byte[] fullishSummary(Tensor tensor1, Tensor tensor2) {
Slime slime = new Slime();
Cursor docsum = slime.setObject();
docsum.setLong("integer_field", 4);
@@ -352,6 +382,7 @@ public class SlimeSummaryTestCase {
//docsum.setData("data_field", "data_value".getBytes(StandardCharsets.UTF_8));
docsum.setString("longstring_field", "longstring_value");
//docsum.setData("longdata_field", "longdata_value".getBytes(StandardCharsets.UTF_8));
+ addTensors(tensor1, tensor2, docsum);
return encode((slime));
}
@@ -374,11 +405,23 @@ public class SlimeSummaryTestCase {
field.setLong("foo", 1);
field.setLong("bar", 2);
}
+
+ addTensors(tensor1, tensor2, docsum);
+ return encode((slime));
+ }
+
+ private void addTensors(Tensor tensor1, Tensor tensor2, Cursor docsum) {
if (tensor1 != null)
docsum.setData("tensor_field1", TypedBinaryFormat.encode(tensor1));
if (tensor2 != null)
docsum.setData("tensor_field2", TypedBinaryFormat.encode(tensor2));
- return encode((slime));
+
+ if (tensor1 !=null && tensor2 != null) {
+ Cursor summaryFeatures = docsum.setObject("summaryfeatures");
+ summaryFeatures.setDouble("double_feature", 0.5);
+ summaryFeatures.setData("tensor1_feature", TypedBinaryFormat.encode(tensor1));
+ summaryFeatures.setData("tensor2_feature", TypedBinaryFormat.encode(tensor2));
+ }
}
private byte[] encode(Slime slime) {
diff --git a/container-search/src/test/java/com/yahoo/prelude/fastsearch/summary.cfg b/container-search/src/test/java/com/yahoo/prelude/fastsearch/summary.cfg
index e46904b17d0..e074eadcbc2 100644
--- a/container-search/src/test/java/com/yahoo/prelude/fastsearch/summary.cfg
+++ b/container-search/src/test/java/com/yahoo/prelude/fastsearch/summary.cfg
@@ -3,7 +3,7 @@ documentdb[0].name test
documentdb[0].summaryclass[1]
documentdb[0].summaryclass[0].name default
documentdb[0].summaryclass[0].id 0
-documentdb[0].summaryclass[0].fields[14]
+documentdb[0].summaryclass[0].fields[15]
documentdb[0].summaryclass[0].fields[0].name integer_field
documentdb[0].summaryclass[0].fields[0].type integer
documentdb[0].summaryclass[0].fields[1].name short_field
@@ -32,3 +32,5 @@ documentdb[0].summaryclass[0].fields[12].name tensor_field1
documentdb[0].summaryclass[0].fields[12].type tensor
documentdb[0].summaryclass[0].fields[13].name tensor_field2
documentdb[0].summaryclass[0].fields[13].type tensor
+documentdb[0].summaryclass[0].fields[14].name summaryfeatures
+documentdb[0].summaryclass[0].fields[14].type featuredata
diff --git a/container-search/src/test/java/com/yahoo/search/query/SoftTimeoutTestCase.java b/container-search/src/test/java/com/yahoo/search/query/SoftTimeoutTestCase.java
index 6754494ba4e..dff6d4c26c3 100644
--- a/container-search/src/test/java/com/yahoo/search/query/SoftTimeoutTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/query/SoftTimeoutTestCase.java
@@ -13,7 +13,7 @@ public class SoftTimeoutTestCase {
@Test
public void testDefaultsInQuery() {
Query query=new Query("?query=test");
- assertNull(query.getRanking().getSoftTimeout().getEnable());
+ assertTrue(query.getRanking().getSoftTimeout().getEnable());
assertNull(query.getRanking().getSoftTimeout().getFactor());
assertNull(query.getRanking().getSoftTimeout().getTailcost());
}
@@ -21,7 +21,7 @@ public class SoftTimeoutTestCase {
@Test
public void testQueryOverride() {
Query query=new Query("?query=test&ranking.softtimeout.factor=0.7&ranking.softtimeout.tailcost=0.3");
- assertNull(query.getRanking().getSoftTimeout().getEnable());
+ assertTrue(query.getRanking().getSoftTimeout().getEnable());
assertEquals(Double.valueOf(0.7), query.getRanking().getSoftTimeout().getFactor());
assertEquals(Double.valueOf(0.3), query.getRanking().getSoftTimeout().getTailcost());
query.prepare();
diff --git a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
index 9fb2e627e9c..a245d61bafb 100644
--- a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
@@ -36,6 +36,7 @@ import com.yahoo.search.grouping.result.RootGroup;
import com.yahoo.search.grouping.result.StringId;
import com.yahoo.search.result.Coverage;
import com.yahoo.search.result.ErrorMessage;
+import com.yahoo.search.result.FeatureData;
import com.yahoo.search.result.Hit;
import com.yahoo.search.result.HitGroup;
import com.yahoo.search.result.NanNumber;
@@ -51,6 +52,7 @@ import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
+import com.yahoo.tensor.serialization.TypedBinaryFormat;
import com.yahoo.text.Utf8;
import com.yahoo.yolean.trace.TraceNode;
import org.json.JSONArray;
@@ -123,7 +125,7 @@ public class JsonRendererTestCase {
}
@Test
- public void testDataTypes() throws IOException, InterruptedException, ExecutionException, JSONException {
+ public void testDataTypes() throws IOException, InterruptedException, ExecutionException {
String expected = "{"
+ " \"root\": {"
+ " \"children\": ["
@@ -139,7 +141,13 @@ public class JsonRendererTestCase {
+ " \"predicate\": \"a in [b]\","
+ " \"tensor1\": { \"cells\": [ { \"address\": {\"x\": \"a\"}, \"value\":2.0 } ] },"
+ " \"tensor2\": { \"cells\": [] },"
- + " \"tensor3\": { \"cells\": [ { \"address\": {\"x\": \"a\", \"y\": \"0\"}, \"value\":2.0 }, { \"address\": {\"x\": \"a\", \"y\": \"1\"}, \"value\":-1.0 } ] }"
+ + " \"tensor3\": { \"cells\": [ { \"address\": {\"x\": \"a\", \"y\": \"0\"}, \"value\":2.0 }, { \"address\": {\"x\": \"a\", \"y\": \"1\"}, \"value\":-1.0 } ] },"
+ + " \"summaryfeatures\": {"
+ + " \"scalar1\":1.5,"
+ + " \"scalar2\":2.5,"
+ + " \"tensor1\":{\"type\":\"tensor(x[3])\",\"cells\":[{\"address\":{\"x\":\"0\"},\"value\":1.5},{\"address\":{\"x\":\"1\"},\"value\":2.0},{\"address\":{\"x\":\"2\"},\"value\":2.5}]},"
+ + " \"tensor2\":{\"type\":\"tensor()\",\"cells\":[{\"address\":{},\"value\":0.5}]}"
+ + " }"
+ " },"
+ " \"id\": \"datatypestuff\","
+ " \"relevance\": 1.0"
@@ -166,12 +174,24 @@ public class JsonRendererTestCase {
h.setField("tensor2", new TensorFieldValue(TensorType.empty));
h.setField("tensor3", Tensor.from("{ {x:a, y:0}: 2.0, {x:a, y:1}: -1 }"));
h.setField("object", new Thingie());
+ h.setField("summaryfeatures", createSummaryFeatures());
r.hits().add(h);
r.setTotalHitCount(1L);
String summary = render(r);
assertEqualJson(expected, summary);
}
+ private FeatureData createSummaryFeatures() {
+ Slime slime = new Slime();
+ Cursor features = slime.setObject();
+ features.setDouble("scalar1", 1.5);
+ features.setDouble("scalar2", 2.5);
+ Tensor tensor1 = Tensor.from("tensor(x[3]):[1.5, 2, 2.5]");
+ features.setData("tensor1", TypedBinaryFormat.encode(tensor1));
+ Tensor tensor2 = Tensor.from(0.5);
+ features.setData("tensor2", TypedBinaryFormat.encode(tensor2));
+ return new FeatureData(new SlimeAdapter(slime.get()));
+ }
@Test
public void testTracing() throws IOException, InterruptedException, ExecutionException {
@@ -679,12 +699,10 @@ public class JsonRendererTestCase {
+ "}";
Result r = newEmptyResult();
Hit h = new Hit("moredatatypestuff");
- h.setField("byte", Byte.valueOf((byte) 8));
- h.setField("short", Short.valueOf((short) 16));
- h.setField("bigInteger", new BigInteger(
- "340282366920938463463374607431768211455"));
- h.setField("bigDecimal", new BigDecimal(
- "340282366920938463463374607431768211456.5"));
+ h.setField("byte", (byte)8);
+ h.setField("short", (short)16);
+ h.setField("bigInteger", new BigInteger("340282366920938463463374607431768211455"));
+ h.setField("bigDecimal", new BigDecimal("340282366920938463463374607431768211456.5"));
h.setField("nanNumber", NanNumber.NaN);
r.hits().add(h);
r.setTotalHitCount(1L);
diff --git a/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java b/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java
index 70d50b23bed..00a17f963c6 100644
--- a/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java
@@ -303,7 +303,7 @@ public class YqlParserTestCase {
assertCanonicalParse("select foo from bar where baz contains sameElement(key contains \"a\", value.f2 = 10);",
"baz:{key:a value.f2:10}");
assertCanonicalParse("select foo from bar where baz contains sameElement(key contains \"a\");",
- "baz:{key:a}");
+ "baz.key:a");
}
@Test
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificate.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificate.java
index e4d0c8246d9..dbcb44d1711 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificate.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificate.java
@@ -1,29 +1,36 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.certificates;
-import java.security.cert.X509Certificate;
-import java.util.List;
+import java.util.Objects;
/**
- * Represents a certificate chain and a reference to the private key used for generating the certificate
+ * Represents a reference to a certificate and private key.
*
* @author mortent
* @author andreer
*/
public class ApplicationCertificate {
- private final List<X509Certificate> certificateChain;
- private final KeyId keyId;
- public ApplicationCertificate(List<X509Certificate> certificateChain, KeyId keyId) {
- this.certificateChain = certificateChain;
- this.keyId = keyId;
+ private final String secretsKeyNamePrefix;
+
+ public ApplicationCertificate(String secretsKeyNamePrefix) {
+ this.secretsKeyNamePrefix = secretsKeyNamePrefix;
+ }
+
+ public String secretsKeyNamePrefix() {
+ return secretsKeyNamePrefix;
}
- public List<X509Certificate> certificateChain() {
- return certificateChain;
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ApplicationCertificate that = (ApplicationCertificate) o;
+ return Objects.equals(secretsKeyNamePrefix, that.secretsKeyNamePrefix);
}
- public KeyId keyId() {
- return keyId;
+ @Override
+ public int hashCode() {
+ return Objects.hash(secretsKeyNamePrefix);
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificateProvider.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificateProvider.java
new file mode 100644
index 00000000000..fa489a6b754
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/ApplicationCertificateProvider.java
@@ -0,0 +1,12 @@
+package com.yahoo.vespa.hosted.controller.api.integration.certificates;
+
+import com.yahoo.config.provision.ApplicationId;
+
+/**
+ * Provides a CA-signed certificate for an application.
+ *
+ * @author andreer
+ */
+public interface ApplicationCertificateProvider {
+ ApplicationCertificate requestCaSignedCertificate(ApplicationId applicationId);
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/CertificateProvider.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/CertificateProvider.java
deleted file mode 100644
index d2462eb574f..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/CertificateProvider.java
+++ /dev/null
@@ -1,14 +0,0 @@
-package com.yahoo.vespa.hosted.controller.api.integration.certificates;
-
-import java.security.KeyPair;
-import java.security.cert.X509Certificate;
-import java.util.List;
-
-/**
- * Generates a certificate.
- *
- * @author andreer
- */
-public interface CertificateProvider {
- List<X509Certificate> requestCaSignedCertificate(KeyPair keyPair, List<String> domains);
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/KeyId.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/KeyId.java
deleted file mode 100644
index 3ab22d4a5b7..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/KeyId.java
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.certificates;
-
-/**
- * Identifier for a key pair. Used for persisting/retrieving a key pair.
- *
- * @author mortent
- * @author andreer
- */
-public class KeyId {
- private final String name;
- private final int version;
-
- public KeyId(String name, int version) {
- this.name = name;
- this.version = version;
- }
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/KeyPairProvider.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/KeyPairProvider.java
deleted file mode 100644
index a872bf63343..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/KeyPairProvider.java
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.certificates;
-
-import com.yahoo.config.provision.ApplicationId;
-
-/**
- * Provides a key pair. Generates and persists the key pair if not found.
- *
- * @author mortent
- * @author andreer
- */
-public interface KeyPairProvider {
- VersionedKeyPair getKeyPair(ApplicationId applicationId);
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/VersionedKeyPair.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/VersionedKeyPair.java
deleted file mode 100644
index c95303b9497..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/VersionedKeyPair.java
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.certificates;
-
-import java.security.KeyPair;
-
-/**
- * Represents a key pair and an unique persistence identifier
- *
- * @author mortent
- * @author andreer
- */
-public class VersionedKeyPair {
- private final KeyId keyId;
- private final KeyPair keyPair;
-
- public VersionedKeyPair(KeyId keyId, KeyPair keyPair) {
- this.keyId = keyId;
- this.keyPair = keyPair;
- }
-
- public KeyId keyId() {
- return keyId;
- }
-
- public KeyPair keyPair() {
- return keyPair;
- }
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/package-info.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/package-info.java
new file mode 100644
index 00000000000..0ba13524d33
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/certificates/package-info.java
@@ -0,0 +1,5 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+package com.yahoo.vespa.hosted.controller.api.integration.certificates;
+
+import com.yahoo.osgi.annotation.ExportPackage; \ No newline at end of file
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
index 20469e6449a..ba00203ec34 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
@@ -8,6 +8,7 @@ import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.identifiers.Hostname;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
import com.yahoo.vespa.serviceview.bindings.ApplicationView;
import java.io.IOException;
@@ -29,7 +30,7 @@ public interface ConfigServer {
}
PreparedApplication deploy(DeploymentId deployment, DeployOptions deployOptions, Set<String> rotationNames,
- List<ContainerEndpoint> containerEndpoints, byte[] content);
+ List<ContainerEndpoint> containerEndpoints, ApplicationCertificate applicationCertificate, byte[] content);
void restart(DeploymentId deployment, Optional<Hostname> hostname);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
index a2d199a38a8..20599e92aa9 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerException.java
@@ -37,7 +37,8 @@ public class ConfigServerException extends RuntimeException {
OUT_OF_CAPACITY,
REQUEST_TIMEOUT,
UNKNOWN_VESPA_VERSION,
- PARENT_HOST_NOT_READY
+ PARENT_HOST_NOT_READY,
+ CERTIFICATE_NOT_READY
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/ArtifactId.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/ArtifactId.java
new file mode 100644
index 00000000000..21f38084c22
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/ArtifactId.java
@@ -0,0 +1,26 @@
+package com.yahoo.vespa.hosted.controller.api.integration.maven;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Identifier for an artifact.
+ *
+ * @author jonmv
+ */
+public class ArtifactId {
+
+ private final String groupId;
+ private final String artifactId;
+
+ public ArtifactId(String groupId, String artifactId) {
+ this.groupId = requireNonNull(groupId);
+ this.artifactId = requireNonNull(artifactId);
+ }
+
+ /** Group ID of this. */
+ public String groupId() { return groupId; }
+
+ /** Artifact ID of this. */
+ public String artifactId() { return artifactId; }
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/MavenRepository.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/MavenRepository.java
new file mode 100644
index 00000000000..fb133f75654
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/MavenRepository.java
@@ -0,0 +1,16 @@
+package com.yahoo.vespa.hosted.controller.api.integration.maven;
+
+/**
+ * A Maven repository which keeps released artifacts.
+ *
+ * @author jonmv
+ */
+public interface MavenRepository {
+
+ /** Returns metadata about all releases of a specific artifact to this repository. */
+ Metadata metadata();
+
+ /** Returns the id of the artifact whose releases this tracks. */
+ ArtifactId artifactId();
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/Metadata.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/Metadata.java
new file mode 100644
index 00000000000..fd84a05db6a
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/Metadata.java
@@ -0,0 +1,50 @@
+package com.yahoo.vespa.hosted.controller.api.integration.maven;
+
+import com.yahoo.component.Version;
+import com.yahoo.text.XML;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Metadata about a released artifact.
+ *
+ * @author jonmv
+ */
+public class Metadata {
+
+ private final ArtifactId id;
+ private final List<Version> versions;
+
+ public Metadata(ArtifactId id, List<Version> versions) {
+ this.id = requireNonNull(id);
+ this.versions = versions.stream().sorted().collect(Collectors.toUnmodifiableList());
+ }
+
+ /** Creates a new Metadata object from the given XML document. */
+ public static Metadata fromXml(String xml) {
+ Element metadata = XML.getDocument(xml).getDocumentElement();
+ ArtifactId id = new ArtifactId(XML.getValue(XML.getChild(metadata, "groupId")),
+ XML.getValue(XML.getChild(metadata, "artifactId")));
+ List<Version> versions = new ArrayList<>();
+ for (Element version : XML.getChildren(XML.getChild(XML.getChild(metadata, "versioning"), "versions")))
+ versions.add(Version.fromString(XML.getValue(version)));
+
+ return new Metadata(id, versions);
+ }
+
+ /** Id of the metadata this concerns. */
+ public ArtifactId id() { return id; }
+
+ /** List of available versions of this, sorted by ascending version order. */
+ public List<Version> versions() { return versions; }
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/package-info.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/package-info.java
new file mode 100644
index 00000000000..d5abdf31f4b
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/maven/package-info.java
@@ -0,0 +1,5 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+package com.yahoo.vespa.hosted.controller.api.integration.maven;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockMavenRepository.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockMavenRepository.java
new file mode 100644
index 00000000000..be1deb3997a
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/MockMavenRepository.java
@@ -0,0 +1,31 @@
+package com.yahoo.vespa.hosted.controller.api.integration.stubs;
+
+import com.yahoo.component.Version;
+import com.yahoo.vespa.hosted.controller.api.integration.maven.ArtifactId;
+import com.yahoo.vespa.hosted.controller.api.integration.maven.Metadata;
+import com.yahoo.vespa.hosted.controller.api.integration.maven.MavenRepository;
+
+import java.util.List;
+
+/**
+ * Mock repository for Maven artifacts that returns static metadata.
+ *
+ * @author jonmv
+ */
+public class MockMavenRepository implements MavenRepository {
+
+ public static final ArtifactId id = new ArtifactId("ai.vespa", "search");
+
+ @Override
+ public Metadata metadata() {
+ return new Metadata(id, List.of(Version.fromString("6.0"),
+ Version.fromString("6.1"),
+ Version.fromString("6.2")));
+ }
+
+ @Override
+ public ArtifactId artifactId() {
+ return id;
+ }
+
+}
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/maven/MetadataTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/maven/MetadataTest.java
new file mode 100644
index 00000000000..17d0694538c
--- /dev/null
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/maven/MetadataTest.java
@@ -0,0 +1,56 @@
+package com.yahoo.vespa.hosted.controller.api.integration.maven;
+
+import com.yahoo.component.Version;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.net.URI;
+import java.nio.file.Path;
+
+import static org.junit.Assert.assertEquals;
+
+public class MetadataTest {
+
+ @Test
+ public void testParsing() {
+ Metadata metadata = Metadata.fromXml(metadataXml);
+ assertEquals("com.yahoo.vespa", metadata.id().groupId());
+ assertEquals("tenant-base", metadata.id().artifactId());
+ assertEquals(Version.fromString("6.297.80"), metadata.versions().get(0));
+ assertEquals(Version.fromString("7.61.10"), metadata.versions().get(metadata.versions().size() - 1));
+ }
+
+ private static final String metadataXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
+ "<metadata>\n" +
+ " <groupId>com.yahoo.vespa</groupId>\n" +
+ " <artifactId>tenant-base</artifactId>\n" +
+ " <versioning>\n" +
+ " <latest>7.61.10</latest>\n" +
+ " <release>7.61.10</release>\n" +
+ " <versions>\n" +
+ " <version>6.297.80</version>\n" +
+ " <version>6.300.15</version>\n" +
+ " <version>6.301.8</version>\n" +
+ " <version>6.303.29</version>\n" +
+ " <version>6.304.14</version>\n" +
+ " <version>6.305.35</version>\n" +
+ " <version>6.328.65</version>\n" +
+ " <version>6.329.64</version>\n" +
+ " <version>6.330.51</version>\n" +
+ " <version>7.3.19</version>\n" +
+ " <version>7.18.17</version>\n" +
+ " <version>7.20.129</version>\n" +
+ " <version>7.21.18</version>\n" +
+ " <version>7.22.18</version>\n" +
+ " <version>7.38.38</version>\n" +
+ " <version>7.39.5</version>\n" +
+ " <version>7.40.41</version>\n" +
+ " <version>7.41.15</version>\n" +
+ " <version>7.57.40</version>\n" +
+ " <version>7.60.51</version>\n" +
+ " <version>7.61.10</version>\n" +
+ " </versions>\n" +
+ " <lastUpdated>20190619054245</lastUpdated>\n" +
+ " </versioning>\n" +
+ "</metadata>\n";
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
index b4e3b8b1a9a..9ca73d27120 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
@@ -11,17 +11,17 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.application.ApplicationActivity;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
-import com.yahoo.vespa.hosted.controller.application.Endpoint;
import com.yahoo.vespa.hosted.controller.application.EndpointList;
import com.yahoo.vespa.hosted.controller.application.RotationStatus;
-import com.yahoo.vespa.hosted.controller.rotation.Rotation;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import java.time.Instant;
@@ -58,9 +58,9 @@ public class Application {
private final OptionalInt majorVersion;
private final ApplicationMetrics metrics;
private final Optional<String> pemDeployKey;
- private final Optional<RotationId> legacyRotation;
- private final List<RotationId> rotations;
+ private final List<AssignedRotation> rotations;
private final Map<HostName, RotationStatus> rotationStatus;
+ private final Optional<ApplicationCertificate> applicationCertificate;
/** Creates an empty application */
public Application(ApplicationId id, Instant now) {
@@ -68,7 +68,7 @@ public class Application {
new DeploymentJobs(OptionalLong.empty(), Collections.emptyList(), Optional.empty(), false),
Change.empty(), Change.empty(), Optional.empty(), Optional.empty(), OptionalInt.empty(),
new ApplicationMetrics(0, 0),
- Optional.empty(), Optional.empty(), Collections.emptyList(), Collections.emptyMap());
+ Optional.empty(), Collections.emptyList(), Collections.emptyMap(), Optional.empty());
}
/** Used from persistence layer: Do not use */
@@ -76,18 +76,19 @@ public class Application {
List<Deployment> deployments, DeploymentJobs deploymentJobs, Change change,
Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner,
OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey,
- Optional<RotationId> legacyRotation, List<RotationId> rotations, Map<HostName, RotationStatus> rotationStatus) {
+ List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus,
+ Optional<ApplicationCertificate> applicationCertificate) {
this(id, createdAt, deploymentSpec, validationOverrides,
deployments.stream().collect(Collectors.toMap(Deployment::zone, Function.identity())),
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
Application(ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change,
Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner,
OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey,
- Optional<RotationId> legacyRotation, List<RotationId> rotations, Map<HostName, RotationStatus> rotationStatus) {
+ List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus, Optional<ApplicationCertificate> applicationCertificate) {
this.id = Objects.requireNonNull(id, "id cannot be null");
this.createdAt = Objects.requireNonNull(createdAt, "instant of creation cannot be null");
this.deploymentSpec = Objects.requireNonNull(deploymentSpec, "deploymentSpec cannot be null");
@@ -101,9 +102,9 @@ public class Application {
this.majorVersion = Objects.requireNonNull(majorVersion, "majorVersion cannot be null");
this.metrics = Objects.requireNonNull(metrics, "metrics cannot be null");
this.pemDeployKey = pemDeployKey;
- this.legacyRotation = Objects.requireNonNull(legacyRotation, "legacyRotation cannot be null");
this.rotations = List.copyOf(Objects.requireNonNull(rotations, "rotations cannot be null"));
this.rotationStatus = ImmutableMap.copyOf(Objects.requireNonNull(rotationStatus, "rotationStatus cannot be null"));
+ this.applicationCertificate = Objects.requireNonNull(applicationCertificate, "applicationCertificate cannot be null");
}
public ApplicationId id() { return id; }
@@ -200,11 +201,20 @@ public class Application {
/** Returns the global rotation id of this, if present */
public Optional<RotationId> legacyRotation() {
- return legacyRotation;
+ return rotations.stream()
+ .map(AssignedRotation::rotationId)
+ .findFirst();
}
/** Returns all rotations for this application */
public List<RotationId> rotations() {
+ return rotations.stream()
+ .map(AssignedRotation::rotationId)
+ .collect(Collectors.toList());
+ }
+
+ /** Returns all assigned rotations for this application */
+ public List<AssignedRotation> assignedRotations() {
return rotations;
}
@@ -235,6 +245,10 @@ public class Application {
.orElse(RotationStatus.unknown);
}
+ public Optional<ApplicationCertificate> applicationCertificate() {
+ return applicationCertificate;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index 7b03664e0ab..197dda8c409 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -26,6 +26,8 @@ import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.identifiers.Hostname;
import com.yahoo.vespa.hosted.controller.api.identifiers.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.BuildService;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificateProvider;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Log;
@@ -43,9 +45,11 @@ import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerator;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.Endpoint;
+import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.EndpointList;
import com.yahoo.vespa.hosted.controller.application.JobList;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
@@ -123,6 +127,8 @@ public class ApplicationController {
private final Clock clock;
private final BooleanFlag redirectLegacyDnsFlag;
private final DeploymentTrigger deploymentTrigger;
+ private final BooleanFlag provisionApplicationCertificate;
+ private final ApplicationCertificateProvider applicationCertificateProvider;
ApplicationController(Controller controller, CuratorDb curator,
AccessControl accessControl, RotationsConfig rotationsConfig,
@@ -143,6 +149,9 @@ public class ApplicationController {
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, buildService, clock);
+ this.provisionApplicationCertificate = Flags.PROVISION_APPLICATION_CERTIFICATE.bindTo(controller.flagSource());
+ this.applicationCertificateProvider = controller.applicationCertificateProvider();
+
// Update serialization format of all applications
Once.after(Duration.ofMinutes(1), () -> {
Instant start = clock.instant();
@@ -285,6 +294,7 @@ public class ApplicationController {
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
Set<String> rotationNames = new HashSet<>();
+ ApplicationCertificate applicationCertificate;
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(require(applicationId), lock);
@@ -330,6 +340,10 @@ public class ApplicationController {
// Include rotation ID to ensure that deployment can respond to health checks with rotation ID as Host header
app.rotations().stream().map(RotationId::asString).forEach(rotationNames::add);
+ // Get application certificate (provisions a new certificate if missing)
+ application = withApplicationCertificate(application);
+ applicationCertificate = application.get().applicationCertificate().orElse(null);
+
// Update application with information from application package
if ( ! preferOldestVersion
&& ! application.get().deploymentJobs().deployedInternally()
@@ -340,7 +354,7 @@ public class ApplicationController {
// Carry out deployment without holding the application lock.
options = withVersion(platformVersion, options);
- ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames);
+ ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, applicationCertificate);
lockOrThrow(applicationId, application ->
store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
@@ -407,7 +421,7 @@ public class ApplicationController {
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
DeployOptions options = withVersion(version, DeployOptions.none());
- return deploy(application.id(), applicationPackage, zone, options, Set.of());
+ return deploy(application.id(), applicationPackage, zone, options, Set.of(), /* No application cert */ null);
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
@@ -415,22 +429,23 @@ public class ApplicationController {
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
- return deploy(tester.id(), applicationPackage, zone, options, Set.of());
+ return deploy(tester.id(), applicationPackage, zone, options, Set.of(), /* No application cert for tester*/ null);
}
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, DeployOptions deployOptions,
- Set<String> rotationNames) {
+ Set<String> rotationNames, ApplicationCertificate applicationCertificate) {
DeploymentId deploymentId = new DeploymentId(application, zone);
- ConfigServer.PreparedApplication preparedApplication =
- configServer.deploy(deploymentId, deployOptions, rotationNames, List.of(), applicationPackage.zippedContent());
-
- // Refresh routing policies on successful deployment. At this point we can safely assume that the config server
- // has allocated load balancers for the deployment.
- routingPolicies.refresh(application, zone);
-
- return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
- applicationPackage.zippedContent().length);
+ try {
+ ConfigServer.PreparedApplication preparedApplication =
+ configServer.deploy(deploymentId, deployOptions, rotationNames, List.of(), applicationCertificate, applicationPackage.zippedContent());
+ return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
+ applicationPackage.zippedContent().length);
+ } finally {
+ // Even if prepare fails, a load balancer may have been provisioned. Always refresh routing policies so that
+ // any DNS updates can be propagated as early as possible.
+ routingPolicies.refresh(application, zone);
+ }
}
/** Makes sure the application has a global rotation, if eligible. */
@@ -438,7 +453,7 @@ public class ApplicationController {
if (zone.environment() == Environment.prod && application.get().deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getOrAssignRotation(application.get(), rotationLock);
- application = application.with(rotation.id());
+ application = application.with(List.of(new AssignedRotation(new ClusterSpec.Id(application.get().deploymentSpec().globalServiceId().get()), EndpointId.default_(), rotation.id())));
store(application); // store assigned rotation even if deployment fails
boolean redirectLegacyDns = redirectLegacyDnsFlag.with(FetchVector.Dimension.APPLICATION_ID, application.get().id().serializedForm())
@@ -460,6 +475,18 @@ public class ApplicationController {
return application;
}
+ private LockedApplication withApplicationCertificate(LockedApplication application) {
+ ApplicationId applicationId = application.get().id();
+
+ // TODO: Verify that the application is deploying to a zone where certificate provisioning is enabled
+ boolean provisionCertificate = provisionApplicationCertificate.with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value();
+ if (provisionCertificate) {
+ application = application.withApplicationCertificate(
+ Optional.of(applicationCertificateProvider.requestCaSignedCertificate(applicationId)));
+ }
+ return application;
+ }
+
private ActivateResult unexpectedDeployment(ApplicationId application, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
index 6f0ee75d098..ed81d08c533 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
@@ -9,18 +9,19 @@ import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.ZoneApi;
-import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.hosted.controller.api.integration.BuildService;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService;
import com.yahoo.vespa.hosted.controller.api.integration.RunDataStore;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificateProvider;
import com.yahoo.vespa.hosted.controller.api.integration.chef.Chef;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationStore;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ArtifactRepository;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.github.GitHub;
+import com.yahoo.vespa.hosted.controller.api.integration.maven.MavenRepository;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Mailer;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerator;
import com.yahoo.vespa.hosted.controller.api.integration.user.Roles;
@@ -83,6 +84,8 @@ public class Controller extends AbstractComponent {
private final AuditLogger auditLogger;
private final FlagSource flagSource;
private final NameServiceForwarder nameServiceForwarder;
+ private final ApplicationCertificateProvider applicationCertificateProvider;
+ private final MavenRepository mavenRepository;
/**
* Creates a controller
@@ -95,11 +98,13 @@ public class Controller extends AbstractComponent {
RoutingGenerator routingGenerator, Chef chef,
AccessControl accessControl,
ArtifactRepository artifactRepository, ApplicationStore applicationStore, TesterCloud testerCloud,
- BuildService buildService, RunDataStore runDataStore, Mailer mailer, FlagSource flagSource) {
+ BuildService buildService, RunDataStore runDataStore, Mailer mailer, FlagSource flagSource,
+ MavenRepository mavenRepository, ApplicationCertificateProvider applicationCertificateProvider) {
this(curator, rotationsConfig, gitHub, zoneRegistry,
configServer, metricsService, routingGenerator, chef,
Clock.systemUTC(), accessControl, artifactRepository, applicationStore, testerCloud,
- buildService, runDataStore, com.yahoo.net.HostName::getLocalhost, mailer, flagSource);
+ buildService, runDataStore, com.yahoo.net.HostName::getLocalhost, mailer, flagSource,
+ mavenRepository, applicationCertificateProvider);
}
public Controller(CuratorDb curator, RotationsConfig rotationsConfig, GitHub gitHub,
@@ -109,7 +114,7 @@ public class Controller extends AbstractComponent {
AccessControl accessControl,
ArtifactRepository artifactRepository, ApplicationStore applicationStore, TesterCloud testerCloud,
BuildService buildService, RunDataStore runDataStore, Supplier<String> hostnameSupplier,
- Mailer mailer, FlagSource flagSource) {
+ Mailer mailer, FlagSource flagSource, MavenRepository mavenRepository, ApplicationCertificateProvider applicationCertificateProvider) {
this.hostnameSupplier = Objects.requireNonNull(hostnameSupplier, "HostnameSupplier cannot be null");
this.curator = Objects.requireNonNull(curator, "Curator cannot be null");
@@ -122,6 +127,8 @@ public class Controller extends AbstractComponent {
this.mailer = Objects.requireNonNull(mailer, "Mailer cannot be null");
this.flagSource = Objects.requireNonNull(flagSource, "FlagSource cannot be null");
this.nameServiceForwarder = new NameServiceForwarder(curator);
+ this.applicationCertificateProvider = Objects.requireNonNull(applicationCertificateProvider);
+ this.mavenRepository = Objects.requireNonNull(mavenRepository, "MavenRepository cannot be null");
jobController = new JobController(this, runDataStore, Objects.requireNonNull(testerCloud));
applicationController = new ApplicationController(this, curator, accessControl,
@@ -164,9 +171,9 @@ public class Controller extends AbstractComponent {
public ZoneRegistry zoneRegistry() { return zoneRegistry; }
- public NameServiceForwarder nameServiceForwarder() {
- return nameServiceForwarder;
- }
+ public NameServiceForwarder nameServiceForwarder() { return nameServiceForwarder; }
+
+ public MavenRepository mavenRepository() { return mavenRepository; }
public ApplicationView getApplicationView(String tenantName, String applicationName, String instanceName,
String environment, String region) {
@@ -299,6 +306,10 @@ public class Controller extends AbstractComponent {
return auditLogger;
}
+ public ApplicationCertificateProvider applicationCertificateProvider() {
+ return applicationCertificateProvider;
+ }
+
/** Returns all other roles the given tenant role implies. */
public Set<Role> impliedRoles(TenantRole role) {
return Stream.concat(Roles.tenantRoles(role.tenant()).stream(),
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
index 5f958b74c39..294dc10d0bd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
@@ -10,11 +10,13 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
@@ -56,9 +58,9 @@ public class LockedApplication {
private final OptionalInt majorVersion;
private final ApplicationMetrics metrics;
private final Optional<String> pemDeployKey;
- private final Optional<RotationId> legacyRotation;
- private final List<RotationId> rotations;
+ private final List<AssignedRotation> rotations;
private final Map<HostName, RotationStatus> rotationStatus;
+ private final Optional<ApplicationCertificate> applicationCertificate;
/**
* Used to create a locked application
@@ -72,7 +74,7 @@ public class LockedApplication {
application.deployments(),
application.deploymentJobs(), application.change(), application.outstandingChange(),
application.ownershipIssueId(), application.owner(), application.majorVersion(), application.metrics(),
- application.pemDeployKey(), application.legacyRotation(), application.rotations(), application.rotationStatus());
+ application.pemDeployKey(), application.assignedRotations(), application.rotationStatus(), application.applicationCertificate());
}
private LockedApplication(Lock lock, ApplicationId id, Instant createdAt,
@@ -80,7 +82,7 @@ public class LockedApplication {
Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change,
Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner,
OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey,
- Optional<RotationId> legacyRotation, List<RotationId> rotations, Map<HostName, RotationStatus> rotationStatus) {
+ List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus, Optional<ApplicationCertificate> applicationCertificate) {
this.lock = lock;
this.id = id;
this.createdAt = createdAt;
@@ -95,44 +97,44 @@ public class LockedApplication {
this.majorVersion = majorVersion;
this.metrics = metrics;
this.pemDeployKey = pemDeployKey;
- this.legacyRotation = legacyRotation;
this.rotations = rotations;
this.rotationStatus = rotationStatus;
+ this.applicationCertificate = applicationCertificate;
}
/** Returns a read-only copy of this */
public Application get() {
return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change,
outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withBuiltInternally(boolean builtInternally) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange,
ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withProjectId(OptionalLong projectId) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withProjectId(projectId), change, outstandingChange,
ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withDeploymentIssueId(IssueId issueId) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.with(issueId), change, outstandingChange,
ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange,
ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion,
@@ -140,14 +142,14 @@ public class LockedApplication {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withCompletion(projectId, jobType, completion, jobError),
change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics,
- pemDeployKey, legacyRotation, rotations, rotationStatus);
+ pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withTriggering(jobType, job), change, outstandingChange,
ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version,
@@ -198,45 +200,45 @@ public class LockedApplication {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.without(jobType), change, outstandingChange,
ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
public LockedApplication with(DeploymentSpec deploymentSpec) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
public LockedApplication with(ValidationOverrides validationOverrides) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withChange(Change change) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withOutstandingChange(Change outstandingChange) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withOwnershipIssueId(IssueId issueId) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner,
- majorVersion, metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ majorVersion, metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withOwner(User owner) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId,
Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey,
- legacyRotation, rotations, rotationStatus);
+ rotations, rotationStatus, applicationCertificate);
}
/** Set a major version for this, or set to null to remove any major version override */
@@ -244,33 +246,40 @@ public class LockedApplication {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion),
- metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
public LockedApplication with(MetricsService.ApplicationMetrics metrics) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
public LockedApplication withPemDeployKey(String pemDeployKey) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, Optional.ofNullable(pemDeployKey), legacyRotation, rotations, rotationStatus);
+ metrics, Optional.ofNullable(pemDeployKey), rotations, rotationStatus, applicationCertificate);
}
- public LockedApplication with(RotationId rotation) {
+ public LockedApplication with(List<AssignedRotation> assignedRotations) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, Optional.of(rotation), List.of(rotation), rotationStatus);
+ metrics, pemDeployKey, assignedRotations, rotationStatus, applicationCertificate);
}
public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
+ public LockedApplication withApplicationCertificate(Optional<ApplicationCertificate> applicationCertificate) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
+ deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
+ }
+
+
/** Don't expose non-leaf sub-objects. */
private LockedApplication with(Deployment deployment) {
Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
@@ -281,7 +290,7 @@ public class LockedApplication {
private LockedApplication with(Map<ZoneId, Deployment> deployments) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
- metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
+ metrics, pemDeployKey, rotations, rotationStatus, applicationCertificate);
}
@Override
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/AssignedRotation.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/AssignedRotation.java
new file mode 100644
index 00000000000..e1ed278a79e
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/AssignedRotation.java
@@ -0,0 +1,61 @@
+package com.yahoo.vespa.hosted.controller.application;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.vespa.hosted.controller.rotation.RotationId;
+
+import java.util.Objects;
+
+/**
+ * Contains the tuple of [clusterId, endpointId, rotationId], to keep track
+ * of which services have assigned which rotations under which name.
+ *
+ * Immutable value object: all three fields are final and validated non-null
+ * in the constructor; equals/hashCode cover all three fields.
+ *
+ * @author ogronnesby
+ */
+public class AssignedRotation {
+ private final ClusterSpec.Id clusterId;
+ private final EndpointId endpointId;
+ private final RotationId rotationId;
+
+ public AssignedRotation(ClusterSpec.Id clusterId, EndpointId endpointId, RotationId rotationId) {
+ // NOTE(review): clusterId.value() is evaluated before requireNonEmpty's null check runs,
+ // so a null clusterId surfaces here as a bare NPE rather than from Objects.requireNonNull.
+ // Consider null-checking clusterId before dereferencing it — TODO confirm intended behavior.
+ this.clusterId = requireNonEmpty(clusterId, clusterId.value(), "clusterId");
+ this.endpointId = Objects.requireNonNull(endpointId);
+ this.rotationId = Objects.requireNonNull(rotationId);
+ }
+
+ public ClusterSpec.Id clusterId() { return clusterId; }
+ public EndpointId endpointId() { return endpointId; }
+ public RotationId rotationId() { return rotationId; }
+
+ @Override
+ public String toString() {
+ return "AssignedRotation{" +
+ "clusterId=" + clusterId +
+ ", endpointId='" + endpointId + '\'' +
+ ", rotationId=" + rotationId +
+ '}';
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AssignedRotation that = (AssignedRotation) o;
+ return clusterId.equals(that.clusterId) &&
+ endpointId.equals(that.endpointId) &&
+ rotationId.equals(that.rotationId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(clusterId, endpointId, rotationId);
+ }
+
+ /**
+ * Returns {@code object} after checking that both {@code object} and {@code value} are
+ * non-null and that {@code value} is a non-empty string; {@code field} names the field
+ * in the IllegalArgumentException message when {@code value} is empty.
+ */
+ private static <T> T requireNonEmpty(T object, String value, String field) {
+ Objects.requireNonNull(object);
+ Objects.requireNonNull(value);
+ if (value.isEmpty()) {
+ throw new IllegalArgumentException("Field '" + field + "' was empty");
+ }
+ return object;
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointId.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointId.java
new file mode 100644
index 00000000000..13c242c7b5f
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointId.java
@@ -0,0 +1,53 @@
+package com.yahoo.vespa.hosted.controller.application;
+
+import java.util.Objects;
+
+/**
+ * A type to represent the ID of an endpoint. This is typically the first part of
+ * an endpoint name.
+ *
+ * Immutable wrapper around a non-empty string; equality and hashing delegate to
+ * the wrapped id.
+ *
+ * @author ogronnesby
+ */
+public class EndpointId {
+ // Shared instance for the conventional "default" endpoint id, exposed via default_().
+ private static final EndpointId DEFAULT = new EndpointId("default");
+
+ private final String id;
+
+ public EndpointId(String id) {
+ this.id = requireNotEmpty(id);
+ }
+
+ public String id() { return id; }
+
+ @Override
+ public String toString() {
+ return "EndpointId{" +
+ "id='" + id + '\'' +
+ '}';
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ EndpointId that = (EndpointId) o;
+ return Objects.equals(id, that.id);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(id);
+ }
+
+ /** Returns {@code input} after checking it is non-null and non-empty. */
+ private static String requireNotEmpty(String input) {
+ Objects.requireNonNull(input);
+ if (input.isEmpty()) {
+ throw new IllegalArgumentException("The value EndpointId was empty");
+ }
+ return input;
+ }
+
+ // Trailing underscore avoids a clash with the Java 'default' keyword.
+ public static EndpointId default_() { return DEFAULT; }
+
+ public static EndpointId of(String id) { return new EndpointId(id); }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index bf2460284ab..ce0e7c0dbab 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -61,6 +61,7 @@ import static com.yahoo.log.LogLevel.DEBUG;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.ACTIVATION_CONFLICT;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.APPLICATION_LOCK_FAILURE;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.BAD_REQUEST;
+import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.CERTIFICATE_NOT_READY;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.OUT_OF_CAPACITY;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.PARENT_HOST_NOT_READY;
@@ -231,7 +232,8 @@ public class InternalStepRunner implements StepRunner {
if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest()
|| e.getErrorCode() == ACTIVATION_CONFLICT
|| e.getErrorCode() == APPLICATION_LOCK_FAILURE
- || e.getErrorCode() == PARENT_HOST_NOT_READY) {
+ || e.getErrorCode() == PARENT_HOST_NOT_READY
+ || e.getErrorCode() == CERTIFICATE_NOT_READY) {
logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage());
return Optional.empty();
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java
index 2b26e93aeb8..30bca180c0f 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java
@@ -12,7 +12,8 @@ import com.yahoo.vespa.hosted.controller.restapi.cost.config.SelfHostedCostConfi
import java.time.Clock;
import java.time.Duration;
-import java.util.*;
+import java.util.EnumSet;
+import java.util.Objects;
import java.util.logging.Logger;
/**
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
index 9302ecbe738..c4f0597572b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.config.provision.ApplicationId;
@@ -81,8 +82,8 @@ public class ResourceMeterMaintainer extends Maintainer {
private List<NodeRepositoryNode> getNodes() {
return controller().zoneRegistry().zones()
.ofCloud(CloudName.from("aws"))
- .reachable().ids().stream()
- .flatMap(zoneId -> uncheck(() -> nodeRepository.listNodes(zoneId, true).nodes().stream()))
+ .reachable().zones().stream()
+ .flatMap(zone -> uncheck(() -> nodeRepository.listNodes(zone.getId(), true).nodes().stream()))
.filter(node -> node.getOwner() != null && !node.getOwner().getTenant().equals("hosted-vespa"))
.filter(node -> node.getState() == NodeState.active)
.collect(Collectors.toList());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
index 1f20bdf5533..6ecf60e7404 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -15,12 +15,14 @@ import com.yahoo.slime.Slime;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService.ApplicationMetrics;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
@@ -29,6 +31,7 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentActivity;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
+import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.RotationStatus;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
@@ -38,6 +41,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -77,9 +81,14 @@ public class ApplicationSerializer {
private final String writeQualityField = "writeQuality";
private final String queryQualityField = "queryQuality";
private final String pemDeployKeyField = "pemDeployKey";
+ private final String assignedRotationsField = "assignedRotations";
+ private final String assignedRotationEndpointField = "endpointId";
+ private final String assignedRotationClusterField = "clusterId";
+ private final String assignedRotationRotationField = "rotationId";
private final String rotationsField = "endpoints";
private final String deprecatedRotationField = "rotation";
private final String rotationStatusField = "rotationStatus";
+ private final String applicationCertificateField = "applicationCertificate";
// Deployment fields
private final String zoneField = "zone";
@@ -171,9 +180,10 @@ public class ApplicationSerializer {
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey));
application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString()));
- Cursor rotations = root.setArray(rotationsField);
- application.rotations().forEach(rotation -> rotations.addString(rotation.asString()));
+ rotationsToSlime(application.assignedRotations(), root, rotationsField);
+ assignedRotationsToSlime(application.assignedRotations(), root, assignedRotationsField);
toSlime(application.rotationStatus(), root.setArray(rotationStatusField));
+ application.applicationCertificate().ifPresent(cert -> root.setString(applicationCertificateField, cert.secretsKeyNamePrefix()));
return slime;
}
@@ -320,6 +330,21 @@ public class ApplicationSerializer {
});
}
+ private void rotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
+ final var rotationsArray = parent.setArray(fieldName);
+ rotations.forEach(rot -> rotationsArray.addString(rot.rotationId().asString()));
+ }
+
+ private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
+ final var rotationsArray = parent.setArray(fieldName);
+ for (var rotation : rotations) {
+ final var object = rotationsArray.addObject();
+ object.setString(assignedRotationEndpointField, rotation.endpointId().id());
+ object.setString(assignedRotationRotationField, rotation.rotationId().asString());
+ object.setString(assignedRotationClusterField, rotation.clusterId().value());
+ }
+ }
+
// ------------------ Deserialization
public Application fromSlime(Slime slime) {
@@ -339,13 +364,13 @@ public class ApplicationSerializer {
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField));
- Optional<RotationId> legacyRotation = optionalString(root.field(deprecatedRotationField)).map(RotationId::new);
- List<RotationId> rotations = rotationsFromSlime(root);
+ List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, root);
Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField));
+ Optional<ApplicationCertificate> applicationCertificate = optionalString(root.field(applicationCertificateField)).map(ApplicationCertificate::new);
return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs,
deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics,
- pemDeployKey, legacyRotation, rotations, rotationStatus);
+ pemDeployKey, assignedRotations, rotationStatus, applicationCertificate);
}
private List<Deployment> deploymentsFromSlime(Inspector array) {
@@ -525,15 +550,36 @@ public class ApplicationSerializer {
Instant.ofEpochMilli(object.field(atField).asLong())));
}
- private List<RotationId> rotationsFromSlime(Inspector root) {
- final var rotations = rotationListFromSlime(root.field(rotationsField));
+ private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, Inspector root) {
+ final var assignedRotations = new LinkedHashSet<AssignedRotation>();
+
+ // Add the legacy rotation field to the set - this needs to be first
+ // TODO: Remove when we retire the rotations field
final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField));
+ if (legacyRotation.isPresent() && deploymentSpec.globalServiceId().isPresent()) {
+ final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get());
+ assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), legacyRotation.get()));
+ }
- if (legacyRotation.isPresent() && ! rotations.contains(legacyRotation.get())) {
- rotations.add(legacyRotation.get());
+ // Now add the same entries from "stupid" list of rotations
+ // TODO: Remove when we retire the rotations field
+ final var rotations = rotationListFromSlime(root.field(rotationsField));
+ for (var rotation : rotations) {
+ if (deploymentSpec.globalServiceId().isPresent()) {
+ final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get());
+ assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), rotation));
+ }
}
- return rotations;
+ // Last - add the actual entries we want. Do _not_ remove this during clean-up
+ root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
+ final var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
+ final var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
+ final var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
+ assignedRotations.add(new AssignedRotation(clusterId, endpointId, rotationId));
+ });
+
+ return List.copyOf(assignedRotations);
}
private List<RotationId> rotationListFromSlime(Inspector field) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java
index 405a2e452d0..207a5f8dcf9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java
@@ -42,6 +42,7 @@ public class VersionStatusSerializer {
private static final String committedAtField = "releasedAt";
private static final String isControllerVersionField = "isCurrentControllerVersion";
private static final String isSystemVersionField = "isCurrentSystemVersion";
+ private static final String isReleasedField = "isReleased";
private static final String deploymentStatisticsField = "deploymentStatistics";
private static final String confidenceField = "confidence";
private static final String configServersField = "configServerHostnames";
@@ -73,6 +74,7 @@ public class VersionStatusSerializer {
object.setLong(committedAtField, version.committedAt().toEpochMilli());
object.setBool(isControllerVersionField, version.isControllerVersion());
object.setBool(isSystemVersionField, version.isSystemVersion());
+ object.setBool(isReleasedField, version.isReleased());
deploymentStatisticsToSlime(version.statistics(), object.setObject(deploymentStatisticsField));
object.setString(confidenceField, version.confidence().name());
configServersToSlime(version.systemApplicationHostnames(), object.setArray(configServersField));
@@ -105,6 +107,7 @@ public class VersionStatusSerializer {
Instant.ofEpochMilli(object.field(committedAtField).asLong()),
object.field(isControllerVersionField).asBool(),
object.field(isSystemVersionField).asBool(),
+ object.field(isReleasedField).valid() ? object.field(isReleasedField).asBool() : true,
configServersFromSlime(object.field(configServersField)),
VespaVersion.Confidence.valueOf(object.field(confidenceField).asString())
);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/proxy/ConfigServerRestExecutorImpl.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/proxy/ConfigServerRestExecutorImpl.java
index a208249b410..73a029ad3b3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/proxy/ConfigServerRestExecutorImpl.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/proxy/ConfigServerRestExecutorImpl.java
@@ -5,6 +5,7 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.inject.Inject;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.config.provision.zone.ZoneList;
import com.yahoo.jdisc.http.HttpRequest.Method;
@@ -114,9 +115,9 @@ public class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor {
if ( ! environmentName.isEmpty())
zones = zones.in(Environment.from(environmentName));
- for (ZoneId zoneId : zones.ids()) {
+ for (ZoneApi zone : zones.zones()) {
responseStructure.uris.add(proxyRequest.getScheme() + "://" + proxyRequest.getControllerPrefix() +
- zoneId.environment().value() + "/" + zoneId.region().value());
+ zone.getEnvironment().value() + "/" + zone.getRegionName().value());
}
JsonNode node = mapper.valueToTree(responseStructure);
return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json");
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 3db8c447572..9f091061596 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -97,6 +97,7 @@ import java.time.Instant;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collections;
+import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -104,6 +105,7 @@ import java.util.Scanner;
import java.util.Set;
import java.util.StringJoiner;
import java.util.logging.Level;
+import java.util.stream.Collectors;
import static java.util.stream.Collectors.joining;
@@ -515,7 +517,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
});
// Compile version. The version that should be used when building an application
- object.setString("compileVersion", controller.applications().oldestInstalledPlatform(application.id()).toFullString());
+ object.setString("compileVersion", compileVersion(application.id()).toFullString());
application.majorVersion().ifPresent(majorVersion -> object.setLong("majorVersion", majorVersion));
@@ -693,6 +695,30 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
+ /**
+ * Returns a non-broken, released version at least as old as the oldest platform the given application is on.
+ *
+ * If no known version is applicable, the newest version at least as old as the oldest platform is selected,
+ * among all versions released for this system. If no such versions exists, throws an IllegalStateException.
+ */
+ private Version compileVersion(ApplicationId id) {
+ Version oldestPlatform = controller.applications().oldestInstalledPlatform(id);
+ return controller.versionStatus().versions().stream()
+ .filter(version -> version.confidence().equalOrHigherThan(VespaVersion.Confidence.low))
+ .filter(VespaVersion::isReleased)
+ .map(VespaVersion::versionNumber)
+ .filter(version -> ! version.isAfter(oldestPlatform))
+ .max(Comparator.naturalOrder())
+ .orElseGet(() -> controller.mavenRepository().metadata().versions().stream()
+ .filter(version -> ! version.isAfter(oldestPlatform))
+ .filter(version -> ! controller.versionStatus().versions().stream()
+ .map(VespaVersion::versionNumber)
+ .collect(Collectors.toSet()).contains(version))
+ .max(Comparator.naturalOrder())
+ .orElseThrow(() -> new IllegalStateException("No available releases of " +
+ controller.mavenRepository().artifactId())));
+ }
+
private HttpResponse setGlobalRotationOverride(String tenantName, String applicationName, String instanceName, String environment, String region, boolean inService, HttpRequest request) {
Application application = controller.applications().require(ApplicationId.from(tenantName, applicationName, instanceName));
ZoneId zone = ZoneId.from(environment, region);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/cost/CostCalculator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/cost/CostCalculator.java
index 18c00d69b62..c44a80f7a20 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/cost/CostCalculator.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/cost/CostCalculator.java
@@ -2,6 +2,7 @@ package com.yahoo.vespa.hosted.controller.restapi.cost;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.identifiers.Property;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeOwner;
@@ -34,8 +35,8 @@ public class CostCalculator {
String date = LocalDate.now(clock).toString();
List<NodeRepositoryNode> nodes = controller.zoneRegistry().zones()
- .reachable().in(Environment.prod).ofCloud(cloudName).ids().stream()
- .flatMap(zoneId -> uncheck(() -> nodeRepository.listNodes(zoneId, true).nodes().stream()))
+ .reachable().in(Environment.prod).ofCloud(cloudName).zones().stream()
+ .flatMap(zone -> uncheck(() -> nodeRepository.listNodes(zone.getId(), true).nodes().stream()))
.filter(node -> node.getOwner() != null && !node.getOwner().getTenant().equals("hosted-vespa"))
.collect(Collectors.toList());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java
index 5454d71185a..bc360fe3c6f 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiHandler.java
@@ -5,6 +5,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.io.IOUtils;
@@ -30,6 +31,7 @@ import java.util.List;
import java.util.Set;
import java.util.StringJoiner;
import java.util.logging.Level;
+import java.util.stream.Collectors;
/**
* This implements the /os/v1 API which provides operators with information about, and scheduling of OS upgrades for
@@ -123,7 +125,7 @@ public class OsApiHandler extends AuditLoggingRequestHandler {
ZoneList zones = controller.zoneRegistry().zones().controllerUpgraded();
if (path.get("region") != null) zones = zones.in(RegionName.from(path.get("region")));
if (path.get("environment") != null) zones = zones.in(Environment.from(path.get("environment")));
- return zones.ids();
+ return zones.zones().stream().map(ZoneApi::getId).collect(Collectors.toList());
}
private Slime setOsVersion(HttpRequest request) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v1/ZoneApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v1/ZoneApiHandler.java
index b115e659c28..6cfaed93fa9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v1/ZoneApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v1/ZoneApiHandler.java
@@ -3,6 +3,8 @@ package com.yahoo.vespa.hosted.controller.restapi.zone.v1;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
@@ -70,8 +72,8 @@ public class ZoneApiHandler extends LoggingRequestHandler {
}
private HttpResponse root(HttpRequest request) {
- List<Environment> environments = zoneRegistry.zones().all().ids().stream()
- .map(ZoneId::environment)
+ List<Environment> environments = zoneRegistry.zones().all().zones().stream()
+ .map(ZoneApi::getEnvironment)
.distinct()
.sorted(Comparator.comparing(Environment::value))
.collect(Collectors.toList());
@@ -90,17 +92,16 @@ public class ZoneApiHandler extends LoggingRequestHandler {
}
private HttpResponse environment(HttpRequest request, Environment environment) {
- List<ZoneId> zones = zoneRegistry.zones().all().in(environment).ids();
Slime slime = new Slime();
Cursor root = slime.setArray();
- zones.forEach(zone -> {
+ zoneRegistry.zones().all().in(environment).zones().forEach(zone -> {
Cursor object = root.addObject();
- object.setString("name", zone.region().value());
+ object.setString("name", zone.getRegionName().value());
object.setString("url", request.getUri()
.resolve("/zone/v2/environment/")
.resolve(environment.value() + "/")
.resolve("region/")
- .resolve(zone.region().value())
+ .resolve(zone.getRegionName().value())
.toString());
});
return new SlimeJsonResponse(slime);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v2/ZoneApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v2/ZoneApiHandler.java
index 9d95383fbfb..f0259fc4d51 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v2/ZoneApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/zone/v2/ZoneApiHandler.java
@@ -94,16 +94,16 @@ public class ZoneApiHandler extends AuditLoggingRequestHandler {
Cursor root = slime.setObject();
Cursor uris = root.setArray("uris");
ZoneList zoneList = zoneRegistry.zones().reachable();
- zoneList.ids().forEach(zoneId -> uris.addString(request.getUri()
+ zoneList.zones().forEach(zone -> uris.addString(request.getUri()
.resolve("/zone/v2/")
- .resolve(zoneId.environment().value() + "/")
- .resolve(zoneId.region().value())
+ .resolve(zone.getEnvironment().value() + "/")
+ .resolve(zone.getRegionName().value())
.toString()));
Cursor zones = root.setArray("zones");
- zoneList.ids().forEach(zoneId -> {
+ zoneList.zones().forEach(zone -> {
Cursor object = zones.addObject();
- object.setString("environment", zoneId.environment().value());
- object.setString("region", zoneId.region().value());
+ object.setString("environment", zone.getEnvironment().value());
+ object.setString("region", zone.getRegionName().value());
});
return new SlimeJsonResponse(slime);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/MavenRepositoryClient.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/MavenRepositoryClient.java
new file mode 100644
index 00000000000..9f3addd4992
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/MavenRepositoryClient.java
@@ -0,0 +1,64 @@
+package com.yahoo.vespa.hosted.controller.versions;
+
+import com.yahoo.vespa.hosted.controller.api.integration.maven.ArtifactId;
+import com.yahoo.vespa.hosted.controller.api.integration.maven.MavenRepository;
+import com.yahoo.vespa.hosted.controller.api.integration.maven.Metadata;
+import com.yahoo.vespa.hosted.controller.maven.repository.config.MavenRepositoryConfig;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * Http client implementation of a {@link MavenRepository}, which uses a configured repository and artifact ID.
+ *
+ * @author jonmv
+ */
+public class MavenRepositoryClient implements MavenRepository {
+
+ private final HttpClient client;
+ private final URI apiUrl;
+ private final ArtifactId id;
+
+ public MavenRepositoryClient(MavenRepositoryConfig config) {
+ this.client = HttpClient.newBuilder().connectTimeout(Duration.ofSeconds(5)).build();
+ this.apiUrl = URI.create(config.apiUrl() + "/").normalize();
+ this.id = new ArtifactId(config.groupId(), config.artifactId());
+ }
+
+ @Override
+ public Metadata metadata() {
+ try {
+ HttpRequest request = HttpRequest.newBuilder(withArtifactPath(apiUrl, id)).build();
+ HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString(UTF_8));
+ if (response.statusCode() != 200)
+ throw new RuntimeException("Status code '" + response.statusCode() + "' and body\n'''\n" +
+ response.body() + "\n'''\nfor request " + request);
+
+ return Metadata.fromXml(response.body());
+ }
+ catch (IOException | InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public ArtifactId artifactId() {
+ return id;
+ }
+
+ static URI withArtifactPath(URI baseUrl, ArtifactId id) {
+ List<String> parts = new ArrayList<>(List.of(id.groupId().split("\\.")));
+ parts.add(id.artifactId());
+ parts.add("maven-metadata.xml");
+ return baseUrl.resolve(String.join("/", parts));
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
index 5e57e9ebe8e..ab5fd2714e5 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
@@ -6,6 +6,7 @@ import com.yahoo.collections.ListMap;
import com.yahoo.component.Version;
import com.yahoo.component.Vtag;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.hosted.controller.Application;
@@ -129,14 +130,17 @@ public class VersionStatus {
Collection<DeploymentStatistics> deploymentStatistics = computeDeploymentStatistics(infrastructureVersions,
controller.applications().asList());
List<VespaVersion> versions = new ArrayList<>();
+ List<Version> releasedVersions = controller.mavenRepository().metadata().versions();
for (DeploymentStatistics statistics : deploymentStatistics) {
if (statistics.version().isEmpty()) continue;
try {
+ boolean isReleased = Collections.binarySearch(releasedVersions, statistics.version()) >= 0;
VespaVersion vespaVersion = createVersion(statistics,
statistics.version().equals(controllerVersion),
statistics.version().equals(systemVersion),
+ isReleased,
systemApplicationVersions.getList(statistics.version()),
controller);
versions.add(vespaVersion);
@@ -145,26 +149,24 @@ public class VersionStatus {
statistics.version().toFullString(), e);
}
}
+
Collections.sort(versions);
return new VersionStatus(versions);
}
private static ListMap<Version, HostName> findSystemApplicationVersions(Controller controller) {
- List<ZoneId> zones = controller.zoneRegistry().zones()
- .controllerUpgraded()
- .ids();
ListMap<Version, HostName> versions = new ListMap<>();
- for (ZoneId zone : zones) {
+ for (ZoneApi zone : controller.zoneRegistry().zones().controllerUpgraded().zones()) {
for (SystemApplication application : SystemApplication.all()) {
List<Node> eligibleForUpgradeApplicationNodes = controller.configServer().nodeRepository()
- .list(zone, application.id()).stream()
+ .list(zone.getId(), application.id()).stream()
.filter(SystemUpgrader::eligibleForUpgrade)
.collect(Collectors.toList());
if (eligibleForUpgradeApplicationNodes.isEmpty())
continue;
- boolean configConverged = application.configConvergedIn(zone, controller, Optional.empty());
+ boolean configConverged = application.configConvergedIn(zone.getId(), controller, Optional.empty());
if (!configConverged) {
log.log(LogLevel.WARNING, "Config for " + application.id() + " in " + zone + " has not converged");
}
@@ -238,10 +240,11 @@ public class VersionStatus {
}
return versionMap.values();
}
-
+
private static VespaVersion createVersion(DeploymentStatistics statistics,
boolean isControllerVersion,
- boolean isSystemVersion,
+ boolean isSystemVersion,
+ boolean isReleased,
Collection<HostName> configServerHostnames,
Controller controller) {
GitSha gitSha = controller.gitHub().getCommit(VESPA_REPO_OWNER, VESPA_REPO, statistics.version().toFullString());
@@ -260,6 +263,7 @@ public class VersionStatus {
gitSha.sha, committedAt,
isControllerVersion,
isSystemVersion,
+ isReleased,
configServerHostnames,
confidence
);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
index ffbf24be12a..117ce80adaa 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
@@ -27,12 +27,13 @@ public class VespaVersion implements Comparable<VespaVersion> {
private final Instant committedAt;
private final boolean isControllerVersion;
private final boolean isSystemVersion;
+ private final boolean isReleased;
private final DeploymentStatistics statistics;
private final ImmutableSet<HostName> systemApplicationHostnames;
private final Confidence confidence;
public VespaVersion(DeploymentStatistics statistics, String releaseCommit, Instant committedAt,
- boolean isControllerVersion, boolean isSystemVersion,
+ boolean isControllerVersion, boolean isSystemVersion, boolean isReleased,
Collection<HostName> systemApplicationHostnames,
Confidence confidence) {
this.statistics = statistics;
@@ -40,6 +41,7 @@ public class VespaVersion implements Comparable<VespaVersion> {
this.committedAt = committedAt;
this.isControllerVersion = isControllerVersion;
this.isSystemVersion = isSystemVersion;
+ this.isReleased = isReleased;
this.systemApplicationHostnames = ImmutableSet.copyOf(systemApplicationHostnames);
this.confidence = confidence;
}
@@ -102,6 +104,9 @@ public class VespaVersion implements Comparable<VespaVersion> {
*/
public boolean isSystemVersion() { return isSystemVersion; }
+ /** Returns whether the artifacts of this release are available in the configured maven repository. */
+ public boolean isReleased() { return isReleased; }
+
/** Returns the hosts allocated to system applications (across all zones) which are currently of this version */
public Set<HostName> systemApplicationHostnames() { return systemApplicationHostnames; }
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/package-info.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/package-info.java
new file mode 100644
index 00000000000..c6f2c1e427d
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/package-info.java
@@ -0,0 +1,5 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+package com.yahoo.vespa.hosted.controller.versions;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/controller-server/src/main/resources/configdefinitions/maven-repository.def b/controller-server/src/main/resources/configdefinitions/maven-repository.def
new file mode 100644
index 00000000000..0fd2d410e9b
--- /dev/null
+++ b/controller-server/src/main/resources/configdefinitions/maven-repository.def
@@ -0,0 +1,15 @@
+# Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+namespace=vespa.hosted.controller.maven.repository.config
+
+
+# URL to the Maven repository API that holds artifacts for tenants in the controller's system
+#
+apiUrl string default=https://repo.maven.apache.org/maven2/
+
+# Group ID of the artifact to list versions for
+#
+groupId string default=com.yahoo.vespa
+
+# Artifact ID of the artifact to list versions for
+#
+artifactId string default=tenant-base
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
index 2b0ee741e7e..dbf983a5bab 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.slime.Slime;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.athenz.api.AthenzDomain;
@@ -32,13 +33,14 @@ import com.yahoo.vespa.hosted.controller.api.integration.organization.MockIssueH
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerator;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockBuildService;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMailer;
+import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMavenRepository;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockRunDataStore;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockTesterCloud;
-import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.athenz.impl.AthenzFacade;
import com.yahoo.vespa.hosted.controller.athenz.mock.AthenzClientFactoryMock;
import com.yahoo.vespa.hosted.controller.athenz.mock.AthenzDbMock;
+import com.yahoo.vespa.hosted.controller.integration.ApplicationCertificateMock;
import com.yahoo.vespa.hosted.controller.integration.ApplicationStoreMock;
import com.yahoo.vespa.hosted.controller.integration.ArtifactRepositoryMock;
import com.yahoo.vespa.hosted.controller.integration.ConfigServerMock;
@@ -351,7 +353,9 @@ public final class ControllerTester {
new MockRunDataStore(),
() -> "test-controller",
new MockMailer(),
- new InMemoryFlagSource());
+ new InMemoryFlagSource(),
+ new MockMavenRepository(),
+ new ApplicationCertificateMock());
// Calculate initial versions
controller.updateVersionStatus(VersionStatus.compute(controller));
return controller;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
index 3ce32347e35..887406ecba8 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
@@ -5,6 +5,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.controller.Application;
@@ -123,10 +124,10 @@ public class DeploymentTester {
/** Upgrade system applications in all zones to given version */
public void upgradeSystemApplications(Version version) {
- for (ZoneId zone : tester.zoneRegistry().zones().all().ids()) {
+ for (ZoneApi zone : tester.zoneRegistry().zones().all().zones()) {
for (SystemApplication application : SystemApplication.all()) {
- tester.configServer().setVersion(application.id(), zone, version);
- tester.configServer().convergeServices(application.id(), zone);
+ tester.configServer().setVersion(application.id(), zone.getId(), version);
+ tester.configServer().convergeServices(application.id(), zone.getId());
}
}
computeVersionStatus();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationCertificateMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationCertificateMock.java
new file mode 100644
index 00000000000..3246a260217
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationCertificateMock.java
@@ -0,0 +1,14 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.integration;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificateProvider;
+
+public class ApplicationCertificateMock implements ApplicationCertificateProvider {
+
+ @Override
+ public ApplicationCertificate requestCaSignedCertificate(ApplicationId applicationId) {
+ return new ApplicationCertificate(String.format("vespa.tls.%s.%s", applicationId.tenant(),applicationId.application()));
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index d4df9c20ead..cdbc45c4d8f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -15,6 +15,7 @@ import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.identifiers.Hostname;
import com.yahoo.vespa.hosted.controller.api.identifiers.Identifier;
import com.yahoo.vespa.hosted.controller.api.identifiers.TenantId;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
@@ -225,7 +226,7 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
@Override
public PreparedApplication deploy(DeploymentId deployment, DeployOptions deployOptions, Set<String> rotationNames,
- List<ContainerEndpoint> containerEndpoints, byte[] content) {
+ List<ContainerEndpoint> containerEndpoints, ApplicationCertificate applicationCertificate, byte[] content) {
lastPrepareVersion = deployOptions.vespaVersion.map(Version::fromString).orElse(null);
if (prepareException != null) {
RuntimeException prepareException = this.prepareException;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java
index 57f29fb72af..00e6162d5e5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java
@@ -82,11 +82,6 @@ public class ZoneFilterMock implements ZoneList {
}
@Override
- public List<ZoneId> ids() {
- return List.copyOf(zones.stream().map(ZoneApi::getId).collect(Collectors.toList()));
- }
-
- @Override
public ZoneList ofCloud(CloudName cloud) {
return filter(zone -> zone.getCloudName().equals(cloud));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
index 449ca509ee4..f0344cb8d12 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
@@ -11,7 +11,6 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
index be9624fc693..347fe6064df 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
@@ -10,12 +10,14 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.ApplicationCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
@@ -24,6 +26,7 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentActivity;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
+import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.RotationStatus;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
@@ -116,9 +119,9 @@ public class ApplicationSerializerTest {
OptionalInt.of(7),
new MetricsService.ApplicationMetrics(0.5, 0.9),
Optional.of("-----BEGIN PUBLIC KEY-----\n∠( ᐛ 」∠)_\n-----END PUBLIC KEY-----"),
- Optional.of(new RotationId("my-rotation")),
- List.of(new RotationId("my-rotation")),
- rotationStatus);
+ List.of(new AssignedRotation(new ClusterSpec.Id("foo"), EndpointId.default_(), new RotationId("my-rotation"))),
+ rotationStatus,
+ Optional.of(new ApplicationCertificate("vespa.certificate")));
Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));
@@ -156,6 +159,8 @@ public class ApplicationSerializerTest {
assertEquals(original.rotations(), serialized.rotations());
assertEquals(original.rotationStatus(), serialized.rotationStatus());
+ assertEquals(original.applicationCertificate(), serialized.applicationCertificate());
+
// Test cluster utilization
assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size());
assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size());
@@ -261,14 +266,21 @@ public class ApplicationSerializerTest {
rotations.addString("multiple-rotation-1");
rotations.addString("multiple-rotation-2");
+ final var assignedRotations = cursor.setArray("assignedRotations");
+ final var assignedRotation = assignedRotations.addObject();
+ assignedRotation.setString("clusterId", "foobar");
+ assignedRotation.setString("endpointId", "nice-endpoint");
+ assignedRotation.setString("rotationId", "assigned-rotation");
+
// Parse and test the output from parsing contains both legacy rotation and multiple rotations
final var application = applicationSerializer.fromSlime(slime);
assertEquals(
List.of(
- new RotationId("multiple-rotation-1"),
- new RotationId("multiple-rotation-2"),
- new RotationId("single-rotation")
+ new RotationId("single-rotation"),
+ new RotationId("multiple-rotation-1"),
+ new RotationId("multiple-rotation-2"),
+ new RotationId("assigned-rotation")
),
application.rotations()
);
@@ -276,6 +288,16 @@ public class ApplicationSerializerTest {
assertEquals(
Optional.of(new RotationId("single-rotation")), application.legacyRotation()
);
+
+ assertEquals(
+ List.of(
+ new AssignedRotation(new ClusterSpec.Id("foo"), EndpointId.of("default"), new RotationId("single-rotation")),
+ new AssignedRotation(new ClusterSpec.Id("foo"), EndpointId.of("default"), new RotationId("multiple-rotation-1")),
+ new AssignedRotation(new ClusterSpec.Id("foo"), EndpointId.of("default"), new RotationId("multiple-rotation-2")),
+ new AssignedRotation(new ClusterSpec.Id("foobar"), EndpointId.of("nice-endpoint"), new RotationId("assigned-rotation"))
+ ),
+ application.assignedRotations()
+ );
}
@Test
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializerTest.java
index 5e6f9811376..a1e22b4fc64 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializerTest.java
@@ -36,9 +36,9 @@ public class VersionStatusSerializerTest {
ApplicationId.from("tenant2", "success2", "default"))
);
vespaVersions.add(new VespaVersion(statistics, "dead", Instant.now(), false, false,
- asHostnames("cfg1", "cfg2", "cfg3"), VespaVersion.Confidence.normal));
+ true, asHostnames("cfg1", "cfg2", "cfg3"), VespaVersion.Confidence.normal));
vespaVersions.add(new VespaVersion(statistics, "cafe", Instant.now(), true, true,
- asHostnames("cfg1", "cfg2", "cfg3"), VespaVersion.Confidence.normal));
+ false, asHostnames("cfg1", "cfg2", "cfg3"), VespaVersion.Confidence.normal));
VersionStatus status = new VersionStatus(vespaVersions);
VersionStatusSerializer serializer = new VersionStatusSerializer();
VersionStatus deserialized = serializer.fromSlime(serializer.toSlime(status));
@@ -51,6 +51,7 @@ public class VersionStatusSerializerTest {
assertEquals(a.committedAt().truncatedTo(MILLIS), b.committedAt());
assertEquals(a.isControllerVersion(), b.isControllerVersion());
assertEquals(a.isSystemVersion(), b.isSystemVersion());
+ assertEquals(a.isReleased(), b.isReleased());
assertEquals(a.statistics(), b.statistics());
assertEquals(a.systemApplicationHostnames(), b.systemApplicationHostnames());
assertEquals(a.confidence(), b.confidence());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
index 76c505ff8f8..427428a3a94 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
@@ -54,7 +54,7 @@ public class ContainerControllerTester {
public ContainerControllerTester(JDisc container, String responseFilePath) {
containerTester = new ContainerTester(container, responseFilePath);
- CuratorDb curatorDb = new MockCuratorDb();
+ CuratorDb curatorDb = controller().curator();
curatorDb.writeUpgradesPerMinute(100);
upgrader = new Upgrader(controller(), Duration.ofDays(1), new JobControl(curatorDb), curatorDb);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
index ef86ffa125f..c7be543dd00 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
@@ -6,6 +6,7 @@ import com.yahoo.application.container.handler.Request;
import com.yahoo.application.container.handler.Response;
import com.yahoo.component.ComponentSpecification;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.container.http.filter.FilterChainRepository;
import com.yahoo.jdisc.http.filter.SecurityRequestFilter;
@@ -59,10 +60,10 @@ public class ContainerTester {
public void upgradeSystem(Version version) {
controller().curator().writeControllerVersion(controller().hostname(), version);
- for (ZoneId zone : controller().zoneRegistry().zones().all().ids()) {
+ for (ZoneApi zone : controller().zoneRegistry().zones().all().zones()) {
for (SystemApplication application : SystemApplication.all()) {
- configServer().setVersion(application.id(), zone, version);
- configServer().convergeServices(application.id(), zone);
+ configServer().setVersion(application.id(), zone.getId(), version);
+ configServer().convergeServices(application.id(), zone.getId());
}
}
computeVersionStatus();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
index 68f0738f7a5..53476a2e42f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
@@ -92,6 +92,8 @@ public class ControllerContainerTest {
" <component id='com.yahoo.vespa.hosted.controller.integration.ApplicationStoreMock'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.MockTesterCloud'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMailer'/>\n" +
+ " <component id='com.yahoo.vespa.hosted.controller.integration.ApplicationCertificateMock'/>\n" +
+ " <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMavenRepository'/>\n" +
" <handler id='com.yahoo.vespa.hosted.controller.restapi.deployment.DeploymentApiHandler'>\n" +
" <binding>http://*/deployment/v1/*</binding>\n" +
" </handler>\n" +
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 71c4b41a276..16fd10277d2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -61,6 +61,8 @@ import com.yahoo.vespa.hosted.controller.restapi.ContainerControllerTester;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
import com.yahoo.vespa.hosted.controller.tenant.AthenzTenant;
+import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
+import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.yolean.Exceptions;
import org.junit.Before;
import org.junit.Test;
@@ -369,6 +371,9 @@ public class ApplicationApiTest extends ControllerContainerTest {
.oktaAccessToken(OKTA_AT),
"");
+ // Set version 6.1 to broken to change compile version for.
+ controllerTester.upgrader().overrideConfidence(Version.fromString("6.1"), VespaVersion.Confidence.broken);
+ tester.computeVersionStatus();
setDeploymentMaintainedInfo(controllerTester);
// GET tenant application deployments
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1", GET)
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json
index 5d1819bf0f2..1d719133ac3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json
@@ -217,7 +217,7 @@
]
}
],
- "compileVersion": "(ignore)",
+ "compileVersion": "6.0.0",
"globalRotations": [
"https://application1--tenant1.global.vespa.oath.cloud:4443/"
],
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
index ba81c5cf4e4..74d637499bd 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
@@ -80,7 +80,7 @@ public class ControllerApiTest extends ControllerContainerTest {
public void testUpgraderApi() {
// Get current configuration
tester.assertResponse(authenticatedRequest("http://localhost:8080/controller/v1/jobs/upgrader", new byte[0], Request.Method.GET),
- "{\"upgradesPerMinute\":0.125,\"confidenceOverrides\":[]}",
+ "{\"upgradesPerMinute\":100.0,\"confidenceOverrides\":[]}",
200);
// Set invalid configuration
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
index fa3848a6ba5..d6620733efe 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
@@ -74,6 +74,7 @@ public class DeploymentApiTest extends ControllerContainerTest {
version.committedAt(),
version.isControllerVersion(),
version.isSystemVersion(),
+ version.isReleased(),
ImmutableSet.of("config1.test", "config2.test").stream()
.map(HostName::from)
.collect(Collectors.toSet()),
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
index 02a82e35f10..8f02fa74c6e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
@@ -15,7 +15,6 @@ import org.junit.rules.ExpectedException;
import java.net.URI;
import java.util.List;
-import java.util.Optional;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/MavenRepositoryClientTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/MavenRepositoryClientTest.java
new file mode 100644
index 00000000000..026d174cb73
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/MavenRepositoryClientTest.java
@@ -0,0 +1,22 @@
+package com.yahoo.vespa.hosted.controller.versions;
+
+import com.yahoo.vespa.hosted.controller.api.integration.maven.ArtifactId;
+import org.junit.Test;
+
+import java.net.URI;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author jonmv
+ */
+public class MavenRepositoryClientTest {
+
+ @Test
+ public void testUri() {
+ assertEquals(URI.create("https://domain:123/base/group/id/artifact-id/maven-metadata.xml"),
+ MavenRepositoryClient.withArtifactPath(URI.create("https://domain:123/base/"),
+ new ArtifactId("group.id", "artifact-id")));
+ }
+
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
index a365285b752..655c16ccceb 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
@@ -6,6 +6,7 @@ import com.yahoo.component.Version;
import com.yahoo.component.Vtag;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.ControllerTester;
@@ -60,10 +61,10 @@ public class VersionStatusTest {
Version version0 = Version.fromString("6.1");
Version version1 = Version.fromString("6.5");
// Upgrade some config servers
- for (ZoneId zone : tester.zoneRegistry().zones().all().ids()) {
- for (Node node : tester.configServer().nodeRepository().list(zone, SystemApplication.configServer.id())) {
- tester.configServer().nodeRepository().putByHostname(zone, new Node(node.hostname(), node.state(), node.type(),
- node.owner(), version1, node.wantedVersion()));
+ for (ZoneApi zone : tester.zoneRegistry().zones().all().zones()) {
+ for (Node node : tester.configServer().nodeRepository().list(zone.getId(), SystemApplication.configServer.id())) {
+ Node upgradedNode = new Node(node.hostname(), node.state(), node.type(), node.owner(), version1, node.wantedVersion());
+ tester.configServer().nodeRepository().putByHostname(zone.getId(), upgradedNode);
break;
}
}
@@ -105,10 +106,10 @@ public class VersionStatusTest {
// Downgrade one config server in each zone
Version ancientVersion = Version.fromString("5.1");
- for (ZoneId zone : tester.controller().zoneRegistry().zones().all().ids()) {
- for (Node node : tester.configServer().nodeRepository().list(zone, SystemApplication.configServer.id())) {
- tester.configServer().nodeRepository().putByHostname(zone, new Node(node.hostname(), node.state(), node.type(),
- node.owner(), ancientVersion, node.wantedVersion()));
+ for (ZoneApi zone : tester.controller().zoneRegistry().zones().all().zones()) {
+ for (Node node : tester.configServer().nodeRepository().list(zone.getId(), SystemApplication.configServer.id())) {
+ Node downgradedNode = new Node(node.hostname(), node.state(), node.type(), node.owner(), ancientVersion, node.wantedVersion());
+ tester.configServer().nodeRepository().putByHostname(zone.getId(), downgradedNode);
break;
}
}
@@ -254,7 +255,7 @@ public class VersionStatusTest {
assertTrue("Status for version without applications is removed",
tester.controller().versionStatus().versions().stream()
.noneMatch(vespaVersion -> vespaVersion.versionNumber().equals(version1)));
-
+
// Another default application upgrades, raising confidence to high
tester.completeUpgrade(default8, version2, "default");
tester.completeUpgrade(default9, version2, "default");
@@ -294,6 +295,11 @@ public class VersionStatusTest {
assertEquals("6.2", versions.get(0).versionNumber().toString());
assertEquals("6.4", versions.get(1).versionNumber().toString());
assertEquals("6.5", versions.get(2).versionNumber().toString());
+
+ // Check release status is correct (static data in MockMavenRepository).
+ assertTrue(versions.get(0).isReleased());
+ assertFalse(versions.get(1).isReleased());
+ assertFalse(versions.get(2).isReleased());
}
@Test
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 99b24c2ff8e..3426eff459d 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -70,7 +70,6 @@ BuildRequires: java-11-openjdk-devel
BuildRequires: openssl-devel
BuildRequires: rpm-build
BuildRequires: make
-BuildRequires: vespa-cppunit-devel >= 1.12.1-6
BuildRequires: systemd
BuildRequires: flex >= 2.5.0
BuildRequires: bison >= 3.0.0
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java
index 70ba58cd9cf..bd8ffb0163c 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java
@@ -9,7 +9,7 @@ import java.util.Objects;
public class ContainerResources {
public static final ContainerResources UNLIMITED = ContainerResources.from(0, 0, 0);
- private static final int CPU_PERIOD = 100_000; // 100 µs
+ public static final int CPU_PERIOD_US = 100_000; // 100 ms
/**
* Hard limit on container's CPU usage: Implemented using Completely Fair Scheduler (CFS) by allocating a given
@@ -65,11 +65,12 @@ public class ContainerResources {
// Although docker allows to update cpu quota to 0, this is not a legal value, must be set -1 for unlimited
// See: https://github.com/docker/for-linux/issues/558
public int cpuQuota() {
- return cpus > 0 ? (int) (cpus * CPU_PERIOD) : -1;
+ return cpus > 0 ? (int) (cpus * CPU_PERIOD_US) : -1;
}
+ /** Duration (in µs) of a single period used as the basis for process scheduling */
public int cpuPeriod() {
- return CPU_PERIOD;
+ return CPU_PERIOD_US;
}
public int cpuShares() {
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStats.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStats.java
index d33ddadb52c..797dffdef1f 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStats.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStats.java
@@ -95,6 +95,9 @@ public class ContainerStats {
private final long systemCpuUsage;
private final long totalUsage;
private final long usageInKernelMode;
+ private final long throttledTime;
+ private final long throttlingActivePeriods;
+ private final long throttledPeriods;
public CpuStats(CpuStatsConfig cpuStats) {
// Added in 1.27
@@ -102,12 +105,30 @@ public class ContainerStats {
this.systemCpuUsage = cpuStats.getSystemCpuUsage();
this.totalUsage = cpuStats.getCpuUsage().getTotalUsage();
this.usageInKernelMode = cpuStats.getCpuUsage().getUsageInKernelmode();
+ this.throttledTime = cpuStats.getThrottlingData().getThrottledTime();
+ this.throttlingActivePeriods = cpuStats.getThrottlingData().getPeriods();
+ this.throttledPeriods = cpuStats.getThrottlingData().getThrottledPeriods();
}
public int getOnlineCpus() { return this.onlineCpus; }
+
+ /** Total CPU time (in ns) spent executing all the processes on this host */
public long getSystemCpuUsage() { return this.systemCpuUsage; }
+
+ /** Total CPU time (in ns) spent running all the processes in this container */
public long getTotalUsage() { return totalUsage; }
+
+ /** Total CPU time (in ns) spent in kernel mode while executing processes in this container */
public long getUsageInKernelMode() { return usageInKernelMode; }
+
+ /** Total CPU time (in ns) processes in this container were throttled for */
+ public long getThrottledTime() { return throttledTime; }
+
+ /** Number of periods with throttling enabled for this container */
+ public long getThrottlingActivePeriods() { return throttlingActivePeriods; }
+
+ /** Number of periods this container hit the throttling limit */
+ public long getThrottledPeriods() { return throttledPeriods; }
}
// For testing only, create ContainerStats from JSON returned by docker daemon stats API
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java
index ef59c4b17d6..46a0f9b9b10 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java
@@ -9,6 +9,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.TreeMap;
+import java.util.stream.Collectors;
/**
* @author freva
@@ -24,7 +25,9 @@ public class DimensionMetrics {
DimensionMetrics(String application, Dimensions dimensions, Map<String, Number> metrics) {
this.application = Objects.requireNonNull(application);
this.dimensions = Objects.requireNonNull(dimensions);
- this.metrics = Objects.requireNonNull(metrics);
+ this.metrics = metrics.entrySet().stream()
+ .filter(DimensionMetrics::metricIsFinite)
+ .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
public String toSecretAgentReport() throws JsonProcessingException {
@@ -65,6 +68,10 @@ public class DimensionMetrics {
return Objects.hash(application, dimensions, metrics);
}
+ private static boolean metricIsFinite(Map.Entry<String, Number> metric) {
+ return ! (metric.getValue() instanceof Double) || Double.isFinite((double) metric.getValue());
+ }
+
public static class Builder {
private final String application;
private final Dimensions dimensions;
diff --git a/docproc/abi-spec.json b/docproc/abi-spec.json
index 65ca886efaf..dee2d2172e4 100644
--- a/docproc/abi-spec.json
+++ b/docproc/abi-spec.json
@@ -11,7 +11,8 @@
"public abstract java.util.Map documentTypes()",
"public abstract java.util.Map structTypes()",
"public abstract java.util.Map annotationTypes()",
- "public abstract com.yahoo.document.Document getDocumentCopy(java.lang.String, com.yahoo.document.datatypes.StructuredFieldValue, com.yahoo.document.DocumentId)"
+ "public abstract com.yahoo.document.Document getDocumentCopy(java.lang.String, com.yahoo.document.datatypes.StructuredFieldValue, com.yahoo.document.DocumentId)",
+ "public com.yahoo.document.datatypes.FieldValue optionallyUpgrade(com.yahoo.document.Field, com.yahoo.document.datatypes.FieldValue)"
],
"fields": []
},
diff --git a/docproc/src/main/java/com/yahoo/docproc/AbstractConcreteDocumentFactory.java b/docproc/src/main/java/com/yahoo/docproc/AbstractConcreteDocumentFactory.java
index 8f771418959..3e720f9e0aa 100644
--- a/docproc/src/main/java/com/yahoo/docproc/AbstractConcreteDocumentFactory.java
+++ b/docproc/src/main/java/com/yahoo/docproc/AbstractConcreteDocumentFactory.java
@@ -1,15 +1,18 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.docproc;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
import java.util.Map;
+
+import com.yahoo.document.DataType;
import com.yahoo.document.Document;
import com.yahoo.document.DocumentId;
+import com.yahoo.document.Field;
import com.yahoo.document.annotation.Annotation;
+import com.yahoo.document.datatypes.Array;
+import com.yahoo.document.datatypes.FieldValue;
+import com.yahoo.document.datatypes.MapFieldValue;
import com.yahoo.document.datatypes.Struct;
import com.yahoo.document.datatypes.StructuredFieldValue;
-import com.yahoo.yolean.Exceptions;
/**
* Subtyped by factory classes for concrete document types. The factory classes are auto-generated
@@ -26,7 +29,44 @@ public abstract class AbstractConcreteDocumentFactory extends com.yahoo.componen
/**
* Used by the docproc framework to get an instance of a concrete document type without resorting to reflection in a bundle
*
- * @return A concrete document instance
+ * @return a concrete document instance
+ */
+ public abstract Document getDocumentCopy(java.lang.String type, StructuredFieldValue src, DocumentId id);
+
+ /**
+ * If the FieldValue is a StructuredFieldValue it will upgrade to the concrete type
+ * @param field
+ * @param fv
+ * @return fv or upgraded fv
*/
- public abstract com.yahoo.document.Document getDocumentCopy(java.lang.String type, com.yahoo.document.datatypes.StructuredFieldValue src, com.yahoo.document.DocumentId id);
+ public FieldValue optionallyUpgrade(Field field, FieldValue fv) {
+ return optionallyUpgrade(field.getDataType(), fv);
+ }
+
+ @SuppressWarnings({"unchecked", "rawtypes"})
+ private FieldValue optionallyUpgrade(DataType dataType, FieldValue fv) {
+ if (fv instanceof StructuredFieldValue) {
+ try {
+ return structTypes().get(dataType.getName())
+ .getConstructor(StructuredFieldValue.class)
+ .newInstance(fv);
+ } catch (java.lang.Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ } else if (fv instanceof Array) {
+ Array<FieldValue> array = (Array<FieldValue>) fv;
+ DataType nestedType = array.getDataType().getNestedType();
+ if (nestedType.getPrimitiveType() == null) {
+ array.replaceAll((item) -> optionallyUpgrade(nestedType, item));
+ }
+ } else if (fv instanceof MapFieldValue) {
+ MapFieldValue<FieldValue, FieldValue> map = (MapFieldValue<FieldValue, FieldValue>) fv;
+ DataType valueTypeType = map.getDataType().getValueType();
+ if (valueTypeType.getPrimitiveType() == null) {
+ map.replaceAll((key, value) -> optionallyUpgrade(valueTypeType, value));
+ }
+ }
+ return fv;
+ }
+
}
diff --git a/docproc/src/main/java/com/yahoo/docproc/Accesses.java b/docproc/src/main/java/com/yahoo/docproc/Accesses.java
index 4c19784ea37..747b129679b 100644
--- a/docproc/src/main/java/com/yahoo/docproc/Accesses.java
+++ b/docproc/src/main/java/com/yahoo/docproc/Accesses.java
@@ -20,13 +20,14 @@ public @interface Accesses {
/**
* Describes the annotations produced and consumed on one field in a document
- * @author vegardh
*
+ * @author vegardh
*/
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
- public @interface Field {
+ @interface Field {
+
/** The name of the document field */
String name();
/** The datatype of the field */
@@ -37,13 +38,13 @@ public @interface Accesses {
/**
* Describes the annotations produced and consumed in one tree on a field
- * @author vegardh
*
+ * @author vegardh
*/
@Documented
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
- public @interface Tree {
+ @interface Tree {
/** The name of the tree */
String name() default "";
/** The annotation types that this docproc writes in this tree */
diff --git a/docproc/src/main/java/com/yahoo/docproc/Call.java b/docproc/src/main/java/com/yahoo/docproc/Call.java
index edde89cd01a..a46e74c0f5f 100644
--- a/docproc/src/main/java/com/yahoo/docproc/Call.java
+++ b/docproc/src/main/java/com/yahoo/docproc/Call.java
@@ -91,10 +91,10 @@ public class Call implements Cloneable {
* if schema mapping or @Accesses is in effect.
*
* <p>
- * public for testing
+ * Public for testing
*/
public DocumentPut configDoc(DocumentProcessor docProc, DocumentPut documentPut) {
- if (!docProc.getFieldMap().isEmpty() || docProc.hasAnnotations()) {
+ if ( ! docProc.getFieldMap().isEmpty() || docProc.hasAnnotations()) {
Document document = documentPut.getDocument();
document = new ProxyDocument(docProc, document, docProc.getDocMap(document.getDataType().getName()));
diff --git a/docproc/src/main/java/com/yahoo/docproc/CallStack.java b/docproc/src/main/java/com/yahoo/docproc/CallStack.java
index 1e7bf4fba8e..2a09de1fdd5 100644
--- a/docproc/src/main/java/com/yahoo/docproc/CallStack.java
+++ b/docproc/src/main/java/com/yahoo/docproc/CallStack.java
@@ -12,18 +12,18 @@ import java.util.List;
import java.util.ListIterator;
/**
- * <p>A stack of the processors to call next in this processing. To push which
+ * A stack of the processors to call next in this processing. To push which
* processor to call next, call addNext, to get and remove the next processor,
- * call pop.</p>
+ * call pop.
*
- * <p>This is not thread safe.</p>
+ * This is not thread safe.
*
* @author bratseth
*/
public class CallStack {
/** The name of this stack, or null if it is not named */
- private String name = null;
+ private String name;
/** The Call objects of this stack */
private final List<Call> elements = new java.util.LinkedList<>();
@@ -51,7 +51,7 @@ public class CallStack {
}
/** Creates an empty stack with a name */
- public CallStack(final String name, Statistics manager, Metric metric) {
+ public CallStack(String name, Statistics manager, Metric metric) {
this.name = name;
this.statistics = manager;
this.metric = metric;
@@ -61,10 +61,10 @@ public class CallStack {
* Creates a stack from another stack (starting at the next of the given
* callstack) This does a deep copy of the stack.
*/
- public CallStack(final CallStack stackToCopy) {
+ public CallStack(CallStack stackToCopy) {
name = stackToCopy.name;
- for (final Iterator<Call> i = stackToCopy.iterator(); i.hasNext();) {
- final Call callToCopy = i.next();
+ for (Iterator<Call> i = stackToCopy.iterator(); i.hasNext();) {
+ Call callToCopy = i.next();
elements.add((Call) callToCopy.clone());
}
this.statistics = stackToCopy.statistics;
@@ -78,7 +78,7 @@ public class CallStack {
* @param name the name of the stack
* @param docprocs the document processors to call
*/
- public CallStack(final String name, Collection<DocumentProcessor> docprocs, Statistics manager, Metric metric) {
+ public CallStack(String name, Collection<DocumentProcessor> docprocs, Statistics manager, Metric metric) {
this(name, manager, metric);
for (DocumentProcessor docproc : docprocs) {
addLast(docproc);
@@ -91,7 +91,7 @@ public class CallStack {
}
/** Sets the name of this stack */
- public void setName(final String name) {
+ public void setName(String name) {
this.name = name;
}
@@ -100,7 +100,7 @@ public class CallStack {
*
* @return this for convenience
*/
- public CallStack addNext(final Call call) {
+ public CallStack addNext(Call call) {
elements.add(0, call);
return this;
}
@@ -110,7 +110,7 @@ public class CallStack {
*
* @return this for convenience
*/
- public CallStack addNext(final DocumentProcessor processor) {
+ public CallStack addNext(DocumentProcessor processor) {
return addNext(new Call(processor, name, statistics, metric));
}
@@ -119,7 +119,7 @@ public class CallStack {
*
* @return this for convenience
*/
- public CallStack addNext(final CallStack callStack) {
+ public CallStack addNext(CallStack callStack) {
elements.addAll(0, callStack.elements);
return this;
}
@@ -129,7 +129,7 @@ public class CallStack {
*
* @return this for convenience
*/
- public CallStack addLast(final Call call) {
+ public CallStack addLast(Call call) {
elements.add(call);
return this;
}
@@ -139,7 +139,7 @@ public class CallStack {
*
* @return this for convenience
*/
- public CallStack addLast(final DocumentProcessor processor) {
+ public CallStack addLast(DocumentProcessor processor) {
return addLast(new Call(processor, name, statistics, metric));
}
@@ -148,7 +148,7 @@ public class CallStack {
*
* @return this for convenience
*/
- public CallStack addLast(final CallStack callStack) {
+ public CallStack addLast(CallStack callStack) {
elements.addAll(callStack.elements);
return this;
}
@@ -164,8 +164,8 @@ public class CallStack {
* @param call the call to add
* @return this for convenience
*/
- public CallStack addBefore(final Call before, final Call call) {
- final int insertPosition = elements.indexOf(before);
+ public CallStack addBefore(Call before, Call call) {
+ int insertPosition = elements.indexOf(before);
if (insertPosition < 0) {
addLast(call);
} else {
@@ -185,7 +185,7 @@ public class CallStack {
* @param processor the processor to add
* @return this for convenience
*/
- public CallStack addBefore(final Call before, DocumentProcessor processor) {
+ public CallStack addBefore(Call before, DocumentProcessor processor) {
return addBefore(before, new Call(processor, name, statistics, metric));
}
@@ -193,16 +193,13 @@ public class CallStack {
* Adds multiple elements just before the first occurence of some element on
* the stack. This can not be called during an iteration.
*
- * @param before
- * the call to add this before. If this call is not present (the
- * same object instance), the new processor is added as the last
- * element
- * @param callStack
- * the calls to add
+ * @param before the call to add this before. If this call is not present (the
+ * same object instance), the new processor is added as the last element
+ * @param callStack the calls to add
* @return this for convenience
*/
- public CallStack addBefore(final Call before, final CallStack callStack) {
- final int insertPosition = elements.indexOf(before);
+ public CallStack addBefore(Call before, CallStack callStack) {
+ int insertPosition = elements.indexOf(before);
if (insertPosition < 0) {
addLast(callStack);
} else {
@@ -223,8 +220,8 @@ public class CallStack {
* the call to add
* @return this for convenience
*/
- public CallStack addAfter(final Call after, final Call call) {
- final int insertPosition = elements.indexOf(after);
+ public CallStack addAfter(Call after, Call call) {
+ int insertPosition = elements.indexOf(after);
if (insertPosition < 0) {
addLast(call);
} else {
@@ -237,15 +234,12 @@ public class CallStack {
* Adds an element just after the first occurence of some other element on
* the stack. This can not be called during an iteration.
*
- * @param after
- * the call to add this after. If this call is not present, (the
- * same object instance), the new processor is added as the last
- * element
- * @param processor
- * the processor to add
+ * @param after the call to add this after. If this call is not present, (the
+ * same object instance), the new processor is added as the last element
+ * @param processor the processor to add
* @return this for convenience
*/
- public CallStack addAfter(final Call after, final DocumentProcessor processor) {
+ public CallStack addAfter(Call after, DocumentProcessor processor) {
return addAfter(after, new Call(processor, name, statistics, metric));
}
@@ -253,16 +247,13 @@ public class CallStack {
* Adds multiple elements just after another given element on the stack.
* This can not be called during an iteration.
*
- * @param after
- * the call to add this before. If this call is not present, (the
- * same object instance), the new processor is added as the last
- * element
- * @param callStack
- * the calls to add
+ * @param after the call to add this before. If this call is not present, (the
+ * same object instance), the new processor is added as the last element
+ * @param callStack the calls to add
* @return this for convenience
*/
- public CallStack addAfter(final Call after, final CallStack callStack) {
- final int insertPosition = elements.indexOf(after);
+ public CallStack addAfter(Call after, CallStack callStack) {
+ int insertPosition = elements.indexOf(after);
if (insertPosition < 0) {
addLast(callStack);
} else {
@@ -278,9 +269,9 @@ public class CallStack {
* the call to remove
* @return this for convenience
*/
- public CallStack remove(final Call call) {
- for (final ListIterator<Call> i = iterator(); i.hasNext();) {
- final Call current = i.next();
+ public CallStack remove(Call call) {
+ for (ListIterator<Call> i = iterator(); i.hasNext();) {
+ Call current = i.next();
if (current == call) {
i.remove();
}
@@ -295,9 +286,9 @@ public class CallStack {
* the call to check
* @return true if the call is present, false otherwise
*/
- public boolean contains(final Call call) {
- for (final ListIterator<Call> i = iterator(); i.hasNext();) {
- final Call current = i.next();
+ public boolean contains(Call call) {
+ for (ListIterator<Call> i = iterator(); i.hasNext();) {
+ Call current = i.next();
if (current == call) {
return true;
}
@@ -306,12 +297,11 @@ public class CallStack {
}
/**
- * Returns the next call to this processor id, or null if no such calls are
- * left
+ * Returns the next call to this processor id, or null if no such calls are left
*/
- public Call findCall(final ComponentId processorId) {
- for (final Iterator<Call> i = iterator(); i.hasNext();) {
- final Call call = i.next();
+ public Call findCall(ComponentId processorId) {
+ for (Iterator<Call> i = iterator(); i.hasNext();) {
+ Call call = i.next();
if (call.getDocumentProcessorId().equals(processorId)) {
return call;
}
@@ -323,7 +313,7 @@ public class CallStack {
* Returns the next call to this processor, or null if no such calls are
* left
*/
- public Call findCall(final DocumentProcessor processor) {
+ public Call findCall(DocumentProcessor processor) {
return findCall(processor.getId());
}
@@ -365,15 +355,14 @@ public class CallStack {
return elements.listIterator();
}
- /** Returns the number of remainnig elements in this stack */
+ /** Returns the number of remaining elements in this stack */
public int size() {
return elements.size();
}
@Override
public String toString() {
- StringBuilder b = new StringBuilder();
- b.append("callstack");
+ StringBuilder b = new StringBuilder("callstack");
if (name != null) {
b.append(" ");
b.append(name);
diff --git a/docproc/src/main/java/com/yahoo/docproc/SimpleDocumentProcessor.java b/docproc/src/main/java/com/yahoo/docproc/SimpleDocumentProcessor.java
index bb9ef8ff636..e1bcf9c7c42 100644
--- a/docproc/src/main/java/com/yahoo/docproc/SimpleDocumentProcessor.java
+++ b/docproc/src/main/java/com/yahoo/docproc/SimpleDocumentProcessor.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.docproc;
-import com.yahoo.document.Document;
import com.yahoo.document.DocumentOperation;
import com.yahoo.document.DocumentPut;
import com.yahoo.document.DocumentRemove;
@@ -86,7 +85,7 @@ public class SimpleDocumentProcessor extends DocumentProcessor {
*/
@Override
public final Progress process(Processing processing) {
- final int initialSize = processing.getDocumentOperations().size();
+ int initialSize = processing.getDocumentOperations().size();
for (DocumentOperation op : processing.getDocumentOperations()) {
try {
if (op instanceof DocumentPut) {
diff --git a/docproc/src/main/java/com/yahoo/docproc/TransientFailureException.java b/docproc/src/main/java/com/yahoo/docproc/TransientFailureException.java
index cda196f309c..235e2561066 100644
--- a/docproc/src/main/java/com/yahoo/docproc/TransientFailureException.java
+++ b/docproc/src/main/java/com/yahoo/docproc/TransientFailureException.java
@@ -2,8 +2,8 @@
package com.yahoo.docproc;
/**
- * Exception to be thrown by a document processor on transient failures.&nbsp;Caller
- * is welcome to try the call again later.
+ * Exception to be thrown by a document processor on transient failures.
+ * Caller is welcome to try the call again later.
*
* @author Einar M R Rosenvinge
*/
diff --git a/docproc/src/main/java/com/yahoo/docproc/jdisc/RequestContext.java b/docproc/src/main/java/com/yahoo/docproc/jdisc/RequestContext.java
index 5d04eb9fe6f..8f021546ac8 100644
--- a/docproc/src/main/java/com/yahoo/docproc/jdisc/RequestContext.java
+++ b/docproc/src/main/java/com/yahoo/docproc/jdisc/RequestContext.java
@@ -9,37 +9,35 @@ import java.net.URI;
import java.util.List;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
*/
public interface RequestContext {
- public List<Processing> getProcessings();
+ List<Processing> getProcessings();
- public String getServiceName();
+ String getServiceName();
- public URI getUri();
+ URI getUri();
- public boolean isProcessable();
+ boolean isProcessable();
- public int getApproxSize();
+ int getApproxSize();
- public int getPriority();
+ int getPriority();
- public void processingDone(List<Processing> processing);
+ void processingDone(List<Processing> processing);
- public void processingFailed(ErrorCode error, String msg);
+ void processingFailed(ErrorCode error, String msg);
- public void processingFailed(Exception exception);
+ void processingFailed(Exception exception);
- /**
- * Will check if the given timeout has expired
- * @return true if the timeout has expired.
- */
- public default boolean hasExpired() { return false;}
+ /** Returns whether this request has timed out */
+ default boolean hasExpired() { return false;}
- public void skip();
+ void skip();
+
+ enum ErrorCode {
- public enum ErrorCode {
//transient:
ERROR_ABORTED(Response.Status.TEMPORARY_REDIRECT, DocumentProtocol.ERROR_ABORTED),
ERROR_BUSY(Response.Status.TEMPORARY_REDIRECT, DocumentProtocol.ERROR_BUSY),
@@ -50,7 +48,7 @@ public interface RequestContext {
private int discStatus;
private int documentProtocolStatus;
- private ErrorCode(int discStatus, int documentProtocolStatus) {
+ ErrorCode(int discStatus, int documentProtocolStatus) {
this.discStatus = discStatus;
this.documentProtocolStatus = documentProtocolStatus;
}
@@ -63,4 +61,5 @@ public interface RequestContext {
return documentProtocolStatus;
}
}
+
}
diff --git a/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/MessageFactory.java b/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/MessageFactory.java
index 89f3782da52..702343acd1c 100644
--- a/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/MessageFactory.java
+++ b/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/MessageFactory.java
@@ -2,16 +2,24 @@
package com.yahoo.docproc.jdisc.messagebus;
import com.yahoo.docproc.Processing;
-import com.yahoo.document.*;
+import com.yahoo.document.DocumentOperation;
+import com.yahoo.document.DocumentPut;
+import com.yahoo.document.DocumentRemove;
+import com.yahoo.document.DocumentUpdate;
import com.yahoo.documentapi.messagebus.loadtypes.LoadType;
-import com.yahoo.documentapi.messagebus.protocol.*;
+import com.yahoo.documentapi.messagebus.protocol.DocumentMessage;
+import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol;
+import com.yahoo.documentapi.messagebus.protocol.PutDocumentMessage;
+import com.yahoo.documentapi.messagebus.protocol.RemoveDocumentMessage;
+import com.yahoo.documentapi.messagebus.protocol.TestAndSetMessage;
+import com.yahoo.documentapi.messagebus.protocol.UpdateDocumentMessage;
import com.yahoo.log.LogLevel;
import com.yahoo.messagebus.Message;
import java.util.logging.Logger;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
*/
class MessageFactory {
@@ -27,26 +35,26 @@ class MessageFactory {
}
public DocumentMessage fromDocumentOperation(Processing processing, DocumentOperation documentOperation) {
- DocumentMessage msg = newMessage(documentOperation);
- msg.setLoadType(loadType);
- msg.setPriority(priority);
- msg.setRoute(requestMsg.getRoute());
- msg.setTimeReceivedNow();
- msg.setTimeRemaining(requestMsg.getTimeRemainingNow());
- msg.getTrace().setLevel(requestMsg.getTrace().getLevel());
+ DocumentMessage message = newMessage(documentOperation);
+ message.setLoadType(loadType);
+ message.setPriority(priority);
+ message.setRoute(requestMsg.getRoute());
+ message.setTimeReceivedNow();
+ message.setTimeRemaining(requestMsg.getTimeRemainingNow());
+ message.getTrace().setLevel(requestMsg.getTrace().getLevel());
if (log.isLoggable(LogLevel.DEBUG)) {
- log.log(LogLevel.DEBUG, "Created '" + msg.getClass().getName() +
- "', route = '" + msg.getRoute() +
- "', priority = '" + msg.getPriority().name() +
- "', load type = '" + msg.getLoadType() +
- "', trace level = '" + msg.getTrace().getLevel() +
- "', time remaining = '" + msg.getTimeRemaining() + "'.");
+ log.log(LogLevel.DEBUG, "Created '" + message.getClass().getName() +
+ "', route = '" + message.getRoute() +
+ "', priority = '" + message.getPriority().name() +
+ "', load type = '" + message.getLoadType() +
+ "', trace level = '" + message.getTrace().getLevel() +
+ "', time remaining = '" + message.getTimeRemaining() + "'.");
}
- return msg;
+ return message;
}
private static DocumentMessage newMessage(DocumentOperation documentOperation) {
- final TestAndSetMessage message;
+ TestAndSetMessage message;
if (documentOperation instanceof DocumentPut) {
message = new PutDocumentMessage(((DocumentPut)documentOperation));
diff --git a/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ProcessingFactory.java b/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ProcessingFactory.java
index 49a6020d633..4696c627fe3 100644
--- a/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ProcessingFactory.java
+++ b/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ProcessingFactory.java
@@ -74,9 +74,8 @@ class ProcessingFactory {
Document document = msg.getDocumentPut().getDocument();
String typeName = document.getDataType().getName();
ContainerDocumentConfig.Doctype typeConfig = getDocumentConfig(typeName);
- if (typeConfig == null) {
- return document;
- }
+ if (typeConfig == null) return document;
+
return createConcreteDocument(document, typeConfig);
}
diff --git a/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ResponseMerger.java b/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ResponseMerger.java
index 5a5e43c3cf8..d5aee5b5ff3 100644
--- a/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ResponseMerger.java
+++ b/docproc/src/main/java/com/yahoo/docproc/jdisc/messagebus/ResponseMerger.java
@@ -9,7 +9,6 @@ import com.yahoo.jdisc.handler.ResponseHandler;
import com.yahoo.messagebus.Message;
import com.yahoo.messagebus.Reply;
import com.yahoo.messagebus.TraceNode;
-import com.yahoo.messagebus.jdisc.MbusClient;
import com.yahoo.messagebus.jdisc.MbusResponse;
import com.yahoo.messagebus.jdisc.StatusCodes;
@@ -17,7 +16,7 @@ import java.util.ArrayList;
import java.util.List;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
*/
class ResponseMerger implements ResponseHandler {
diff --git a/docproc/src/main/java/com/yahoo/docproc/jdisc/metric/NullMetric.java b/docproc/src/main/java/com/yahoo/docproc/jdisc/metric/NullMetric.java
index fe204686d6c..9e88af4abd7 100644
--- a/docproc/src/main/java/com/yahoo/docproc/jdisc/metric/NullMetric.java
+++ b/docproc/src/main/java/com/yahoo/docproc/jdisc/metric/NullMetric.java
@@ -6,9 +6,10 @@ import com.yahoo.jdisc.Metric;
import java.util.Map;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
*/
public class NullMetric implements Metric {
+
@Override
public void set(String key, Number val, Context ctx) {
}
@@ -25,4 +26,5 @@ public class NullMetric implements Metric {
private static class NullContext implements Context {
private static final NullContext INSTANCE = new NullContext();
}
+
}
diff --git a/docproc/src/main/java/com/yahoo/docproc/proxy/ProxyDocument.java b/docproc/src/main/java/com/yahoo/docproc/proxy/ProxyDocument.java
index b540f1c204d..e825db4e21d 100644
--- a/docproc/src/main/java/com/yahoo/docproc/proxy/ProxyDocument.java
+++ b/docproc/src/main/java/com/yahoo/docproc/proxy/ProxyDocument.java
@@ -32,15 +32,18 @@ import java.util.Map.Entry;
import java.util.Set;
/**
- * This is a facade to a Document, with multiple purposes: <ul> <li>Getters and setters for field data takes possibly
- * into account a schema map of field names. <li>We support mapping into struct fields of arbitrary depth using
- * from→mystruct.mystruct.myfield </ul> We also enforce the @Accesses annotation(s) of the doc proc which uses this.
+ * This is a facade to a Document, with two purposes:
+ * <ul>
+ * <li>Getters and setters for field data may take into account a schema map of field names.
+ * <li>Mapping into struct fields of arbitrary depth using from→mystruct.mystruct.myfield
+ * </ul>
+ *
+ * This also enforces the @Accesses annotation(s) of the doc proc which uses this.
*
* @author Vegard Havdal
*/
public class ProxyDocument extends Document implements DocumentOperationWrapper {
- private static final long serialVersionUID = 1L;
private final Map<String, String> fieldMap;
private final Set<String> fieldsAllowed = new HashSet<>();
private final String docProcName;
@@ -72,7 +75,7 @@ public class ProxyDocument extends Document implements DocumentOperationWrapper
* directly, but may refer to a field in a struct contained in it,
* in which case the returned Field is only useful for obtaining
* the field type; it can't be used for get() and set().
- **/
+ */
@Override
public Field getField(String fieldName) {
if (fieldMap != null && fieldMap.containsKey(fieldName)) {
diff --git a/docproc/src/main/java/com/yahoo/docproc/util/JoinerDocumentProcessor.java b/docproc/src/main/java/com/yahoo/docproc/util/JoinerDocumentProcessor.java
index 6242be25f86..448d5213e6d 100644
--- a/docproc/src/main/java/com/yahoo/docproc/util/JoinerDocumentProcessor.java
+++ b/docproc/src/main/java/com/yahoo/docproc/util/JoinerDocumentProcessor.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.docproc.util;
-import com.yahoo.component.ComponentId;
import com.yahoo.document.DocumentOperation;
import com.yahoo.document.DocumentPut;
import com.yahoo.document.config.DocumentmanagerConfig;
diff --git a/docprocs/src/main/java/com/yahoo/docprocs/indexing/DocumentScript.java b/docprocs/src/main/java/com/yahoo/docprocs/indexing/DocumentScript.java
index 4905f3d9dad..dd894390ac2 100644
--- a/docprocs/src/main/java/com/yahoo/docprocs/indexing/DocumentScript.java
+++ b/docprocs/src/main/java/com/yahoo/docprocs/indexing/DocumentScript.java
@@ -107,4 +107,5 @@ public class DocumentScript {
}
}
}
+
}
diff --git a/docprocs/src/main/java/com/yahoo/docprocs/indexing/FastLogger.java b/docprocs/src/main/java/com/yahoo/docprocs/indexing/FastLogger.java
index 3ce2a2f1df4..ba1d5e17a78 100644
--- a/docprocs/src/main/java/com/yahoo/docprocs/indexing/FastLogger.java
+++ b/docprocs/src/main/java/com/yahoo/docprocs/indexing/FastLogger.java
@@ -29,4 +29,5 @@ class FastLogger {
public static FastLogger getLogger(String name) {
return new FastLogger(Logger.getLogger(name));
}
+
}
diff --git a/docprocs/src/main/java/com/yahoo/docprocs/indexing/IndexingProcessor.java b/docprocs/src/main/java/com/yahoo/docprocs/indexing/IndexingProcessor.java
index 761661710d4..aa66f6c7c3c 100644
--- a/docprocs/src/main/java/com/yahoo/docprocs/indexing/IndexingProcessor.java
+++ b/docprocs/src/main/java/com/yahoo/docprocs/indexing/IndexingProcessor.java
@@ -9,7 +9,14 @@ import com.yahoo.component.chain.dependencies.Before;
import com.yahoo.component.chain.dependencies.Provides;
import com.yahoo.docproc.DocumentProcessor;
import com.yahoo.docproc.Processing;
-import com.yahoo.document.*;
+import com.yahoo.document.Document;
+import com.yahoo.document.DocumentOperation;
+import com.yahoo.document.DocumentPut;
+import com.yahoo.document.DocumentRemove;
+import com.yahoo.document.DocumentType;
+import com.yahoo.document.DocumentTypeManager;
+import com.yahoo.document.DocumentTypeManagerConfigurer;
+import com.yahoo.document.DocumentUpdate;
import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.language.Linguistics;
import com.yahoo.log.LogLevel;
diff --git a/document/src/main/java/com/yahoo/document/datatypes/Struct.java b/document/src/main/java/com/yahoo/document/datatypes/Struct.java
index fc75870bb94..db9349f22cf 100644
--- a/document/src/main/java/com/yahoo/document/datatypes/Struct.java
+++ b/document/src/main/java/com/yahoo/document/datatypes/Struct.java
@@ -2,14 +2,24 @@
package com.yahoo.document.datatypes;
import com.yahoo.collections.Hashlet;
-import com.yahoo.document.*;
+import com.yahoo.document.DataType;
+import com.yahoo.document.Document;
+import com.yahoo.document.Field;
+import com.yahoo.document.PositionDataType;
+import com.yahoo.document.StructDataType;
import com.yahoo.document.serialization.FieldReader;
import com.yahoo.document.serialization.FieldWriter;
import com.yahoo.document.serialization.XmlSerializationHelper;
import com.yahoo.document.serialization.XmlStream;
import com.yahoo.vespa.objects.Ids;
-import java.util.*;
+import java.util.AbstractSet;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
/**
* @author Håkon Humberset
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/PutDocumentMessage.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/PutDocumentMessage.java
index ec24798f7f2..b2bf26d3b05 100755
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/PutDocumentMessage.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/PutDocumentMessage.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.documentapi.messagebus.protocol;
-import com.yahoo.document.Document;
import com.yahoo.document.DocumentPut;
import com.yahoo.document.TestAndSetCondition;
import com.yahoo.document.serialization.DocumentDeserializer;
@@ -28,7 +27,8 @@ public class PutDocumentMessage extends TestAndSetMessage {
/**
* Constructs a new message from a byte buffer.
- * @param decoder The decoder to use for deserialization.
+ *
+ * @param decoder The decoder to use for deserialization.
* @param buffer A byte buffer that contains a serialized message.
*/
public PutDocumentMessage(LazyDecoder decoder, DocumentDeserializer buffer) {
@@ -36,11 +36,7 @@ public class PutDocumentMessage extends TestAndSetMessage {
this.buffer = buffer;
}
- /**
- * Constructs a new document put message.
- *
- * @param put Document put operation
- */
+ /** Constructs a new document put message */
public PutDocumentMessage(DocumentPut put) {
this.put = put;
}
@@ -64,40 +60,26 @@ public class PutDocumentMessage extends TestAndSetMessage {
}
}
- /**
- * Returns the document put operation
- */
+ /** Returns the document put operation */
public DocumentPut getDocumentPut() {
deserialize();
return put;
}
- /**
- * Sets the document to put.
- *
- * @param put Put document operation
- */
+ /** Sets the document to put */
public void setDocumentPut(DocumentPut put) {
buffer = null;
decoder = null;
this.put = put;
}
- /**
- * Returns the timestamp of the document to put.
- *
- * @return The document timestamp.
- */
+ /** Returns the timestamp of the document to put */
public long getTimestamp() {
deserialize();
return time;
}
- /**
- * Sets the timestamp of the document to put.
- *
- * @param time The timestamp to set.
- */
+ /** Sets the timestamp of the document to put */
public void setTimestamp(long time) {
buffer = null;
decoder = null;
@@ -108,7 +90,7 @@ public class PutDocumentMessage extends TestAndSetMessage {
* Returns the raw serialized buffer. This buffer is stored as the message is received from accross the network, and
* deserialized from as soon as a member is requested. This method will return null if the buffer has been decoded.
*
- * @return The buffer containing the serialized data for this message, or null.
+ * @return the buffer containing the serialized data for this message, or null
*/
ByteBuffer getSerializedBuffer() {
return buffer != null ? buffer.getBuf().getByteBuffer() : null; // TODO: very dirty. Must make interface.
@@ -153,4 +135,5 @@ public class PutDocumentMessage extends TestAndSetMessage {
public void setCondition(TestAndSetCondition condition) {
put.setCondition(condition);
}
+
}
diff --git a/documentgen-test/src/main/java/com/yahoo/vespa/document/NodeImpl.java b/documentgen-test/src/main/java/com/yahoo/vespa/document/NodeImpl.java
index 58b63835539..45da1e85795 100644
--- a/documentgen-test/src/main/java/com/yahoo/vespa/document/NodeImpl.java
+++ b/documentgen-test/src/main/java/com/yahoo/vespa/document/NodeImpl.java
@@ -7,7 +7,6 @@ import com.yahoo.document.annotation.Annotation;
* Example of user provided annotation subtype that vespa-documentgen-plugin supports
*
* @author vegardh
- *
*/
public class NodeImpl extends Annotation {
diff --git a/documentgen-test/src/main/java/com/yahoo/vespa/document/dom/DocumentImpl.java b/documentgen-test/src/main/java/com/yahoo/vespa/document/dom/DocumentImpl.java
index fd9b8b46711..1a1548b93d5 100644
--- a/documentgen-test/src/main/java/com/yahoo/vespa/document/dom/DocumentImpl.java
+++ b/documentgen-test/src/main/java/com/yahoo/vespa/document/dom/DocumentImpl.java
@@ -7,7 +7,6 @@ import com.yahoo.document.annotation.Annotation;
* Example of user provided annotation subtype that vespa-documentgen-plugin supports
*
* @author vegardh
- *
*/
public class DocumentImpl extends Annotation {
diff --git a/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java b/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
index e772a3138da..1e8c585b43e 100644
--- a/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
+++ b/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
@@ -38,6 +38,8 @@ import com.yahoo.document.datatypes.WeightedSet;
import com.yahoo.document.serialization.DocumentDeserializerFactory;
import com.yahoo.document.serialization.DocumentSerializer;
import com.yahoo.document.serialization.DocumentSerializerFactory;
+import com.yahoo.document.serialization.VespaDocumentDeserializerHead;
+import com.yahoo.document.serialization.VespaDocumentSerializerHead;
import com.yahoo.io.GrowableByteBuffer;
import com.yahoo.searchdefinition.derived.Deriver;
import com.yahoo.tensor.Tensor;
@@ -82,15 +84,16 @@ import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertThat;
-
/**
- * Testcases for vespa-documentgen-plugin
+ * Tests vespa-documentgen-plugin
*
* @author vegardh
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public class DocumentGenPluginTest {
+ // NOTE: Most assertEquals in this use the wrong argument order
+
private static final int NUM_BOOKS = 10000;
@Test
@@ -186,7 +189,7 @@ public class DocumentGenPluginTest {
}
@Test
- public void testremoveFieldValue() {
+ public void testRemoveFieldValue() {
Book book = getBook();
book.setAuthor(null);
Field a = new Field("author", DataType.STRING);
@@ -229,9 +232,13 @@ public class DocumentGenPluginTest {
@Test
public void testStructs() {
Book book = getBook();
+ assertBook(book);
+ }
+
+ private void assertBook(Book book) {
assertTrue(Struct.class.isInstance(book.getMystruct()));
- assertEquals(book.getMystruct().getSs01().getD0(), -238472634.78, 0);
- assertEquals(book.getMystruct().getI1(), (Integer)999);
+ assertEquals(-238472634.78, book.getMystruct().getSs01().getD0(), 0);
+ assertEquals((Integer)999, book.getMystruct().getI1());
assertEquals(book.getAuthor(), "Herman Melville");
book.getMystruct().getSs01().setD0(4d);
assertEquals(book.getMystruct().getSs01().getD0(), 4.0, 1E-6);
@@ -278,9 +285,7 @@ public class DocumentGenPluginTest {
assertEquals(twelve, new IntegerFieldValue(12));
}
- @Test
- public void testArrayOfStruct() {
- Book book = getBook();
+ private void verifyArrayOfStruct(Book book) {
assertEquals(book.getMysinglestructarray().get(0).getS1(), "YEPS");
assertEquals(book.getMysinglestructarray().get(1).getI1(), (Integer)456);
Struct s1 = (Struct) ((Array)book.getFieldValue("mysinglestructarray")).get(0);
@@ -295,13 +300,28 @@ public class DocumentGenPluginTest {
assertEquals(ifv2.getInteger(), 456);
s2.setFieldValue("i1", new IntegerFieldValue(123));
assertEquals(book.getMysinglestructarray().get(1).getI1(), (Integer)123);
- book.getMysinglestructarray().remove(0);
+ Book.Ss1 prev = book.getMysinglestructarray().remove(0);
assertEquals(book.getMysinglestructarray().get(0).getI1(), (Integer)123);
+ book.getMysinglestructarray().add(0, prev);
+ assertEquals(book.getMysinglestructarray().get(1).getI1(), (Integer)123);
+ s2.setFieldValue("i1", new IntegerFieldValue(456));
+ }
+
+ private static Document copyBySerialization(Document orig) {
+ return roundtripSerialize(orig, typeManagerForBookType());
+ }
+ private Book toBook(Document doc) {
+ return (Book) new ConcreteDocumentFactory().getDocumentCopy(doc.getDataType().getName(), doc, doc.getId());
}
@Test
- public void testMaps() {
+ public void testArrayOfStruct() {
Book book = getBook();
+ verifyArrayOfStruct(book);
+ verifyArrayOfStruct(toBook(copyBySerialization(book)));
+ }
+
+ private void verifyMaps(Book book) {
assertTrue(book.getField("stringmap").getDataType() instanceof MapDataType);
MapFieldValue mfv = (MapFieldValue) book.getFieldValue("stringmap");
assertEquals(mfv.get(new StringFieldValue("Melville")), new StringFieldValue("Moby Dick"));
@@ -311,6 +331,8 @@ public class DocumentGenPluginTest {
assertEquals(mfv.keySet().size(), 2);
book.getStringmap().put("Melville", "MD");
assertEquals(mfv.keySet().size(), 3);
+ book.getStringmap().put("Melville", "Moby Dick");
+ assertEquals(mfv.keySet().size(), 3);
assertEquals(book.getStructmap().get(50).getS1(), "test s1");
MapFieldValue mfv2 = (MapFieldValue) book.getFieldValue("structmap");
@@ -320,6 +342,13 @@ public class DocumentGenPluginTest {
}
@Test
+ public void testMaps() {
+ Book book = getBook();
+ verifyMaps(book);
+ verifyMaps(toBook(copyBySerialization(book)));
+ }
+
+ @Test
public void testWeightedSets() {
Book book = getBook();
assertTrue(book.getField("mywsfloat").getDataType() instanceof WeightedSetDataType);
@@ -484,13 +513,13 @@ public class DocumentGenPluginTest {
}
}
- private DocumentTypeManager typeManagerFromSDs(String... files) {
+ private static DocumentTypeManager typeManagerFromSDs(String... files) {
final DocumentTypeManager mgr = new DocumentTypeManager();
mgr.configure("raw:" + getDocumentConfig(Arrays.asList(files)));
return mgr;
}
- private DocumentTypeManager typeManagerForBookType() {
+ private static DocumentTypeManager typeManagerForBookType() {
return typeManagerFromSDs("etc/complex/common.sd", "etc/complex/parent.sd", "etc/complex/book.sd");
}
@@ -506,7 +535,7 @@ public class DocumentGenPluginTest {
assertEquals(NUM_BOOKS, manyGenericBooks.size());
}
- private String getDocumentConfig(List<String> sds) {
+ private static String getDocumentConfig(List<String> sds) {
return new DocumentmanagerConfig(Deriver.getDocumentManagerConfig(sds)).toString();
}
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index 26e046ec52a..e0159febdfe 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -25,6 +25,7 @@ vespa_define_module(
src/tests/eval/value_cache
src/tests/eval/value_type
src/tests/gp/ponder_nov2017
+ src/tests/tensor/dense_dimension_combiner
src/tests/tensor/dense_add_dimension_optimizer
src/tests/tensor/dense_dot_product_function
src/tests/tensor/dense_fast_rename_optimizer
@@ -32,7 +33,6 @@ vespa_define_module(
src/tests/tensor/dense_inplace_map_function
src/tests/tensor/dense_remove_dimension_optimizer
src/tests/tensor/dense_replace_type_function
- src/tests/tensor/dense_tensor_address_combiner
src/tests/tensor/dense_xw_product_function
src/tests/tensor/direct_dense_tensor_builder
src/tests/tensor/direct_sparse_tensor_builder
diff --git a/eval/src/tests/tensor/dense_dimension_combiner/CMakeLists.txt b/eval/src/tests/tensor/dense_dimension_combiner/CMakeLists.txt
new file mode 100644
index 00000000000..eaee8ebb4e4
--- /dev/null
+++ b/eval/src/tests/tensor/dense_dimension_combiner/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+vespa_add_executable(eval_dense_dimension_combiner_test_app TEST
+ SOURCES
+ dense_dimension_combiner_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_dimension_combiner_test_app COMMAND eval_dense_dimension_combiner_test_app)
diff --git a/eval/src/tests/tensor/dense_dimension_combiner/dense_dimension_combiner_test.cpp b/eval/src/tests/tensor/dense_dimension_combiner/dense_dimension_combiner_test.cpp
new file mode 100644
index 00000000000..b8949e3a7e6
--- /dev/null
+++ b/eval/src/tests/tensor/dense_dimension_combiner/dense_dimension_combiner_test.cpp
@@ -0,0 +1,185 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/tensor/dense/dense_dimension_combiner.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::tensor;
+
+void verifyLeft(DenseDimensionCombiner &d, size_t last) {
+ d.commonReset();
+ d.leftReset();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_EQUAL(d.leftIdx(), 0u);
+ size_t expect = 0;
+ while (d.leftInRange()) {
+ d.stepLeft();
+ EXPECT_GREATER(d.leftIdx(), expect);
+ expect = d.leftIdx();
+ }
+ EXPECT_FALSE(d.leftInRange());
+ EXPECT_EQUAL(expect, last);
+ d.leftReset();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_EQUAL(d.leftIdx(), 0u);
+}
+
+void verifyRight(DenseDimensionCombiner &d, size_t last) {
+ d.commonReset();
+ d.rightReset();
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_EQUAL(d.rightIdx(), 0u);
+ size_t expect = 0;
+ while (d.rightInRange()) {
+ d.stepRight();
+ EXPECT_GREATER(d.rightIdx(), expect);
+ expect = d.rightIdx();
+ }
+ EXPECT_FALSE(d.rightInRange());
+ EXPECT_EQUAL(expect, last);
+ d.rightReset();
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_EQUAL(d.rightIdx(), 0u);
+}
+
+
+TEST("require that one left, one common, one right dimension works") {
+ ValueType t12_lc = ValueType::tensor_type({{"d1_l", 3},{"d2_c", 4}});
+ ValueType t23_cr = ValueType::tensor_type({{"d2_c", 4},{"d3_r", 5}});
+
+ DenseDimensionCombiner d(t12_lc, t23_cr);
+
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 0u);
+ EXPECT_EQUAL(d.rightIdx(), 0u);
+ EXPECT_EQUAL(d.outputIdx(), 0u);
+
+ d.stepCommon();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 1u);
+ EXPECT_EQUAL(d.rightIdx(), 5u);
+ EXPECT_EQUAL(d.outputIdx(), 5u);
+
+ d.stepRight();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 1u);
+ EXPECT_EQUAL(d.rightIdx(), 6u);
+ EXPECT_EQUAL(d.outputIdx(), 6u);
+
+ d.stepLeft();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 5u);
+ EXPECT_EQUAL(d.rightIdx(), 6u);
+ EXPECT_EQUAL(d.outputIdx(), 26u);
+
+ d.stepLeft();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 9u);
+ EXPECT_EQUAL(d.rightIdx(), 6u);
+ EXPECT_EQUAL(d.outputIdx(), 46u);
+
+ d.stepLeft();
+ EXPECT_FALSE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 13u);
+ EXPECT_EQUAL(d.rightIdx(), 6u);
+ EXPECT_EQUAL(d.outputIdx(), 6u);
+
+ d.leftReset();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 1u);
+ EXPECT_EQUAL(d.rightIdx(), 6u);
+ EXPECT_EQUAL(d.outputIdx(), 6u);
+
+ d.stepCommon();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 2u);
+ EXPECT_EQUAL(d.rightIdx(), 11u);
+ EXPECT_EQUAL(d.outputIdx(), 11u);
+
+ d.stepRight();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 2u);
+ EXPECT_EQUAL(d.rightIdx(), 12u);
+ EXPECT_EQUAL(d.outputIdx(), 12u);
+
+ TEST_DO(verifyLeft(d, 12));
+ TEST_DO(verifyRight(d, 20));
+}
+
+TEST("require that two left, no common, two right dimensions works") {
+ ValueType t12_ll = ValueType::tensor_type({{"d1_l", 3},{"d2_l", 4}});
+ ValueType t34_rr = ValueType::tensor_type({{"d3_r", 5},{"d4_r", 2}});
+
+ DenseDimensionCombiner d(t12_ll, t34_rr);
+
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 0u);
+ EXPECT_EQUAL(d.rightIdx(), 0u);
+ EXPECT_EQUAL(d.outputIdx(), 0u);
+
+ d.stepCommon();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_FALSE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 0u);
+ EXPECT_EQUAL(d.rightIdx(), 0u);
+ EXPECT_EQUAL(d.outputIdx(), 120u);
+
+ d.commonReset();
+ d.stepRight();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 0u);
+ EXPECT_EQUAL(d.rightIdx(), 1u);
+ EXPECT_EQUAL(d.outputIdx(), 1u);
+
+ d.stepLeft();
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 1u);
+ EXPECT_EQUAL(d.rightIdx(), 1u);
+ EXPECT_EQUAL(d.outputIdx(), 11u);
+
+ d.stepLeft();
+ d.stepLeft();
+ d.stepLeft();
+ d.stepLeft();
+ d.stepLeft();
+ d.stepLeft();
+ d.stepLeft();
+
+ EXPECT_TRUE(d.leftInRange());
+ EXPECT_TRUE(d.rightInRange());
+ EXPECT_TRUE(d.commonInRange());
+ EXPECT_EQUAL(d.leftIdx(), 8u);
+ EXPECT_EQUAL(d.rightIdx(), 1u);
+ EXPECT_EQUAL(d.outputIdx(), 81u);
+
+ TEST_DO(verifyLeft(d, 12));
+ TEST_DO(verifyRight(d, 10));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt b/eval/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt
deleted file mode 100644
index a006d70935d..00000000000
--- a/eval/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(eval_dense_tensor_address_combiner_test_app TEST
- SOURCES
- dense_tensor_address_combiner_test.cpp
- DEPENDS
- vespaeval
-)
-vespa_add_test(NAME eval_dense_tensor_address_combiner_test_app COMMAND eval_dense_tensor_address_combiner_test_app)
diff --git a/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp b/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
deleted file mode 100644
index 91a6087ea3a..00000000000
--- a/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/eval/tensor/dense/dense_tensor_address_combiner.h>
-#include <vespa/vespalib/test/insertion_operators.h>
-
-using namespace vespalib::tensor;
-using vespalib::eval::ValueType;
-
-ValueType
-combine(const std::vector<ValueType::Dimension> &lhs,
- const std::vector<ValueType::Dimension> &rhs)
-{
- return DenseTensorAddressCombiner::combineDimensions(
- ValueType::tensor_type(lhs),
- ValueType::tensor_type(rhs));
-}
-
-TEST("require that dimensions can be combined")
-{
- EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}}, {{"b", 5}}));
- EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 5}}, {{"b", 5}}));
- EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 5}}, {{"b", 5}}));
- EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
- combine({{"a", 3}, {"c", 5}, {"d", 7}},
- {{"b", 11}, {"c", 5}, {"e", 17}}));
- EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
- combine({{"b", 11}, {"c", 5}, {"e", 17}},
- {{"a", 3}, {"c", 5}, {"d", 7}}));
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/vespa/eval/tensor/dense/CMakeLists.txt b/eval/src/vespa/eval/tensor/dense/CMakeLists.txt
index c2638466de6..ce20d6ba6d9 100644
--- a/eval/src/vespa/eval/tensor/dense/CMakeLists.txt
+++ b/eval/src/vespa/eval/tensor/dense/CMakeLists.txt
@@ -2,16 +2,16 @@
vespa_add_library(eval_tensor_dense OBJECT
SOURCES
dense_add_dimension_optimizer.cpp
+ dense_dimension_combiner.cpp
dense_dot_product_function.cpp
dense_fast_rename_optimizer.cpp
dense_inplace_join_function.cpp
dense_inplace_map_function.cpp
dense_remove_dimension_optimizer.cpp
dense_replace_type_function.cpp
- dense_tensor.cpp
- dense_tensor_address_combiner.cpp
dense_tensor_address_mapper.cpp
dense_tensor_cells_iterator.cpp
+ dense_tensor.cpp
dense_tensor_modify.cpp
dense_tensor_reduce.cpp
dense_tensor_view.cpp
diff --git a/eval/src/vespa/eval/tensor/dense/dense_dimension_combiner.cpp b/eval/src/vespa/eval/tensor/dense/dense_dimension_combiner.cpp
new file mode 100644
index 00000000000..22c8ff12ad1
--- /dev/null
+++ b/eval/src/vespa/eval/tensor/dense/dense_dimension_combiner.cpp
@@ -0,0 +1,91 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "dense_dimension_combiner.h"
+#include <cassert>
+
+namespace vespalib::tensor {
+
+DenseDimensionCombiner::~DenseDimensionCombiner() = default;
+
+DenseDimensionCombiner::DenseDimensionCombiner(const eval::ValueType &lhs,
+ const eval::ValueType &rhs)
+ : _left(), _right(),
+ _commonDims(),
+ _outputIndex(0),
+ _outputSize(1u),
+ result_type(eval::ValueType::join(lhs, rhs))
+{
+ assert(lhs.is_dense());
+ assert(rhs.is_dense());
+ assert(result_type.is_dense());
+
+ const auto &lDims = lhs.dimensions();
+ const auto &rDims = rhs.dimensions();
+ const auto &oDims = result_type.dimensions();
+
+ size_t i = lDims.size();
+ size_t j = rDims.size();
+ size_t k = oDims.size();
+
+ uint32_t lMul = 1;
+ uint32_t rMul = 1;
+ uint32_t oMul = 1;
+
+ while (k-- > 0) {
+ if ((i > 0) && (lDims[i-1].name == oDims[k].name)) {
+ --i;
+ // left dim match
+ if ((j > 0) && (rDims[j-1].name == oDims[k].name)) {
+ // both dim match
+ --j;
+ CommonDim cd;
+ cd.idx = 0;
+ cd.leftMultiplier = lMul;
+ cd.rightMultiplier = rMul;
+ cd.outputMultiplier = oMul;
+ assert(lDims[i].size == oDims[k].size);
+ assert(rDims[j].size == oDims[k].size);
+ cd.size = oDims[k].size;
+ lMul *= cd.size;
+ rMul *= cd.size;
+ oMul *= cd.size;
+ _left.totalSize *= cd.size;
+ _right.totalSize *= cd.size;
+ _outputSize *= cd.size;
+ _commonDims.push_back(cd);
+ } else {
+ SideDim ld;
+ ld.idx = 0;
+ ld.sideMultiplier = lMul;
+ ld.outputMultiplier = oMul;
+ assert(lDims[i].size == oDims[k].size);
+ ld.size = oDims[k].size;
+ lMul *= ld.size;
+ oMul *= ld.size;
+ _outputSize *= ld.size;
+ _left.totalSize *= ld.size;
+ _left.dims.push_back(ld);
+ }
+ } else {
+ // right dim match
+ assert(j > 0);
+ assert(rDims[j-1].name == oDims[k].name);
+ --j;
+ SideDim rd;
+ rd.idx = 0;
+ rd.sideMultiplier = rMul;
+ rd.outputMultiplier = oMul;
+ assert(rDims[j].size == oDims[k].size);
+ rd.size = oDims[k].size;
+ rMul *= rd.size;
+ oMul *= rd.size;
+ _outputSize *= rd.size;
+ _right.totalSize *= rd.size;
+ _right.dims.push_back(rd);
+ }
+ }
+}
+
+
+} // namespace
+
diff --git a/eval/src/vespa/eval/tensor/dense/dense_dimension_combiner.h b/eval/src/vespa/eval/tensor/dense/dense_dimension_combiner.h
new file mode 100644
index 00000000000..dd3f74bad9b
--- /dev/null
+++ b/eval/src/vespa/eval/tensor/dense/dense_dimension_combiner.h
@@ -0,0 +1,114 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/tensor/tensor.h>
+#include <vespa/eval/tensor/types.h>
+#include <vespa/eval/eval/value_type.h>
+
+namespace vespalib::tensor {
+
+class DenseDimensionCombiner {
+
+ struct SideDim {
+ uint32_t idx;
+ uint32_t size;
+ uint32_t sideMultiplier;
+ uint32_t outputMultiplier;
+ };
+ struct CommonDim {
+ uint32_t idx;
+ uint32_t size;
+ uint32_t leftMultiplier;
+ uint32_t rightMultiplier;
+ uint32_t outputMultiplier;
+ };
+
+ struct SideDims {
+ std::vector<SideDim> dims;
+ uint32_t index;
+ uint32_t totalSize;
+
+ SideDims() : dims(), index(0), totalSize(1u) {}
+
+ void reset(uint32_t &outIndex) {
+ for (SideDim& d : dims) {
+ index -= d.idx * d.sideMultiplier;
+ outIndex -= d.idx * d.outputMultiplier;
+ d.idx = 0;
+ }
+ if (index >= totalSize) {
+ index -= totalSize;
+ }
+ }
+ void step(uint32_t &outIndex) {
+ for (SideDim& d : dims) {
+ d.idx++;
+ index += d.sideMultiplier;
+ outIndex += d.outputMultiplier;
+ if (d.idx < d.size) return;
+ index -= d.idx * d.sideMultiplier;
+ outIndex -= d.idx * d.outputMultiplier;
+ d.idx = 0;
+ }
+ index += totalSize;
+ }
+ };
+ SideDims _left;
+ SideDims _right;
+ std::vector<CommonDim> _commonDims;
+ uint32_t _outputIndex;
+ uint32_t _outputSize;
+
+public:
+ size_t leftIdx() const { return _left.index; }
+ size_t rightIdx() const { return _right.index; }
+ size_t outputIdx() const { return _outputIndex; }
+
+ bool leftInRange() const { return _left.index < _left.totalSize; }
+ bool rightInRange() const { return _right.index < _right.totalSize; }
+ bool commonInRange() const { return _outputIndex < _outputSize; }
+
+ void leftReset() { _left.reset(_outputIndex); }
+ void stepLeft() { _left.step(_outputIndex); }
+
+ void rightReset() { _right.reset(_outputIndex); }
+ void stepRight() { _right.step(_outputIndex); }
+
+ void commonReset() {
+ for (CommonDim& cd : _commonDims) {
+ _left.index -= cd.idx * cd.leftMultiplier;
+ _right.index -= cd.idx * cd.rightMultiplier;
+ _outputIndex -= cd.idx * cd.outputMultiplier;
+ cd.idx = 0;
+ }
+ if (_outputIndex >= _outputSize) {
+ _outputIndex -= _outputSize;
+ }
+ }
+
+ void stepCommon() {
+ size_t lim = _commonDims.size();
+ for (size_t i = 0; i < lim; ++i) {
+ CommonDim &cd = _commonDims[i];
+ cd.idx++;
+ _left.index += cd.leftMultiplier;
+ _right.index += cd.rightMultiplier;
+ _outputIndex += cd.outputMultiplier;
+ if (cd.idx < cd.size) return;
+ _left.index -= cd.idx * cd.leftMultiplier;
+ _right.index -= cd.idx * cd.rightMultiplier;
+ _outputIndex -= cd.idx * cd.outputMultiplier;
+ cd.idx = 0;
+ }
+ _outputIndex += _outputSize;
+ }
+
+ const eval::ValueType result_type;
+
+ DenseDimensionCombiner(const eval::ValueType &lhs, const eval::ValueType &rhs);
+
+ ~DenseDimensionCombiner();
+};
+
+}
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp
deleted file mode 100644
index b5c5d9b6a04..00000000000
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "dense_tensor_address_combiner.h"
-#include <vespa/vespalib/util/exceptions.h>
-#include <cassert>
-
-namespace vespalib::tensor {
-
-DenseTensorAddressCombiner::~DenseTensorAddressCombiner() = default;
-
-DenseTensorAddressCombiner::DenseTensorAddressCombiner(const eval::ValueType &combined, const eval::ValueType &lhs,
- const eval::ValueType &rhs)
- : _rightAddress(rhs),
- _combinedAddress(combined),
- _left(),
- _commonRight(),
- _right()
-{
- auto rhsItr = rhs.dimensions().cbegin();
- auto rhsItrEnd = rhs.dimensions().cend();
- uint32_t numDimensions(0);
- for (const auto &lhsDim : lhs.dimensions()) {
- while ((rhsItr != rhsItrEnd) && (rhsItr->name < lhsDim.name)) {
- _right.emplace_back(numDimensions++, rhsItr-rhs.dimensions().cbegin());
- ++rhsItr;
- }
- if ((rhsItr != rhsItrEnd) && (rhsItr->name == lhsDim.name)) {
- _left.emplace_back(numDimensions, _left.size());
- _commonRight.emplace_back(numDimensions, rhsItr-rhs.dimensions().cbegin());
- ++numDimensions;
- ++rhsItr;
- } else {
- _left.emplace_back(numDimensions++, _left.size());
- }
- }
- while (rhsItr != rhsItrEnd) {
- _right.emplace_back(numDimensions++, rhsItr-rhs.dimensions().cbegin());
- ++rhsItr;
- }
-}
-
-AddressContext::AddressContext(const eval::ValueType &type)
- : _type(type),
- _accumulatedSize(_type.dimensions().size()),
- _address(type.dimensions().size(), 0)
-
-{
- size_t multiplier = 1;
- for (int32_t i(_address.size() - 1); i >= 0; i--) {
- _accumulatedSize[i] = multiplier;
- multiplier *= type.dimensions()[i].size;
- }
-}
-
-AddressContext::~AddressContext() = default;
-
-eval::ValueType
-DenseTensorAddressCombiner::combineDimensions(const eval::ValueType &lhs, const eval::ValueType &rhs)
-{
- return eval::ValueType::join(lhs, rhs);
-}
-
-}
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.h b/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.h
deleted file mode 100644
index 3f6e347490c..00000000000
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "dense_tensor_cells_iterator.h"
-#include <vespa/eval/tensor/tensor.h>
-#include <vespa/eval/tensor/types.h>
-#include <vespa/eval/eval/value_type.h>
-
-namespace vespalib::tensor {
-
-class AddressContext {
-public:
- using Address = DenseTensorCellsIterator::Address;
- using size_type = eval::ValueType::Dimension::size_type;
- using Mapping = std::vector<std::pair<uint32_t, uint32_t>>;
- AddressContext(const eval::ValueType &type);
- ~AddressContext();
- size_type dimSize(uint32_t dim) const { return _type.dimensions()[dim].size; }
- size_type wholeDimStep(uint32_t dim) const { return _accumulatedSize[dim] * dimSize(dim); }
- size_t index() const {
- size_t cellIdx(0);
- for (uint32_t i(0); i < _address.size(); i++) {
- cellIdx += _address[i]*_accumulatedSize[i];
- }
- return cellIdx;
- }
- void update(const Address & addr, const Mapping & mapping) {
- for (const auto & m : mapping) {
- _address[m.first] = addr[m.second];
- }
- }
- bool updateCommon(const Address & addr, const Mapping & mapping) {
- for (const auto & m : mapping) {
- if (addr[m.first] >= dimSize(m.second)) {
- return false;
- }
- _address[m.second] = addr[m.first];
- }
- return true;
- }
-
- const eval::ValueType &_type;
- std::vector<size_t> _accumulatedSize;
- Address _address;
-
-};
-
-/**
- * Combines two dense tensor addresses to a new tensor address.
- * The resulting dimensions is the union of the input dimensions and
- * common dimensions must have matching labels.
- */
-class DenseTensorAddressCombiner
-{
-public:
- using Mapping = AddressContext::Mapping;
-
-private:
- using Address = DenseTensorCellsIterator::Address;
- using CellsRef = vespalib::ConstArrayRef<double>;
- using size_type = eval::ValueType::Dimension::size_type;
-
- AddressContext _rightAddress;
- AddressContext _combinedAddress;
-
- Mapping _left;
- Mapping _commonRight;
- Mapping _right;
-
-public:
- DenseTensorAddressCombiner(const eval::ValueType &combined, const eval::ValueType &lhs, const eval::ValueType &rhs);
- ~DenseTensorAddressCombiner();
- void updateLeftAndCommon(const Address & addr) { _combinedAddress.update(addr, _left); }
- bool updateCommon() { return _rightAddress.updateCommon(_combinedAddress._address, _commonRight); }
- bool hasAnyRightOnlyDimensions() const { return ! _right.empty(); }
-
- const Address & address() const { return _combinedAddress._address; }
- size_t rightCellIndex() const { return _rightAddress.index(); }
-
- template <typename Func>
- void for_each_right(const CellsRef & rhsCells, Func && func) {
- // The rightAddress oly holds the starting point for iteration and what is need to efficiently maintain
- // an index for addressing th ecells.
- const int32_t lastDimension = _right.size() - 1;
- int32_t curDimension = lastDimension;
- size_t rightCellIdx = _rightAddress.index();
- size_t combinedCellIdx = _combinedAddress.index();
- while (curDimension >= 0) {
- const uint32_t rdim = _right[curDimension].second;
- const uint32_t cdim = _right[curDimension].first;
- size_type & cindex = _combinedAddress._address[cdim];
- if (curDimension == lastDimension) {
- for (cindex = 0; cindex < _rightAddress.dimSize(rdim); cindex++) {
- func(combinedCellIdx, rhsCells[rightCellIdx]);
- rightCellIdx += _rightAddress._accumulatedSize[rdim];
- combinedCellIdx += _combinedAddress._accumulatedSize[cdim];
- }
- cindex = 0;
- rightCellIdx -= _rightAddress.wholeDimStep(rdim);
- combinedCellIdx -= _combinedAddress.wholeDimStep(cdim);
- curDimension--;
- } else {
- if ((cindex + 1) < _rightAddress.dimSize(rdim)) {
- cindex++;
- rightCellIdx += _rightAddress._accumulatedSize[rdim];
- combinedCellIdx += _combinedAddress._accumulatedSize[cdim];
- curDimension++;
- } else {
- rightCellIdx -= _rightAddress.wholeDimStep(rdim);
- combinedCellIdx -= _combinedAddress.wholeDimStep(cdim);
- cindex = 0;
- curDimension--;
- }
- }
- }
- }
-
- static eval::ValueType combineDimensions(const eval::ValueType &lhs, const eval::ValueType &rhs);
-};
-
-
-}
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
index fa1e59c87db..e71840f392c 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_apply.hpp
@@ -3,78 +3,45 @@
#pragma once
#include "dense_tensor_apply.h"
-#include "dense_tensor_address_combiner.h"
+#include "dense_dimension_combiner.h"
#include "direct_dense_tensor_builder.h"
namespace vespalib::tensor::dense {
template <typename Function>
std::unique_ptr<Tensor>
-apply(DenseTensorAddressCombiner & combiner, DirectDenseTensorBuilder & builder,
- const DenseTensorView &lhs, const DenseTensorView::CellsRef & rhsCells, Function &&func) __attribute__((noinline));
+apply(DenseDimensionCombiner & combiner, DirectDenseTensorBuilder & builder,
+ const DenseTensorView::CellsRef & lhsCells,
+ const DenseTensorView::CellsRef & rhsCells, Function &&func) __attribute__((noinline));
template <typename Function>
std::unique_ptr<Tensor>
-apply(DenseTensorAddressCombiner & combiner, DirectDenseTensorBuilder & builder,
- const DenseTensorView &lhs, const DenseTensorView::CellsRef & rhsCells, Function &&func)
+apply(DenseDimensionCombiner & combiner, DirectDenseTensorBuilder & builder,
+ const DenseTensorView::CellsRef & lhsCells,
+ const DenseTensorView::CellsRef & rhsCells, Function &&func)
{
- for (DenseTensorCellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
- combiner.updateLeftAndCommon(lhsItr.address());
- if (combiner.updateCommon()) {
- combiner.for_each_right(rhsCells, [&func, &builder, &lhsItr](size_t combined, double rhsCell) {
- builder.insertCell(combined, func(lhsItr.cell(), rhsCell));
- });
+ for (combiner.leftReset(); combiner.leftInRange(); combiner.stepLeft()) {
+ for (combiner.rightReset(); combiner.rightInRange(); combiner.stepRight()) {
+ for (combiner.commonReset(); combiner.commonInRange(); combiner.stepCommon()) {
+ size_t outIdx = combiner.outputIdx();
+ size_t l = combiner.leftIdx();
+ size_t r = combiner.rightIdx();
+ builder.insertCell(outIdx, func(lhsCells[l], rhsCells[r]));
+ }
}
}
return builder.build();
}
-
-template <typename Function>
-std::unique_ptr<Tensor>
-apply_no_rightonly_dimensions(DenseTensorAddressCombiner & combiner, DirectDenseTensorBuilder & builder,
- const DenseTensorView &lhs, const DenseTensorView::CellsRef & rhsCells,
- Function &&func) __attribute__((noinline));
-
-template <typename Function>
-std::unique_ptr<Tensor>
-apply_no_rightonly_dimensions(DenseTensorAddressCombiner & combiner, DirectDenseTensorBuilder & builder,
- const DenseTensorView &lhs, const DenseTensorView::CellsRef & rhsCells, Function &&func)
-{
- for (DenseTensorCellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
- combiner.updateLeftAndCommon(lhsItr.address());
- if (combiner.updateCommon()) {
- builder.insertCell(combiner.address(), func(lhsItr.cell(), rhsCells[combiner.rightCellIndex()]));
- }
- }
- return builder.build();
-}
-
-template <typename Function>
-std::unique_ptr<Tensor>
-apply(const DenseTensorView &lhs, const DenseTensorView &rhs, Function &&func)
-{
- eval::ValueType resultType = DenseTensorAddressCombiner::combineDimensions(lhs.fast_type(), rhs.fast_type());
- DenseTensorAddressCombiner combiner(resultType, lhs.fast_type(), rhs.fast_type());
- DirectDenseTensorBuilder builder(resultType);
- if (combiner.hasAnyRightOnlyDimensions()) {
- return apply(combiner, builder, lhs, rhs.cellsRef(), std::move(func));
- } else {
- return apply_no_rightonly_dimensions(combiner, builder, lhs, rhs.cellsRef(), std::move(func));
- }
-}
-
template <typename Function>
std::unique_ptr<Tensor>
apply(const DenseTensorView &lhs, const Tensor &rhs, Function &&func)
{
const DenseTensorView *view = dynamic_cast<const DenseTensorView *>(&rhs);
if (view) {
- return apply(lhs, *view, func);
- }
- const DenseTensor *dense = dynamic_cast<const DenseTensor *>(&rhs);
- if (dense) {
- return apply(lhs, *dense, func);
+ DenseDimensionCombiner combiner(lhs.fast_type(), view->fast_type());
+ DirectDenseTensorBuilder builder(combiner.result_type);
+ return apply(combiner, builder, lhs.cellsRef(), view->cellsRef(), std::move(func));
}
return Tensor::UP();
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 66c8da86403..b969e328419 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -145,6 +145,11 @@ public class Flags {
"Configserver RPC authorizer. Allowed values: ['disable', 'log-only', 'enforce']",
"Takes effect on restart of configserver");
+ public static final UnboundBooleanFlag PROVISION_APPLICATION_CERTIFICATE = defineFeatureFlag(
+ "provision-application-certificate", false,
+ "Privision certificate from CA and include reference in deployment",
+ "Takes effect on deployment through controller",
+ APPLICATION_ID);
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, String description,
diff --git a/hosted-api/pom.xml b/hosted-api/pom.xml
index f20244a8816..928a173f9d8 100644
--- a/hosted-api/pom.xml
+++ b/hosted-api/pom.xml
@@ -34,9 +34,13 @@
</dependency>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <version>4.12</version>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.vintage</groupId>
+ <artifactId>junit-vintage-engine</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
diff --git a/hosted-api/src/test/java/ai/vespa/hosted/api/MultiPartStreamerTest.java b/hosted-api/src/test/java/ai/vespa/hosted/api/MultiPartStreamerTest.java
index a55c0d91cd3..bfc544e82f8 100644
--- a/hosted-api/src/test/java/ai/vespa/hosted/api/MultiPartStreamerTest.java
+++ b/hosted-api/src/test/java/ai/vespa/hosted/api/MultiPartStreamerTest.java
@@ -1,9 +1,8 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.api;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
import java.io.IOException;
import java.net.URI;
@@ -12,16 +11,13 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
-public class MultiPartStreamerTest {
-
- @Rule
- public TemporaryFolder tmp = new TemporaryFolder();
+class MultiPartStreamerTest {
@Test
- public void test() throws IOException {
- Path file = tmp.newFile().toPath();
+ void test(@TempDir Path tmp) throws IOException {
+ Path file = tmp.resolve("file");
Files.write(file, new byte[]{0x48, 0x69});
MultiPartStreamer streamer = new MultiPartStreamer("My boundary");
diff --git a/hosted-api/src/test/java/ai/vespa/hosted/api/SignaturesTest.java b/hosted-api/src/test/java/ai/vespa/hosted/api/SignaturesTest.java
index 0a0d4a48edf..6749fb902f9 100644
--- a/hosted-api/src/test/java/ai/vespa/hosted/api/SignaturesTest.java
+++ b/hosted-api/src/test/java/ai/vespa/hosted/api/SignaturesTest.java
@@ -1,7 +1,7 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.api;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
@@ -16,9 +16,9 @@ import java.time.ZoneOffset;
import static ai.vespa.hosted.api.Signatures.sha256Digest;
import static ai.vespa.hosted.api.Signatures.sha256Digester;
import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests that messages can be signed and verified, and that the keys used for this can be parsed.
@@ -32,7 +32,7 @@ import static org.junit.Assert.assertTrue;
*
* @author jonmv
*/
-public class SignaturesTest {
+class SignaturesTest {
private static final String ecPemPublicKey = "-----BEGIN PUBLIC KEY-----\n" +
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
@@ -58,7 +58,7 @@ public class SignaturesTest {
"∠( ᐛ 」∠)_").getBytes(UTF_8);
@Test
- public void testHashing() throws Exception {
+ void testHashing() throws Exception {
byte[] hash1 = MessageDigest.getInstance("SHA-256").digest(message);
byte[] hash2 = sha256Digest(() -> new ByteArrayInputStream(message));
DigestInputStream digester = sha256Digester(new ByteArrayInputStream(message));
@@ -70,7 +70,7 @@ public class SignaturesTest {
}
@Test
- public void testSigning() {
+ void testSigning() {
Clock clock = Clock.fixed(Instant.EPOCH, ZoneOffset.UTC);
RequestSigner signer = new RequestSigner(ecPemPrivateKey, "myKey", clock);
diff --git a/jdisc_http_service/abi-spec.json b/jdisc_http_service/abi-spec.json
index 04e6d22a445..a326b5792be 100644
--- a/jdisc_http_service/abi-spec.json
+++ b/jdisc_http_service/abi-spec.json
@@ -78,7 +78,9 @@
"public void <init>(com.yahoo.jdisc.http.ConnectorConfig$Ssl)",
"public com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder enabled(boolean)",
"public com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder privateKeyFile(java.lang.String)",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder privateKey(java.lang.String)",
"public com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder certificateFile(java.lang.String)",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder certificate(java.lang.String)",
"public com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder caCertificateFile(java.lang.String)",
"public com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder clientAuth(com.yahoo.jdisc.http.ConnectorConfig$Ssl$ClientAuth$Enum)",
"public com.yahoo.jdisc.http.ConnectorConfig$Ssl build()"
@@ -131,7 +133,9 @@
"public void <init>(com.yahoo.jdisc.http.ConnectorConfig$Ssl$Builder)",
"public boolean enabled()",
"public java.lang.String privateKeyFile()",
+ "public java.lang.String privateKey()",
"public java.lang.String certificateFile()",
+ "public java.lang.String certificate()",
"public java.lang.String caCertificateFile()",
"public com.yahoo.jdisc.http.ConnectorConfig$Ssl$ClientAuth$Enum clientAuth()"
],
diff --git a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/impl/ConfiguredSslContextFactoryProvider.java b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/impl/ConfiguredSslContextFactoryProvider.java
index facb54bc37a..2021105fc52 100644
--- a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/impl/ConfiguredSslContextFactoryProvider.java
+++ b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/impl/ConfiguredSslContextFactoryProvider.java
@@ -60,15 +60,23 @@ public class ConfiguredSslContextFactoryProvider implements SslContextFactoryPro
private static void validateConfig(ConnectorConfig.Ssl config) {
if (!config.enabled()) return;
- if (config.certificateFile().isEmpty()) {
- throw new IllegalArgumentException("Missing certificate file.");
- }
- if (config.privateKeyFile().isEmpty()) {
- throw new IllegalArgumentException("Missing private key file.");
- }
+ if(hasBoth(config.certificate(), config.certificateFile()))
+ throw new IllegalArgumentException("Specified both certificate and certificate file.");
+
+ if(hasBoth(config.privateKey(), config.privateKeyFile()))
+ throw new IllegalArgumentException("Specified both private key and private key file.");
+
+ if(hasNeither(config.certificate(), config.certificateFile()))
+            throw new IllegalArgumentException("Specified neither certificate nor certificate file.");
+
+ if(hasNeither(config.privateKey(), config.privateKeyFile()))
+            throw new IllegalArgumentException("Specified neither private key nor private key file.");
}
+ private static boolean hasBoth(String a, String b) { return !a.isBlank() && !b.isBlank(); }
+ private static boolean hasNeither(String a, String b) { return a.isBlank() && b.isBlank(); }
+
private static KeyStore createTruststore(ConnectorConfig.Ssl sslConfig) {
List<X509Certificate> caCertificates = X509CertificateUtils.certificateListFromPem(readToString(sslConfig.caCertificateFile()));
return KeyStoreBuilder.withType(KeyStoreType.JKS)
@@ -77,11 +85,21 @@ public class ConfiguredSslContextFactoryProvider implements SslContextFactoryPro
}
private static KeyStore createKeystore(ConnectorConfig.Ssl sslConfig) {
- PrivateKey privateKey = KeyUtils.fromPemEncodedPrivateKey(readToString(sslConfig.privateKeyFile()));
- List<X509Certificate> certificates = X509CertificateUtils.certificateListFromPem(readToString(sslConfig.certificateFile()));
+ PrivateKey privateKey = KeyUtils.fromPemEncodedPrivateKey(getPrivateKey(sslConfig));
+ List<X509Certificate> certificates = X509CertificateUtils.certificateListFromPem(getCertificate(sslConfig));
return KeyStoreBuilder.withType(KeyStoreType.JKS).withKeyEntry("default", privateKey, certificates).build();
}
+ private static String getPrivateKey(ConnectorConfig.Ssl config) {
+ if(!config.privateKey().isBlank()) return config.privateKey();
+ return readToString(config.privateKeyFile());
+ }
+
+ private static String getCertificate(ConnectorConfig.Ssl config) {
+ if(!config.certificate().isBlank()) return config.certificate();
+ return readToString(config.certificateFile());
+ }
+
private static String readToString(String filename) {
try {
return Files.readString(Paths.get(filename), StandardCharsets.UTF_8);
diff --git a/jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def b/jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def
index 7735420d803..c6c6fad345b 100644
--- a/jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def
+++ b/jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def
@@ -56,12 +56,18 @@ throttling.idleTimeout double default=-1.0
# Whether to enable SSL for this connector.
ssl.enabled bool default=false
-# File with private key in PEM format
+# File with private key in PEM format. Specify either this or privateKey, but not both
ssl.privateKeyFile string default=""
-# File with certificate in PEM format
+# Private key in PEM format. Specify either this or privateKeyFile, but not both
+ssl.privateKey string default=""
+
+# File with certificate in PEM format. Specify either this or certificate, but not both
ssl.certificateFile string default=""
+# Certificate in PEM format. Specify either this or certificateFile, but not both
+ssl.certificate string default=""
+
# with trusted CA certificates in PEM format. Used to verify clients
ssl.caCertificateFile string default=""
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/IntermediateSession.java b/messagebus/src/main/java/com/yahoo/messagebus/IntermediateSession.java
index 8286404aa23..309316b450a 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/IntermediateSession.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/IntermediateSession.java
@@ -36,7 +36,7 @@ public final class IntermediateSession implements MessageHandler, ReplyHandler {
* Sets the destroyed flag to true. The very first time this method is called, it cleans up all its dependencies.
* Even if you retain a reference to this object, all of its content is allowed to be garbage collected.
*
- * @return True if content existed and was destroyed.
+ * @return true if content existed and was destroyed
*/
public boolean destroy() {
if (!destroyed.getAndSet(true)) {
@@ -73,11 +73,7 @@ public final class IntermediateSession implements MessageHandler, ReplyHandler {
}
}
- /**
- * Returns the message handler of this session.
- *
- * @return The message handler.
- */
+ /** Returns the message handler of this session */
public MessageHandler getMessageHandler() {
return msgHandler;
}
@@ -94,18 +90,12 @@ public final class IntermediateSession implements MessageHandler, ReplyHandler {
/**
* Returns the connection spec string for this session. This returns a combination of the owning message bus' own
* spec string and the name of this session.
- *
- * @return The connection string.
*/
public String getConnectionSpec() {
return mbus.getConnectionSpec() + "/" + name;
}
- /**
- * Returns the name of this session.
- *
- * @return The session name.
- */
+ /** Returns the name of this session */
public String getName() {
return name;
}
@@ -123,4 +113,5 @@ public final class IntermediateSession implements MessageHandler, ReplyHandler {
replyHandler.handleReply(reply);
}
}
+
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java
index 2ca24dad1e2..ab9f4f6c9c4 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java
@@ -117,7 +117,8 @@ public class VespaMetrics {
.statusCode(health.getStatus().ordinal()) // TODO: MetricsPacket should use StatusCode instead of int
.statusMessage(health.getMessage())
.putDimensions(service.getDimensions())
- .putDimension(INSTANCE_DIMENSION_ID, service.getInstanceName());
+ .putDimension(INSTANCE_DIMENSION_ID, service.getInstanceName())
+ .addConsumers(metricsConsumers.getAllConsumers());
}
/**
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/GenericMetricsHandler.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/GenericMetricsHandler.java
index cadfc053b94..f61a96917a9 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/GenericMetricsHandler.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/GenericMetricsHandler.java
@@ -4,7 +4,9 @@
package ai.vespa.metricsproxy.http;
+import ai.vespa.metricsproxy.core.MetricsConsumers;
import ai.vespa.metricsproxy.core.MetricsManager;
+import ai.vespa.metricsproxy.metric.model.ConsumerId;
import ai.vespa.metricsproxy.metric.model.MetricsPacket;
import ai.vespa.metricsproxy.metric.model.json.JsonRenderingException;
import ai.vespa.metricsproxy.service.VespaServices;
@@ -19,22 +21,33 @@ import java.nio.charset.Charset;
import java.time.Instant;
import java.util.List;
import java.util.concurrent.Executor;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import static ai.vespa.metricsproxy.metric.model.ConsumerId.toConsumerId;
import static ai.vespa.metricsproxy.metric.model.json.GenericJsonUtil.toGenericJsonModel;
/**
- * Handler exposing the generic metrics format via http.
+ * Http handler that exposes the generic metrics format.
*
* @author gjoranv
*/
public class GenericMetricsHandler extends ThreadedHttpRequestHandler {
+ private static final Logger log = Logger.getLogger(GenericMetricsHandler.class.getName());
+ public static final ConsumerId DEFAULT_PUBLIC_CONSUMER_ID = toConsumerId("default");
+
+ private final MetricsConsumers metricsConsumers;
private final MetricsManager metricsManager;
private final VespaServices vespaServices;
@Inject
- public GenericMetricsHandler(Executor executor, MetricsManager metricsManager, VespaServices vespaServices) {
+ public GenericMetricsHandler(Executor executor,
+ MetricsManager metricsManager,
+ VespaServices vespaServices,
+ MetricsConsumers metricsConsumers) {
super(executor);
+ this.metricsConsumers = metricsConsumers;
this.metricsManager = metricsManager;
this.vespaServices = vespaServices;
}
@@ -42,13 +55,29 @@ public class GenericMetricsHandler extends ThreadedHttpRequestHandler {
@Override
public HttpResponse handle(HttpRequest request) {
try {
- List<MetricsPacket> metrics = metricsManager.getMetrics(vespaServices.getVespaServices(), Instant.now());
+ ConsumerId consumer = getConsumerOrDefault(request.getProperty("consumer"));
+
+ List<MetricsPacket> metrics = metricsManager.getMetrics(vespaServices.getVespaServices(), Instant.now())
+ .stream()
+ .filter(metricsPacket -> metricsPacket.consumers().contains(consumer))
+ .collect(Collectors.toList());
return new Response(200, toGenericJsonModel(metrics).serialize());
} catch (JsonRenderingException e) {
return new Response(500, e.getMessageAsJson());
}
}
+ private ConsumerId getConsumerOrDefault(String consumer) {
+ if (consumer == null) return DEFAULT_PUBLIC_CONSUMER_ID;
+
+ ConsumerId consumerId = toConsumerId(consumer);
+ if (! metricsConsumers.getAllConsumers().contains(consumerId)) {
+ log.info("No consumer with id '" + consumer + "' - using the default consumer instead.");
+ return DEFAULT_PUBLIC_CONSUMER_ID;
+ }
+ return consumerId;
+ }
+
private static class Response extends HttpResponse {
private final byte[] data;
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/ExternalMetrics.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/ExternalMetrics.java
index 26ae177d767..64ede137e8e 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/ExternalMetrics.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/ExternalMetrics.java
@@ -53,6 +53,8 @@ public class ExternalMetrics {
}
public void setExtraMetrics(List<MetricsPacket.Builder> externalPackets) {
+ // TODO: Metrics filtering per consumer is not yet implemented.
+ // Split each packet per metric, and re-aggregate based on the metrics each consumer wants. Then filter out all packages with no consumers.
log.log(DEBUG, () -> "Setting new external metrics with " + externalPackets.size() + " metrics packets.");
externalPackets.forEach(packet -> {
packet.addConsumers(consumers.getAllConsumers())
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java
index eb620fd37be..e441c353292 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/core/MetricsManagerTest.java
@@ -140,6 +140,7 @@ public class MetricsManagerTest {
service0.setSystemMetrics(oldSystemMetrics);
}
+ // TODO: test that non-whitelisted metrics are filtered out, but this is currently not the case, see ExternalMetrics.setExtraMetrics
@Test
public void extra_metrics_packets_containing_whitelisted_metrics_are_added() {
metricsManager.setExtraMetrics(ImmutableList.of(
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/GenericMetricsHandlerTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/GenericMetricsHandlerTest.java
index 301dbf56c3f..29ab8c66694 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/GenericMetricsHandlerTest.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/GenericMetricsHandlerTest.java
@@ -6,6 +6,7 @@ package ai.vespa.metricsproxy.http;
import ai.vespa.metricsproxy.TestUtil;
import ai.vespa.metricsproxy.core.ConsumersConfig;
+import ai.vespa.metricsproxy.core.ConsumersConfig.Consumer;
import ai.vespa.metricsproxy.core.MetricsConsumers;
import ai.vespa.metricsproxy.core.MetricsManager;
import ai.vespa.metricsproxy.metric.HealthMetric;
@@ -29,12 +30,13 @@ import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
+import java.io.IOException;
import java.time.Instant;
import java.util.List;
import java.util.concurrent.Executors;
import static ai.vespa.metricsproxy.core.VespaMetrics.INSTANCE_DIMENSION_ID;
-import static ai.vespa.metricsproxy.core.VespaMetrics.VESPA_CONSUMER_ID;
+import static ai.vespa.metricsproxy.http.GenericMetricsHandler.DEFAULT_PUBLIC_CONSUMER_ID;
import static ai.vespa.metricsproxy.metric.model.ServiceId.toServiceId;
import static ai.vespa.metricsproxy.metric.model.StatusCode.DOWN;
import static ai.vespa.metricsproxy.metric.model.json.JacksonUtil.createObjectMapper;
@@ -43,6 +45,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
/**
* @author gjoranv
@@ -55,38 +58,69 @@ public class GenericMetricsHandlerTest {
new DummyService(1, ""),
new DownService(HealthMetric.getDown("No response")));
+ private static final VespaServices vespaServices = new VespaServices(testServices);
+
+ private static final String DEFAULT_CONSUMER = "default";
+ private static final String CUSTOM_CONSUMER = "custom-consumer";
+
private static final String CPU_METRIC = "cpu";
private static final String URI = "http://localhost/metrics/v1/values";
- private static final VespaServices vespaServices = new VespaServices(testServices);
private static RequestHandlerTestDriver testDriver;
@BeforeClass
- public static void setupMetricsManager() {
+ public static void setup() {
MetricsManager metricsManager = TestUtil.createMetricsManager(vespaServices, getMetricsConsumers(), getApplicationDimensions(), getNodeDimensions());
metricsManager.setExtraMetrics(ImmutableList.of(
new MetricsPacket.Builder(toServiceId("foo"))
.timestamp(Instant.now().getEpochSecond())
.putMetrics(ImmutableList.of(new Metric(CPU_METRIC, 12.345)))));
- GenericMetricsHandler handler = new GenericMetricsHandler(Executors.newSingleThreadExecutor(), metricsManager, vespaServices);
+ GenericMetricsHandler handler = new GenericMetricsHandler(Executors.newSingleThreadExecutor(), metricsManager, vespaServices, getMetricsConsumers());
testDriver = new RequestHandlerTestDriver(handler);
}
+ private GenericJsonModel getResponseAsJsonModel(String consumer) {
+ String response = testDriver.sendRequest(URI + "?consumer=" + consumer).readAll();
+ try {
+ return createObjectMapper().readValue(response, GenericJsonModel.class);
+ } catch (IOException e) {
+ fail("Failed to create json model: " + e.getMessage());
+ throw new RuntimeException(e);
+ }
+ }
+
@Ignore
@Test
public void visually_inspect_response() throws Exception{
- String response = testDriver.sendRequest(URI).readAll();
+ String response = testDriver.sendRequest(URI+ "?consumer=default").readAll();
ObjectMapper mapper = createObjectMapper();
var jsonModel = mapper.readValue(response, GenericJsonModel.class);
System.out.println(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(jsonModel));
}
@Test
- public void response_contains_node_metrics() throws Exception {
+ public void no_explicit_consumer_gives_the_default_consumer() {
+ String responseDefaultConsumer = testDriver.sendRequest(URI + "?consumer=default").readAll();
+ String responseNoConsumer = testDriver.sendRequest(URI).readAll();
+ assertEqualsExceptTimestamps(responseDefaultConsumer, responseNoConsumer);
+ }
+
+ @Test
+ public void unknown_consumer_gives_the_default_consumer() {
String response = testDriver.sendRequest(URI).readAll();
- var jsonModel = createObjectMapper().readValue(response, GenericJsonModel.class);
+ String responseUnknownConsumer = testDriver.sendRequest(URI + "?consumer=not_defined").readAll();
+ assertEqualsExceptTimestamps(response, responseUnknownConsumer);
+ }
+
+ private void assertEqualsExceptTimestamps(String s1, String s2) {
+ assertEquals(replaceTimestamps(s1), replaceTimestamps(s2));
+ }
+
+ @Test
+ public void response_contains_node_metrics() {
+ GenericJsonModel jsonModel = getResponseAsJsonModel(DEFAULT_CONSUMER);
assertNotNull(jsonModel.node);
assertEquals(1, jsonModel.node.metrics.size());
@@ -94,9 +128,8 @@ public class GenericMetricsHandlerTest {
}
@Test
- public void response_contains_service_metrics() throws Exception {
- String response = testDriver.sendRequest(URI).readAll();
- var jsonModel = createObjectMapper().readValue(response, GenericJsonModel.class);
+ public void response_contains_service_metrics() {
+ GenericJsonModel jsonModel = getResponseAsJsonModel(DEFAULT_CONSUMER);
assertEquals(2, jsonModel.services.size());
GenericService dummyService = jsonModel.services.get(0);
@@ -104,17 +137,50 @@ public class GenericMetricsHandlerTest {
GenericMetrics dummy0Metrics = getMetricsForInstance("dummy0", dummyService);
assertEquals(1L, dummy0Metrics.values.get(METRIC_1).longValue());
- assertEquals("metric-dim", dummy0Metrics.dimensions.get("dim0"));
+ assertEquals("default-val", dummy0Metrics.dimensions.get("consumer-dim"));
GenericMetrics dummy1Metrics = getMetricsForInstance("dummy1", dummyService);
assertEquals(6L, dummy1Metrics.values.get(METRIC_1).longValue());
- assertEquals("metric-dim", dummy1Metrics.dimensions.get("dim0"));
+ assertEquals("default-val", dummy1Metrics.dimensions.get("consumer-dim"));
}
@Test
- public void response_contains_health_from_service_that_is_down() throws Exception {
- String response = testDriver.sendRequest(URI).readAll();
- var jsonModel = createObjectMapper().readValue(response, GenericJsonModel.class);
+ public void all_consumers_get_health_from_service_that_is_down() {
+ assertDownServiceHealth(DEFAULT_CONSUMER);
+ assertDownServiceHealth(CUSTOM_CONSUMER);
+ }
+
+ @Test
+ public void all_timestamps_are_equal_and_non_zero() {
+ GenericJsonModel jsonModel = getResponseAsJsonModel(DEFAULT_CONSUMER);
+
+ Long nodeTimestamp = jsonModel.node.timestamp;
+ assertNotEquals(0L, (long) nodeTimestamp);
+ for (var service : jsonModel.services)
+ assertEquals(nodeTimestamp, service.timestamp);
+ }
+
+ @Test
+ public void custom_consumer_gets_only_its_whitelisted_metrics() {
+ GenericJsonModel jsonModel = getResponseAsJsonModel(CUSTOM_CONSUMER);
+
+ assertNotNull(jsonModel.node);
+ // TODO: see comment in ExternalMetrics.setExtraMetrics
+ // assertEquals(0, jsonModel.node.metrics.size());
+
+ assertEquals(2, jsonModel.services.size());
+ GenericService dummyService = jsonModel.services.get(0);
+ assertEquals(2, dummyService.metrics.size());
+
+ GenericMetrics dummy0Metrics = getMetricsForInstance("dummy0", dummyService);
+ assertEquals("custom-val", dummy0Metrics.dimensions.get("consumer-dim"));
+
+ GenericMetrics dummy1Metrics = getMetricsForInstance("dummy1", dummyService);
+ assertEquals("custom-val", dummy1Metrics.dimensions.get("consumer-dim"));
+ }
+
+ private void assertDownServiceHealth(String consumer) {
+ GenericJsonModel jsonModel = getResponseAsJsonModel(consumer);
GenericService downService = jsonModel.services.get(1);
assertEquals(DOWN.status, downService.status.code);
@@ -127,15 +193,8 @@ public class GenericMetricsHandlerTest {
assertEquals(DownService.NAME, downService.metrics.get(0).dimensions.get(INSTANCE_DIMENSION_ID.id));
}
- @Test
- public void all_timestamps_are_equal_and_non_zero() throws Exception {
- String response = testDriver.sendRequest(URI).readAll();
- var jsonModel = createObjectMapper().readValue(response, GenericJsonModel.class);
-
- Long nodeTimestamp = jsonModel.node.timestamp;
- assertNotEquals(0L, (long) nodeTimestamp);
- for (var service : jsonModel.services)
- assertEquals(nodeTimestamp, service.timestamp);
+ private String replaceTimestamps(String s) {
+ return s.replaceAll("timestamp\":\\d+,", "timestamp\":1,");
}
private static GenericMetrics getMetricsForInstance(String instance, GenericService service) {
@@ -143,23 +202,33 @@ public class GenericMetricsHandlerTest {
if (metrics.dimensions.get(INSTANCE_DIMENSION_ID.id).equals(instance))
return metrics;
}
- throw new RuntimeException("Could not find metrics for service instance " + instance);
+ fail("Could not find metrics for service instance " + instance);
+ throw new RuntimeException();
}
private static MetricsConsumers getMetricsConsumers() {
- ConsumersConfig.Consumer.Metric.Dimension.Builder metricDimension = new ConsumersConfig.Consumer.Metric.Dimension.Builder()
- .key("dim0").value("metric-dim");
+ var defaultConsumerDimension = new Consumer.Metric.Dimension.Builder()
+ .key("consumer-dim").value("default-val");
+
+ var customConsumerDimension = new Consumer.Metric.Dimension.Builder()
+ .key("consumer-dim").value("custom-val");
return new MetricsConsumers(new ConsumersConfig.Builder()
- .consumer(new ConsumersConfig.Consumer.Builder()
- .name(VESPA_CONSUMER_ID.id)
- .metric(new ConsumersConfig.Consumer.Metric.Builder()
+ .consumer(new Consumer.Builder()
+ .name(DEFAULT_PUBLIC_CONSUMER_ID.id)
+ .metric(new Consumer.Metric.Builder()
.name(CPU_METRIC)
.outputname(CPU_METRIC))
- .metric(new ConsumersConfig.Consumer.Metric.Builder()
+ .metric(new Consumer.Metric.Builder()
+ .name(METRIC_1)
+ .outputname(METRIC_1)
+ .dimension(defaultConsumerDimension)))
+ .consumer(new Consumer.Builder()
+ .name(CUSTOM_CONSUMER)
+ .metric(new Consumer.Metric.Builder()
.name(METRIC_1)
.outputname(METRIC_1)
- .dimension(metricDimension)))
+ .dimension(customConsumerDimension)))
.build());
}
diff --git a/metrics/CMakeLists.txt b/metrics/CMakeLists.txt
index 938617e09d3..6cf1eadd6f7 100644
--- a/metrics/CMakeLists.txt
+++ b/metrics/CMakeLists.txt
@@ -10,9 +10,6 @@ vespa_define_module(
LIBS
src/vespa/metrics
- TEST_EXTERNAL_DEPENDS
- cppunit
-
TESTS
src/tests
)
diff --git a/metrics/src/tests/metricmanagertest.cpp b/metrics/src/tests/metricmanagertest.cpp
index f260089ec84..1d954a641b6 100644
--- a/metrics/src/tests/metricmanagertest.cpp
+++ b/metrics/src/tests/metricmanagertest.cpp
@@ -895,8 +895,6 @@ public:
void verifyDimensions(size_t metricIndex,
const std::string& name,
const Metric::Tags& dimensions) {
- // Works to do this outside of main test body because cppunit uses
- // exceptions for its failures.
EXPECT_EQ(name, nthMetricName(metricIndex)) << _jsonText;
EXPECT_EQ(dimensions.size(), nthMetricDimensionCount(metricIndex)) << _jsonText;
for (auto& dim : dimensions) {
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Softmax.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Softmax.java
new file mode 100644
index 00000000000..cdacbe1656a
--- /dev/null
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Softmax.java
@@ -0,0 +1,40 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.rankingexpression.importer.operations;
+
+import ai.vespa.rankingexpression.importer.OrderedTensorType;
+import com.yahoo.tensor.functions.TensorFunction;
+
+import java.util.List;
+
+/**
+ * Convert imported 'softmax' operation to the Vespa softmax ranking function.
+ *
+ * @author lesters
+ */
+public class Softmax extends IntermediateOperation {
+
+ public Softmax(String modelName, String nodeName, List<IntermediateOperation> inputs) {
+ super(modelName, nodeName, inputs);
+ }
+
+ @Override
+ protected OrderedTensorType lazyGetType() {
+ if ( ! allInputTypesPresent(1)) return null;
+ return inputs.get(0).type().get();
+ }
+
+ @Override
+ protected TensorFunction lazyGetFunction() {
+ if ( ! allInputFunctionsPresent(1)) return null;
+
+ OrderedTensorType inputType = inputs.get(0).type().get();
+ String dimension = inputType.dimensions().get(0).name();
+ if (inputType.rank() == 2) {
+ dimension = inputType.dimensions().get(1).name(); // assumption: first dimension is batch dimension
+ }
+
+ TensorFunction inputFunction = inputs.get(0).function().get();
+ return new com.yahoo.tensor.functions.Softmax(inputFunction, dimension);
+ }
+
+}
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java
index 1abbd0063a1..357794faee2 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java
@@ -2,6 +2,7 @@
package ai.vespa.rankingexpression.importer.tensorflow;
+import ai.vespa.rankingexpression.importer.operations.Softmax;
import ai.vespa.rankingexpression.importer.operations.Sum;
import com.yahoo.searchlib.rankingexpression.evaluation.TensorValue;
import ai.vespa.rankingexpression.importer.IntermediateGraph;
@@ -112,6 +113,7 @@ class GraphImporter {
case "elu": return new Map(modelName, nodeName, inputs, ScalarFunctions.elu());
case "relu": return new Map(modelName, nodeName, inputs, ScalarFunctions.relu());
case "selu": return new Map(modelName, nodeName, inputs, ScalarFunctions.selu());
+ case "softmax": return new Softmax(modelName, nodeName, inputs);
// state ops
case "variable": return new Constant(modelName, nodeName, nodeType);
diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java
index 1b8d06bf964..e75c7fd4da3 100644
--- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java
+++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java
@@ -22,7 +22,7 @@ public class BatchNormImportTestCase {
"src/test/models/tensorflow/batch_norm/saved");
ImportedModel.Signature signature = model.get().signature("serving_default");
- assertEquals("Has skipped outputs",
+ assertEquals("Should have no skipped outputs",
0, model.get().signature("serving_default").skippedOutputs().size());
diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java
index 5e5c81ddcf1..b9d767774be 100644
--- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java
+++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java
@@ -29,7 +29,7 @@ public class DropoutImportTestCase {
ImportedModel.Signature signature = model.get().signature("serving_default");
- Assert.assertEquals("Has skipped outputs",
+ Assert.assertEquals("Should have no skipped outputs",
0, model.get().signature("serving_default").skippedOutputs().size());
ImportedMlFunction function = signature.outputFunction("y", "y");
diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java
index 6b3e9207fad..c13ed84f701 100644
--- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java
+++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java
@@ -19,7 +19,7 @@ public class MnistImportTestCase {
public void testMnistImport() {
TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/mnist/saved");
ImportedModel.Signature signature = model.get().signature("serving_default");
- Assert.assertEquals("Has skipped outputs",
+ Assert.assertEquals("Should have no skipped outputs",
0, model.get().signature("serving_default").skippedOutputs().size());
ImportedMlFunction output = signature.outputFunction("y", "y");
diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/SoftmaxImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/SoftmaxImportTestCase.java
new file mode 100644
index 00000000000..525f915b252
--- /dev/null
+++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/SoftmaxImportTestCase.java
@@ -0,0 +1,29 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.rankingexpression.importer.tensorflow;
+
+import ai.vespa.rankingexpression.importer.ImportedModel;
+import ai.vespa.rankingexpression.importer.configmodelview.ImportedMlFunction;
+import org.junit.Assert;
+import org.junit.Test;
+
+
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * @author lesters
+ */
+public class SoftmaxImportTestCase {
+
+ @Test
+ public void testSoftmaxImport() {
+ TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/softmax/saved", 1, 5);
+ ImportedModel.Signature signature = model.get().signature("serving_default");
+ Assert.assertEquals("Should have no skipped outputs",
+ 0, model.get().signature("serving_default").skippedOutputs().size());
+
+ ImportedMlFunction output = signature.outputFunction("y", "y");
+ assertNotNull(output);
+ model.assertEqualResult("input", "output");
+ }
+
+}
diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java
index 4ff0c96d369..9d2f8cf0692 100644
--- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java
+++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java
@@ -33,14 +33,20 @@ public class TestableTensorFlowModel {
private ImportedModel model;
// Sizes of the input vector
- private final int d0Size = 1;
- private final int d1Size = 784;
+ private int d0Size = 1;
+ private int d1Size = 784;
public TestableTensorFlowModel(String modelName, String modelDir) {
tensorFlowModel = SavedModelBundle.load(modelDir, "serve");
model = new TensorFlowImporter().importModel(modelName, modelDir, tensorFlowModel);
}
+ public TestableTensorFlowModel(String modelName, String modelDir, int d0Size, int d1Size) {
+ this(modelName, modelDir);
+ this.d0Size = d0Size;
+ this.d1Size = d1Size;
+ }
+
public ImportedModel get() { return model; }
/** Compare that summing the tensors produce the same result to within some tolerance delta */
diff --git a/model-integration/src/test/models/tensorflow/softmax/saved/saved_model.pbtxt b/model-integration/src/test/models/tensorflow/softmax/saved/saved_model.pbtxt
new file mode 100644
index 00000000000..11435ce3fa1
--- /dev/null
+++ b/model-integration/src/test/models/tensorflow/softmax/saved/saved_model.pbtxt
@@ -0,0 +1,1999 @@
+saved_model_schema_version: 1
+meta_graphs {
+ meta_info_def {
+ stripped_op_list {
+ op {
+ name: "Add"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_BFLOAT16
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_STRING
+ }
+ }
+ }
+ }
+ op {
+ name: "Assign"
+ input_arg {
+ name: "ref"
+ type_attr: "T"
+ is_ref: true
+ }
+ input_arg {
+ name: "value"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output_ref"
+ type_attr: "T"
+ is_ref: true
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "validate_shape"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ attr {
+ name: "use_locking"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ allows_uninitialized_input: true
+ }
+ op {
+ name: "Const"
+ output_arg {
+ name: "output"
+ type_attr: "dtype"
+ }
+ attr {
+ name: "value"
+ type: "tensor"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ }
+ op {
+ name: "Identity"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ }
+ op {
+ name: "MatMul"
+ input_arg {
+ name: "a"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "b"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "product"
+ type_attr: "T"
+ }
+ attr {
+ name: "transpose_a"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "transpose_b"
+ type: "bool"
+ default_value {
+ b: false
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_BFLOAT16
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "MergeV2Checkpoints"
+ input_arg {
+ name: "checkpoint_prefixes"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "destination_prefix"
+ type: DT_STRING
+ }
+ attr {
+ name: "delete_old_dirs"
+ type: "bool"
+ default_value {
+ b: true
+ }
+ }
+ is_stateful: true
+ }
+ op {
+ name: "Mul"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_BFLOAT16
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ is_commutative: true
+ }
+ op {
+ name: "NoOp"
+ }
+ op {
+ name: "Pack"
+ input_arg {
+ name: "values"
+ type_attr: "T"
+ number_attr: "N"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "N"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "T"
+ type: "type"
+ }
+ attr {
+ name: "axis"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ }
+ }
+ op {
+ name: "Placeholder"
+ output_arg {
+ name: "output"
+ type_attr: "dtype"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ attr {
+ name: "shape"
+ type: "shape"
+ default_value {
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ op {
+ name: "RandomUniform"
+ input_arg {
+ name: "shape"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "dtype"
+ }
+ attr {
+ name: "seed"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ }
+ attr {
+ name: "seed2"
+ type: "int"
+ default_value {
+ i: 0
+ }
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_BFLOAT16
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+ is_stateful: true
+ }
+ op {
+ name: "Relu"
+ input_arg {
+ name: "features"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "activations"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_INT64
+ type: DT_BFLOAT16
+ type: DT_UINT16
+ type: DT_HALF
+ type: DT_UINT32
+ type: DT_UINT64
+ type: DT_QINT8
+ }
+ }
+ }
+ }
+ op {
+ name: "RestoreV2"
+ input_arg {
+ name: "prefix"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensor_names"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shape_and_slices"
+ type: DT_STRING
+ }
+ output_arg {
+ name: "tensors"
+ type_list_attr: "dtypes"
+ }
+ attr {
+ name: "dtypes"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ is_stateful: true
+ }
+ op {
+ name: "SaveV2"
+ input_arg {
+ name: "prefix"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensor_names"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shape_and_slices"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "tensors"
+ type_list_attr: "dtypes"
+ }
+ attr {
+ name: "dtypes"
+ type: "list(type)"
+ has_minimum: true
+ minimum: 1
+ }
+ is_stateful: true
+ }
+ op {
+ name: "ShardedFilename"
+ input_arg {
+ name: "basename"
+ type: DT_STRING
+ }
+ input_arg {
+ name: "shard"
+ type: DT_INT32
+ }
+ input_arg {
+ name: "num_shards"
+ type: DT_INT32
+ }
+ output_arg {
+ name: "filename"
+ type: DT_STRING
+ }
+ }
+ op {
+ name: "Softmax"
+ input_arg {
+ name: "logits"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "softmax"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_HALF
+ type: DT_BFLOAT16
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ }
+ }
+ }
+ }
+ op {
+ name: "StringJoin"
+ input_arg {
+ name: "inputs"
+ type: DT_STRING
+ number_attr: "N"
+ }
+ output_arg {
+ name: "output"
+ type: DT_STRING
+ }
+ attr {
+ name: "N"
+ type: "int"
+ has_minimum: true
+ minimum: 1
+ }
+ attr {
+ name: "separator"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ }
+ op {
+ name: "Sub"
+ input_arg {
+ name: "x"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "y"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "z"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_BFLOAT16
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_UINT8
+ type: DT_INT8
+ type: DT_UINT16
+ type: DT_INT16
+ type: DT_INT32
+ type: DT_INT64
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ }
+ }
+ }
+ }
+ op {
+ name: "VariableV2"
+ output_arg {
+ name: "ref"
+ type_attr: "dtype"
+ is_ref: true
+ }
+ attr {
+ name: "shape"
+ type: "shape"
+ }
+ attr {
+ name: "dtype"
+ type: "type"
+ }
+ attr {
+ name: "container"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ attr {
+ name: "shared_name"
+ type: "string"
+ default_value {
+ s: ""
+ }
+ }
+ is_stateful: true
+ }
+ }
+ tags: "serve"
+ tensorflow_version: "1.12.0"
+ tensorflow_git_version: "v1.12.0-rc2-3-ga6d8ffae09"
+ }
+ graph_def {
+ node {
+ name: "input"
+ op: "Placeholder"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 5
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 5
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform/shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ tensor_content: "\005\000\000\000\003\000\000\000"
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform/min"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform/max"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 1.0
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform/RandomUniform"
+ op: "RandomUniform"
+ input: "random_uniform/shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 5
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "random_uniform/sub"
+ op: "Sub"
+ input: "random_uniform/max"
+ input: "random_uniform/min"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform/mul"
+ op: "Mul"
+ input: "random_uniform/RandomUniform"
+ input: "random_uniform/sub"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 5
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform"
+ op: "Add"
+ input: "random_uniform/mul"
+ input: "random_uniform/min"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 5
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "weights"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 5
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 5
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "weights/Assign"
+ op: "Assign"
+ input: "weights"
+ input: "random_uniform"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 5
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "weights/read"
+ op: "Identity"
+ input: "weights"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 5
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform_1/shape"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ dim {
+ size: 1
+ }
+ }
+ int_val: 3
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform_1/min"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform_1/max"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 1.0
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform_1/RandomUniform"
+ op: "RandomUniform"
+ input: "random_uniform_1/shape"
+ attr {
+ key: "T"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "seed"
+ value {
+ i: 0
+ }
+ }
+ attr {
+ key: "seed2"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "random_uniform_1/sub"
+ op: "Sub"
+ input: "random_uniform_1/max"
+ input: "random_uniform_1/min"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform_1/mul"
+ op: "Mul"
+ input: "random_uniform_1/RandomUniform"
+ input: "random_uniform_1/sub"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "random_uniform_1"
+ op: "Add"
+ input: "random_uniform_1/mul"
+ input: "random_uniform_1/min"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "bias"
+ op: "VariableV2"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "container"
+ value {
+ s: ""
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "shape"
+ value {
+ shape {
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ attr {
+ key: "shared_name"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "bias/Assign"
+ op: "Assign"
+ input: "bias"
+ input: "random_uniform_1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "bias/read"
+ op: "Identity"
+ input: "bias"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "MatMul"
+ op: "MatMul"
+ input: "input"
+ input: "weights/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "transpose_a"
+ value {
+ b: false
+ }
+ }
+ attr {
+ key: "transpose_b"
+ value {
+ b: false
+ }
+ }
+ }
+ node {
+ name: "add"
+ op: "Add"
+ input: "MatMul"
+ input: "bias/read"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "Relu"
+ op: "Relu"
+ input: "add"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "output"
+ op: "Softmax"
+ input: "Relu"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "init"
+ op: "NoOp"
+ input: "^bias/Assign"
+ input: "^weights/Assign"
+ }
+ node {
+ name: "save/Const"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ }
+ string_val: "model"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/StringJoin/inputs_1"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ }
+ string_val: "_temp_6341ee658682497a95c4fd82a2c87cc6/part"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/StringJoin"
+ op: "StringJoin"
+ input: "save/Const"
+ input: "save/StringJoin/inputs_1"
+ attr {
+ key: "N"
+ value {
+ i: 2
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "separator"
+ value {
+ s: ""
+ }
+ }
+ }
+ node {
+ name: "save/num_shards"
+ op: "Const"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 1
+ }
+ }
+ }
+ }
+ node {
+ name: "save/ShardedFilename/shard"
+ op: "Const"
+ device: "/device:CPU:0"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 0
+ }
+ }
+ }
+ }
+ node {
+ name: "save/ShardedFilename"
+ op: "ShardedFilename"
+ input: "save/StringJoin"
+ input: "save/ShardedFilename/shard"
+ input: "save/num_shards"
+ device: "/device:CPU:0"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2/tensor_names"
+ op: "Const"
+ device: "/device:CPU:0"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ string_val: "bias"
+ string_val: "weights"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2/shape_and_slices"
+ op: "Const"
+ device: "/device:CPU:0"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ string_val: ""
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/SaveV2"
+ op: "SaveV2"
+ input: "save/ShardedFilename"
+ input: "save/SaveV2/tensor_names"
+ input: "save/SaveV2/shape_and_slices"
+ input: "bias"
+ input: "weights"
+ device: "/device:CPU:0"
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/control_dependency"
+ op: "Identity"
+ input: "save/ShardedFilename"
+ input: "^save/SaveV2"
+ device: "/device:CPU:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@save/ShardedFilename"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/MergeV2Checkpoints/checkpoint_prefixes"
+ op: "Pack"
+ input: "save/ShardedFilename"
+ input: "^save/control_dependency"
+ device: "/device:CPU:0"
+ attr {
+ key: "N"
+ value {
+ i: 1
+ }
+ }
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 1
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "axis"
+ value {
+ i: 0
+ }
+ }
+ }
+ node {
+ name: "save/MergeV2Checkpoints"
+ op: "MergeV2Checkpoints"
+ input: "save/MergeV2Checkpoints/checkpoint_prefixes"
+ input: "save/Const"
+ device: "/device:CPU:0"
+ attr {
+ key: "delete_old_dirs"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/Identity"
+ op: "Identity"
+ input: "save/Const"
+ input: "^save/MergeV2Checkpoints"
+ input: "^save/control_dependency"
+ device: "/device:CPU:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ }
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2/tensor_names"
+ op: "Const"
+ device: "/device:CPU:0"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ string_val: "bias"
+ string_val: "weights"
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2/shape_and_slices"
+ op: "Const"
+ device: "/device:CPU:0"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 2
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtype"
+ value {
+ type: DT_STRING
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_STRING
+ tensor_shape {
+ dim {
+ size: 2
+ }
+ }
+ string_val: ""
+ string_val: ""
+ }
+ }
+ }
+ }
+ node {
+ name: "save/RestoreV2"
+ op: "RestoreV2"
+ input: "save/Const"
+ input: "save/RestoreV2/tensor_names"
+ input: "save/RestoreV2/shape_and_slices"
+ device: "/device:CPU:0"
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ unknown_rank: true
+ }
+ shape {
+ unknown_rank: true
+ }
+ }
+ }
+ }
+ attr {
+ key: "dtypes"
+ value {
+ list {
+ type: DT_FLOAT
+ type: DT_FLOAT
+ }
+ }
+ }
+ }
+ node {
+ name: "save/Assign"
+ op: "Assign"
+ input: "bias"
+ input: "save/RestoreV2"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@bias"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/Assign_1"
+ op: "Assign"
+ input: "weights"
+ input: "save/RestoreV2:1"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "_class"
+ value {
+ list {
+ s: "loc:@weights"
+ }
+ }
+ }
+ attr {
+ key: "_output_shapes"
+ value {
+ list {
+ shape {
+ dim {
+ size: 5
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ }
+ attr {
+ key: "use_locking"
+ value {
+ b: true
+ }
+ }
+ attr {
+ key: "validate_shape"
+ value {
+ b: true
+ }
+ }
+ }
+ node {
+ name: "save/restore_shard"
+ op: "NoOp"
+ input: "^save/Assign"
+ input: "^save/Assign_1"
+ }
+ node {
+ name: "save/restore_all"
+ op: "NoOp"
+ input: "^save/restore_shard"
+ }
+ versions {
+ producer: 27
+ }
+ }
+ saver_def {
+ filename_tensor_name: "save/Const:0"
+ save_tensor_name: "save/Identity:0"
+ restore_op_name: "save/restore_all"
+ max_to_keep: 5
+ sharded: true
+ keep_checkpoint_every_n_hours: 10000.0
+ version: V2
+ }
+ collection_def {
+ key: "trainable_variables"
+ value {
+ bytes_list {
+ value: "\n\tweights:0\022\016weights/Assign\032\016weights/read:02\020random_uniform:08\001"
+ value: "\n\006bias:0\022\013bias/Assign\032\013bias/read:02\022random_uniform_1:08\001"
+ }
+ }
+ }
+ collection_def {
+ key: "variables"
+ value {
+ bytes_list {
+ value: "\n\tweights:0\022\016weights/Assign\032\016weights/read:02\020random_uniform:08\001"
+ value: "\n\006bias:0\022\013bias/Assign\032\013bias/read:02\022random_uniform_1:08\001"
+ }
+ }
+ }
+ signature_def {
+ key: "serving_default"
+ value {
+ inputs {
+ key: "x"
+ value {
+ name: "input:0"
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 5
+ }
+ }
+ }
+ }
+ outputs {
+ key: "y"
+ value {
+ name: "output:0"
+ dtype: DT_FLOAT
+ tensor_shape {
+ dim {
+ size: -1
+ }
+ dim {
+ size: 3
+ }
+ }
+ }
+ }
+ method_name: "tensorflow/serving/predict"
+ }
+ }
+}
diff --git a/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.data-00000-of-00001 b/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.data-00000-of-00001
new file mode 100644
index 00000000000..a9edaf376d0
--- /dev/null
+++ b/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.data-00000-of-00001
Binary files differ
diff --git a/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.index b/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.index
new file mode 100644
index 00000000000..0ae49491ce6
--- /dev/null
+++ b/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.index
Binary files differ
diff --git a/model-integration/src/test/models/tensorflow/softmax/softmax.py b/model-integration/src/test/models/tensorflow/softmax/softmax.py
new file mode 100644
index 00000000000..aab9956f914
--- /dev/null
+++ b/model-integration/src/test/models/tensorflow/softmax/softmax.py
@@ -0,0 +1,29 @@
+# Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import numpy as np
+import tensorflow as tf
+
+# Creates simple random neural network that has softmax on output. No training.
+
+n_inputs = 5
+n_outputs = 3
+
+input = tf.placeholder(tf.float32, shape=(None, n_inputs), name="input")
+W = tf.Variable(tf.random.uniform([n_inputs, n_outputs]), name="weights")
+b = tf.Variable(tf.random.uniform([n_outputs]), name="bias")
+Z = tf.matmul(input, W) + b
+hidden_layer = tf.nn.relu(Z)
+output_layer = tf.nn.softmax(hidden_layer, name="output")
+
+init = tf.global_variables_initializer()
+
+with tf.Session() as sess:
+ init.run()
+ export_path = "saved"
+ builder = tf.saved_model.builder.SavedModelBuilder(export_path)
+ signature = tf.saved_model.signature_def_utils.predict_signature_def(inputs = {'x':input}, outputs = {'y':output_layer})
+ builder.add_meta_graph_and_variables(sess,
+ [tf.saved_model.tag_constants.SERVING],
+ signature_def_map={'serving_default':signature})
+ builder.save(as_text=True)
+
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
index 73c86fc8de1..59873d7956e 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
@@ -116,16 +116,18 @@ public class ConfigServerApiImpl implements ConfigServerApi {
if (configServers.size() == 1) break;
// Failure to communicate with a config server is not abnormal during upgrades
- if (e.getMessage().contains("(Connection refused)")) {
- logger.info("Connection refused to " + configServer + " (upgrading?), will try next");
+ if (ConnectionException.isKnownConnectionException(e)) {
+ logger.info("Failed to connect to " + configServer + " (upgrading?), will try next: " + e.getMessage());
} else {
logger.warning("Failed to communicate with " + configServer + ", will try next: " + e.getMessage());
}
}
}
- throw HttpException.handleException(
- "All requests against the config servers (" + configServers + ") failed, last as follows:", lastException);
+ String prefix = configServers.size() == 1 ?
+ "Request against " + configServers.get(0) + " failed: " :
+ "All requests against the config servers (" + configServers + ") failed, last as follows: ";
+ throw ConnectionException.handleException(prefix, lastException);
}
@Override
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConnectionException.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConnectionException.java
new file mode 100644
index 00000000000..7e860bfb66b
--- /dev/null
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConnectionException.java
@@ -0,0 +1,43 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.configserver;
+
+import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
+import org.apache.http.NoHttpResponseException;
+
+import java.io.EOFException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+
+/**
+ * @author freva
+ */
+@SuppressWarnings("serial")
+public class ConnectionException extends ConvergenceException {
+
+ private ConnectionException(String message) {
+ super(message);
+ }
+
+ /**
+ * Returns {@link ConnectionException} if the given Throwable is of a known and well understood error or
+ * a RuntimeException with the given exception as cause otherwise.
+ */
+ public static RuntimeException handleException(String prefix, Throwable t) {
+ if (isKnownConnectionException(t))
+ return new ConnectionException(prefix + t.getMessage());
+
+ return new RuntimeException(prefix, t);
+ }
+
+ static boolean isKnownConnectionException(Throwable t) {
+ for (; t != null; t = t.getCause()) {
+ if (t instanceof SocketException ||
+ t instanceof SocketTimeoutException ||
+ t instanceof NoHttpResponseException ||
+ t instanceof EOFException)
+ return true;
+ }
+
+ return false;
+ }
+}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java
index 3825107bfa6..a9493d4606e 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java
@@ -2,12 +2,8 @@
package com.yahoo.vespa.hosted.node.admin.configserver;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
-import org.apache.http.NoHttpResponseException;
import javax.ws.rs.core.Response;
-import java.io.EOFException;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
/**
* @author hakonhall
@@ -66,22 +62,6 @@ public class HttpException extends ConvergenceException {
throw new HttpException(status, message, true);
}
- /**
- * Returns {@link HttpException} if the given Throwable is of a known and well understood error or
- * a RuntimeException with the given exception as cause otherwise.
- */
- public static RuntimeException handleException(String prefix, Throwable t) {
- for (; t != null; t = t.getCause()) {
- if (t instanceof SocketException ||
- t instanceof SocketTimeoutException ||
- t instanceof NoHttpResponseException ||
- t instanceof EOFException)
- return new HttpException(prefix + t.getMessage());
- }
-
- return new RuntimeException(prefix, t);
- }
-
public static class NotFoundException extends HttpException {
public NotFoundException(String message) {
super(Response.Status.NOT_FOUND, message, false);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java
index bb16e2bae63..22633f67463 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java
@@ -19,19 +19,19 @@ public class NodeMembership {
this.retired = retired;
}
- public String getClusterType() {
+ public String clusterType() {
return clusterType;
}
- public String getClusterId() {
+ public String clusterId() {
return clusterId;
}
- public String getGroup() {
+ public String group() {
return group;
}
- public int getIndex() {
+ public int index() {
return index;
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java
index c1900316bb9..c41e050d534 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java
@@ -17,15 +17,15 @@ public class NodeOwner {
this.instance = instance;
}
- public String getTenant() {
+ public String tenant() {
return tenant;
}
- public String getApplication() {
+ public String application() {
return application;
}
- public String getInstance() {
+ public String instance() {
return instance;
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
index 52d6f16dd78..6fb6d44bd6f 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
@@ -17,7 +17,7 @@ import java.util.Set;
public class NodeSpec {
private final String hostname;
private final NodeState state;
- private final NodeType nodeType;
+ private final NodeType type;
private final String flavor;
private final String canonicalFlavor;
@@ -25,7 +25,7 @@ public class NodeSpec {
private final Optional<DockerImage> currentDockerImage;
private final Optional<Version> wantedVespaVersion;
- private final Optional<Version> vespaVersion;
+ private final Optional<Version> currentVespaVersion;
private final Optional<Version> wantedOsVersion;
private final Optional<Version> currentOsVersion;
@@ -46,9 +46,9 @@ public class NodeSpec {
private final Optional<NodeOwner> owner;
private final Optional<NodeMembership> membership;
- private final double minCpuCores;
- private final double minMainMemoryAvailableGb;
- private final double minDiskAvailableGb;
+ private final double vcpus;
+ private final double memoryGb;
+ private final double diskGb;
private final boolean fastDisk;
private final double bandwidth;
@@ -64,11 +64,11 @@ public class NodeSpec {
Optional<DockerImage> wantedDockerImage,
Optional<DockerImage> currentDockerImage,
NodeState state,
- NodeType nodeType,
+ NodeType type,
String flavor,
String canonicalFlavor,
Optional<Version> wantedVespaVersion,
- Optional<Version> vespaVersion,
+ Optional<Version> currentVespaVersion,
Optional<Version> wantedOsVersion,
Optional<Version> currentOsVersion,
Optional<Boolean> allowedToBeDown,
@@ -82,9 +82,9 @@ public class NodeSpec {
Optional<Instant> wantedFirmwareCheck,
Optional<Instant> currentFirmwareCheck,
Optional<String> modelName,
- double minCpuCores,
- double minMainMemoryAvailableGb,
- double minDiskAvailableGb,
+ double vcpus,
+ double memoryGb,
+ double diskGb,
boolean fastDisk,
double bandwidth,
Set<String> ipAddresses,
@@ -102,12 +102,12 @@ public class NodeSpec {
this.wantedDockerImage = Objects.requireNonNull(wantedDockerImage);
this.currentDockerImage = Objects.requireNonNull(currentDockerImage);
this.state = Objects.requireNonNull(state);
- this.nodeType = Objects.requireNonNull(nodeType);
+ this.type = Objects.requireNonNull(type);
this.flavor = Objects.requireNonNull(flavor);
this.canonicalFlavor = canonicalFlavor;
this.modelName = modelName;
this.wantedVespaVersion = Objects.requireNonNull(wantedVespaVersion);
- this.vespaVersion = Objects.requireNonNull(vespaVersion);
+ this.currentVespaVersion = Objects.requireNonNull(currentVespaVersion);
this.wantedOsVersion = Objects.requireNonNull(wantedOsVersion);
this.currentOsVersion = Objects.requireNonNull(currentOsVersion);
this.allowedToBeDown = Objects.requireNonNull(allowedToBeDown);
@@ -120,9 +120,9 @@ public class NodeSpec {
this.currentRebootGeneration = currentRebootGeneration;
this.wantedFirmwareCheck = Objects.requireNonNull(wantedFirmwareCheck);
this.currentFirmwareCheck = Objects.requireNonNull(currentFirmwareCheck);
- this.minCpuCores = minCpuCores;
- this.minMainMemoryAvailableGb = minMainMemoryAvailableGb;
- this.minDiskAvailableGb = minDiskAvailableGb;
+ this.vcpus = vcpus;
+ this.memoryGb = memoryGb;
+ this.diskGb = diskGb;
this.fastDisk = fastDisk;
this.bandwidth = bandwidth;
this.ipAddresses = Objects.requireNonNull(ipAddresses);
@@ -131,125 +131,125 @@ public class NodeSpec {
this.parentHostname = Objects.requireNonNull(parentHostname);
}
- public String getHostname() {
+ public String hostname() {
return hostname;
}
- public NodeState getState() {
+ public NodeState state() {
return state;
}
- public NodeType getNodeType() {
- return nodeType;
+ public NodeType type() {
+ return type;
}
- public String getFlavor() {
+ public String flavor() {
return flavor;
}
- public String getCanonicalFlavor() {
+ public String canonicalFlavor() {
return canonicalFlavor;
}
- public Optional<DockerImage> getWantedDockerImage() {
+ public Optional<DockerImage> wantedDockerImage() {
return wantedDockerImage;
}
- public Optional<DockerImage> getCurrentDockerImage() {
+ public Optional<DockerImage> currentDockerImage() {
return currentDockerImage;
}
- public Optional<Version> getWantedVespaVersion() {
+ public Optional<Version> wantedVespaVersion() {
return wantedVespaVersion;
}
- public Optional<Version> getVespaVersion() {
- return vespaVersion;
+ public Optional<Version> currentVespaVersion() {
+ return currentVespaVersion;
}
- public Optional<Version> getCurrentOsVersion() {
+ public Optional<Version> currentOsVersion() {
return currentOsVersion;
}
- public Optional<Version> getWantedOsVersion() {
+ public Optional<Version> wantedOsVersion() {
return wantedOsVersion;
}
- public Optional<Long> getWantedRestartGeneration() {
+ public Optional<Long> wantedRestartGeneration() {
return wantedRestartGeneration;
}
- public Optional<Long> getCurrentRestartGeneration() {
+ public Optional<Long> currentRestartGeneration() {
return currentRestartGeneration;
}
- public long getWantedRebootGeneration() {
+ public long wantedRebootGeneration() {
return wantedRebootGeneration;
}
- public long getCurrentRebootGeneration() {
+ public long currentRebootGeneration() {
return currentRebootGeneration;
}
- public Optional<Instant> getWantedFirmwareCheck() {
+ public Optional<Instant> wantedFirmwareCheck() {
return wantedFirmwareCheck;
}
- public Optional<Instant> getCurrentFirmwareCheck() {
+ public Optional<Instant> currentFirmwareCheck() {
return currentFirmwareCheck;
}
- public Optional<String> getModelName() {
+ public Optional<String> modelName() {
return modelName;
}
- public Optional<Boolean> getAllowedToBeDown() {
+ public Optional<Boolean> allowedToBeDown() {
return allowedToBeDown;
}
- public Optional<Boolean> getWantToDeprovision() {
+ public Optional<Boolean> wantToDeprovision() {
return wantToDeprovision;
}
- public Optional<NodeOwner> getOwner() {
+ public Optional<NodeOwner> owner() {
return owner;
}
- public Optional<NodeMembership> getMembership() {
+ public Optional<NodeMembership> membership() {
return membership;
}
- public double getMinCpuCores() {
- return minCpuCores;
+ public double vcpus() {
+ return vcpus;
}
- public double getMinMainMemoryAvailableGb() {
- return minMainMemoryAvailableGb;
+ public double memoryGb() {
+ return memoryGb;
}
- public double getMinDiskAvailableGb() {
- return minDiskAvailableGb;
+ public double diskGb() {
+ return diskGb;
}
public boolean isFastDisk() {
return fastDisk;
}
- public double getBandwidth() {
+ public double bandwidth() {
return bandwidth;
}
- public Set<String> getIpAddresses() {
+ public Set<String> ipAddresses() {
return ipAddresses;
}
- public Set<String> getAdditionalIpAddresses() {
+ public Set<String> additionalIpAddresses() {
return additionalIpAddresses;
}
- public NodeReports getReports() { return reports; }
+ public NodeReports reports() { return reports; }
- public Optional<String> getParentHostname() {
+ public Optional<String> parentHostname() {
return parentHostname;
}
@@ -264,11 +264,11 @@ public class NodeSpec {
Objects.equals(wantedDockerImage, that.wantedDockerImage) &&
Objects.equals(currentDockerImage, that.currentDockerImage) &&
Objects.equals(state, that.state) &&
- Objects.equals(nodeType, that.nodeType) &&
+ Objects.equals(type, that.type) &&
Objects.equals(flavor, that.flavor) &&
Objects.equals(canonicalFlavor, that.canonicalFlavor) &&
Objects.equals(wantedVespaVersion, that.wantedVespaVersion) &&
- Objects.equals(vespaVersion, that.vespaVersion) &&
+ Objects.equals(currentVespaVersion, that.currentVespaVersion) &&
Objects.equals(wantedOsVersion, that.wantedOsVersion) &&
Objects.equals(currentOsVersion, that.currentOsVersion) &&
Objects.equals(allowedToBeDown, that.allowedToBeDown) &&
@@ -281,9 +281,9 @@ public class NodeSpec {
Objects.equals(currentRebootGeneration, that.currentRebootGeneration) &&
Objects.equals(wantedFirmwareCheck, that.wantedFirmwareCheck) &&
Objects.equals(currentFirmwareCheck, that.currentFirmwareCheck) &&
- Objects.equals(minCpuCores, that.minCpuCores) &&
- Objects.equals(minMainMemoryAvailableGb, that.minMainMemoryAvailableGb) &&
- Objects.equals(minDiskAvailableGb, that.minDiskAvailableGb) &&
+ Objects.equals(vcpus, that.vcpus) &&
+ Objects.equals(memoryGb, that.memoryGb) &&
+ Objects.equals(diskGb, that.diskGb) &&
Objects.equals(fastDisk, that.fastDisk) &&
Objects.equals(bandwidth, that.bandwidth) &&
Objects.equals(ipAddresses, that.ipAddresses) &&
@@ -299,11 +299,11 @@ public class NodeSpec {
wantedDockerImage,
currentDockerImage,
state,
- nodeType,
+ type,
flavor,
canonicalFlavor,
wantedVespaVersion,
- vespaVersion,
+ currentVespaVersion,
wantedOsVersion,
currentOsVersion,
allowedToBeDown,
@@ -316,9 +316,9 @@ public class NodeSpec {
currentRebootGeneration,
wantedFirmwareCheck,
currentFirmwareCheck,
- minCpuCores,
- minMainMemoryAvailableGb,
- minDiskAvailableGb,
+ vcpus,
+ memoryGb,
+ diskGb,
fastDisk,
bandwidth,
ipAddresses,
@@ -334,26 +334,26 @@ public class NodeSpec {
+ " wantedDockerImage=" + wantedDockerImage
+ " currentDockerImage=" + currentDockerImage
+ " state=" + state
- + " nodeType=" + nodeType
+ + " type=" + type
+ " flavor=" + flavor
+ " canonicalFlavor=" + canonicalFlavor
+ " wantedVespaVersion=" + wantedVespaVersion
- + " vespaVersion=" + vespaVersion
+ + " currentVespaVersion=" + currentVespaVersion
+ " wantedOsVersion=" + wantedOsVersion
+ " currentOsVersion=" + currentOsVersion
+ " allowedToBeDown=" + allowedToBeDown
+ " wantToDeprovision=" + wantToDeprovision
+ " owner=" + owner
+ " membership=" + membership
- + " minCpuCores=" + minCpuCores
+ + " vcpus=" + vcpus
+ " wantedRestartGeneration=" + wantedRestartGeneration
+ " currentRestartGeneration=" + currentRestartGeneration
+ " wantedRebootGeneration=" + wantedRebootGeneration
+ " currentRebootGeneration=" + currentRebootGeneration
+ " wantedFirmwareCheck=" + wantedFirmwareCheck
+ " currentFirmwareCheck=" + currentFirmwareCheck
- + " minMainMemoryAvailableGb=" + minMainMemoryAvailableGb
- + " minDiskAvailableGb=" + minDiskAvailableGb
+ + " memoryGb=" + memoryGb
+ + " diskGb=" + diskGb
+ " fastDisk=" + fastDisk
+ " bandwidth=" + bandwidth
+ " ipAddresses=" + ipAddresses
@@ -365,14 +365,14 @@ public class NodeSpec {
public static class Builder {
private String hostname;
- private Optional<DockerImage> wantedDockerImage = Optional.empty();
- private Optional<DockerImage> currentDockerImage = Optional.empty();
private NodeState state;
- private NodeType nodeType;
+ private NodeType type;
private String flavor;
private String canonicalFlavor;
+ private Optional<DockerImage> wantedDockerImage = Optional.empty();
+ private Optional<DockerImage> currentDockerImage = Optional.empty();
private Optional<Version> wantedVespaVersion = Optional.empty();
- private Optional<Version> vespaVersion = Optional.empty();
+ private Optional<Version> currentVespaVersion = Optional.empty();
private Optional<Version> wantedOsVersion = Optional.empty();
private Optional<Version> currentOsVersion = Optional.empty();
private Optional<Boolean> allowedToBeDown = Optional.empty();
@@ -386,10 +386,10 @@ public class NodeSpec {
private Optional<Instant> wantedFirmwareCheck = Optional.empty();
private Optional<Instant> currentFirmwareCheck = Optional.empty();
private Optional<String> modelName = Optional.empty();
- private double minCpuCores;
- private double minMainMemoryAvailableGb;
- private double minDiskAvailableGb;
- private boolean fastDisk = false;
+ private double vcpus;
+ private double memoryGb;
+ private double diskGb;
+ private boolean fastDisk;
private double bandwidth;
private Set<String> ipAddresses = Set.of();
private Set<String> additionalIpAddresses = Set.of();
@@ -401,12 +401,12 @@ public class NodeSpec {
public Builder(NodeSpec node) {
hostname(node.hostname);
state(node.state);
- nodeType(node.nodeType);
+ type(node.type);
flavor(node.flavor);
canonicalFlavor(node.canonicalFlavor);
- minCpuCores(node.minCpuCores);
- minMainMemoryAvailableGb(node.minMainMemoryAvailableGb);
- minDiskAvailableGb(node.minDiskAvailableGb);
+ vcpus(node.vcpus);
+ memoryGb(node.memoryGb);
+ diskGb(node.diskGb);
fastDisk(node.fastDisk);
bandwidth(node.bandwidth);
ipAddresses(node.ipAddresses);
@@ -418,7 +418,7 @@ public class NodeSpec {
node.wantedDockerImage.ifPresent(this::wantedDockerImage);
node.currentDockerImage.ifPresent(this::currentDockerImage);
node.wantedVespaVersion.ifPresent(this::wantedVespaVersion);
- node.vespaVersion.ifPresent(this::vespaVersion);
+ node.currentVespaVersion.ifPresent(this::currentVespaVersion);
node.wantedOsVersion.ifPresent(this::wantedOsVersion);
node.currentOsVersion.ifPresent(this::currentOsVersion);
node.allowedToBeDown.ifPresent(this::allowedToBeDown);
@@ -452,8 +452,8 @@ public class NodeSpec {
return this;
}
- public Builder nodeType(NodeType nodeType) {
- this.nodeType = nodeType;
+ public Builder type(NodeType nodeType) {
+ this.type = nodeType;
return this;
}
@@ -472,8 +472,8 @@ public class NodeSpec {
return this;
}
- public Builder vespaVersion(Version vespaVersion) {
- this.vespaVersion = Optional.of(vespaVersion);
+ public Builder currentVespaVersion(Version vespaVersion) {
+ this.currentVespaVersion = Optional.of(vespaVersion);
return this;
}
@@ -537,18 +537,18 @@ public class NodeSpec {
return this;
}
- public Builder minCpuCores(double minCpuCores) {
- this.minCpuCores = minCpuCores;
+ public Builder vcpus(double minCpuCores) {
+ this.vcpus = minCpuCores;
return this;
}
- public Builder minMainMemoryAvailableGb(double minMainMemoryAvailableGb) {
- this.minMainMemoryAvailableGb = minMainMemoryAvailableGb;
+ public Builder memoryGb(double minMainMemoryAvailableGb) {
+ this.memoryGb = minMainMemoryAvailableGb;
return this;
}
- public Builder minDiskAvailableGb(double minDiskAvailableGb) {
- this.minDiskAvailableGb = minDiskAvailableGb;
+ public Builder diskGb(double minDiskAvailableGb) {
+ this.diskGb = minDiskAvailableGb;
return this;
}
@@ -603,127 +603,127 @@ public class NodeSpec {
return this;
}
- public String getHostname() {
+ public String hostname() {
return hostname;
}
- public Optional<DockerImage> getWantedDockerImage() {
+ public Optional<DockerImage> wantedDockerImage() {
return wantedDockerImage;
}
- public Optional<DockerImage> getCurrentDockerImage() {
+ public Optional<DockerImage> currentDockerImage() {
return currentDockerImage;
}
- public NodeState getState() {
+ public NodeState state() {
return state;
}
- public NodeType getNodeType() {
- return nodeType;
+ public NodeType type() {
+ return type;
}
- public String getFlavor() {
+ public String flavor() {
return flavor;
}
- public String getCanonicalFlavor() {
+ public String canonicalFlavor() {
return canonicalFlavor;
}
- public Optional<Version> getWantedVespaVersion() {
+ public Optional<Version> wantedVespaVersion() {
return wantedVespaVersion;
}
- public Optional<Version> getVespaVersion() {
- return vespaVersion;
+ public Optional<Version> currentVespaVersion() {
+ return currentVespaVersion;
}
- public Optional<Version> getWantedOsVersion() {
+ public Optional<Version> wantedOsVersion() {
return wantedOsVersion;
}
- public Optional<Version> getCurrentOsVersion() {
+ public Optional<Version> currentOsVersion() {
return currentOsVersion;
}
- public Optional<Boolean> getAllowedToBeDown() {
+ public Optional<Boolean> allowedToBeDown() {
return allowedToBeDown;
}
- public Optional<Boolean> getWantToDeprovision() {
+ public Optional<Boolean> wantToDeprovision() {
return wantToDeprovision;
}
- public Optional<NodeOwner> getOwner() {
+ public Optional<NodeOwner> owner() {
return owner;
}
- public Optional<NodeMembership> getMembership() {
+ public Optional<NodeMembership> membership() {
return membership;
}
- public Optional<Long> getWantedRestartGeneration() {
+ public Optional<Long> wantedRestartGeneration() {
return wantedRestartGeneration;
}
- public Optional<Long> getCurrentRestartGeneration() {
+ public Optional<Long> currentRestartGeneration() {
return currentRestartGeneration;
}
- public long getWantedRebootGeneration() {
+ public long wantedRebootGeneration() {
return wantedRebootGeneration;
}
- public long getCurrentRebootGeneration() {
+ public long currentRebootGeneration() {
return currentRebootGeneration;
}
- public double getMinCpuCores() {
- return minCpuCores;
+ public double vcpus() {
+ return vcpus;
}
- public double getMinMainMemoryAvailableGb() {
- return minMainMemoryAvailableGb;
+ public double memoryGb() {
+ return memoryGb;
}
- public double getMinDiskAvailableGb() {
- return minDiskAvailableGb;
+ public double diskGb() {
+ return diskGb;
}
public boolean isFastDisk() {
return fastDisk;
}
- public double getBandwidth() {
+ public double bandwidth() {
return bandwidth;
}
- public Set<String> getIpAddresses() {
+ public Set<String> ipAddresses() {
return ipAddresses;
}
- public Set<String> getAdditionalIpAddresses() {
+ public Set<String> additionalIpAddresses() {
return additionalIpAddresses;
}
- public NodeReports getReports() {
+ public NodeReports reports() {
return reports;
}
- public Optional<String> getParentHostname() {
+ public Optional<String> parentHostname() {
return parentHostname;
}
public NodeSpec build() {
- return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, state, nodeType,
+ return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, state, type,
flavor, canonicalFlavor,
- wantedVespaVersion, vespaVersion, wantedOsVersion, currentOsVersion, allowedToBeDown, wantToDeprovision,
+ wantedVespaVersion, currentVespaVersion, wantedOsVersion, currentOsVersion, allowedToBeDown, wantToDeprovision,
owner, membership,
wantedRestartGeneration, currentRestartGeneration,
wantedRebootGeneration, currentRebootGeneration,
wantedFirmwareCheck, currentFirmwareCheck, modelName,
- minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb,
+ vcpus, memoryGb, diskGb,
fastDisk, bandwidth, ipAddresses, additionalIpAddresses,
reports, parentHostname);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
index 6124e1bdc0e..353abd64778 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
@@ -2,7 +2,9 @@
package com.yahoo.vespa.hosted.node.admin.configserver.orchestrator;
import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi;
+import com.yahoo.vespa.hosted.node.admin.configserver.ConnectionException;
import com.yahoo.vespa.hosted.node.admin.configserver.HttpException;
+import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
import com.yahoo.vespa.orchestrator.restapi.HostApi;
import com.yahoo.vespa.orchestrator.restapi.HostSuspensionApi;
import com.yahoo.vespa.orchestrator.restapi.wire.BatchOperationResult;
@@ -41,6 +43,8 @@ public class OrchestratorImpl implements Orchestrator {
throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found");
} catch (HttpException e) {
throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString());
+ } catch (ConnectionException e) {
+ throw new ConvergenceException("Failed to suspend " + hostName + ": " + e.getMessage());
} catch (RuntimeException e) {
throw new RuntimeException("Got error on suspend", e);
}
@@ -60,6 +64,8 @@ public class OrchestratorImpl implements Orchestrator {
batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class);
} catch (HttpException e) {
throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString());
+ } catch (ConnectionException e) {
+ throw new ConvergenceException("Failed to batch suspend for " + parentHostName + ": " + e.getMessage());
} catch (RuntimeException e) {
throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e);
}
@@ -78,7 +84,9 @@ public class OrchestratorImpl implements Orchestrator {
} catch (HttpException.NotFoundException n) {
throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found");
} catch (HttpException e) {
- throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString());
+ throw new OrchestratorException("Failed to resume " + hostName + ": " + e.toString());
+ } catch (ConnectionException e) {
+ throw new ConvergenceException("Failed to resume " + hostName + ": " + e.getMessage());
} catch (RuntimeException e) {
throw new RuntimeException("Got error on resume", e);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java
index 2fe8d4b4792..e99a107cfe1 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.node.admin.configserver.state;
import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi;
+import com.yahoo.vespa.hosted.node.admin.configserver.ConnectionException;
import com.yahoo.vespa.hosted.node.admin.configserver.HttpException;
import com.yahoo.vespa.hosted.node.admin.configserver.state.bindings.HealthResponse;
@@ -20,7 +21,7 @@ public class StateImpl implements State {
try {
HealthResponse response = configServerApi.get("/state/v1/health", HealthResponse.class);
return HealthCode.fromString(response.status.code);
- } catch (HttpException e) {
+ } catch (ConnectionException | HttpException e) {
return HealthCode.DOWN;
}
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
index 954ba25895a..d8d6b4781c8 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
@@ -64,13 +64,13 @@ public class DockerOperationsImpl implements DockerOperations {
context.log(logger, "Creating container");
// IPv6 - Assume always valid
- Inet6Address ipV6Address = ipAddresses.getIPv6Address(context.node().getHostname()).orElseThrow(
- () -> new RuntimeException("Unable to find a valid IPv6 address for " + context.node().getHostname() +
+ Inet6Address ipV6Address = ipAddresses.getIPv6Address(context.node().hostname()).orElseThrow(
+ () -> new RuntimeException("Unable to find a valid IPv6 address for " + context.node().hostname() +
". Missing an AAAA DNS entry?"));
Docker.CreateContainerCommand command = docker.createContainerCommand(
- context.node().getWantedDockerImage().get(), context.containerName())
- .withHostName(context.node().getHostname())
+ context.node().wantedDockerImage().get(), context.containerName())
+ .withHostName(context.node().hostname())
.withResources(containerResources)
.withManagedBy(MANAGER_NAME)
.withUlimit("nofile", 262_144, 262_144)
@@ -88,7 +88,7 @@ public class DockerOperationsImpl implements DockerOperations {
.withAddCapability("SYS_ADMIN") // Needed for perf
.withAddCapability("SYS_NICE"); // Needed for set_mempolicy to work
- if (context.node().getMembership().map(NodeMembership::getClusterType).map("content"::equalsIgnoreCase).orElse(false)) {
+ if (context.node().membership().map(NodeMembership::clusterType).map("content"::equalsIgnoreCase).orElse(false)) {
command.withSecurityOpts("seccomp=unconfined");
}
@@ -101,23 +101,24 @@ public class DockerOperationsImpl implements DockerOperations {
command.withIpAddress(ipV6Local);
// IPv4 - Only present for some containers
- Optional<InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().getHostname())
+ Optional<InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname())
.map(ipV4Address -> {
InetAddress ipV4Prefix = InetAddresses.forString(IPV4_NPT_PREFIX);
return IPAddresses.prefixTranslate(ipV4Address, ipV4Prefix, 2);
});
ipV4Local.ifPresent(command::withIpAddress);
- addEtcHosts(containerData, context.node().getHostname(), ipV4Local, ipV6Local);
+ addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local);
}
addMounts(context, command);
// TODO: Enforce disk constraints
- long minMainMemoryAvailableMb = (long) (context.node().getMinMainMemoryAvailableGb() * 1024);
+ long minMainMemoryAvailableMb = (long) (context.node().memoryGb() * 1024);
if (minMainMemoryAvailableMb > 0) {
// VESPA_TOTAL_MEMORY_MB is used to make any jdisc container think the machine
// only has this much physical memory (overrides total memory reported by `free -m`).
+ // TODO: Remove after all tenants are running > 7.67
command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
index 4972a306377..26e4dcda88e 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
@@ -158,23 +158,23 @@ public class StorageMaintainer {
private Map<String, Object> generateTags(NodeAgentContext context) {
Map<String, String> tags = new LinkedHashMap<>();
tags.put("namespace", "Vespa");
- tags.put("role", nodeTypeToRole(context.node().getNodeType()));
+ tags.put("role", nodeTypeToRole(context.node().type()));
tags.put("zone", context.zone().getId().value());
- context.node().getVespaVersion().ifPresent(version -> tags.put("vespaVersion", version.toFullString()));
+ context.node().currentVespaVersion().ifPresent(version -> tags.put("vespaVersion", version.toFullString()));
if (! isConfigserverLike(context.nodeType())) {
- tags.put("state", context.node().getState().toString());
- context.node().getParentHostname().ifPresent(parent -> tags.put("parentHostname", parent));
- context.node().getOwner().ifPresent(owner -> {
- tags.put("tenantName", owner.getTenant());
- tags.put("app", owner.getApplication() + "." + owner.getInstance());
- tags.put("applicationName", owner.getApplication());
- tags.put("instanceName", owner.getInstance());
- tags.put("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance());
+ tags.put("state", context.node().state().toString());
+ context.node().parentHostname().ifPresent(parent -> tags.put("parentHostname", parent));
+ context.node().owner().ifPresent(owner -> {
+ tags.put("tenantName", owner.tenant());
+ tags.put("app", owner.application() + "." + owner.instance());
+ tags.put("applicationName", owner.application());
+ tags.put("instanceName", owner.instance());
+ tags.put("applicationId", owner.tenant() + "." + owner.application() + "." + owner.instance());
});
- context.node().getMembership().ifPresent(membership -> {
- tags.put("clustertype", membership.getClusterType());
- tags.put("clusterid", membership.getClusterId());
+ context.node().membership().ifPresent(membership -> {
+ tags.put("clustertype", membership.clusterType());
+ tags.put("clusterid", membership.clusterId());
});
}
@@ -260,20 +260,20 @@ public class StorageMaintainer {
private Map<String, Object> getCoredumpNodeAttributes(NodeAgentContext context, Optional<Container> container) {
Map<String, String> attributes = new HashMap<>();
- attributes.put("hostname", context.node().getHostname());
+ attributes.put("hostname", context.node().hostname());
attributes.put("region", context.zone().getRegionName().value());
attributes.put("environment", context.zone().getEnvironment().value());
- attributes.put("flavor", context.node().getFlavor());
+ attributes.put("flavor", context.node().flavor());
attributes.put("kernel_version", System.getProperty("os.version"));
attributes.put("cpu_microcode_version", getMicrocodeVersion());
container.map(c -> c.image).ifPresent(image -> attributes.put("docker_image", image.asString()));
- context.node().getParentHostname().ifPresent(parent -> attributes.put("parent_hostname", parent));
- context.node().getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version.toFullString()));
- context.node().getOwner().ifPresent(owner -> {
- attributes.put("tenant", owner.getTenant());
- attributes.put("application", owner.getApplication());
- attributes.put("instance", owner.getInstance());
+ context.node().parentHostname().ifPresent(parent -> attributes.put("parent_hostname", parent));
+ context.node().currentVespaVersion().ifPresent(version -> attributes.put("vespa_version", version.toFullString()));
+ context.node().owner().ifPresent(owner -> {
+ attributes.put("tenant", owner.tenant());
+ attributes.put("application", owner.application());
+ attributes.put("instance", owner.instance());
});
return Collections.unmodifiableMap(attributes);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
index f3302bd2359..4a76e0e0a5b 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
@@ -125,7 +125,7 @@ public class NodeAdminStateUpdater {
throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
}
- boolean hostIsActiveInNR = nodeRepository.getNode(hostHostname).getState() == NodeState.active;
+ boolean hostIsActiveInNR = nodeRepository.getNode(hostHostname).state() == NodeState.active;
switch (wantedState) {
case RESUMED:
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
@@ -164,7 +164,7 @@ public class NodeAdminStateUpdater {
void adjustNodeAgentsToRunFromNodeRepository() {
try {
Map<String, NodeSpec> nodeSpecByHostname = nodeRepository.getNodes(hostHostname).stream()
- .collect(Collectors.toMap(NodeSpec::getHostname, Function.identity()));
+ .collect(Collectors.toMap(NodeSpec::hostname, Function.identity()));
Map<String, Acl> aclByHostname = Optional.of(cachedAclSupplier.get())
.filter(acls -> acls.keySet().containsAll(nodeSpecByHostname.keySet()))
.orElseGet(cachedAclSupplier::invalidateAndGet);
@@ -183,8 +183,8 @@ public class NodeAdminStateUpdater {
private List<String> getNodesInActiveState() {
return nodeRepository.getNodes(hostHostname)
.stream()
- .filter(node -> node.getState() == NodeState.active)
- .map(NodeSpec::getHostname)
+ .filter(node -> node.state() == NodeState.active)
+ .map(NodeSpec::hostname)
.collect(Collectors.toList());
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java
index a7cdd7e655d..f1fd97f6e4c 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java
@@ -26,11 +26,11 @@ public interface NodeAgentContext extends TaskContext {
/** @return hostname of the docker container this context applies to */
default HostName hostname() {
- return HostName.from(node().getHostname());
+ return HostName.from(node().hostname());
}
default NodeType nodeType() {
- return node().getNodeType();
+ return node().type();
}
AthenzIdentity identity();
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
index 8435fe34770..ef8ea60bee3 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
@@ -1,9 +1,7 @@
package com.yahoo.vespa.hosted.node.admin.nodeagent;
import com.yahoo.config.provision.CloudName;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
@@ -47,7 +45,7 @@ public class NodeAgentContextImpl implements NodeAgentContext {
String vespaUser, String vespaUserOnHost) {
this.node = Objects.requireNonNull(node);
this.acl = Objects.requireNonNull(acl);
- this.containerName = ContainerName.fromHostname(node.getHostname());
+ this.containerName = ContainerName.fromHostname(node.hostname());
this.identity = Objects.requireNonNull(identity);
this.dockerNetworking = Objects.requireNonNull(dockerNetworking);
this.zone = Objects.requireNonNull(zone);
@@ -181,12 +179,12 @@ public class NodeAgentContextImpl implements NodeAgentContext {
this.nodeSpecBuilder
.hostname(hostname)
.state(NodeState.active)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("d-2-8-50");
}
public Builder nodeType(NodeType nodeType) {
- this.nodeSpecBuilder.nodeType(nodeType);
+ this.nodeSpecBuilder.type(nodeType);
return this;
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index 2716cc8cc59..90eda96d445 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -118,7 +118,7 @@ public class NodeAgentImpl implements NodeAgent {
this.healthChecker = healthChecker;
this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
- .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());
+ .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().hostname());
this.loopThread = new Thread(() -> {
while (!terminated.get()) {
@@ -172,20 +172,20 @@ public class NodeAgentImpl implements NodeAgent {
final NodeAttributes currentNodeAttributes = new NodeAttributes();
final NodeAttributes newNodeAttributes = new NodeAttributes();
- if (context.node().getWantedRestartGeneration().isPresent() &&
- !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
- currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
+ if (context.node().wantedRestartGeneration().isPresent() &&
+ !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) {
+ currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration());
newNodeAttributes.withRestartGeneration(currentRestartGeneration);
}
- if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
- currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
+ if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) {
+ currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration());
newNodeAttributes.withRebootGeneration(currentRebootGeneration);
}
- Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
- if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
- DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY);
+ Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN);
+ if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) {
+ DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY);
DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);
currentNodeAttributes.withDockerImage(currentImage);
@@ -228,7 +228,7 @@ public class NodeAgentImpl implements NodeAgent {
shouldRestartServices(context.node()).ifPresent(restartReason -> {
context.log(logger, "Will restart services: " + restartReason);
restartServices(context, existingContainer.get());
- currentRestartGeneration = context.node().getWantedRestartGeneration();
+ currentRestartGeneration = context.node().wantedRestartGeneration();
});
}
@@ -236,18 +236,18 @@ public class NodeAgentImpl implements NodeAgent {
}
private Optional<String> shouldRestartServices(NodeSpec node) {
- if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
+ if (!node.wantedRestartGeneration().isPresent()) return Optional.empty();
// Restart generation is only optional because it does not exist for unallocated nodes
- if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
+ if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
- + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
+ + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get());
}
return Optional.empty();
}
private void restartServices(NodeAgentContext context, Container existingContainer) {
- if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) {
+ if (existingContainer.state.isRunning() && context.node().state() == NodeState.active) {
context.log(logger, "Restarting services");
// Since we are restarting the services we need to suspend the node.
orchestratorSuspendNode(context);
@@ -290,22 +290,22 @@ public class NodeAgentImpl implements NodeAgent {
}
private Optional<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) {
- final NodeState nodeState = context.node().getState();
+ final NodeState nodeState = context.node().state();
if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
- if (context.node().getWantedDockerImage().isPresent() &&
- !context.node().getWantedDockerImage().get().equals(existingContainer.image)) {
+ if (context.node().wantedDockerImage().isPresent() &&
+ !context.node().wantedDockerImage().get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
- + existingContainer.image.asString() + " -> " + context.node().getWantedDockerImage().get().asString());
+ + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
- if (currentRebootGeneration < context.node().getWantedRebootGeneration()) {
+ if (currentRebootGeneration < context.node().wantedRebootGeneration()) {
return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
- currentRebootGeneration, context.node().getWantedRebootGeneration()));
+ currentRebootGeneration, context.node().wantedRebootGeneration()));
}
// Even though memory can be easily changed with docker update, we need to restart the container
@@ -330,7 +330,7 @@ public class NodeAgentImpl implements NodeAgent {
}
try {
- if (context.node().getState() != NodeState.dirty) {
+ if (context.node().state() != NodeState.dirty) {
suspend();
}
stopServices();
@@ -341,7 +341,7 @@ public class NodeAgentImpl implements NodeAgent {
storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
dockerOperations.removeContainer(context, existingContainer);
- currentRebootGeneration = context.node().getWantedRebootGeneration();
+ currentRebootGeneration = context.node().wantedRebootGeneration();
containerState = ABSENT;
context.log(logger, "Container successfully removed, new containerState is " + containerState);
}
@@ -361,13 +361,13 @@ public class NodeAgentImpl implements NodeAgent {
private ContainerResources getContainerResources(NodeAgentContext context) {
double cpuCap = noCpuCap(context.zone()) ?
0 :
- context.node().getOwner()
+ context.node().owner()
.map(NodeOwner::asApplicationId)
.map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
.orElse(containerCpuCap)
- .value() * context.node().getMinCpuCores();
+ .value() * context.node().vcpus();
- return ContainerResources.from(cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
+ return ContainerResources.from(cpuCap, context.node().vcpus(), context.node().memoryGb());
}
private boolean noCpuCap(ZoneApi zone) {
@@ -376,9 +376,9 @@ public class NodeAgentImpl implements NodeAgent {
}
private boolean downloadImageIfNeeded(NodeSpec node, Optional<Container> container) {
- if (node.getWantedDockerImage().equals(container.map(c -> c.image))) return false;
+ if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false;
- return node.getWantedDockerImage().map(dockerOperations::pullImageAsyncIfNeeded).orElse(false);
+ return node.wantedDockerImage().map(dockerOperations::pullImageAsyncIfNeeded).orElse(false);
}
public void converge(NodeAgentContext context) {
@@ -406,14 +406,14 @@ public class NodeAgentImpl implements NodeAgent {
logChangesToNodeSpec(context, lastNode, node);
// Current reboot generation uninitialized or incremented from outside to cancel reboot
- if (currentRebootGeneration < node.getCurrentRebootGeneration())
- currentRebootGeneration = node.getCurrentRebootGeneration();
+ if (currentRebootGeneration < node.currentRebootGeneration())
+ currentRebootGeneration = node.currentRebootGeneration();
// Either we have changed allocation status (restart gen. only available to allocated nodes), or
// restart generation has been incremented from outside to cancel restart
- if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
- currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
- currentRestartGeneration = node.getCurrentRestartGeneration();
+ if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
+ currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
+ currentRestartGeneration = node.currentRestartGeneration();
// Every time the node spec changes, we should clear the metrics for this container as the dimensions
// will change and we will be reporting duplicate metrics.
@@ -424,7 +424,7 @@ public class NodeAgentImpl implements NodeAgent {
lastNode = node;
}
- switch (node.getState()) {
+ switch (node.state()) {
case ready:
case reserved:
case parked:
@@ -437,12 +437,12 @@ public class NodeAgentImpl implements NodeAgent {
storageMaintainer.handleCoreDumpsForContainer(context, container);
storageMaintainer.getDiskUsageFor(context)
- .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
+ .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.diskGb())
.filter(diskUtil -> diskUtil >= 0.8)
.ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
if (downloadImageIfNeeded(node, container)) {
- context.log(logger, "Waiting for image to download " + context.node().getWantedDockerImage().get().asString());
+ context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString());
return;
}
container = removeContainerIfNeededUpdateContainerState(context, container);
@@ -479,20 +479,20 @@ public class NodeAgentImpl implements NodeAgent {
break;
case dirty:
removeContainerIfNeededUpdateContainerState(context, container);
- context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
+ context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(context);
nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
break;
default:
- throw new ConvergenceException("UNKNOWN STATE " + node.getState().name());
+ throw new ConvergenceException("UNKNOWN STATE " + node.state().name());
}
}
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
- appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
+ appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
if (builder.length() > 0) {
context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
}
@@ -525,30 +525,29 @@ public class NodeAgentImpl implements NodeAgent {
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", context.hostname().value())
.add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
- .add("state", node.getState().toString());
- node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
- node.getAllowedToBeDown().ifPresent(allowed ->
+ .add("state", node.state().toString());
+ node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
+ node.allowedToBeDown().ifPresent(allowed ->
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
ContainerStats stats = containerStats.get();
final String APP = Metrics.APPLICATION_NODE;
final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus();
- final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode();
- final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage();
- final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage();
final long memoryTotalBytes = stats.getMemoryStats().getLimit();
final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage();
final long memoryTotalBytesCache = stats.getMemoryStats().getCache();
- final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
+ final long diskTotalBytes = (long) (node.diskGb() * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
- lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
+ lastCpuMetric.updateCpuDeltas(stats.getCpuStats());
// Ratio of CPU cores allocated to this container to total number of CPU cores on this host
- final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
+ final double allocatedCpuRatio = node.vcpus() / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
+ double cpuThrottledTimeRate = lastCpuMetric.getThrottledTimeRate();
+ double cpuThrottledCpuTimeRate = lastCpuMetric.getThrottledCpuTimeRate();
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
@@ -564,7 +563,9 @@ public class NodeAgentImpl implements NodeAgent {
.withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
- .withMetric("cpu.vcpus", node.getMinCpuCores())
+ .withMetric("cpu.throttled_time.rate", cpuThrottledTimeRate)
+ .withMetric("cpu.throttled_cpu_time.rate", cpuThrottledCpuTimeRate)
+ .withMetric("cpu.vcpus", node.vcpus())
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
@@ -597,7 +598,7 @@ public class NodeAgentImpl implements NodeAgent {
// Push metrics to the metrics proxy in each container.
// TODO Remove port selection logic when all hosted apps have upgraded to Vespa 7.
- int port = context.node().getVespaVersion().map(version -> version.getMajor() == 6).orElse(false) ? 19091 : 19095;
+ int port = context.node().currentVespaVersion().map(version -> version.getMajor() == 6).orElse(false) ? 19091 : 19095;
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
} catch (JsonProcessingException | DockerExecTimeoutException e) {
@@ -621,22 +622,39 @@ public class NodeAgentImpl implements NodeAgent {
}
class CpuUsageReporter {
+ private static final double PERIOD_IN_NANOSECONDS = 1_000d * ContainerResources.CPU_PERIOD_US;
private long containerKernelUsage = 0;
private long totalContainerUsage = 0;
private long totalSystemUsage = 0;
+ private long throttledTime = 0;
+ private long throttlingActivePeriods = 0;
+ private long throttledPeriods = 0;
private long deltaContainerKernelUsage;
private long deltaContainerUsage;
private long deltaSystemUsage;
+ private long deltaThrottledTime;
+ private long deltaThrottlingActivePeriods;
+ private long deltaThrottledPeriods;
+
+ private void updateCpuDeltas(ContainerStats.CpuStats cpuStats) {
+ // Do not calculate delta during the first tick - that will result in a metric value that is
+ // average since container start
+ if (totalSystemUsage != 0) {
+ deltaSystemUsage = cpuStats.getSystemCpuUsage() - totalSystemUsage;
+ deltaContainerUsage = cpuStats.getTotalUsage() - totalContainerUsage;
+ deltaContainerKernelUsage = cpuStats.getUsageInKernelMode() - containerKernelUsage;
+ deltaThrottledTime = cpuStats.getThrottledTime() - throttledTime;
+ deltaThrottlingActivePeriods = cpuStats.getThrottlingActivePeriods() - throttlingActivePeriods;
+ deltaThrottledPeriods = cpuStats.getThrottledPeriods() - throttledPeriods;
+ }
- private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
- deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
- deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
- deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
-
- this.totalSystemUsage = totalSystemUsage;
- this.totalContainerUsage = totalContainerUsage;
- this.containerKernelUsage = containerKernelUsage;
+ totalSystemUsage = cpuStats.getSystemCpuUsage();
+ totalContainerUsage = cpuStats.getTotalUsage();
+ containerKernelUsage = cpuStats.getUsageInKernelMode();
+ throttledTime = cpuStats.getThrottledTime();
+ throttlingActivePeriods = cpuStats.getThrottlingActivePeriods();
+ throttledPeriods = cpuStats.getThrottledPeriods();
}
/**
@@ -651,6 +669,16 @@ public class NodeAgentImpl implements NodeAgent {
double getCpuKernelUsageRatio() {
return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
}
+
+ double getThrottledTimeRate() {
+ return deltaThrottlingActivePeriods == 0 ? Double.NaN :
+ (double) deltaThrottledPeriods / deltaThrottlingActivePeriods;
+ }
+
+ double getThrottledCpuTimeRate() {
+ return deltaThrottlingActivePeriods == 0 ? Double.NaN :
+ deltaThrottledTime / (PERIOD_IN_NANOSECONDS * deltaThrottlingActivePeriods);
+ }
}
// TODO: Also skip orchestration if we're downgrading in test/staging
@@ -666,7 +694,7 @@ public class NodeAgentImpl implements NodeAgent {
// to allow the node admin to make decisions that depend on the docker image. Or, each docker image
// needs to contain routines for drain and suspend. For many images, these can just be dummy routines.
private void orchestratorSuspendNode(NodeAgentContext context) {
- if (context.node().getState() != NodeState.active) return;
+ if (context.node().state() != NodeState.active) return;
context.log(logger, "Ask Orchestrator for permission to suspend node");
try {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/network/IPVersion.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/network/IPVersion.java
index de80d4dca18..4cc825dacd6 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/network/IPVersion.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/network/IPVersion.java
@@ -15,52 +15,53 @@ import java.util.regex.Pattern;
*/
public enum IPVersion {
- IPv6(6, "ip6tables", "ip -6", "ipv6-icmp", "/128", "icmp6-port-unreachable", "ip6tables-restore"),
- IPv4(4, "iptables", "ip", "icmp", "/32", "icmp-port-unreachable", "iptables-restore");
+ IPv6(6, "ip6tables", "ip -6", "ipv6-icmp", 128, "icmp6-port-unreachable", "ip6tables-restore", "fe80::/10"),
+ IPv4(4, "iptables", "ip", "icmp", 32, "icmp-port-unreachable", "iptables-restore", "169.254.0.0/16");
private static final Pattern cidrNotationPattern = Pattern.compile("/\\d+$");
IPVersion(int version, String iptablesCmd, String ipCmd,
- String icmpProtocol, String singleHostCidr, String icmpPortUnreachable,
- String iptablesRestore) {
+ String icmpProtocol, int size, String icmpPortUnreachable,
+ String iptablesRestore, String linkLocalCidr) {
this.version = version;
this.ipCmd = ipCmd;
this.iptablesCmd = iptablesCmd;
this.icmpProtocol = icmpProtocol;
- this.singleHostCidr = singleHostCidr;
+ this.size = size;
this.icmpPortUnreachable = icmpPortUnreachable;
this.iptablesRestore = iptablesRestore;
+ this.linkLocalCidr = linkLocalCidr;
}
private final int version;
private final String iptablesCmd;
private final String ipCmd;
private final String icmpProtocol;
- private final String singleHostCidr;
+ private final int size;
private final String icmpPortUnreachable;
private final String iptablesRestore;
+ private final String linkLocalCidr;
- public int version() {
- return version;
- }
- public String versionString() {
- return String.valueOf(version);
- }
- public String iptablesCmd() {
- return iptablesCmd;
- }
- public String iptablesRestore() {
- return iptablesRestore;
- }
- public String ipCmd() {
- return ipCmd;
- }
- public String icmpProtocol() {
- return icmpProtocol;
- }
- public String singleHostCidr() { return singleHostCidr; }
+ /** The ID of the IP version, either IPv4 or IPv6. */
+ public String id() { return "IPv" + version; }
+
+ /** The IP version, either 4 or 6 */
+ public int version() { return version; }
+
+ public String versionString() { return String.valueOf(version); }
+ public String iptablesCmd() { return iptablesCmd; }
+ public String iptablesRestore() { return iptablesRestore;}
+ public String ipCmd() { return ipCmd; }
+ public String icmpProtocol() { return icmpProtocol; }
+ public String singleHostCidr() { return "/" + size; }
public String icmpPortUnreachable() { return icmpPortUnreachable; }
+ /** The address size (in bits) of the IP version: 32 or 128. */
+ public int addressSize() { return size; }
+
+ /** Both IPv4 and IPv6 have exactly one link-local address space: 169.254.0.0/16 or fe80::/10. */
+ public String linkLocalAddressCidr() { return linkLocalCidr; }
+
public boolean match(InetAddress address) {
return this == IPVersion.get(address);
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
index fb443ed14c4..0938eb23b49 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
@@ -105,14 +105,14 @@ public class RealNodeRepositoryTest {
List<NodeSpec> containersToRun = nodeRepositoryApi.getNodes(dockerHostHostname);
assertThat(containersToRun.size(), is(1));
NodeSpec node = containersToRun.get(0);
- assertThat(node.getHostname(), is("host4.yahoo.com"));
- assertThat(node.getWantedDockerImage().get(), is(DockerImage.fromString("docker-registry.domain.tld:8080/dist/vespa:6.42.0")));
- assertThat(node.getState(), is(NodeState.active));
- assertThat(node.getWantedRestartGeneration().get(), is(0L));
- assertThat(node.getCurrentRestartGeneration().get(), is(0L));
- assertEquals(1, node.getMinCpuCores(), delta);
- assertEquals(1, node.getMinMainMemoryAvailableGb(), delta);
- assertEquals(100, node.getMinDiskAvailableGb(), delta);
+ assertThat(node.hostname(), is("host4.yahoo.com"));
+ assertThat(node.wantedDockerImage().get(), is(DockerImage.fromString("docker-registry.domain.tld:8080/dist/vespa:6.42.0")));
+ assertThat(node.state(), is(NodeState.active));
+ assertThat(node.wantedRestartGeneration().get(), is(0L));
+ assertThat(node.currentRestartGeneration().get(), is(0L));
+ assertEquals(1, node.vcpus(), delta);
+ assertEquals(1, node.memoryGb(), delta);
+ assertEquals(100, node.diskGb(), delta);
}
@Test
@@ -120,7 +120,7 @@ public class RealNodeRepositoryTest {
String hostname = "host4.yahoo.com";
Optional<NodeSpec> node = nodeRepositoryApi.getOptionalNode(hostname);
assertTrue(node.isPresent());
- assertEquals(hostname, node.get().getHostname());
+ assertEquals(hostname, node.get().hostname());
}
@Test
@@ -176,8 +176,8 @@ public class RealNodeRepositoryTest {
NodeSpec hostSpecInNodeRepo = nodeRepositoryApi.getOptionalNode("host123.domain.tld")
.orElseThrow(RuntimeException::new);
- assertEquals(host.nodeFlavor, hostSpecInNodeRepo.getFlavor());
- assertEquals(host.nodeType, hostSpecInNodeRepo.getNodeType());
+ assertEquals(host.nodeFlavor, hostSpecInNodeRepo.flavor());
+ assertEquals(host.nodeType, hostSpecInNodeRepo.type());
assertTrue(nodeRepositoryApi.getOptionalNode("host123-1.domain.tld").isPresent());
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java
index 14755ebf9cc..a3256b6955b 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java
@@ -2,7 +2,7 @@
package com.yahoo.vespa.hosted.node.admin.configserver.state;
import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi;
-import com.yahoo.vespa.hosted.node.admin.configserver.HttpException;
+import com.yahoo.vespa.hosted.node.admin.configserver.ConnectionException;
import com.yahoo.vespa.hosted.node.admin.configserver.state.bindings.HealthResponse;
import org.junit.Test;
@@ -29,7 +29,8 @@ public class StateImplTest {
@Test
public void connectException() {
- RuntimeException exception = HttpException.handleException("Error: ", new ConnectException("connection refused"));
+ RuntimeException exception =
+ ConnectionException.handleException("Error: ", new ConnectException("connection refused"));
when(api.get(any(), any())).thenThrow(exception);
HealthCode code = state.getHealth();
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java
index f3e334fff73..aacb2cafd30 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java
@@ -29,13 +29,13 @@ public class DockerFailTest {
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.state(NodeState.active)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("docker")
.wantedRestartGeneration(1L)
.currentRestartGeneration(1L)
- .minCpuCores(1)
- .minMainMemoryAvailableGb(1)
- .minDiskAvailableGb(1)
+ .vcpus(1)
+ .memoryGb(1)
+ .diskGb(1)
.build());
tester.inOrder(tester.docker).createContainerCommand(eq(dockerImage), eq(containerName));
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
index 7f0f3fd37f6..22b3949755f 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
@@ -81,7 +81,7 @@ public class DockerTester implements AutoCloseable {
NodeSpec hostSpec = new NodeSpec.Builder()
.hostname(HOST_HOSTNAME.value())
.state(NodeState.active)
- .nodeType(NodeType.host)
+ .type(NodeType.host)
.flavor("default")
.wantedRestartGeneration(1L)
.currentRestartGeneration(1L)
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java
index 27b11c3c1ba..8163f90e31f 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java
@@ -28,15 +28,15 @@ public class MultiDockerTest {
tester.addChildNodeRepositoryNode(
new NodeSpec.Builder(nodeSpec2)
.state(NodeState.dirty)
- .minCpuCores(1)
- .minMainMemoryAvailableGb(1)
- .minDiskAvailableGb(1)
+ .vcpus(1)
+ .memoryGb(1)
+ .diskGb(1)
.build());
tester.inOrder(tester.docker).deleteContainer(eq(new ContainerName("host2")));
tester.inOrder(tester.storageMaintainer).archiveNodeStorage(
argThat(context -> context.containerName().equals(new ContainerName("host2"))));
- tester.inOrder(tester.nodeRepository).setNodeState(eq(nodeSpec2.getHostname()), eq(NodeState.ready));
+ tester.inOrder(tester.nodeRepository).setNodeState(eq(nodeSpec2.hostname()), eq(NodeState.ready));
addAndWaitForNode(tester, "host3.test.yahoo.com", DockerImage.fromString("image1"));
}
@@ -47,13 +47,13 @@ public class MultiDockerTest {
.hostname(hostName)
.wantedDockerImage(dockerImage)
.state(NodeState.active)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("docker")
.wantedRestartGeneration(1L)
.currentRestartGeneration(1L)
- .minCpuCores(2)
- .minMainMemoryAvailableGb(4)
- .minDiskAvailableGb(1)
+ .vcpus(2)
+ .memoryGb(4)
+ .diskGb(1)
.build();
tester.addChildNodeRepositoryNode(nodeSpec);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java
index ebf9d72ff1b..625166a10d2 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java
@@ -32,7 +32,7 @@ public class NodeRepoMock implements NodeRepository {
public List<NodeSpec> getNodes(String baseHostName) {
synchronized (monitor) {
return nodeRepositoryNodesByHostname.values().stream()
- .filter(node -> baseHostName.equals(node.getParentHostname().orElse(null)))
+ .filter(node -> baseHostName.equals(node.parentHostname().orElse(null)))
.collect(Collectors.toList());
}
}
@@ -69,7 +69,7 @@ public class NodeRepoMock implements NodeRepository {
void updateNodeRepositoryNode(NodeSpec nodeSpec) {
synchronized (monitor) {
- nodeRepositoryNodesByHostname.put(nodeSpec.getHostname(), nodeSpec);
+ nodeRepositoryNodesByHostname.put(nodeSpec.hostname(), nodeSpec);
}
}
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java
index 674c562cd88..4a232a5b2bd 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java
@@ -52,9 +52,9 @@ public class RebootTest {
.hostname(hostname)
.wantedDockerImage(dockerImage)
.state(NodeState.active)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("docker")
- .vespaVersion(Version.fromString("6.50.0"))
+ .currentVespaVersion(Version.fromString("6.50.0"))
.wantedRestartGeneration(1L)
.currentRestartGeneration(1L)
.build();
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java
index 82e5eca042c..bfc54cac045 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java
@@ -30,7 +30,7 @@ public class RestartTest {
.hostname(hostname)
.state(NodeState.active)
.wantedDockerImage(dockerImage)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("docker")
.wantedRestartGeneration(1)
.currentRestartGeneration(1)
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
index 36169a2b283..57b18606def 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
@@ -157,12 +157,12 @@ public class StorageMaintainerTest {
NodeSpec nodeSpec = new NodeSpec.Builder()
.hostname("host123-5.test.domain.tld")
- .nodeType(nodeType)
+ .type(nodeType)
.state(NodeState.active)
.parentHostname("host123.test.domain.tld")
.owner(new NodeOwner("tenant", "application", "instance"))
.membership(new NodeMembership("clusterType", "clusterId", null, 0, false))
- .vespaVersion(Version.fromString("6.305.12"))
+ .currentVespaVersion(Version.fromString("6.305.12"))
.flavor("d-2-8-50")
.canonicalFlavor("d-2-8-50")
.build();
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
index 6e645e6c70f..ca9b05a3ff6 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
@@ -160,7 +160,7 @@ public class NodeAdminImplTest {
NodeSpec nodeSpec = new NodeSpec.Builder()
.hostname(hostname)
.state(NodeState.active)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("default")
.build();
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
index b8894bbf814..bb18e261301 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
@@ -59,7 +59,7 @@ public class NodeAdminStateUpdaterTest {
public void state_convergence() {
mockNodeRepo(NodeState.active, 4);
List<String> activeHostnames = nodeRepository.getNodes(hostHostname.value()).stream()
- .map(NodeSpec::getHostname)
+ .map(NodeSpec::hostname)
.collect(Collectors.toList());
List<String> suspendHostnames = new ArrayList<>(activeHostnames);
suspendHostnames.add(hostHostname.value());
@@ -170,7 +170,7 @@ public class NodeAdminStateUpdaterTest {
// When doing batch suspend, only suspend the containers if the host is not active
List<String> activeHostnames = nodeRepository.getNodes(hostHostname.value()).stream()
- .map(NodeSpec::getHostname)
+ .map(NodeSpec::hostname)
.collect(Collectors.toList());
updater.converge(SUSPENDED);
verify(orchestrator, times(1)).suspend(eq(hostHostname.value()), eq(activeHostnames));
@@ -206,9 +206,9 @@ public class NodeAdminStateUpdaterTest {
updater.adjustNodeAgentsToRunFromNodeRepository();
updater.adjustNodeAgentsToRunFromNodeRepository();
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host1.yahoo.com")), eq(acl));
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host2.yahoo.com")), eq(acl));
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host3.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host3.yahoo.com")), eq(acl));
verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value()));
verify(nodeRepository, times(1)).getAcls(eq(hostHostname.value()));
}
@@ -224,9 +224,9 @@ public class NodeAdminStateUpdaterTest {
updater.adjustNodeAgentsToRunFromNodeRepository();
updater.adjustNodeAgentsToRunFromNodeRepository();
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host1.yahoo.com")), eq(acl));
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host2.yahoo.com")), eq(acl));
- verify(nodeAgentContextFactory, times(1)).create(argThat(spec -> spec.getHostname().equals("host3.yahoo.com")), eq(Acl.EMPTY));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(1)).create(argThat(spec -> spec.hostname().equals("host3.yahoo.com")), eq(Acl.EMPTY));
verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value()));
verify(nodeRepository, times(2)).getAcls(eq(hostHostname.value())); // During the first tick, the cache is invalidated and retried
}
@@ -241,8 +241,8 @@ public class NodeAdminStateUpdaterTest {
updater.adjustNodeAgentsToRunFromNodeRepository();
updater.adjustNodeAgentsToRunFromNodeRepository();
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host1.yahoo.com")), eq(acl));
- verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host2.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl));
+ verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl));
verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value()));
verify(nodeRepository, times(1)).getAcls(eq(hostHostname.value()));
}
@@ -261,11 +261,11 @@ public class NodeAdminStateUpdaterTest {
.mapToObj(i -> new NodeSpec.Builder()
.hostname("host" + i + ".yahoo.com")
.state(NodeState.active)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("docker")
- .minCpuCores(1)
- .minMainMemoryAvailableGb(1)
- .minDiskAvailableGb(1)
+ .vcpus(1)
+ .memoryGb(1)
+ .diskGb(1)
.build())
.collect(Collectors.toList());
@@ -274,11 +274,11 @@ public class NodeAdminStateUpdaterTest {
when(nodeRepository.getNode(eq(hostHostname.value()))).thenReturn(new NodeSpec.Builder()
.hostname(hostHostname.value())
.state(hostState)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("default")
- .minCpuCores(1)
- .minMainMemoryAvailableGb(1)
- .minDiskAvailableGb(1)
+ .vcpus(1)
+ .memoryGb(1)
+ .diskGb(1)
.build());
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
index f754d1798ec..46af7e7bafd 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.node.admin.nodeagent;
import com.yahoo.component.Version;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.io.IOUtils;
import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.dockerapi.Container;
@@ -29,13 +28,14 @@ import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
import org.junit.Test;
import org.mockito.InOrder;
-import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
-import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Optional;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import static com.yahoo.yolean.Exceptions.uncheck;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@ -64,11 +64,11 @@ public class NodeAgentImplTest {
private final String hostName = "host1.test.yahoo.com";
private final NodeSpec.Builder nodeBuilder = new NodeSpec.Builder()
.hostname(hostName)
- .nodeType(NodeType.tenant)
+ .type(NodeType.tenant)
.flavor("docker")
- .minCpuCores(MIN_CPU_CORES)
- .minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
- .minDiskAvailableGb(MIN_DISK_AVAILABLE_GB);
+ .vcpus(MIN_CPU_CORES)
+ .memoryGb(MIN_MAIN_MEMORY_AVAILABLE_GB)
+ .diskGb(MIN_DISK_AVAILABLE_GB);
private final NodeAgentContextSupplier contextSupplier = mock(NodeAgentContextSupplier.class);
private final DockerImage dockerImage = DockerImage.fromString("dockerImage");
@@ -90,7 +90,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.build();
NodeAgentContext context = createContext(node);
@@ -119,7 +119,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.build();
NodeAgentContext context = createContext(node);
@@ -140,7 +140,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.build();
NodeAgentContext context = createContext(node);
@@ -214,7 +214,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.build();
NodeAgentContext context = createContext(node);
@@ -241,7 +241,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion);
+ .currentVespaVersion(vespaVersion);
NodeAgentContext firstContext = createContext(specBuilder.build());
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
@@ -250,9 +250,9 @@ public class NodeAgentImplTest {
when(storageMaintainer.getDiskUsageFor(any())).thenReturn(Optional.of(201326592000L));
nodeAgent.doConverge(firstContext);
- NodeAgentContext secondContext = createContext(specBuilder.minDiskAvailableGb(200).build());
+ NodeAgentContext secondContext = createContext(specBuilder.diskGb(200).build());
nodeAgent.doConverge(secondContext);
- NodeAgentContext thirdContext = createContext(specBuilder.minCpuCores(4).build());
+ NodeAgentContext thirdContext = createContext(specBuilder.vcpus(4).build());
nodeAgent.doConverge(thirdContext);
ContainerResources resourcesAfterThird = ContainerResources.from(0, 4, 16);
mockGetContainer(dockerImage, resourcesAfterThird, true);
@@ -288,7 +288,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion);
+ .currentVespaVersion(vespaVersion);
NodeAgentContext firstContext = createContext(specBuilder.build());
NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true);
@@ -297,7 +297,7 @@ public class NodeAgentImplTest {
when(storageMaintainer.getDiskUsageFor(any())).thenReturn(Optional.of(201326592000L));
nodeAgent.doConverge(firstContext);
- NodeAgentContext secondContext = createContext(specBuilder.minMainMemoryAvailableGb(20).build());
+ NodeAgentContext secondContext = createContext(specBuilder.memoryGb(20).build());
nodeAgent.doConverge(secondContext);
ContainerResources resourcesAfterThird = ContainerResources.from(0, 2, 20);
mockGetContainer(dockerImage, resourcesAfterThird, true);
@@ -325,7 +325,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.wantedRestartGeneration(wantedRestartGeneration)
.currentRestartGeneration(currentRestartGeneration)
.build();
@@ -357,7 +357,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.wantedRebootGeneration(wantedRebootGeneration)
.currentRebootGeneration(currentRebootGeneration)
.build();
@@ -400,7 +400,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.failed)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.build();
NodeAgentContext context = createContext(node);
@@ -446,7 +446,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.state(NodeState.inactive)
.wantedVespaVersion(vespaVersion)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.build();
NodeAgentContext context = createContext(node);
@@ -548,7 +548,7 @@ public class NodeAgentImplTest {
.currentDockerImage(dockerImage)
.wantedDockerImage(dockerImage)
.state(NodeState.active)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.build();
NodeAgentContext context = createContext(node);
@@ -570,7 +570,7 @@ public class NodeAgentImplTest {
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.state(NodeState.active)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.build();
NodeAgentContext context = createContext(node);
@@ -640,8 +640,7 @@ public class NodeAgentImplTest {
@Test
@SuppressWarnings("unchecked")
public void testGetRelevantMetrics() throws Exception {
- ClassLoader classLoader = getClass().getClassLoader();
- String json = IOUtils.readAll(classLoader.getResourceAsStream("docker.stats.json"), StandardCharsets.UTF_8);
+ String json = Files.readString(Paths.get("src/test/resources/docker.stats.json"));
ContainerStats stats2 = ContainerStats.fromJson(json);
ContainerStats stats1 = ContainerStats.fromJson(json.replace("\"cpu_stats\"", "\"cpu_stats2\"").replace("\"precpu_stats\"", "\"cpu_stats\""));
@@ -651,10 +650,10 @@ public class NodeAgentImplTest {
.wantedDockerImage(dockerImage)
.currentDockerImage(dockerImage)
.state(NodeState.active)
- .vespaVersion(vespaVersion)
+ .currentVespaVersion(vespaVersion)
.owner(owner)
.membership(membership)
- .minMainMemoryAvailableGb(2)
+ .memoryGb(2)
.allowedToBeDown(true)
.parentHostname("parent.host.name.yahoo.com")
.build();
@@ -667,15 +666,14 @@ public class NodeAgentImplTest {
when(dockerOperations.getContainerStats(eq(context)))
.thenReturn(Optional.of(stats1))
.thenReturn(Optional.of(stats2));
-
- nodeAgent.updateContainerNodeMetrics(); // Update metrics once to init and lastCpuMetric
- Path pathToExpectedMetrics = Paths.get(classLoader.getResource("expected.container.system.metrics.txt").getPath());
- String expectedMetrics = new String(Files.readAllBytes(pathToExpectedMetrics))
- .replaceAll("\\s", "")
- .replaceAll("\\n", "");
+ List<String> expectedMetrics = Stream.of(0, 1)
+ .map(i -> Paths.get("src/test/resources/expected.container.system.metrics." + i + ".txt"))
+ .map(path -> uncheck(() -> Files.readString(path)))
+ .map(content -> content.replaceAll("\\s", "").replaceAll("\\n", ""))
+ .collect(Collectors.toList());
+ int[] counter = {0};
- String[] expectedCommand = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19095", "setExtraMetrics", expectedMetrics};
doAnswer(invocation -> {
NodeAgentContext calledContainerName = (NodeAgentContext) invocation.getArguments()[0];
long calledTimeout = (long) invocation.getArguments()[1];
@@ -687,11 +685,15 @@ public class NodeAgentImplTest {
assertEquals(context, calledContainerName);
assertEquals(5L, calledTimeout);
- assertArrayEquals(expectedCommand, calledCommand);
+ String[] expectedCommand = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19095",
+ "setExtraMetrics", expectedMetrics.get(counter[0])};
+ assertArrayEquals("Invocation #" + counter[0], expectedCommand, calledCommand);
+ counter[0]++;
return null;
}).when(dockerOperations).executeCommandInContainerAsRoot(any(), any(), any());
nodeAgent.updateContainerNodeMetrics();
+ nodeAgent.updateContainerNodeMetrics();
}
@Test
@@ -713,7 +715,7 @@ public class NodeAgentImplTest {
@Test
public void testRunningConfigServer() {
final NodeSpec node = nodeBuilder
- .nodeType(NodeType.config)
+ .type(NodeType.config)
.wantedDockerImage(dockerImage)
.state(NodeState.active)
.wantedVespaVersion(vespaVersion)
diff --git a/node-admin/src/test/resources/docker.stats.json b/node-admin/src/test/resources/docker.stats.json
index ff4a2fde943..5b42d9a2428 100644
--- a/node-admin/src/test/resources/docker.stats.json
+++ b/node-admin/src/test/resources/docker.stats.json
@@ -18,9 +18,9 @@
},
"system_cpu_usage":5876874910000000,
"throttling_data":{
- "periods":3212,
- "throttled_periods":322,
- "throttled_time":4490
+ "periods":820694,
+ "throttled_periods":177731,
+ "throttled_time":81891944744550
}
},
"cpu_stats":{
@@ -41,9 +41,9 @@
},
"system_cpu_usage":5876882680000000,
"throttling_data":{
- "periods":3242,
- "throttled_periods":332,
- "throttled_time":4523
+ "periods":821264,
+ "throttled_periods":178201,
+ "throttled_time":82181944744550
}
},
"memory_stats":{
diff --git a/node-admin/src/test/resources/expected.container.system.metrics.txt b/node-admin/src/test/resources/expected.container.system.metrics.0.txt
index ec750798c98..ea6036ce2ea 100644
--- a/node-admin/src/test/resources/expected.container.system.metrics.txt
+++ b/node-admin/src/test/resources/expected.container.system.metrics.0.txt
@@ -9,8 +9,6 @@ s:
"state": "active"
},
"metrics": {
- "cpu.sys.util": 3.402,
- "cpu.util": 5.4,
"cpu.vcpus": 2.0,
"disk.limit": 250000000000,
"disk.used": 39625000000,
diff --git a/node-admin/src/test/resources/expected.container.system.metrics.1.txt b/node-admin/src/test/resources/expected.container.system.metrics.1.txt
new file mode 100644
index 00000000000..54d4d36c7d0
--- /dev/null
+++ b/node-admin/src/test/resources/expected.container.system.metrics.1.txt
@@ -0,0 +1,82 @@
+s:
+{
+ "application": "vespa.node",
+ "dimensions": {
+ "host": "host1.test.yahoo.com",
+ "orchestratorState":"ALLOWED_TO_BE_DOWN",
+ "parentHostname": "parent.host.name.yahoo.com",
+ "role": "tenants",
+ "state": "active"
+ },
+ "metrics": {
+ "cpu.sys.util": 3.402,
+ "cpu.throttled_cpu_time.rate": 5.087,
+ "cpu.throttled_time.rate": 0.824,
+ "cpu.util": 5.4,
+ "cpu.vcpus": 2.0,
+ "disk.limit": 250000000000,
+ "disk.used": 39625000000,
+ "disk.util": 15.85,
+ "mem.limit": 4294967296,
+ "mem.used": 1073741824,
+ "mem.util": 25.0,
+ "mem_total.used": 1752707072,
+ "mem_total.util": 40.808
+ },
+ "routing": {
+ "yamas": {
+ "namespaces": ["Vespa"]
+ }
+ },
+ "timestamp": 0
+}
+{
+ "application": "vespa.node",
+ "dimensions": {
+ "host": "host1.test.yahoo.com",
+ "interface": "eth0",
+ "orchestratorState":"ALLOWED_TO_BE_DOWN",
+ "parentHostname": "parent.host.name.yahoo.com",
+ "role": "tenants",
+ "state": "active"
+ },
+ "metrics": {
+ "net.in.bytes": 19499270,
+ "net.in.dropped": 4,
+ "net.in.errors": 55,
+ "net.out.bytes": 20303455,
+ "net.out.dropped": 13,
+ "net.out.errors": 3
+ },
+ "routing": {
+ "yamas": {
+ "namespaces": ["Vespa"]
+ }
+ },
+ "timestamp": 0
+}
+{
+ "application": "vespa.node",
+ "dimensions": {
+ "host": "host1.test.yahoo.com",
+ "interface": "eth1",
+ "orchestratorState":"ALLOWED_TO_BE_DOWN",
+ "parentHostname": "parent.host.name.yahoo.com",
+ "role": "tenants",
+ "state": "active"
+ },
+ "metrics": {
+ "net.in.bytes": 3245766,
+ "net.in.dropped": 0,
+ "net.in.errors": 0,
+ "net.out.bytes": 54246745,
+ "net.out.dropped": 0,
+ "net.out.errors": 0
+ },
+ "routing": {
+ "yamas": {
+ "namespaces": ["Vespa"]
+ }
+ },
+ "timestamp": 0
+} \ No newline at end of file
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index bedfbc5bdc1..9b78f558a7a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -399,10 +399,7 @@ public class NodeRepository extends AbstractComponent {
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
- db.writeTo(Node.State.inactive,
- db.getNodes(application, Node.State.reserved, Node.State.active),
- Agent.application, Optional.empty(), transaction
- );
+ deactivate(db.getNodes(application, Node.State.reserved, Node.State.active), transaction);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
index 58c576d3f44..6f7b7c4d57d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.lb;
import com.yahoo.vespa.hosted.provision.maintenance.LoadBalancerExpirer;
+import java.time.Instant;
import java.util.Objects;
/**
@@ -14,12 +15,14 @@ public class LoadBalancer {
private final LoadBalancerId id;
private final LoadBalancerInstance instance;
- private final boolean inactive;
+ private final State state;
+ private final Instant changedAt;
- public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, boolean inactive) {
+ public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, State state, Instant changedAt) {
this.id = Objects.requireNonNull(id, "id must be non-null");
this.instance = Objects.requireNonNull(instance, "instance must be non-null");
- this.inactive = inactive;
+ this.state = Objects.requireNonNull(state, "state must be non-null");
+ this.changedAt = Objects.requireNonNull(changedAt, "changedAt must be non-null");
}
/** An identifier for this load balancer. The ID is unique inside the zone */
@@ -32,17 +35,48 @@ public class LoadBalancer {
return instance;
}
- /**
- * Returns whether this load balancer is inactive. Inactive load balancers are eventually removed by
- * {@link LoadBalancerExpirer}. Inactive load balancers may be reactivated if a deleted cluster is redeployed.
- */
- public boolean inactive() {
- return inactive;
+ /** The current state of this */
+ public State state() {
+ return state;
}
- /** Return a copy of this that is set inactive */
- public LoadBalancer deactivate() {
- return new LoadBalancer(id, instance, true);
+ /** Returns when this was last changed */
+ public Instant changedAt() {
+ return changedAt;
+ }
+
+ /** Returns a copy of this with state set to given state */
+ public LoadBalancer with(State state, Instant changedAt) {
+ if (changedAt.isBefore(this.changedAt)) {
+ throw new IllegalArgumentException("Invalid changedAt: '" + changedAt + "' is before existing value '" +
+ this.changedAt + "'");
+ }
+ if (this.state != State.reserved && state == State.reserved) {
+ throw new IllegalArgumentException("Invalid state transition: " + this.state + " -> " + state);
+ }
+ return new LoadBalancer(id, instance, state, changedAt);
+ }
+
+ /** Returns a copy of this with instance set to given instance */
+ public LoadBalancer with(LoadBalancerInstance instance) {
+ return new LoadBalancer(id, instance, state, changedAt);
+ }
+
+ public enum State {
+
+ /** This load balancer has been provisioned and reserved for an application */
+ reserved,
+
+ /**
+ * The load balancer has been deactivated and is ready to be removed. Inactive load balancers are eventually
+ * removed by {@link LoadBalancerExpirer}. Inactive load balancers may be reactivated if a deleted cluster is
+ * redeployed.
+ */
+ inactive,
+
+ /** The load balancer is in active use by an application */
+ active,
+
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java
index ba7a83169ad..7fd50bf0930 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java
@@ -3,18 +3,20 @@ package com.yahoo.vespa.hosted.provision.lb;
import com.yahoo.config.provision.ApplicationId;
+import java.time.Instant;
import java.util.Collection;
+import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
- * A filterable load balancer list.
+ * A filterable load balancer list. This is immutable.
*
* @author mpolden
*/
-public class LoadBalancerList {
+public class LoadBalancerList implements Iterable<LoadBalancer> {
private final List<LoadBalancer> loadBalancers;
@@ -27,9 +29,14 @@ public class LoadBalancerList {
return of(loadBalancers.stream().filter(lb -> lb.id().application().equals(application)));
}
- /** Returns the subset of load balancers that are inactive */
- public LoadBalancerList inactive() {
- return of(loadBalancers.stream().filter(LoadBalancer::inactive));
+ /** Returns the subset of load balancers that are in given state */
+ public LoadBalancerList in(LoadBalancer.State state) {
+ return of(loadBalancers.stream().filter(lb -> lb.state() == state));
+ }
+
+ /** Returns the subset of load balancers that last changed before given instant */
+ public LoadBalancerList changedBefore(Instant instant) {
+ return of(loadBalancers.stream().filter(lb -> lb.changedAt().isBefore(instant)));
}
public List<LoadBalancer> asList() {
@@ -40,4 +47,9 @@ public class LoadBalancerList {
return new LoadBalancerList(stream.collect(Collectors.toUnmodifiableList()));
}
+ @Override
+ public Iterator<LoadBalancer> iterator() {
+ return loadBalancers.iterator();
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
index 6f45403f0e6..f6398c04e61 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
@@ -13,8 +13,17 @@ import java.util.Set;
*/
public interface LoadBalancerService {
- /** Create a load balancer for given application cluster. Implementations are expected to be idempotent */
- LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals);
+ /**
+ * Create a load balancer for given application cluster. Implementations are expected to be idempotent
+ *
+ * @param application Application owning the LB
+ * @param cluster Target cluster of the LB
+ * @param reals Reals that should be configured on the LB
+ * @param force Whether reconfiguration should be forced (e.g. allow configuring an empty set of reals on a
+ * pre-existing load balancer).
+ * @return The provisioned load balancer instance
+ */
+ LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals, boolean force);
/** Permanently remove load balancer for given application cluster */
void remove(ApplicationId application, ClusterSpec.Id cluster);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceException.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceException.java
new file mode 100644
index 00000000000..e5ab519ab94
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceException.java
@@ -0,0 +1,17 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.lb;
+
+import com.yahoo.config.provision.TransientException;
+
+/**
+ * Transient exception thrown on behalf of a {@link LoadBalancerService}.
+ *
+ * @author mpolden
+ */
+public class LoadBalancerServiceException extends TransientException {
+
+ public LoadBalancerServiceException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
index d6ecba7e551..91f02a31f6b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
@@ -29,9 +29,13 @@ public class LoadBalancerServiceMock implements LoadBalancerService {
}
@Override
- public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) {
- LoadBalancerId id = new LoadBalancerId(application, cluster);
- LoadBalancerInstance instance = new LoadBalancerInstance(
+ public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals, boolean force) {
+ var id = new LoadBalancerId(application, cluster);
+ var oldInstance = instances.get(id);
+ if (!force && oldInstance != null && !oldInstance.reals().isEmpty() && reals.isEmpty()) {
+ throw new IllegalArgumentException("Refusing to remove all reals from load balancer " + id);
+ }
+ var instance = new LoadBalancerInstance(
HostName.from("lb-" + application.toShortString() + "-" + cluster.value()),
Optional.of(new DnsZone("zone-id-1")),
Collections.singleton(4443),
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
index 87b7c73386e..331ffe7e202 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
@@ -32,7 +32,7 @@ public class SharedLoadBalancerService implements LoadBalancerService {
}
@Override
- public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) {
+ public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals, boolean force) {
final var proxyNodes = nodeRepository.getNodes(NodeType.proxy);
proxyNodes.sort(hostnameComparator);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
index 96c8fe21959..c1a05a3c32d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
@@ -5,10 +5,12 @@ import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.Deployment;
+import com.yahoo.config.provision.TransientException;
import com.yahoo.log.LogLevel;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.yolean.Exceptions;
import java.time.Duration;
import java.time.Instant;
@@ -88,6 +90,8 @@ public abstract class ApplicationMaintainer extends Maintainer {
if ( ! deployment.isPresent()) return; // this will be done at another config server
log.log(LogLevel.DEBUG, this.getClass().getSimpleName() + " deploying " + application);
deployment.get().activate();
+ } catch (TransientException e) {
+ log.log(LogLevel.INFO, "Failed to redeploy " + application + " with a transient error: " + Exceptions.toMessageString(e));
} catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception on maintenance redeploy", e);
} finally {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
index 684f6dbcd50..d7f41c4d8e2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
@@ -49,7 +49,7 @@ import java.util.stream.Collectors;
*/
public class FailedExpirer extends Maintainer {
- private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
+ private static final Logger log = Logger.getLogger(FailedExpirer.class.getName());
private static final int maxAllowedFailures = 5; // Stop recycling nodes after this number of failures
private final NodeRepository nodeRepository;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
index d6b392c4d64..7d7f8d479fe 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
@@ -6,6 +6,7 @@ import com.yahoo.log.LogLevel;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
+import com.yahoo.vespa.hosted.provision.lb.LoadBalancer.State;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerService;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
@@ -17,15 +18,21 @@ import java.util.Objects;
import java.util.stream.Collectors;
/**
- * Periodically remove inactive load balancers permanently.
+ * Periodically expire load balancers.
*
- * When an application is removed, any associated load balancers are only deactivated. This maintainer ensures that
- * underlying load balancer instances are eventually freed.
+ * Load balancers expire from the following states:
+ *
+ * {@link LoadBalancer.State#inactive}: An application is removed and load balancers are deactivated.
+ * {@link LoadBalancer.State#reserved}: A prepared application is never successfully activated, thus never activating
+ * any prepared load balancers.
*
* @author mpolden
*/
public class LoadBalancerExpirer extends Maintainer {
+ private static final Duration reservedExpiry = Duration.ofHours(1);
+ private static final Duration inactiveExpiry = Duration.ofHours(1);
+
private final LoadBalancerService service;
private final CuratorDatabaseClient db;
@@ -37,22 +44,39 @@ public class LoadBalancerExpirer extends Maintainer {
@Override
protected void maintain() {
+ expireReserved();
removeInactive();
}
+ private void expireReserved() {
+ try (Lock lock = db.lockLoadBalancers()) {
+ var now = nodeRepository().clock().instant();
+ var expirationTime = now.minus(reservedExpiry);
+ var expired = nodeRepository().loadBalancers()
+ .in(State.reserved)
+ .changedBefore(expirationTime);
+ expired.forEach(lb -> db.writeLoadBalancer(lb.with(State.inactive, now)));
+ }
+ }
+
private void removeInactive() {
List<LoadBalancerId> failed = new ArrayList<>();
Exception lastException = null;
try (Lock lock = db.lockLoadBalancers()) {
- for (LoadBalancer loadBalancer : nodeRepository().loadBalancers().inactive().asList()) {
- if (hasNodes(loadBalancer.id().application())) { // Defer removal if there are still nodes allocated to application
+ var now = nodeRepository().clock().instant();
+ var expirationTime = now.minus(inactiveExpiry);
+ var expired = nodeRepository().loadBalancers()
+ .in(State.inactive)
+ .changedBefore(expirationTime);
+ for (var lb : expired) {
+ if (hasNodes(lb.id().application())) { // Defer removal if there are still nodes allocated to application
continue;
}
try {
- service.remove(loadBalancer.id().application(), loadBalancer.id().cluster());
- db.removeLoadBalancer(loadBalancer.id());
+ service.remove(lb.id().application(), lb.id().cluster());
+ db.removeLoadBalancer(lb.id());
} catch (Exception e) {
- failed.add(loadBalancer.id());
+ failed.add(lb.id());
lastException = e;
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index b7e8395cc92..a7b750c4e46 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -5,7 +5,9 @@ import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.Deployment;
import com.yahoo.config.provision.HostLivenessTracker;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.TransientException;
import com.yahoo.jdisc.Metric;
+import com.yahoo.log.LogLevel;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.applicationmodel.ServiceInstance;
@@ -21,6 +23,7 @@ import com.yahoo.vespa.orchestrator.Orchestrator;
import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
import com.yahoo.vespa.orchestrator.status.HostStatus;
import com.yahoo.vespa.service.monitor.ServiceMonitor;
+import com.yahoo.yolean.Exceptions;
import java.time.Clock;
import java.time.Duration;
@@ -368,8 +371,11 @@ public class NodeFailer extends Maintainer {
try {
deployment.get().activate();
return true;
- }
- catch (RuntimeException e) {
+ } catch (TransientException e) {
+ log.log(LogLevel.INFO, "Failed to redeploy " + node.allocation().get().owner() +
+ " with a transient error, will be retried by application maintainer: " + Exceptions.toMessageString(e));
+ return true;
+ } catch (RuntimeException e) {
// The expected reason for deployment to fail here is that there is no capacity available to redeploy.
// In that case we should leave the node in the active state to avoid failing additional nodes.
nodeRepository().reactivate(node.hostname(), Agent.system,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 25549abe9ed..b9b1200d473 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -7,16 +7,10 @@ import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostLivenessTracker;
import com.yahoo.config.provision.InfraDeployer;
-import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.maintenance.retire.RetireIPv4OnlyNodes;
-import com.yahoo.vespa.hosted.provision.maintenance.retire.RetirementPolicy;
-import com.yahoo.vespa.hosted.provision.maintenance.retire.RetirementPolicyList;
-import com.yahoo.vespa.hosted.provision.provisioning.FlavorSpareChecker;
-import com.yahoo.vespa.hosted.provision.provisioning.FlavorSpareCount;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisionServiceProvider;
import com.yahoo.vespa.orchestrator.Orchestrator;
import com.yahoo.vespa.service.monitor.ServiceMonitor;
@@ -47,7 +41,6 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
private final DirtyExpirer dirtyExpirer;
private final ProvisionedExpirer provisionedExpirer;
private final NodeRebooter nodeRebooter;
- private final NodeRetirer nodeRetirer;
private final MetricsReporter metricsReporter;
private final InfrastructureProvisioner infrastructureProvisioner;
private final Optional<LoadBalancerExpirer> loadBalancerExpirer;
@@ -72,7 +65,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
nodeFailer = new NodeFailer(deployer, hostLivenessTracker, serviceMonitor, nodeRepository, durationFromEnv("fail_grace").orElse(defaults.failGrace), clock, orchestrator, throttlePolicyFromEnv().orElse(defaults.throttlePolicy), metric);
periodicApplicationMaintainer = new PeriodicApplicationMaintainer(deployer, nodeRepository, defaults.redeployMaintainerInterval, durationFromEnv("periodic_redeploy_interval").orElse(defaults.periodicRedeployInterval));
- operatorChangeApplicationMaintainer = new OperatorChangeApplicationMaintainer(deployer, nodeRepository, clock, durationFromEnv("operator_change_redeploy_interval").orElse(defaults.operatorChangeRedeployInterval));
+ operatorChangeApplicationMaintainer = new OperatorChangeApplicationMaintainer(deployer, nodeRepository, durationFromEnv("operator_change_redeploy_interval").orElse(defaults.operatorChangeRedeployInterval));
reservationExpirer = new ReservationExpirer(nodeRepository, clock, durationFromEnv("reservation_expiry").orElse(defaults.reservationExpiry));
retiredExpirer = new RetiredExpirer(nodeRepository, orchestrator, deployer, clock, durationFromEnv("retired_interval").orElse(defaults.retiredInterval), durationFromEnv("retired_expiry").orElse(defaults.retiredExpiry));
inactiveExpirer = new InactiveExpirer(nodeRepository, clock, durationFromEnv("inactive_expiry").orElse(defaults.inactiveExpiry));
@@ -83,7 +76,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
metricsReporter = new MetricsReporter(nodeRepository, metric, orchestrator, serviceMonitor, periodicApplicationMaintainer::pendingDeployments, durationFromEnv("metrics_interval").orElse(defaults.metricsInterval));
infrastructureProvisioner = new InfrastructureProvisioner(nodeRepository, infraDeployer, durationFromEnv("infrastructure_provision_interval").orElse(defaults.infrastructureProvisionInterval));
loadBalancerExpirer = provisionServiceProvider.getLoadBalancerService().map(lbService ->
- new LoadBalancerExpirer(nodeRepository, durationFromEnv("load_balancer_expiry").orElse(defaults.loadBalancerExpiry), lbService));
+ new LoadBalancerExpirer(nodeRepository, durationFromEnv("load_balancer_expirer_interval").orElse(defaults.loadBalancerExpirerInterval), lbService));
hostProvisionMaintainer = provisionServiceProvider.getHostProvisioner().map(hostProvisioner ->
new HostProvisionMaintainer(nodeRepository, durationFromEnv("host_provisioner_interval").orElse(defaults.hostProvisionerInterval), hostProvisioner, flagSource));
hostDeprovisionMaintainer = provisionServiceProvider.getHostProvisioner().map(hostProvisioner ->
@@ -91,11 +84,6 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
// The DuperModel is filled with infrastructure applications by the infrastructure provisioner, so explicitly run that now
infrastructureProvisioner.maintain();
-
- RetirementPolicy policy = new RetirementPolicyList(new RetireIPv4OnlyNodes(zone));
- FlavorSpareChecker flavorSpareChecker = new FlavorSpareChecker(
- NodeRetirer.SPARE_NODES_POLICY, FlavorSpareCount.constructFlavorSpareCountGraph(zone.nodeFlavors().get().getFlavors()));
- nodeRetirer = new NodeRetirer(nodeRepository, flavorSpareChecker, durationFromEnv("retire_interval").orElse(defaults.nodeRetirerInterval), deployer, policy);
}
@Override
@@ -109,7 +97,6 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
failedExpirer.deconstruct();
dirtyExpirer.deconstruct();
nodeRebooter.deconstruct();
- nodeRetirer.deconstruct();
provisionedExpirer.deconstruct();
metricsReporter.deconstruct();
infrastructureProvisioner.deconstruct();
@@ -153,11 +140,10 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
private final Duration dirtyExpiry;
private final Duration provisionedExpiry;
private final Duration rebootInterval;
- private final Duration nodeRetirerInterval;
private final Duration metricsInterval;
private final Duration retiredInterval;
private final Duration infrastructureProvisionInterval;
- private final Duration loadBalancerExpiry;
+ private final Duration loadBalancerExpirerInterval;
private final Duration hostProvisionerInterval;
private final Duration hostDeprovisionerInterval;
@@ -167,16 +153,15 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
failGrace = Duration.ofMinutes(30);
periodicRedeployInterval = Duration.ofMinutes(30);
// Don't redeploy in test environments
- redeployMaintainerInterval = zone.environment().isTest() ? Duration.ofDays(1) : Duration.ofMinutes(1);
+ redeployMaintainerInterval = Duration.ofMinutes(1);
operatorChangeRedeployInterval = Duration.ofMinutes(1);
failedExpirerInterval = Duration.ofMinutes(10);
provisionedExpiry = Duration.ofHours(4);
rebootInterval = Duration.ofDays(30);
- nodeRetirerInterval = Duration.ofMinutes(30);
metricsInterval = Duration.ofMinutes(1);
infrastructureProvisionInterval = Duration.ofMinutes(1);
throttlePolicy = NodeFailer.ThrottlePolicy.hosted;
- loadBalancerExpiry = Duration.ofHours(1);
+ loadBalancerExpirerInterval = Duration.ofMinutes(10);
reservationExpiry = Duration.ofMinutes(20); // Need to be long enough for deployment to be finished for all config model versions
hostProvisionerInterval = Duration.ofMinutes(5);
hostDeprovisionerInterval = Duration.ofMinutes(5);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirer.java
deleted file mode 100644
index 0245f2a92a3..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirer.java
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance;
-
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.Deployer;
-import com.yahoo.config.provision.Deployment;
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.log.LogLevel;
-import com.yahoo.transaction.Mutex;
-import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.maintenance.retire.RetirementPolicy;
-import com.yahoo.vespa.hosted.provision.node.Agent;
-import com.yahoo.vespa.hosted.provision.provisioning.FlavorSpareChecker;
-
-import java.time.Duration;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.logging.Logger;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-/**
- * Automatically retires ready and active nodes if they meet a certain criteria given by the {@link RetirementPolicy}
- * and if there are enough remaining nodes to both replace the retiring node as well as to keep enough in spare.
- *
- * @author freva
- */
-public class NodeRetirer extends Maintainer {
-
- public static final FlavorSpareChecker.SpareNodesPolicy SPARE_NODES_POLICY = flavorSpareCount ->
- flavorSpareCount.getNumReadyAmongReplacees() > 2;
-
- private static final long MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER = 1;
- private static final Logger log = Logger.getLogger(NodeRetirer.class.getName());
-
- private final Deployer deployer;
- private final FlavorSpareChecker flavorSpareChecker;
- private final RetirementPolicy retirementPolicy;
-
- NodeRetirer(NodeRepository nodeRepository, FlavorSpareChecker flavorSpareChecker, Duration interval,
- Deployer deployer, RetirementPolicy retirementPolicy) {
- super(nodeRepository, interval);
- this.deployer = deployer;
- this.retirementPolicy = retirementPolicy;
- this.flavorSpareChecker = flavorSpareChecker;
- }
-
- @Override
- protected void maintain() {
- if (! retirementPolicy.isActive()) return;
-
- if (retireUnallocated()) {
- retireAllocated();
- }
- }
-
- /**
- * Retires unallocated nodes by moving them directly to parked.
- * Returns true iff all there are no unallocated nodes that match the retirement policy
- */
- boolean retireUnallocated() {
- try (Mutex lock = nodeRepository().lockAllocation()) {
- List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
- Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
- flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
-
- long numFlavorsWithUnsuccessfullyRetiredNodes = allNodes.stream()
- .filter(node -> node.state() == Node.State.ready)
- .filter(node -> retirementPolicy.shouldRetire(node).isPresent())
- .collect(Collectors.groupingBy(
- Node::flavor,
- Collectors.toSet()))
- .entrySet().stream()
- .filter(entry -> {
- Set<Node> nodesThatShouldBeRetiredForFlavor = entry.getValue();
- for (Iterator<Node> iter = nodesThatShouldBeRetiredForFlavor.iterator(); iter.hasNext(); ) {
- Node nodeToRetire = iter.next();
- if (! flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(nodeToRetire.flavor())) break;
-
- retirementPolicy.shouldRetire(nodeToRetire).ifPresent(reason -> {
- nodeRepository().write(nodeToRetire.with(nodeToRetire.status().withWantToDeprovision(true)), lock);
- nodeRepository().park(nodeToRetire.hostname(), false, Agent.NodeRetirer, reason);
- iter.remove();
- });
- }
-
- if (! nodesThatShouldBeRetiredForFlavor.isEmpty()) {
- String commaSeparatedHostnames = nodesThatShouldBeRetiredForFlavor.stream().map(Node::hostname)
- .collect(Collectors.joining(", "));
- log.info(String.format("Failed to retire %s, wanted to retire %d nodes (%s), but there are no spare nodes left.",
- entry.getKey(), nodesThatShouldBeRetiredForFlavor.size(), commaSeparatedHostnames));
- }
- return ! nodesThatShouldBeRetiredForFlavor.isEmpty();
- }).count();
-
- return numFlavorsWithUnsuccessfullyRetiredNodes == 0;
- }
- }
-
- void retireAllocated() {
- List<Node> allNodes = nodeRepository().getNodes(NodeType.tenant);
- List<ApplicationId> activeApplications = getActiveApplicationIds(allNodes);
- Map<Flavor, Map<Node.State, Long>> numSpareNodesByFlavorByState = getNumberOfNodesByFlavorByNodeState(allNodes);
- flavorSpareChecker.updateReadyAndActiveCountsByFlavor(numSpareNodesByFlavorByState);
-
- // Get all the nodes that we could retire along with their deployments
- Map<Deployment, Set<Node>> nodesToRetireByDeployment = new HashMap<>();
- for (ApplicationId applicationId : activeApplications) {
- Map<ClusterSpec.Id, Set<Node>> nodesByCluster = getNodesBelongingToApplication(allNodes, applicationId).stream()
- .collect(Collectors.groupingBy(
- node -> node.allocation().get().membership().cluster().id(),
- Collectors.toSet()));
- Map<ClusterSpec.Id, Set<Node>> retireableNodesByCluster = nodesByCluster.entrySet().stream()
- .collect(Collectors.toMap(
- Map.Entry::getKey,
- entry -> filterRetireableNodes(entry.getValue())));
- if (retireableNodesByCluster.values().stream().mapToInt(Set::size).sum() == 0) continue;
-
- Optional<Deployment> deployment = deployer.deployFromLocalActive(applicationId);
- if ( ! deployment.isPresent()) continue; // this will be done at another config server
-
- Set<Node> replaceableNodes = retireableNodesByCluster.entrySet().stream()
- .flatMap(entry -> entry.getValue().stream()
- .filter(node -> flavorSpareChecker.canRetireAllocatedNodeWithFlavor(node.flavor()))
- .limit(getNumberNodesAllowToRetireForCluster(nodesByCluster.get(entry.getKey()), MAX_SIMULTANEOUS_RETIRES_PER_CLUSTER)))
- .collect(Collectors.toSet());
- if (! replaceableNodes.isEmpty()) nodesToRetireByDeployment.put(deployment.get(), replaceableNodes);
- }
-
- nodesToRetireByDeployment.forEach(((deployment, nodes) -> {
- ApplicationId app = nodes.iterator().next().allocation().get().owner();
- Set<Node> nodesToRetire;
-
- // While under application lock, get up-to-date node, and make sure that the state and the owner of the
- // node has not changed in the meantime, mutate the up-to-date node (so to not overwrite other fields
- // that may have changed) with wantToRetire and wantToDeprovision.
- try (Mutex lock = nodeRepository().lock(app)) {
- nodesToRetire = nodes.stream()
- .map(node ->
- nodeRepository().getNode(node.hostname())
- .filter(upToDateNode -> node.state() == Node.State.active)
- .filter(upToDateNode -> node.allocation().get().owner().equals(upToDateNode.allocation().get().owner())))
- .flatMap(node -> node.map(Stream::of).orElseGet(Stream::empty))
- .collect(Collectors.toSet());
-
- nodesToRetire.forEach(node ->
- retirementPolicy.shouldRetire(node).ifPresent(reason -> {
- log.info("Setting wantToRetire and wantToDeprovision for host " + node.hostname() +
- " with flavor " + node.flavor().name() +
- " allocated to " + node.allocation().get().owner() + ". Reason: " + reason);
-
- Node updatedNode = node.with(node.status()
- .withWantToRetire(true)
- .withWantToDeprovision(true));
- nodeRepository().write(updatedNode, lock);
- }));
- }
-
- // This takes a while, so do it outside of the application lock
- if (! nodesToRetire.isEmpty()) {
- try {
- deployment.activate();
- } catch (Exception e) {
- log.log(LogLevel.INFO, "Failed to redeploy " + app.serializedForm() + ", will be redeployed later by application maintainer", e);
- }
- }
- }));
- }
-
- private List<Node> getNodesBelongingToApplication(Collection<Node> allNodes, ApplicationId applicationId) {
- return allNodes.stream()
- .filter(node -> node.allocation().isPresent())
- .filter(node -> node.allocation().get().owner().equals(applicationId))
- .collect(Collectors.toList());
- }
-
- /**
- * Returns a list of ApplicationIds sorted by number of active nodes the application has allocated to it
- */
- List<ApplicationId> getActiveApplicationIds(Collection<Node> nodes) {
- return nodes.stream()
- .filter(node -> node.state() == Node.State.active)
- .collect(Collectors.groupingBy(
- node -> node.allocation().get().owner(),
- Collectors.counting()))
- .entrySet().stream()
- .sorted((c1, c2) -> c2.getValue().compareTo(c1.getValue()))
- .map(Map.Entry::getKey)
- .collect(Collectors.toList());
- }
-
- /**
- * @param nodes Collection of nodes that are considered for retirement
- * @return Set of nodes that all should eventually be retired
- */
- Set<Node> filterRetireableNodes(Collection<Node> nodes) {
- return nodes.stream()
- .filter(node -> node.state() == Node.State.active)
- .filter(node -> !node.status().wantToRetire())
- .filter(node -> retirementPolicy.shouldRetire(node).isPresent())
- .collect(Collectors.toSet());
- }
-
- /**
- * @param clusterNodes All the nodes allocated to an application belonging to a single cluster
- * @return number of nodes we can safely start retiring
- */
- long getNumberNodesAllowToRetireForCluster(Collection<Node> clusterNodes, long maxSimultaneousRetires) {
- long numNodesInWantToRetire = clusterNodes.stream()
- .filter(node -> node.status().wantToRetire())
- .filter(node -> node.state() != Node.State.parked)
- .count();
- return Math.max(0, maxSimultaneousRetires - numNodesInWantToRetire);
- }
-
- private Map<Flavor, Map<Node.State, Long>> getNumberOfNodesByFlavorByNodeState(Collection<Node> allNodes) {
- return allNodes.stream()
- .collect(Collectors.groupingBy(
- Node::flavor,
- Collectors.groupingBy(Node::state, Collectors.counting())));
- }
-
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java
index 46571fd0deb..ab7a565688e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java
@@ -7,12 +7,12 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
-import com.yahoo.vespa.hosted.provision.node.Allocation;
+import com.yahoo.vespa.hosted.provision.node.History;
-import java.time.Clock;
import java.time.Duration;
-import java.time.Instant;
import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
@@ -28,31 +28,25 @@ import java.util.stream.Collectors;
* @author bratseth
*/
public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
-
- private final Clock clock;
-
- private Instant previousRun;
- OperatorChangeApplicationMaintainer(Deployer deployer, NodeRepository nodeRepository, Clock clock, Duration interval) {
+ OperatorChangeApplicationMaintainer(Deployer deployer, NodeRepository nodeRepository, Duration interval) {
super(deployer, nodeRepository, interval);
- this.clock = clock;
- previousRun = clock.instant(); // Changes before this will be caught by the first PeriodicApplicationMaintainer run
}
@Override
protected Set<ApplicationId> applicationsNeedingMaintenance() {
- Instant windowEnd = clock.instant();
- Instant windowStart = previousRun;
- previousRun = windowEnd;
- return nodeRepository().getNodes(NodeType.tenant).stream()
- .filter(node -> hasManualStateChangeSince(windowStart, node))
- .flatMap(node -> node.allocation().map(Allocation::owner).stream())
- .collect(Collectors.toCollection(LinkedHashSet::new));
- }
-
- private boolean hasManualStateChangeSince(Instant instant, Node node) {
- return node.history().events().stream()
- .anyMatch(event -> event.agent() == Agent.operator && event.at().isAfter(instant));
+ Map<ApplicationId, List<Node>> nodesByApplication = nodeRepository().getNodes(NodeType.tenant).stream()
+ .filter(node -> node.allocation().isPresent())
+ .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.toList()));
+
+ return nodesByApplication.entrySet().stream()
+ .filter(entry -> entry.getValue().stream()
+ .flatMap(node -> node.history().events().stream())
+ .filter(event -> event.agent() == Agent.operator)
+ .map(History.Event::at)
+ .anyMatch(getLastDeployTime(entry.getKey())::isBefore))
+ .map(Map.Entry::getKey)
+ .collect(Collectors.toCollection(LinkedHashSet::new));
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
index 174591b0836..6ab85e76ba2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
@@ -9,7 +9,6 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
-import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
@@ -40,21 +39,24 @@ public class PeriodicApplicationMaintainer extends ApplicationMaintainer {
@Override
protected boolean canDeployNow(ApplicationId application) {
- // Don't deploy if a regular deploy just happened
- return getLastDeployTime(application).isBefore(nodeRepository().clock().instant().minus(minTimeBetweenRedeployments));
+ return deployer().lastDeployTime(application)
+ // Don't deploy if a regular deploy just happened
+ .map(lastDeployTime -> lastDeployTime.isBefore(nodeRepository().clock().instant().minus(minTimeBetweenRedeployments)))
+ // We only know last deploy time for applications that were deployed on this config server,
+ // the rest will be deployed on another config server
+ .orElse(false);
}
// Returns the applications that need to be redeployed by this config server at this point in time.
@Override
protected Set<ApplicationId> applicationsNeedingMaintenance() {
- if (waitInitially()) return Collections.emptySet();
+ if (waitInitially()) return Set.of();
// Collect all deployment times before sorting as deployments may happen while we build the set, breaking
// the comparable contract. Stale times are fine as the time is rechecked in ApplicationMaintainer#deployWithLock
Map<ApplicationId, Instant> deploymentTimes = nodesNeedingMaintenance().stream()
.map(node -> node.allocation().get().owner())
.distinct()
- .filter(this::shouldBeDeployedOnThisServer)
.filter(this::canDeployNow)
.collect(Collectors.toMap(Function.identity(), this::getLastDeployTime));
@@ -64,12 +66,6 @@ public class PeriodicApplicationMaintainer extends ApplicationMaintainer {
.collect(Collectors.toCollection(LinkedHashSet::new));
}
- // We only know last deploy time for applications that were deployed on this config server,
- // the rest will be deployed on another config server
- protected boolean shouldBeDeployedOnThisServer(ApplicationId application) {
- return deployer().lastDeployTime(application).isPresent();
- }
-
// TODO: Do not start deploying until some time has gone (ideally only until bootstrap of config server is finished)
private boolean waitInitially() {
return clock.instant().isBefore(start.plus(minTimeBetweenRedeployments));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
index c27989cb852..dea0b8c19d0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
@@ -4,12 +4,15 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.Deployment;
+import com.yahoo.config.provision.TransientException;
+import com.yahoo.log.LogLevel;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.orchestrator.OrchestrationException;
import com.yahoo.vespa.orchestrator.Orchestrator;
+import com.yahoo.yolean.Exceptions;
import java.time.Clock;
import java.time.Duration;
@@ -73,6 +76,9 @@ public class RetiredExpirer extends Maintainer {
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
+ } catch (TransientException e) {
+ log.log(LogLevel.INFO, "Failed to redeploy " + application +
+ " with a transient error, will be retried by application maintainer: " + Exceptions.toMessageString(e));
} catch (RuntimeException e) {
String nodeList = retiredNodes.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.log(Level.WARNING, "Exception trying to deactivate retired nodes from " + application
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java
deleted file mode 100644
index 6562a89c2d6..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance.retire;
-
-import com.google.common.net.InetAddresses;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.config.provision.SystemName;
-import com.yahoo.config.provision.Zone;
-import com.yahoo.vespa.hosted.provision.Node;
-
-import java.net.Inet4Address;
-import java.util.Optional;
-
-/**
- * @author freva
- */
-public class RetireIPv4OnlyNodes implements RetirementPolicy {
- private final Zone zone;
-
- public RetireIPv4OnlyNodes(Zone zone) {
- this.zone = zone;
- }
-
- @Override
- public boolean isActive() {
- if(zone.system() == SystemName.cd) {
- return zone.environment() == Environment.dev || zone.environment() == Environment.prod;
- }
-
- if (zone.system() == SystemName.main) {
- if (zone.region().equals(RegionName.from("us-east-3"))) {
- return zone.environment() == Environment.perf || zone.environment() == Environment.prod;
- } else if (zone.region().equals(RegionName.from("us-west-1"))) {
- return zone.environment() == Environment.prod;
- } else if (zone.region().equals(RegionName.from("us-central-1"))) {
- return zone.environment() == Environment.prod;
- } else if (zone.region().equals(RegionName.from("ap-southeast-1"))) {
- return zone.environment() == Environment.prod;
- } else if (zone.region().equals(RegionName.from("ap-northeast-1"))) {
- return zone.environment() == Environment.prod;
- } else if (zone.region().equals(RegionName.from("ap-northeast-2"))) {
- return zone.environment() == Environment.prod;
- } else if (zone.region().equals(RegionName.from("eu-west-1"))) {
- return zone.environment() == Environment.prod;
- }
- }
-
- return false;
- }
-
- @Override
- public Optional<String> shouldRetire(Node node) {
- if (node.flavor().getType() == Flavor.Type.VIRTUAL_MACHINE) return Optional.empty();
- boolean shouldRetire = node.ipAddresses().stream()
- .map(InetAddresses::forString)
- .allMatch(address -> address instanceof Inet4Address);
-
- return shouldRetire ? Optional.of("Node is IPv4-only") : Optional.empty();
- }
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicy.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicy.java
deleted file mode 100644
index ca0419f11c3..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicy.java
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance.retire;
-
-import com.yahoo.vespa.hosted.provision.Node;
-
-import java.util.Optional;
-
-/**
- * @author freva
- */
-public interface RetirementPolicy {
-
- /**
- * Returns whether the policy is currently active. NodeRetirer ask every time before executing.
- */
- boolean isActive();
-
- /**
- * Returns reason for retiring the node, empty if node should not be retired
- */
- Optional<String> shouldRetire(Node node);
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicyCache.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicyCache.java
deleted file mode 100644
index c112daadcc9..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicyCache.java
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance.retire;
-
-import com.yahoo.vespa.hosted.provision.Node;
-
-import java.util.Optional;
-
-/**
- * @author freva
- */
-public class RetirementPolicyCache implements RetirementPolicy {
- private final RetirementPolicy policy;
- private final boolean isActiveCached;
-
- RetirementPolicyCache(RetirementPolicy policy) {
- this.policy = policy;
- this.isActiveCached = policy.isActive();
- }
-
- @Override
- public boolean isActive() {
- return isActiveCached;
- }
-
- public Optional<String> shouldRetire(Node node) {
- return policy.shouldRetire(node);
- }
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicyList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicyList.java
deleted file mode 100644
index 5f4d887b029..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetirementPolicyList.java
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance.retire;
-
-import com.yahoo.vespa.hosted.provision.Node;
-
-import java.util.List;
-import java.util.Optional;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-/**
- * @author freva
- */
-public class RetirementPolicyList implements RetirementPolicy {
- private final List<RetirementPolicy> retirementPolicies;
-
- public RetirementPolicyList(RetirementPolicy... retirementPolicies) {
- this.retirementPolicies = Stream.of(retirementPolicies)
- .map(RetirementPolicyCache::new)
- .collect(Collectors.toList());
- }
-
- @Override
- public boolean isActive() {
- return retirementPolicies.stream().anyMatch(RetirementPolicy::isActive);
- }
-
- @Override
- public Optional<String> shouldRetire(Node node) {
- List<String> retirementReasons = retirementPolicies.stream()
- .filter(RetirementPolicy::isActive)
- .map(retirementPolicy -> retirementPolicy.shouldRetire(node))
- .flatMap(reason -> reason.map(Stream::of).orElse(Stream.empty()))
- .collect(Collectors.toList());
-
- return retirementReasons.isEmpty() ? Optional.empty() :
- Optional.of("[" + String.join(", ", retirementReasons) + "]");
- }
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
index 371ed4d2496..e0b192de74d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
@@ -62,7 +62,6 @@ public class CuratorDatabaseClient {
private static final Path root = Path.fromString("/provision/v1");
private static final Path lockRoot = root.append("locks");
private static final Path loadBalancersRoot = root.append("loadBalancers");
- private static final Path flagsRoot = root.append("flags");
private static final Duration defaultLockTimeout = Duration.ofMinutes(2);
private final NodeSerializer nodeSerializer;
@@ -75,7 +74,6 @@ public class CuratorDatabaseClient {
public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, boolean useCache) {
this.nodeSerializer = new NodeSerializer(flavors);
this.zone = zone;
- curator.delete(flagsRoot); // TODO: Remove after 7.42 has been released
this.curatorDatabase = new CuratorDatabase(curator, root, useCache);
this.clock = clock;
this.provisionIndexCounter = new CuratorCounter(curator, root.append("provisionIndexCounter").getAbsolute());
@@ -107,7 +105,7 @@ public class CuratorDatabaseClient {
CuratorTransaction curatorTransaction = curatorDatabase.newCuratorTransactionIn(transaction);
for (Node node : nodes) {
if (node.state() != expectedState)
- throw new IllegalArgumentException(node + " is not in the " + node.state() + " state");
+ throw new IllegalArgumentException(node + " is not in the " + expectedState + " state");
node = node.with(node.history().recordStateTransition(null, expectedState, Agent.system, clock.instant()));
curatorTransaction.add(CuratorOperations.create(toPath(node).getAbsolute(), nodeSerializer.toJson(node)));
@@ -473,23 +471,28 @@ public class CuratorDatabaseClient {
// Load balancers
- public Map<LoadBalancerId, LoadBalancer> readLoadBalancers() {
+ public List<LoadBalancerId> readLoadBalancerIds() {
return curatorDatabase.getChildren(loadBalancersRoot).stream()
.map(LoadBalancerId::fromSerializedForm)
- .map(this::readLoadBalancer)
- .filter(Optional::isPresent)
- .map(Optional::get)
- .collect(collectingAndThen(toMap(LoadBalancer::id, Function.identity()),
- Collections::unmodifiableMap));
+ .collect(Collectors.toUnmodifiableList());
+ }
+
+ public Map<LoadBalancerId, LoadBalancer> readLoadBalancers() {
+ return readLoadBalancerIds().stream()
+ .map(this::readLoadBalancer)
+ .filter(Optional::isPresent)
+ .map(Optional::get)
+ .collect(collectingAndThen(toMap(LoadBalancer::id, Function.identity()),
+ Collections::unmodifiableMap));
}
- private Optional<LoadBalancer> readLoadBalancer(LoadBalancerId id) {
+ public Optional<LoadBalancer> readLoadBalancer(LoadBalancerId id) {
return read(loadBalancerPath(id), LoadBalancerSerializer::fromJson);
}
public void writeLoadBalancer(LoadBalancer loadBalancer) {
NestedTransaction transaction = new NestedTransaction();
- writeLoadBalancers(Collections.singletonList(loadBalancer), transaction);
+ writeLoadBalancers(List.of(loadBalancer), transaction);
transaction.commit();
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
index fd2294c1b5d..ae4c93621e5 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
@@ -15,9 +15,9 @@ import com.yahoo.vespa.hosted.provision.lb.Real;
import java.io.IOException;
import java.io.UncheckedIOException;
+import java.time.Instant;
import java.util.LinkedHashSet;
import java.util.Optional;
-import java.util.Set;
import java.util.function.Function;
/**
@@ -36,12 +36,12 @@ public class LoadBalancerSerializer {
private static final String idField = "id";
private static final String hostnameField = "hostname";
+ private static final String stateField = "state";
+ private static final String changedAtField = "changedAt";
private static final String dnsZoneField = "dnsZone";
- private static final String inactiveField = "inactive";
private static final String portsField = "ports";
private static final String networksField = "networks";
private static final String realsField = "reals";
- private static final String nameField = "name";
private static final String ipAddressField = "ipAddress";
private static final String portField = "port";
@@ -51,6 +51,8 @@ public class LoadBalancerSerializer {
root.setString(idField, loadBalancer.id().serializedForm());
root.setString(hostnameField, loadBalancer.instance().hostname().toString());
+ root.setString(stateField, asString(loadBalancer.state()));
+ root.setLong(changedAtField, loadBalancer.changedAt().toEpochMilli());
loadBalancer.instance().dnsZone().ifPresent(dnsZone -> root.setString(dnsZoneField, dnsZone.id()));
Cursor portArray = root.setArray(portsField);
loadBalancer.instance().ports().forEach(portArray::addLong);
@@ -63,8 +65,6 @@ public class LoadBalancerSerializer {
realObject.setString(ipAddressField, real.ipAddress());
realObject.setLong(portField, real.port());
});
- root.setBool(inactiveField, loadBalancer.inactive());
-
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
@@ -75,7 +75,7 @@ public class LoadBalancerSerializer {
public static LoadBalancer fromJson(byte[] data) {
Cursor object = SlimeUtils.jsonToSlime(data).get();
- Set<Real> reals = new LinkedHashSet<>();
+ var reals = new LinkedHashSet<Real>();
object.field(realsField).traverse((ArrayTraverser) (i, realObject) -> {
reals.add(new Real(HostName.from(realObject.field(hostnameField).asString()),
realObject.field(ipAddressField).asString(),
@@ -83,25 +83,48 @@ public class LoadBalancerSerializer {
});
- Set<Integer> ports = new LinkedHashSet<>();
+ var ports = new LinkedHashSet<Integer>();
object.field(portsField).traverse((ArrayTraverser) (i, port) -> ports.add((int) port.asLong()));
- Set<String> networks = new LinkedHashSet<>();
+ var networks = new LinkedHashSet<String>();
object.field(networksField).traverse((ArrayTraverser) (i, network) -> networks.add(network.asString()));
return new LoadBalancer(LoadBalancerId.fromSerializedForm(object.field(idField).asString()),
new LoadBalancerInstance(
HostName.from(object.field(hostnameField).asString()),
- optionalField(object.field(dnsZoneField), DnsZone::new),
+ optionalString(object.field(dnsZoneField), DnsZone::new),
ports,
networks,
reals
),
- object.field(inactiveField).asBool());
+ stateFromString(object.field(stateField).asString()),
+ Instant.ofEpochMilli(object.field(changedAtField).asLong()));
+ }
+
+ private static <T> Optional<T> optionalValue(Inspector field, Function<Inspector, T> fieldMapper) {
+ return Optional.of(field).filter(Inspector::valid).map(fieldMapper);
+ }
+
+ private static <T> Optional<T> optionalString(Inspector field, Function<String, T> fieldMapper) {
+ return optionalValue(field, Inspector::asString).map(fieldMapper);
}
- private static <T> Optional<T> optionalField(Inspector field, Function<String, T> fieldMapper) {
- return Optional.of(field).filter(Inspector::valid).map(Inspector::asString).map(fieldMapper);
+ private static String asString(LoadBalancer.State state) {
+ switch (state) {
+ case active: return "active";
+ case inactive: return "inactive";
+ case reserved: return "reserved";
+ default: throw new IllegalArgumentException("No serialization defined for state enum '" + state + "'");
+ }
+ }
+
+ private static LoadBalancer.State stateFromString(String state) {
+ switch (state) {
+ case "active": return LoadBalancer.State.active;
+ case "inactive": return LoadBalancer.State.inactive;
+ case "reserved": return LoadBalancer.State.reserved;
+ default: throw new IllegalArgumentException("No serialization defined for state string '" + state + "'");
+ }
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index 424889caf72..45fb1e050a7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -385,7 +385,7 @@ public class NodeSerializer {
case "application" : return Agent.application;
case "system" : return Agent.system;
case "operator" : return Agent.operator;
- case "NodeRetirer" : return Agent.NodeRetirer;
+ case "NodeRetirer" : return Agent.system; // TODO: Remove after 7.67
case "NodeFailer" : return Agent.NodeFailer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
@@ -395,7 +395,7 @@ public class NodeSerializer {
case application : return "application";
case system : return "system";
case operator : return "operator";
- case NodeRetirer : return "NodeRetirer";
+ case NodeRetirer : return "system"; // TODO: Remove after 7.67
case NodeFailer : return "NodeFailer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index 4626a600d2c..1e83c2c9176 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -2,6 +2,8 @@
package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterMembership;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.ParentHostUnavailableException;
import com.yahoo.transaction.Mutex;
@@ -22,16 +24,26 @@ import java.util.function.Function;
import java.util.stream.Collectors;
/**
- * Performs activation of nodes for an application
+ * Performs activation of resources for an application. E.g. nodes or load balancers.
*
* @author bratseth
*/
class Activator {
private final NodeRepository nodeRepository;
+ private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
- public Activator(NodeRepository nodeRepository) {
+ public Activator(NodeRepository nodeRepository, Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
+ this.loadBalancerProvisioner = loadBalancerProvisioner;
+ }
+
+ /** Activate required resources for given application */
+ public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) {
+ try (Mutex lock = nodeRepository.lock(application)) {
+ activateNodes(application, hosts, transaction, lock);
+ activateLoadBalancers(application, hosts, lock);
+ }
}
/**
@@ -46,36 +58,50 @@ class Activator {
* @param transaction Transaction with operations to commit together with any operations done within the repository.
* @param application the application to allocate nodes for
* @param hosts the hosts to make the set of active nodes of this
+ * @param applicationLock application lock that must be held when calling this
*/
- public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) {
- try (Mutex lock = nodeRepository.lock(application)) {
- Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
- NodeList allNodes = nodeRepository.list();
- NodeList applicationNodes = allNodes.owner(application);
-
- List<Node> reserved = applicationNodes.state(Node.State.reserved).asList();
- List<Node> reservedToActivate = retainHostsInList(hostnames, reserved);
- List<Node> active = applicationNodes.state(Node.State.active).asList();
- List<Node> continuedActive = retainHostsInList(hostnames, active);
- List<Node> allActive = new ArrayList<>(continuedActive);
- allActive.addAll(reservedToActivate);
- if ( ! containsAll(hostnames, allActive))
- throw new IllegalArgumentException("Activation of " + application + " failed. " +
- "Could not find all requested hosts." +
- "\nRequested: " + hosts +
- "\nReserved: " + toHostNames(reserved) +
- "\nActive: " + toHostNames(active) +
- "\nThis might happen if the time from reserving host to activation takes " +
- "longer time than reservation expiry (the hosts will then no longer be reserved)");
-
- validateParentHosts(application, allNodes, reservedToActivate);
-
- List<Node> activeToRemove = removeHostsFromList(hostnames, active);
- activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired
- nodeRepository.deactivate(activeToRemove, transaction);
- nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active with any changes
- nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction);
- }
+ private void activateNodes(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction,
+ @SuppressWarnings("unused") Mutex applicationLock) {
+ Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
+ NodeList allNodes = nodeRepository.list();
+ NodeList applicationNodes = allNodes.owner(application);
+
+ List<Node> reserved = applicationNodes.state(Node.State.reserved).asList();
+ List<Node> reservedToActivate = retainHostsInList(hostnames, reserved);
+ List<Node> active = applicationNodes.state(Node.State.active).asList();
+ List<Node> continuedActive = retainHostsInList(hostnames, active);
+ List<Node> allActive = new ArrayList<>(continuedActive);
+ allActive.addAll(reservedToActivate);
+ if (!containsAll(hostnames, allActive))
+ throw new IllegalArgumentException("Activation of " + application + " failed. " +
+ "Could not find all requested hosts." +
+ "\nRequested: " + hosts +
+ "\nReserved: " + toHostNames(reserved) +
+ "\nActive: " + toHostNames(active) +
+ "\nThis might happen if the time from reserving host to activation takes " +
+ "longer time than reservation expiry (the hosts will then no longer be reserved)");
+
+ validateParentHosts(application, allNodes, reservedToActivate);
+
+ List<Node> activeToRemove = removeHostsFromList(hostnames, active);
+ activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired
+ nodeRepository.deactivate(activeToRemove, transaction);
+ nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active with any changes
+ nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction);
+ }
+
+ /** Activate load balancers */
+ private void activateLoadBalancers(ApplicationId application, Collection<HostSpec> hosts,
+ @SuppressWarnings("unused") Mutex applicationLock) {
+ loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, clustersOf(hosts)));
+ }
+
+ private static List<ClusterSpec> clustersOf(Collection<HostSpec> hosts) {
+ return hosts.stream()
+ .map(HostSpec::membership)
+ .flatMap(Optional::stream)
+ .map(ClusterMembership::cluster)
+ .collect(Collectors.toUnmodifiableList());
}
private static void validateParentHosts(ApplicationId application, NodeList nodes, List<Node> potentialChildren) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareChecker.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareChecker.java
deleted file mode 100644
index 5f81fed2a04..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareChecker.java
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.provisioning;
-
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.vespa.hosted.provision.Node;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * This class helps answer the question if there are enough nodes to retire a node with flavor f by:
- * <ul>
- * <li>Finding all the possible flavors that the replacement node could end up on</li>
- * <li>Making sure that regardless of which flavor it ends up on, there is still enough spare nodes
- * to handle at unexpected node failures.</li>
- * </ul>
- * <p>
- * Definitions:
- * <ul>
- * <li>Wanted flavor: The flavor that is the node prefers, for example by specifying in services.xml</li>
- * <li>Node-repo flavor: The flavor that the node actually has (Either the wanted flavor or a flavor that transitively
- * replaces the wanted flavor)</li>
- * <li>Replacee flavor: Flavor x is replacee of y iff x transitively replaces y</li>
- * <li>Immediate replacee flavor: Flavor x is an immediate replacee of flavor y iff x directly replaces y.</li>
- * </ul>
- *
- * @author freva
- */
-public class FlavorSpareChecker {
-
- private final SpareNodesPolicy spareNodesPolicy;
- private final Map<Flavor, FlavorSpareCount> spareCountByFlavor;
-
- public FlavorSpareChecker(SpareNodesPolicy spareNodesPolicy, Map<Flavor, FlavorSpareCount> spareCountByFlavor) {
- this.spareNodesPolicy = spareNodesPolicy;
- this.spareCountByFlavor = spareCountByFlavor;
- }
-
- public void updateReadyAndActiveCountsByFlavor(Map<Flavor, Map<Node.State, Long>> numberOfNodesByFlavorByState) {
- spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
- Map<Node.State, Long> numberOfNodesByState = numberOfNodesByFlavorByState.getOrDefault(flavor, Collections.emptyMap());
- flavorSpareCount.updateReadyAndActiveCounts(
- numberOfNodesByState.getOrDefault(Node.State.ready, 0L),
- numberOfNodesByState.getOrDefault(Node.State.active, 0L));
- });
- }
-
- public boolean canRetireAllocatedNodeWithFlavor(Flavor flavor) {
- Set<FlavorSpareCount> possibleNewFlavors = findPossibleReplacementFlavorFor(spareCountByFlavor.get(flavor));
- possibleNewFlavors.forEach(FlavorSpareCount::decrementNumberOfReady);
- return !possibleNewFlavors.isEmpty();
- }
-
- public boolean canRetireUnallocatedNodeWithFlavor(Flavor flavor) {
- FlavorSpareCount flavorSpareCount = spareCountByFlavor.get(flavor);
- if (flavorSpareCount.hasReady() && spareNodesPolicy.hasSpare(flavorSpareCount)) {
- flavorSpareCount.decrementNumberOfReady();
- return true;
- }
-
- return false;
- }
-
-
- /**
- * Returns a set of possible new flavors that can replace this flavor given current node allocation.
- * If the set is empty, there are not enough spare nodes to safely retire this flavor.
- * <p>
- * The algorithm is:
- * for all possible wanted flavor, check:
- * <ul>
- * <li>1: Sum of ready nodes of flavor f and all replacee flavors of f is &gt; reserved (set by {@link SpareNodesPolicy}</li>
- * <li>2a: Number of ready nodes of flavor f is &gt; 0</li>
- * <li>2b: Verify 1 &amp; 2 for all immediate replacee of f, f_i, where sum of ready nodes of f_i and all
- * replacee flavors of f_i is &gt; 0</li>
- * </ul>
- * Only 2a OR 2b need to be satisfied.
- */
- private Set<FlavorSpareCount> findPossibleReplacementFlavorFor(FlavorSpareCount flavorSpareCount) {
- Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
- for (FlavorSpareCount possibleWantedFlavor : flavorSpareCount.getPossibleWantedFlavors()) {
- Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleWantedFlavor);
- if (replacementFlavors.isEmpty()) return Collections.emptySet();
- else possibleReplacementFlavors.addAll(replacementFlavors);
- }
-
- return possibleReplacementFlavors;
- }
-
- private Set<FlavorSpareCount> verifyReplacementConditions(FlavorSpareCount flavorSpareCount) {
- Set<FlavorSpareCount> possibleReplacementFlavors = new HashSet<>();
- // Breaks condition 1, end
- if (! spareNodesPolicy.hasSpare(flavorSpareCount)) return Collections.emptySet();
-
- // Condition 2a
- if (flavorSpareCount.hasReady()) {
- possibleReplacementFlavors.add(flavorSpareCount);
-
- // Condition 2b
- } else {
- for (FlavorSpareCount possibleNewFlavor : flavorSpareCount.getImmediateReplacees()) {
- if (possibleNewFlavor.getNumReadyAmongReplacees() == 0) continue;
-
- Set<FlavorSpareCount> replacementFlavors = verifyReplacementConditions(possibleNewFlavor);
- if (replacementFlavors.isEmpty()) return Collections.emptySet();
- else possibleReplacementFlavors.addAll(replacementFlavors);
- }
- }
- return possibleReplacementFlavors;
- }
-
- public interface SpareNodesPolicy {
- boolean hasSpare(FlavorSpareCount flavorSpareCount);
- }
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCount.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCount.java
deleted file mode 100644
index 217f4999bfb..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCount.java
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.provisioning;
-
-import com.yahoo.config.provision.Flavor;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-/**
- * Keeps track of number of ready and active nodes for a flavor and its replaces neighbors
- *
- * @author freva
- */
-public class FlavorSpareCount {
-
- private final Flavor flavor;
- private Set<FlavorSpareCount> possibleWantedFlavors;
- private Set<FlavorSpareCount> immediateReplacees;
- private long numReady;
- private long numActive;
-
- public static Map<Flavor, FlavorSpareCount> constructFlavorSpareCountGraph(List<Flavor> flavors) {
- Map<Flavor, FlavorSpareCount> spareCountByFlavor = new HashMap<>();
- Map<Flavor, Set<Flavor>> immediateReplaceeFlavorsByFlavor = new HashMap<>();
- for (Flavor flavor : flavors) {
- for (Flavor replaces : flavor.replaces()) {
- if (! immediateReplaceeFlavorsByFlavor.containsKey(replaces)) {
- immediateReplaceeFlavorsByFlavor.put(replaces, new HashSet<>());
- }
- immediateReplaceeFlavorsByFlavor.get(replaces).add(flavor);
- }
-
- spareCountByFlavor.put(flavor, new FlavorSpareCount(flavor));
- }
-
- spareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
- flavorSpareCount.immediateReplacees = ! immediateReplaceeFlavorsByFlavor.containsKey(flavor) ?
- Collections.emptySet() :
- immediateReplaceeFlavorsByFlavor.get(flavor).stream().map(spareCountByFlavor::get).collect(Collectors.toSet());
- flavorSpareCount.possibleWantedFlavors = recursiveReplacements(flavor, new HashSet<>())
- .stream().map(spareCountByFlavor::get).collect(Collectors.toSet());
- });
-
- return spareCountByFlavor;
- }
-
- private static Set<Flavor> recursiveReplacements(Flavor flavor, Set<Flavor> replacements) {
- replacements.add(flavor);
- for (Flavor replaces : flavor.replaces()) {
- recursiveReplacements(replaces, replacements);
- }
-
- return replacements;
- }
-
- private FlavorSpareCount(Flavor flavor) {
- this.flavor = flavor;
- }
-
- public Flavor getFlavor() {
- return flavor;
- }
-
- void updateReadyAndActiveCounts(long numReady, long numActive) {
- this.numReady = numReady;
- this.numActive = numActive;
- }
-
- boolean hasReady() {
- return numReady > 0;
- }
-
- public long getNumReadyAmongReplacees() {
- long sumReadyNodes = numReady;
- for (FlavorSpareCount replacee : immediateReplacees) {
- sumReadyNodes += replacee.getNumReadyAmongReplacees();
- }
-
- return sumReadyNodes;
- }
-
- Set<FlavorSpareCount> getPossibleWantedFlavors() {
- return possibleWantedFlavors;
- }
-
- Set<FlavorSpareCount> getImmediateReplacees() {
- return immediateReplacees;
- }
-
- void decrementNumberOfReady() {
- numReady--;
- }
-
- @Override
- public String toString() {
- return flavor.name() + " has " + numReady + " ready nodes and " + numActive + " active nodes";
- }
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index 372dca84a53..4f0081b6a7f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.log.LogLevel;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
@@ -13,26 +14,32 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
+import com.yahoo.vespa.hosted.provision.lb.LoadBalancerServiceException;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerService;
import com.yahoo.vespa.hosted.provision.lb.Real;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
-import java.util.Collections;
-import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.logging.Logger;
import java.util.stream.Collectors;
/**
- * Provides provisioning of load balancers for applications.
+ * Provisions and configures application load balancers.
*
* @author mpolden
*/
+// Load balancer state transitions:
+// 1) (new) -> reserved -> active
+// 2) active | reserved -> inactive
+// 3) inactive -> active | (removed)
public class LoadBalancerProvisioner {
+ private static final Logger log = Logger.getLogger(LoadBalancerProvisioner.class.getName());
+
private final NodeRepository nodeRepository;
private final CuratorDatabaseClient db;
private final LoadBalancerService service;
@@ -41,47 +48,86 @@ public class LoadBalancerProvisioner {
this.nodeRepository = nodeRepository;
this.db = nodeRepository.database();
this.service = service;
+ // Read and write all load balancers to make sure they are stored in the latest version of the serialization format
+ try (var lock = db.lockLoadBalancers()) {
+ for (var id : db.readLoadBalancerIds()) {
+ var loadBalancer = db.readLoadBalancer(id);
+ loadBalancer.ifPresent(db::writeLoadBalancer);
+ }
+ }
}
/**
- * Provision load balancer(s) for given application.
+ * Prepare a load balancer for given application and cluster.
*
- * If the application has multiple container clusters, one load balancer will be provisioned for each cluster.
+ * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated
+ * nodes. It's state will remain unchanged.
+ *
+ * If no load balancer exists, a new one will be provisioned in {@link LoadBalancer.State#reserved}.
+ *
+ * Calling this for irrelevant node or cluster types is a no-op.
*/
- public Map<LoadBalancerId, LoadBalancer> provision(ApplicationId application) {
- try (Mutex applicationLock = nodeRepository.lock(application)) {
- try (Mutex loadBalancersLock = db.lockLoadBalancers()) {
- Map<LoadBalancerId, LoadBalancer> loadBalancers = new LinkedHashMap<>();
- for (Map.Entry<ClusterSpec, List<Node>> kv : activeContainers(application).entrySet()) {
- LoadBalancerId id = new LoadBalancerId(application, kv.getKey().id());
- LoadBalancerInstance instance = create(application, kv.getKey().id(), kv.getValue());
- // Load balancer is always re-activated here to avoid reallocation if an application/cluster is
- // deleted and then redeployed.
- LoadBalancer loadBalancer = new LoadBalancer(id, instance, false);
- loadBalancers.put(loadBalancer.id(), loadBalancer);
- db.writeLoadBalancer(loadBalancer);
- }
- return Collections.unmodifiableMap(loadBalancers);
- }
+ public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
+ if (requestedNodes.type() != NodeType.tenant) return; // Nothing to provision for this node type
+ if (cluster.type() != ClusterSpec.Type.container) return; // Nothing to provision for this cluster type
+ provision(application, cluster.id(), false);
+ }
+
+ /**
+ * Activate load balancer for given application and cluster.
+ *
+ * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated
+ * nodes and the load balancer itself will be moved to {@link LoadBalancer.State#active}.
+ *
+ * Calling this when no load balancer has been prepared for given cluster is a no-op.
+ */
+ public void activate(ApplicationId application, List<ClusterSpec> clusters) {
+ for (var clusterId : containerClusterIdsOf(clusters)) {
+ // Provision again to ensure that load balancer instance re-configured with correct nodes
+ provision(application, clusterId, true);
}
}
/**
* Deactivate all load balancers assigned to given application. This is a no-op if an application does not have any
- * load balancer(s)
+ * load balancer(s).
*/
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex applicationLock = nodeRepository.lock(application)) {
try (Mutex loadBalancersLock = db.lockLoadBalancers()) {
- List<LoadBalancer> deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream()
- .map(LoadBalancer::deactivate)
- .collect(Collectors.toList());
+ var now = nodeRepository.clock().instant();
+ var deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream()
+ .map(lb -> lb.with(LoadBalancer.State.inactive, now))
+ .collect(Collectors.toList());
db.writeLoadBalancers(deactivatedLoadBalancers, transaction);
}
}
}
- private LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, List<Node> nodes) {
+ /** Idempotently provision a load balancer for given application and cluster */
+ private void provision(ApplicationId application, ClusterSpec.Id clusterId, boolean activate) {
+ try (var applicationLock = nodeRepository.lock(application)) {
+ try (var loadBalancersLock = db.lockLoadBalancers()) {
+ var id = new LoadBalancerId(application, clusterId);
+ var now = nodeRepository.clock().instant();
+ var loadBalancer = db.readLoadBalancer(id);
+ if (loadBalancer.isEmpty() && activate) return; // Nothing to activate as this load balancer was never prepared
+
+ var force = loadBalancer.isPresent() && loadBalancer.get().state() != LoadBalancer.State.active;
+ var instance = create(application, clusterId, allocatedContainers(application, clusterId), force);
+ LoadBalancer newLoadBalancer;
+ if (loadBalancer.isEmpty()) {
+ newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
+ } else {
+ var newState = activate ? LoadBalancer.State.active : loadBalancer.get().state();
+ newLoadBalancer = loadBalancer.get().with(instance).with(newState, now);
+ }
+ db.writeLoadBalancer(newLoadBalancer);
+ }
+ }
+ }
+
+ private LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, List<Node> nodes, boolean force) {
Map<HostName, Set<String>> hostnameToIpAdresses = nodes.stream()
.collect(Collectors.toMap(node -> HostName.from(node.hostname()),
this::reachableIpAddresses));
@@ -89,18 +135,25 @@ public class LoadBalancerProvisioner {
hostnameToIpAdresses.forEach((hostname, ipAddresses) -> {
ipAddresses.forEach(ipAddress -> reals.add(new Real(hostname, ipAddress)));
});
- return service.create(application, cluster, reals);
+ log.log(LogLevel.INFO, "Creating load balancer for " + cluster + " in " + application.toShortString() +
+ ", targeting: " + reals);
+ try {
+ return service.create(application, cluster, reals, force);
+ } catch (Exception e) {
+ throw new LoadBalancerServiceException("Failed to (re)configure load balancer for " + cluster + " in " +
+ application + ", targeting: " + reals + ". The operation will be " +
+ "retried on next deployment", e);
+ }
}
- /** Returns a list of active containers for given application, grouped by cluster spec */
- private Map<ClusterSpec, List<Node>> activeContainers(ApplicationId application) {
- return new NodeList(nodeRepository.getNodes(NodeType.tenant, Node.State.active))
+ /** Returns a list of active and reserved nodes of type container in given cluster */
+ private List<Node> allocatedContainers(ApplicationId application, ClusterSpec.Id clusterId) {
+ return new NodeList(nodeRepository.getNodes(NodeType.tenant, Node.State.reserved, Node.State.active))
.owner(application)
.filter(node -> node.state().isAllocated())
.type(ClusterSpec.Type.container)
- .asList()
- .stream()
- .collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster()));
+ .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterId))
+ .asList();
}
/** Find IP addresses reachable by the load balancer service */
@@ -118,4 +171,11 @@ public class LoadBalancerProvisioner {
return reachable;
}
+ private static List<ClusterSpec.Id> containerClusterIdsOf(List<ClusterSpec> clusters) {
+ return clusters.stream()
+ .filter(c -> c.type() == ClusterSpec.Type.container)
+ .map(ClusterSpec::id)
+ .collect(Collectors.toUnmodifiableList());
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 21bfc1b6886..90ca8ef4d33 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -62,14 +62,14 @@ public class NodeRepositoryProvisioner implements Provisioner {
this.nodeRepository = nodeRepository;
this.capacityPolicies = new CapacityPolicies(zone, flavors);
this.zone = zone;
+ this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService));
this.preparer = new Preparer(nodeRepository,
zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD,
- provisionServiceProvider.getHostProvisioner(),
- provisionServiceProvider.getHostResourcesCalculator(),
- Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource));
- this.activator = new Activator(nodeRepository);
- this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService ->
- new LoadBalancerProvisioner(nodeRepository, lbService));
+ provisionServiceProvider.getHostProvisioner(),
+ provisionServiceProvider.getHostResourcesCalculator(),
+ Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource),
+ loadBalancerProvisioner);
+ this.activator = new Activator(nodeRepository, loadBalancerProvisioner);
}
/**
@@ -112,14 +112,6 @@ public class NodeRepositoryProvisioner implements Provisioner {
public void activate(NestedTransaction transaction, ApplicationId application, Collection<HostSpec> hosts) {
validate(hosts);
activator.activate(application, hosts, transaction);
- transaction.onCommitted(() -> {
- try {
- loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.provision(application));
- } catch (Exception e) {
- log.log(LogLevel.ERROR, "Failed to provision load balancer for application " +
- application.toShortString(), e);
- }
- });
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index ca958f15c69..44ba24174f2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -24,15 +24,25 @@ class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
+ private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
private final int spareCount;
public Preparer(NodeRepository nodeRepository, int spareCount, Optional<HostProvisioner> hostProvisioner,
- HostResourcesCalculator hostResourcesCalculator, BooleanFlag dynamicProvisioningEnabled) {
+ HostResourcesCalculator hostResourcesCalculator, BooleanFlag dynamicProvisioningEnabled,
+ Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.spareCount = spareCount;
+ this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, hostResourcesCalculator, dynamicProvisioningEnabled);
}
+ /** Prepare all required resources for the given application and cluster */
+ public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
+ var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
+ prepareLoadBalancer(application, cluster, requestedNodes);
+ return nodes;
+ }
+
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
@@ -41,7 +51,7 @@ class Preparer {
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
+ public List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
MutableInteger highestIndex = new MutableInteger(findHighestIndex(application, cluster));
@@ -58,6 +68,11 @@ class Preparer {
return acceptedNodes;
}
+ /** Prepare a load balancer for given application and cluster */
+ public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
+ loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
+ }
+
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
index d31834567ab..bfbf7775031 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
@@ -55,6 +55,8 @@ public class LoadBalancersResponse extends HttpResponse {
loadBalancers().forEach(lb -> {
Cursor lbObject = loadBalancerArray.addObject();
lbObject.setString("id", lb.id().serializedForm());
+ lbObject.setString("state", lb.state().name());
+ lbObject.setLong("changedAt", lb.changedAt().toEpochMilli());
lbObject.setString("application", lb.id().application().application().value());
lbObject.setString("tenant", lb.id().application().tenant().value());
lbObject.setString("instance", lb.id().application().instance().value());
@@ -76,9 +78,9 @@ public class LoadBalancersResponse extends HttpResponse {
realObject.setLong("port", real.port());
});
- lbObject.setArray("rotations"); // To avoid changing the API. This can be removed when clients stop expecting this
-
- lbObject.setBool("inactive", lb.inactive());
+ // TODO(mpolden): The following fields preserves API compatibility. These can be removed once clients stop expecting them
+ lbObject.setArray("rotations");
+ lbObject.setBool("inactive", lb.state() == LoadBalancer.State.inactive);
});
new JsonFormat(true).encode(stream, slime);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
index 299dc66c547..e628e823025 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
@@ -15,7 +15,6 @@ import com.yahoo.vespa.hosted.provision.provisioning.NodeRepositoryProvisioner;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -41,7 +40,7 @@ public class MockDeployer implements Deployer {
@Inject
@SuppressWarnings("unused")
public MockDeployer() {
- this(null, Clock.systemUTC(), Collections.emptyMap());
+ this(null, Clock.systemUTC(), Map.of());
}
/**
@@ -53,7 +52,7 @@ public class MockDeployer implements Deployer {
Map<ApplicationId, ApplicationContext> applications) {
this.provisioner = provisioner;
this.clock = clock;
- this.applications = applications;
+ this.applications = new HashMap<>(applications);
}
public ReentrantLock lock() {
@@ -73,8 +72,8 @@ public class MockDeployer implements Deployer {
throw new RuntimeException(e);
}
try {
- lastDeployTimes.put(id, clock.instant());
- return Optional.of(new MockDeployment(provisioner, applications.get(id)));
+ return Optional.ofNullable(applications.get(id))
+ .map(application -> new MockDeployment(provisioner, application));
} finally {
lock.unlock();
}
@@ -90,6 +89,13 @@ public class MockDeployer implements Deployer {
return Optional.ofNullable(lastDeployTimes.get(application));
}
+ public void removeApplication(ApplicationId applicationId) {
+ new MockDeployment(provisioner, new ApplicationContext(applicationId, List.of())).activate();
+
+ applications.remove(applicationId);
+ lastDeployTimes.remove(applicationId);
+ }
+
public class MockDeployment implements Deployment {
private final NodeRepositoryProvisioner provisioner;
@@ -116,6 +122,7 @@ public class MockDeployer implements Deployer {
try (NestedTransaction t = new NestedTransaction()) {
provisioner.activate(t, application.id(), preparedHosts);
t.commit();
+ lastDeployTimes.put(application.id, clock.instant());
}
}
@@ -136,7 +143,7 @@ public class MockDeployer implements Deployer {
}
public ApplicationContext(ApplicationId id, ClusterSpec cluster, Capacity capacity, int groups) {
- this(id, Collections.singletonList(new ClusterContext(id, cluster, capacity, groups)));
+ this(id, List.of(new ClusterContext(id, cluster, capacity, groups)));
}
public ApplicationId id() { return id; }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
index 9edf368fa9c..a586bfa15c2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
@@ -13,7 +13,6 @@ import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.RotationName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.transaction.NestedTransaction;
@@ -30,11 +29,9 @@ import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Optional;
-import java.util.Set;
/**
* A mock repository prepopulated with some applications.
@@ -140,29 +137,28 @@ public class MockNodeRepository extends NodeRepository {
ClusterSpec zoneCluster = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from("node-admin"),
Version.fromString("6.42"),
- false,
- Set.of(RotationName.from("us-cluster")));
+ false);
activate(provisioner.prepare(zoneApp, zoneCluster, Capacity.fromRequiredNodeType(NodeType.host), 1, null), zoneApp, provisioner);
ApplicationId app1 = ApplicationId.from(TenantName.from("tenant1"), ApplicationName.from("application1"), InstanceName.from("instance1"));
ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from("id1"),
Version.fromString("6.42"),
- false, Collections.emptySet());
+ false);
provisioner.prepare(app1, cluster1, Capacity.fromCount(2, new NodeResources(2, 8, 50)), 1, null);
ApplicationId app2 = ApplicationId.from(TenantName.from("tenant2"), ApplicationName.from("application2"), InstanceName.from("instance2"));
ClusterSpec cluster2 = ClusterSpec.request(ClusterSpec.Type.content,
ClusterSpec.Id.from("id2"),
Version.fromString("6.42"),
- false, Collections.emptySet());
+ false);
activate(provisioner.prepare(app2, cluster2, Capacity.fromCount(2, new NodeResources(2, 8, 50)), 1, null), app2, provisioner);
ApplicationId app3 = ApplicationId.from(TenantName.from("tenant3"), ApplicationName.from("application3"), InstanceName.from("instance3"));
ClusterSpec cluster3 = ClusterSpec.request(ClusterSpec.Type.content,
ClusterSpec.Id.from("id3"),
Version.fromString("6.42"),
- false, Collections.emptySet());
+ false);
activate(provisioner.prepare(app3, cluster3, Capacity.fromCount(2, new NodeResources(1, 1, 100), false, true), 1, null), app3, provisioner);
List<Node> largeNodes = new ArrayList<>();
@@ -176,7 +172,7 @@ public class MockNodeRepository extends NodeRepository {
ClusterSpec cluster4 = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from("id4"),
Version.fromString("6.42"),
- false, Collections.emptySet());
+ false);
activate(provisioner.prepare(app4, cluster4, Capacity.fromCount(2, new NodeResources(10, 48, 500), false, true), 1, null), app4, provisioner);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerServiceTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerServiceTest.java
index 40c307c6bef..5344fbc3c5f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerServiceTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerServiceTest.java
@@ -29,7 +29,7 @@ public class SharedLoadBalancerServiceTest {
@Test
public void test_create_lb() {
tester.makeReadyNodes(2, "default", NodeType.proxy);
- final var lb = loadBalancerService.create(applicationId, clusterId, reals);
+ final var lb = loadBalancerService.create(applicationId, clusterId, reals, false);
assertEquals(HostName.from("host-1.yahoo.com"), lb.hostname());
assertEquals(Optional.empty(), lb.dnsZone());
@@ -39,7 +39,7 @@ public class SharedLoadBalancerServiceTest {
@Test(expected = IllegalStateException.class)
public void test_exception_on_missing_proxies() {
- loadBalancerService.create(applicationId, clusterId, reals);
+ loadBalancerService.create(applicationId, clusterId, reals, false);
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
index 6fa5afd0c20..6be03e7969a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
@@ -50,7 +50,7 @@ public class FailedExpirerTest {
private static final ApplicationId tenantHostApplicationId = ApplicationId.from("vespa", "zone-app", "default");
private static final ClusterSpec tenantHostApplicationClusterSpec = ClusterSpec.request(
- ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Set.of());
+ ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false);
private static final Capacity tenantHostApplicationCapacity = Capacity.fromRequiredNodeType(NodeType.host);
@Test
@@ -136,7 +136,7 @@ public class FailedExpirerTest {
.withNode(NodeType.proxy, FailureScenario.defaultFlavor, "proxy3")
.setReady("proxy1", "proxy2", "proxy3")
.allocate( ApplicationId.from("vespa", "zone-app", "default"),
- ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("routing"), Version.fromString("6.42"), false, Set.of()),
+ ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("routing"), Version.fromString("6.42"), false),
Capacity.fromRequiredNodeType(NodeType.proxy))
.failNode(1, "proxy1");
@@ -344,8 +344,8 @@ public class FailedExpirerTest {
ClusterSpec clusterSpec = ClusterSpec.request(clusterType,
ClusterSpec.Id.from("test"),
Version.fromString("6.42"),
- false,
- Set.of());
+ false
+ );
Capacity capacity = Capacity.fromCount(hostname.length, Optional.of(flavor), false, true);
return allocate(applicationId, clusterSpec, capacity);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
index bf078e0cddc..ae886e0babc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
@@ -28,7 +28,6 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
-import java.util.Optional;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
@@ -55,7 +54,7 @@ public class InactiveAndFailedExpirerTest {
List<Node> nodes = tester.makeReadyNodes(2, nodeResources);
// Allocate then deallocate 2 nodes
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
List<HostSpec> preparedNodes = tester.prepare(applicationId, cluster, Capacity.fromCount(2, nodeResources), 1);
tester.activate(applicationId, new HashSet<>(preparedNodes));
assertEquals(2, tester.getNodes(applicationId, Node.State.active).size());
@@ -96,7 +95,7 @@ public class InactiveAndFailedExpirerTest {
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content,
ClusterSpec.Id.from("test"),
Version.fromString("6.42"),
- false, Collections.emptySet());
+ false);
List<HostSpec> preparedNodes = tester.prepare(applicationId, cluster, Capacity.fromCount(2, nodeResources), 1);
tester.activate(applicationId, new HashSet<>(preparedNodes));
assertEquals(2, tester.getNodes(applicationId, Node.State.active).size());
@@ -123,7 +122,7 @@ public class InactiveAndFailedExpirerTest {
public void node_that_wants_to_retire_is_moved_to_parked() throws OrchestrationException {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"),
- Version.fromString("6.42"), false, Collections.emptySet());
+ Version.fromString("6.42"), false);
tester.makeReadyNodes(5, nodeResources);
// Allocate two nodes
@@ -179,7 +178,7 @@ public class InactiveAndFailedExpirerTest {
// Allocate then deallocate a node
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyNodes(1, nodeResources);
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
List<HostSpec> preparedNodes = tester.prepare(testerId, cluster, Capacity.fromCount(2, nodeResources), 1);
tester.activate(testerId, new HashSet<>(preparedNodes));
assertEquals(1, tester.getNodes(testerId, Node.State.active).size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
index d7942cdb6e7..26b79ab9053 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
@@ -14,13 +14,14 @@ import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
import org.junit.Test;
import java.time.Duration;
-import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
@@ -31,7 +32,7 @@ public class LoadBalancerExpirerTest {
private ProvisioningTester tester = new ProvisioningTester.Builder().build();
@Test
- public void test_maintain() {
+ public void test_remove_inactive() {
LoadBalancerExpirer expirer = new LoadBalancerExpirer(tester.nodeRepository(),
Duration.ofDays(1),
tester.loadBalancerService());
@@ -49,25 +50,67 @@ public class LoadBalancerExpirerTest {
// Remove one application deactivates load balancers for that application
removeApplication(app1);
- assertTrue(loadBalancers.get().get(lb1).inactive());
- assertFalse(loadBalancers.get().get(lb2).inactive());
+ assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb1).state());
+ assertNotSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb2).state());
// Expirer defers removal while nodes are still allocated to application
expirer.maintain();
assertEquals(2, tester.loadBalancerService().instances().size());
-
- // Expirer removes load balancers once nodes are deallocated
dirtyNodesOf(app1);
+
+ // Expirer defers removal until expiration time passes
+ expirer.maintain();
+ assertTrue("Inactive load balancer not removed", tester.loadBalancerService().instances().containsKey(lb1));
+
+ // Expirer removes load balancers once expiration time passes
+ tester.clock().advance(Duration.ofHours(1));
expirer.maintain();
assertFalse("Inactive load balancer removed", tester.loadBalancerService().instances().containsKey(lb1));
// Active load balancer is left alone
- assertFalse(loadBalancers.get().get(lb2).inactive());
+ assertSame(LoadBalancer.State.active, loadBalancers.get().get(lb2).state());
assertTrue("Active load balancer is not removed", tester.loadBalancerService().instances().containsKey(lb2));
}
+ @Test
+ public void test_expire_reserved() {
+ LoadBalancerExpirer expirer = new LoadBalancerExpirer(tester.nodeRepository(),
+ Duration.ofDays(1),
+ tester.loadBalancerService());
+ Supplier<Map<LoadBalancerId, LoadBalancer>> loadBalancers = () -> tester.nodeRepository().database().readLoadBalancers();
+
+
+ // Prepare application
+ ClusterSpec.Id cluster = ClusterSpec.Id.from("qrs");
+ ApplicationId app = tester.makeApplicationId();
+ LoadBalancerId lb = new LoadBalancerId(app, cluster);
+ deployApplication(app, cluster, false);
+
+ // Provisions load balancer in reserved
+ assertSame(LoadBalancer.State.reserved, loadBalancers.get().get(lb).state());
+
+ // Expirer does nothing
+ expirer.maintain();
+ assertSame(LoadBalancer.State.reserved, loadBalancers.get().get(lb).state());
+
+ // Application never activates and nodes are dirtied. Expirer moves load balancer to inactive after timeout
+ dirtyNodesOf(app);
+ tester.clock().advance(Duration.ofHours(1));
+ expirer.maintain();
+ assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb).state());
+
+ // Expirer does nothing as inactive expiration time has not yet passed
+ expirer.maintain();
+ assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb).state());
+
+ // Expirer removes inactive load balancer
+ tester.clock().advance(Duration.ofHours(1));
+ expirer.maintain();
+ assertFalse("Inactive load balancer removed", loadBalancers.get().containsKey(lb));
+ }
+
private void dirtyNodesOf(ApplicationId application) {
- tester.nodeRepository().setDirty(tester.nodeRepository().getNodes(application), Agent.system, "unit-test");
+ tester.nodeRepository().setDirty(tester.nodeRepository().getNodes(application), Agent.system, this.getClass().getSimpleName());
}
private void removeApplication(ApplicationId application) {
@@ -77,12 +120,18 @@ public class LoadBalancerExpirerTest {
}
private void deployApplication(ApplicationId application, ClusterSpec.Id cluster) {
+ deployApplication(application, cluster, true);
+ }
+
+ private void deployApplication(ApplicationId application, ClusterSpec.Id cluster, boolean activate) {
tester.makeReadyNodes(10, "d-1-1-1");
List<HostSpec> hosts = tester.prepare(application, ClusterSpec.request(ClusterSpec.Type.container, cluster,
- Vtag.currentVersion, false, Collections.emptySet()),
+ Vtag.currentVersion, false),
2, 1,
new NodeResources(1, 1, 1));
- tester.activate(application, hosts);
+ if (activate) {
+ tester.activate(application, hosts);
+ }
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
index f8dfb6e3e39..59e95cc1908 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
@@ -88,8 +88,8 @@ public class NodeFailTester {
tester.createHostNodes(3);
// Create applications
- ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Set.of());
- ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Set.of());
+ ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
+ ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
int wantedNodesApp1 = 5;
int wantedNodesApp2 = 7;
tester.activate(app1, clusterApp1, wantedNodesApp1);
@@ -118,9 +118,9 @@ public class NodeFailTester {
}
// Create applications
- ClusterSpec clusterNodeAdminApp = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Set.of());
- ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"), false, Set.of());
- ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"), false, Set.of());
+ ClusterSpec clusterNodeAdminApp = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false);
+ ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"), false);
+ ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"), false);
Capacity allHosts = Capacity.fromRequiredNodeType(NodeType.host);
Capacity capacity1 = Capacity.fromCount(3, new NodeResources(1, 1, 1), false, true);
Capacity capacity2 = Capacity.fromCount(5, new NodeResources(1, 1, 1), false, true);
@@ -152,7 +152,7 @@ public class NodeFailTester {
ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from("test"),
Version.fromString("6.42"),
- false, Set.of());
+ false);
tester.activate(app1, clusterApp1, allNodes);
assertEquals(count, tester.nodeRepository.getNodes(nodeType, Node.State.active).size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTest.java
deleted file mode 100644
index 93e44164f40..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTest.java
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance;
-
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.NodeFlavors;
-import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.maintenance.retire.RetirementPolicy;
-import com.yahoo.vespa.hosted.provision.node.Agent;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/**
- * @author freva
- */
-public class NodeRetirerTest {
-
- private NodeRetirerTester tester;
- private NodeRetirer retirer;
- private final RetirementPolicy policy = mock(RetirementPolicy.class);
-
- @Before
- public void setup() {
- doAnswer(invoke -> {
- boolean shouldRetire = ((Node) invoke.getArguments()[0]).ipAddresses().equals(Collections.singleton("::1"));
- return shouldRetire ? Optional.of("Some reason") : Optional.empty();
- }).when(policy).shouldRetire(any(Node.class));
- when(policy.isActive()).thenReturn(true);
-
- NodeFlavors nodeFlavors = NodeRetirerTester.makeFlavors(5);
- tester = new NodeRetirerTester(nodeFlavors);
- retirer = spy(tester.makeNodeRetirer(policy));
-
- tester.createReadyNodesByFlavor(21, 42, 27, 15, 8);
- tester.deployApp("vespa", "calendar", new int[]{3}, new int[]{7});
- tester.deployApp("vespa", "notes", new int[]{0}, new int[]{3});
- tester.deployApp("sports", "results", new int[]{0}, new int[]{6});
- tester.deployApp("search", "images", new int[]{3}, new int[]{4});
- tester.deployApp("search", "videos", new int[]{2}, new int[]{2});
- tester.deployApp("tester", "my-app", new int[]{1, 2}, new int[]{4, 6});
- }
-
- @Test
- public void testRetireUnallocated() {
- tester.assertCountsForStateByFlavor(Node.State.ready, 12, 38, 19, 4, 8);
- tester.setNumberAllowedUnallocatedRetirementsPerFlavor(6, 30, 15, 2, 4);
- assertFalse(retirer.retireUnallocated());
- tester.assertCountsForStateByFlavor(Node.State.parked, 6, 30, 15, 2, 4);
-
- tester.assertCountsForStateByFlavor(Node.State.ready, 6, 8, 4, 2, 4);
- tester.setNumberAllowedUnallocatedRetirementsPerFlavor(10, 20, 5, 5, 4);
- assertTrue(retirer.retireUnallocated());
- tester.assertCountsForStateByFlavor(Node.State.parked, 12, 38, 19, 4, 8);
-
- tester.nodeRepository.getNodes().forEach(node ->
- assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
- }
-
- @Test
- public void testRetireAllocated() {
- // Update IP addresses on ready nodes so that when they are deployed to, we wont retire them
- tester.nodeRepository.getNodes(Node.State.ready)
- .forEach(node -> tester.nodeRepository.write(node.with(node.ipConfig().with(Set.of("::2"))), () -> {}));
-
- tester.assertCountsForStateByFlavor(Node.State.active, 9, 4, 8, 11, -1);
-
- tester.setNumberAllowedAllocatedRetirementsPerFlavor(3, 2, 4, 2);
- retirer.retireAllocated();
- tester.assertParkedCountsByApplication(-1, -1, -1, -1, -1, -1); // Nodes should be in retired, but not yet parked
- tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
-
- // Until the nodes we set to retire are fully retired and moved to parked, we should not attempt to retire any other nodes
- retirer.retireAllocated();
- retirer.retireAllocated();
- tester.assertRetiringCountsByApplication(1, 1, 1, 1, 1, 2);
-
- tester.iterateMaintainers();
- tester.assertParkedCountsByApplication(1, 1, 1, 1, 1, 2);
-
- // We can retire 1 more of flavor-0, 1 more of flavor-1, 2 more of flavor-2:
- // app 6 has the most nodes, so it gets to retire flavor-1 and flavor-2
- // app 3 is the largest that is on flavor-0, so it gets the last node
- // app 5 is gets the last node with flavor-2
- retirer.retireAllocated();
- tester.iterateMaintainers();
- tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
-
- // No more retirements are possible
- retirer.retireAllocated();
- tester.iterateMaintainers();
- tester.assertParkedCountsByApplication(1, 1, 2, 1, 2, 4);
-
- tester.nodeRepository.getNodes().forEach(node ->
- assertEquals(node.status().wantToDeprovision(), node.state() == Node.State.parked));
- }
-
- @Test
- public void testGetActiveApplicationIds() {
- List<String> expectedOrder = Arrays.asList(
- "tester.my-app", "vespa.calendar", "sports.results", "search.images", "vespa.notes", "search.videos");
- List<String> actualOrder = retirer.getActiveApplicationIds(tester.nodeRepository.getNodes()).stream()
- .map(applicationId -> applicationId.toShortString().replace(":default", ""))
- .collect(Collectors.toList());
- assertEquals(expectedOrder, actualOrder);
- }
-
- @Test
- public void testGetRetireableNodesForApplication() {
- ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
-
- List<Node> nodes = tester.nodeRepository.getNodes(app);
- Set<String> actual = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
- Set<String> expected = nodes.stream().map(Node::hostname).collect(Collectors.toSet());
- assertEquals(expected, actual);
-
- Node nodeWantToRetire = tester.nodeRepository.getNode("host3.test.yahoo.com").orElseThrow(RuntimeException::new);
- tester.nodeRepository.write(nodeWantToRetire.with(nodeWantToRetire.status().withWantToRetire(true)), () -> {});
- Node nodeToFail = tester.nodeRepository.getNode("host5.test.yahoo.com").orElseThrow(RuntimeException::new);
- tester.nodeRepository.fail(nodeToFail.hostname(), Agent.system, "Failed for unit testing");
- Node nodeToUpdate = tester.nodeRepository.getNode("host8.test.yahoo.com").orElseThrow(RuntimeException::new);
- tester.nodeRepository.write(nodeToUpdate.with(nodeToUpdate.ipConfig().with(Set.of("::2"))), () -> {});
-
- nodes = tester.nodeRepository.getNodes(app);
- Set<String> excluded = Stream.of(nodeWantToRetire, nodeToFail, nodeToUpdate).map(Node::hostname).collect(Collectors.toSet());
- Set<String> actualAfterUpdates = retirer.filterRetireableNodes(nodes).stream().map(Node::hostname).collect(Collectors.toSet());
- Set<String> expectedAfterUpdates = nodes.stream().map(Node::hostname).filter(node -> !excluded.contains(node)).collect(Collectors.toSet());
- assertEquals(expectedAfterUpdates, actualAfterUpdates);
- }
-
- @Test
- public void testGetNumberNodesAllowToRetireForCluster() {
- ApplicationId app = new ApplicationId.Builder().tenant("vespa").applicationName("calendar").build();
- long actualAllActive = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
- assertEquals(2, actualAllActive);
-
- // Lets put 3 random nodes in wantToRetire
- List<Node> nodesToRetire = tester.nodeRepository.getNodes(app).stream().limit(3).collect(Collectors.toList());
- nodesToRetire.forEach(node -> tester.nodeRepository.write(node.with(node.status().withWantToRetire(true)), () -> {}));
- long actualOneWantToRetire = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
- assertEquals(0, actualOneWantToRetire);
-
- // Now 2 of those finish retiring and go to parked
- nodesToRetire.stream().limit(2).forEach(node ->
- tester.nodeRepository.park(node.hostname(), false, Agent.system, "Parked for unit testing"));
- long actualOneRetired = retirer.getNumberNodesAllowToRetireForCluster(tester.nodeRepository.getNodes(app), 2);
- assertEquals(1, actualOneRetired);
- }
-
- @Test
- public void inactivePolicyDoesNothingTest() {
- when(policy.isActive()).thenReturn(false);
- retirer.maintain();
-
- verify(retirer, never()).retireUnallocated();
- verify(retirer, never()).retireAllocated();
- }
-
-}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTester.java
deleted file mode 100644
index 832c2fc512b..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRetirerTester.java
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance;
-
-import com.yahoo.component.Version;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Capacity;
-import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.DockerImage;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeFlavors;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.config.provision.Zone;
-import com.yahoo.test.ManualClock;
-import com.yahoo.vespa.curator.Curator;
-import com.yahoo.vespa.curator.mock.MockCurator;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
-import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.maintenance.retire.RetirementPolicy;
-import com.yahoo.vespa.hosted.provision.node.Agent;
-import com.yahoo.vespa.hosted.provision.node.IP;
-import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
-import com.yahoo.vespa.hosted.provision.provisioning.FlavorSpareChecker;
-import com.yahoo.vespa.hosted.provision.provisioning.NodeRepositoryProvisioner;
-import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
-import com.yahoo.vespa.hosted.provision.testutils.MockNameResolver;
-import com.yahoo.vespa.hosted.provision.testutils.MockProvisionServiceProvider;
-import com.yahoo.vespa.orchestrator.OrchestrationException;
-import com.yahoo.vespa.orchestrator.Orchestrator;
-
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.LongStream;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- * @author freva
- */
-public class NodeRetirerTester {
- public static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
-
- // Components with state
- public final ManualClock clock = new ManualClock();
- public final NodeRepository nodeRepository;
- private final FlavorSpareChecker flavorSpareChecker = mock(FlavorSpareChecker.class);
- private final MockDeployer deployer;
- private final List<Flavor> flavors;
-
- // Use LinkedHashMap to keep order in which applications were deployed
- private final Map<ApplicationId, MockDeployer.ApplicationContext> apps = new LinkedHashMap<>();
-
- private final Orchestrator orchestrator = mock(Orchestrator.class);
- private RetiredExpirer retiredExpirer;
- private InactiveExpirer inactiveExpirer;
- private int nextNodeId = 0;
-
- NodeRetirerTester(NodeFlavors nodeFlavors) {
- Curator curator = new MockCurator();
- nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, new MockNameResolver().mockAnyLookup(),
- DockerImage.fromString("docker-registry.domain.tld:8080/dist/vespa"), true);
- NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource());
- deployer = new MockDeployer(provisioner, clock, apps);
- flavors = nodeFlavors.getFlavors().stream().sorted(Comparator.comparing(Flavor::name)).collect(Collectors.toList());
-
- try {
- doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any());
- } catch (OrchestrationException e) {
- e.printStackTrace();
- }
- }
-
- NodeRetirer makeNodeRetirer(RetirementPolicy policy) {
- return new NodeRetirer(nodeRepository, flavorSpareChecker, Duration.ofDays(1), deployer, policy);
- }
-
- void createReadyNodesByFlavor(int... nums) {
- List<Node> nodes = new ArrayList<>();
- for (int i = 0; i < nums.length; i++) {
- Flavor flavor = flavors.get(i);
- for (int j = 0; j < nums[i]; j++) {
- int id = nextNodeId++;
- nodes.add(nodeRepository.createNode("node" + id, "host" + id + ".test.yahoo.com",
- new IP.Config(Set.of("::1"), Set.of()), Optional.empty(),
- Optional.empty(), flavor, NodeType.tenant));
- }
- }
-
- nodes = nodeRepository.addNodes(nodes);
- nodes = nodeRepository.setDirty(nodes, Agent.system, getClass().getSimpleName());
- nodeRepository.setReady(nodes, Agent.system, getClass().getSimpleName());
- }
-
- void deployApp(String tenantName, String applicationName, int[] flavorIds, int[] numNodes) {
- final ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
- final List<MockDeployer.ClusterContext> clusterContexts = new ArrayList<>();
-
- for (int i = 0; i < flavorIds.length; i++) {
- Flavor flavor = flavors.get(flavorIds[i]);
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("cluster-" + i), Version.fromString("6.99"), false, Collections.emptySet());
- Capacity capacity = Capacity.fromNodeCount(numNodes[i], Optional.of(flavor.name()), false, true);
- // If the number of node the app wants is divisible by 2, make it into 2 groups, otherwise as 1
- int numGroups = numNodes[i] % 2 == 0 ? 2 : 1;
- clusterContexts.add(new MockDeployer.ClusterContext(applicationId, cluster, capacity, numGroups));
- }
-
- apps.put(applicationId, new MockDeployer.ApplicationContext(applicationId, clusterContexts));
- deployer.deployFromLocalActive(applicationId, Duration.ZERO).get().activate();
- }
-
- void iterateMaintainers() {
- if (retiredExpirer == null) {
- retiredExpirer = new RetiredExpirer(nodeRepository, orchestrator, deployer, clock, Duration.ofDays(30), Duration.ofMinutes(10));
- inactiveExpirer = new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10));
- }
-
- clock.advance(Duration.ofMinutes(11));
- retiredExpirer.maintain();
-
- clock.advance(Duration.ofMinutes(11));
- inactiveExpirer.maintain();
- }
-
- void setNumberAllowedUnallocatedRetirementsPerFlavor(int... numAllowed) {
- for (int i = 0; i < numAllowed.length; i++) {
- Boolean[] responses = new Boolean[numAllowed[i]];
- Arrays.fill(responses, true);
- responses[responses.length - 1 ] = false;
- when(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
- }
- }
-
- void setNumberAllowedAllocatedRetirementsPerFlavor(int... numAllowed) {
- for (int i = 0; i < numAllowed.length; i++) {
- Boolean[] responses = new Boolean[numAllowed[i]];
- Arrays.fill(responses, true);
- responses[responses.length - 1] = false;
- when(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(eq(flavors.get(i)))).thenReturn(true, responses);
- }
- }
-
- void assertCountsForStateByFlavor(Node.State state, long... nums) {
- Map<Flavor, Long> expected = expectedCountsByFlavor(nums);
- Map<Flavor, Long> actual = nodeRepository.getNodes(state).stream()
- .collect(Collectors.groupingBy(Node::flavor, Collectors.counting()));
- assertEquals(expected, actual);
- }
-
- void assertParkedCountsByApplication(long... nums) {
- // Nodes lose allocation when parked, so just do a sum.
- long expected = LongStream.of(nums).filter(value -> value > 0L).sum();
- long actual = (long) nodeRepository.getNodes(Node.State.parked).size();
- assertEquals(expected, actual);
- }
-
- // Nodes that are being retired or about to be retired (wantToRetire flag set), but are not yet fully retired (not parked)
- void assertRetiringCountsByApplication(long... nums) {
- Map<ApplicationId, Long> expected = expectedCountsByApplication(nums);
- Map<ApplicationId, Long> actual = nodeRepository.getNodes().stream()
- .filter(node -> node.status().wantToRetire())
- .filter(node -> node.allocation().isPresent())
- .filter(node -> node.allocation().get().membership().retired())
- .filter(node -> node.state() != Node.State.parked)
- .collect(Collectors.groupingBy(node -> node.allocation().get().owner(), Collectors.counting()));
- assertEquals(expected, actual);
- }
-
- private Map<Flavor, Long> expectedCountsByFlavor(long... nums) {
- Map<Flavor, Long> countsByFlavor = new HashMap<>();
- for (int i = 0; i < nums.length; i++) {
- if (nums[i] < 0) continue;
- Flavor flavor = flavors.get(i);
- countsByFlavor.put(flavor, nums[i]);
- }
- return countsByFlavor;
- }
-
- private Map<ApplicationId, Long> expectedCountsByApplication(long... nums) {
- Map<ApplicationId, Long> countsByApplicationId = new HashMap<>();
- Iterator<ApplicationId> iterator = apps.keySet().iterator();
- for (int i = 0; iterator.hasNext(); i++) {
- ApplicationId applicationId = iterator.next();
- if (nums[i] < 0) continue;
- countsByApplicationId.put(applicationId, nums[i]);
- }
- return countsByApplicationId;
- }
-
- static NodeFlavors makeFlavors(int numFlavors) {
- FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
- for (int i = 0; i < numFlavors; i++) {
- flavorConfigBuilder.addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
- }
- return new NodeFlavors(flavorConfigBuilder.build());
- }
-}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
index cd136360b0d..e1ac0430ee4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
@@ -9,7 +9,6 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
@@ -18,10 +17,8 @@ import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
-import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
-import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
@@ -36,8 +33,6 @@ import org.junit.Test;
import java.time.Duration;
import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -63,33 +58,34 @@ public class OperatorChangeApplicationMaintainerTest {
new MockNameResolver().mockAnyLookup(),
DockerImage.fromString("docker-registry.domain.tld:8080/dist/vespa"),
true);
- this.fixture = new Fixture(zone, nodeRepository, nodeFlavors, curator);
+ this.fixture = new Fixture(zone, nodeRepository, nodeFlavors);
createReadyNodes(15, this.fixture.nodeResources, nodeRepository);
createHostNodes(2, nodeRepository, nodeFlavors);
// Create applications
fixture.activate();
- OperatorChangeApplicationMaintainer maintainer = new OperatorChangeApplicationMaintainer(fixture.deployer, nodeRepository, clock, Duration.ofMinutes(1));
+ assertEquals("Initial applications are deployed", 2, fixture.deployer.redeployments);
+ OperatorChangeApplicationMaintainer maintainer = new OperatorChangeApplicationMaintainer(fixture.deployer, nodeRepository, Duration.ofMinutes(1));
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("No changes -> no redeployments", 0, fixture.deployer.redeployments);
+ assertEquals("No changes -> no redeployments", 2, fixture.deployer.redeployments);
nodeRepository.fail(nodeRepository.getNodes(fixture.app1).get(3).hostname(), Agent.system, "Failing to unit test");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("System change -> no redeployments", 0, fixture.deployer.redeployments);
+ assertEquals("System change -> no redeployments", 2, fixture.deployer.redeployments);
clock.advance(Duration.ofSeconds(1));
nodeRepository.fail(nodeRepository.getNodes(fixture.app2).get(4).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("Operator change -> redeployment", 1, fixture.deployer.redeployments);
+ assertEquals("Operator change -> redeployment", 3, fixture.deployer.redeployments);
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("No further operator changes -> no (new) redeployments", 1, fixture.deployer.redeployments);
+ assertEquals("No further operator changes -> no (new) redeployments", 3, fixture.deployer.redeployments);
}
private void createReadyNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) {
@@ -119,50 +115,33 @@ public class OperatorChangeApplicationMaintainerTest {
private class Fixture {
- final NodeResources nodeResources = new NodeResources(2, 8, 50);
final NodeRepository nodeRepository;
- final NodeRepositoryProvisioner provisioner;
- final Curator curator;
+ final MockDeployer deployer;
+ final NodeResources nodeResources = new NodeResources(2, 8, 50);
final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz"));
final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz"));
- final ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
- final ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ final ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
+ final ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
final int wantedNodesApp1 = 5;
final int wantedNodesApp2 = 7;
- MockDeployer deployer; // created on activation
- Fixture(Zone zone, NodeRepository nodeRepository, NodeFlavors flavors, Curator curator) {
+ Fixture(Zone zone, NodeRepository nodeRepository, NodeFlavors flavors) {
this.nodeRepository = nodeRepository;
- this.curator = curator;
- this.provisioner = new NodeRepositoryProvisioner(nodeRepository, flavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource());
+ NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(
+ nodeRepository, flavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource());
+
+ Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of(
+ app1, new MockDeployer.ApplicationContext(app1, clusterApp1, Capacity.fromCount(wantedNodesApp1, nodeResources), 1),
+ app2, new MockDeployer.ApplicationContext(app2, clusterApp2, Capacity.fromCount(wantedNodesApp2, nodeResources), 1));
+ this.deployer = new MockDeployer(provisioner, nodeRepository.clock(), apps);
}
void activate() {
- activate(app1, clusterApp1, wantedNodesApp1, provisioner);
- activate(app2, clusterApp2, wantedNodesApp2, provisioner);
+ deployer.deployFromLocalActive(app1, false).get().activate();
+ deployer.deployFromLocalActive(app2, false).get().activate();
assertEquals(wantedNodesApp1, nodeRepository.getNodes(app1, Node.State.active).size());
assertEquals(wantedNodesApp2, nodeRepository.getNodes(app2, Node.State.active).size());
-
- Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
- apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1,
- Capacity.fromCount(wantedNodesApp1, nodeResources), 1));
- apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2,
- Capacity.fromCount(wantedNodesApp2, nodeResources), 1));
- this.deployer = new MockDeployer(provisioner, nodeRepository.clock(), apps);
- }
-
- private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount, NodeRepositoryProvisioner provisioner) {
- List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, Capacity.fromCount(nodeCount, nodeResources), 1, null);
- NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
- provisioner.activate(transaction, applicationId, hosts);
- transaction.commit();
- }
-
- void remove(ApplicationId application) {
- NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
- provisioner.remove(transaction, application);
- transaction.commit();
}
NodeList getNodes(Node.State ... states) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
index 49a14dc3777..211b4a4472f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
@@ -10,7 +10,6 @@ import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
@@ -19,10 +18,8 @@ import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
-import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
-import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
@@ -40,14 +37,12 @@ import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
/**
* @author bratseth
@@ -69,7 +64,7 @@ public class PeriodicApplicationMaintainerTest {
new MockNameResolver().mockAnyLookup(),
DockerImage.fromString("docker-registry.domain.tld:8080/dist/vespa"),
true);
- this.fixture = new Fixture(zone, nodeRepository, nodeFlavors, curator);
+ this.fixture = new Fixture(zone, nodeRepository, nodeFlavors);
createReadyNodes(15, fixture.nodeResources, nodeRepository);
createHostNodes(2, nodeRepository, nodeFlavors);
@@ -150,27 +145,32 @@ public class PeriodicApplicationMaintainerTest {
public void application_deploy_inhibits_redeploy_for_a_while() {
fixture.activate();
+ assertEquals("No deployment expected", 2, fixture.deployer.redeployments);
+
// Holds off on deployments a while after starting
fixture.runApplicationMaintainer();
- assertFalse("No deployment expected", fixture.deployer.lastDeployTime(fixture.app1).isPresent());
- assertFalse("No deployment expected", fixture.deployer.lastDeployTime(fixture.app2).isPresent());
+ assertEquals("No deployment expected", 2, fixture.deployer.redeployments);
+
// Exhaust initial wait period
clock.advance(Duration.ofMinutes(30).plus(Duration.ofSeconds(1)));
// First deployment of applications
fixture.runApplicationMaintainer();
+ assertEquals("No deployment expected", 4, fixture.deployer.redeployments);
Instant firstDeployTime = clock.instant();
assertEquals(firstDeployTime, fixture.deployer.lastDeployTime(fixture.app1).get());
assertEquals(firstDeployTime, fixture.deployer.lastDeployTime(fixture.app2).get());
clock.advance(Duration.ofMinutes(5));
fixture.runApplicationMaintainer();
// Too soon: Not redeployed:
+ assertEquals("No deployment expected", 4, fixture.deployer.redeployments);
assertEquals(firstDeployTime, fixture.deployer.lastDeployTime(fixture.app1).get());
assertEquals(firstDeployTime, fixture.deployer.lastDeployTime(fixture.app2).get());
clock.advance(Duration.ofMinutes(30));
fixture.runApplicationMaintainer();
// Redeployed:
+ assertEquals("No deployment expected", 6, fixture.deployer.redeployments);
assertEquals(clock.instant(), fixture.deployer.lastDeployTime(fixture.app1).get());
assertEquals(clock.instant(), fixture.deployer.lastDeployTime(fixture.app2).get());
}
@@ -239,54 +239,41 @@ public class PeriodicApplicationMaintainerTest {
private class Fixture {
- final NodeResources nodeResources = new NodeResources(2, 8, 50);
final NodeRepository nodeRepository;
- final NodeRepositoryProvisioner provisioner;
- final Curator curator;
final MockDeployer deployer;
+ final NodeResources nodeResources = new NodeResources(2, 8, 50);
final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz"));
final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz"));
- final ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
- final ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ final ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
+ final ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
final int wantedNodesApp1 = 5;
final int wantedNodesApp2 = 7;
private final TestablePeriodicApplicationMaintainer maintainer;
- Fixture(Zone zone, NodeRepository nodeRepository, NodeFlavors flavors, Curator curator) {
+ Fixture(Zone zone, NodeRepository nodeRepository, NodeFlavors flavors) {
this.nodeRepository = nodeRepository;
- this.curator = curator;
- this.provisioner = new NodeRepositoryProvisioner(nodeRepository, flavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource());
-
- Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
- apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1,
- Capacity.fromCount(wantedNodesApp1, nodeResources), 1));
- apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2,
- Capacity.fromCount(wantedNodesApp2, nodeResources), 1));
+ NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(
+ nodeRepository, flavors, zone, new MockProvisionServiceProvider(), new InMemoryFlagSource());
+
+ Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of(
+ app1, new MockDeployer.ApplicationContext(app1, clusterApp1, Capacity.fromCount(wantedNodesApp1, nodeResources), 1),
+ app2, new MockDeployer.ApplicationContext(app2, clusterApp2, Capacity.fromCount(wantedNodesApp2, nodeResources), 1));
this.deployer = new MockDeployer(provisioner, nodeRepository.clock(), apps);
this.maintainer = new TestablePeriodicApplicationMaintainer(deployer, nodeRepository, Duration.ofDays(1), // Long duration to prevent scheduled runs during test
Duration.ofMinutes(30));
}
void activate() {
- activate(app1, clusterApp1, wantedNodesApp1, provisioner);
- activate(app2, clusterApp2, wantedNodesApp2, provisioner);
+ deployer.deployFromLocalActive(app1, false).get().activate();
+ deployer.deployFromLocalActive(app2, false).get().activate();
assertEquals(wantedNodesApp1, nodeRepository.getNodes(app1, Node.State.active).size());
assertEquals(wantedNodesApp2, nodeRepository.getNodes(app2, Node.State.active).size());
}
- private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount, NodeRepositoryProvisioner provisioner) {
- List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, Capacity.fromCount(nodeCount, nodeResources), 1, null);
- NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
- provisioner.activate(transaction, applicationId, hosts);
- transaction.commit();
- }
-
void remove(ApplicationId application) {
- NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
- provisioner.remove(transaction, application);
- transaction.commit();
+ deployer.removeApplication(application);
}
void runApplicationMaintainer() {
@@ -325,11 +312,6 @@ public class PeriodicApplicationMaintainerTest {
: super.nodesNeedingMaintenance();
}
- @Override
- protected boolean shouldBeDeployedOnThisServer(ApplicationId application) {
- return true;
- }
-
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
index 491f506c3eb..f8efb4fdea1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
@@ -26,7 +26,6 @@ import org.junit.Test;
import java.time.Duration;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
@@ -62,7 +61,7 @@ public class ReservationExpirerTest {
assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.dirty).size());
nodeRepository.setReady(nodes, Agent.system, getClass().getSimpleName());
ApplicationId applicationId = new ApplicationId.Builder().tenant("foo").applicationName("bar").instanceName("fuz").build();
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
provisioner.prepare(applicationId, cluster, Capacity.fromCount(2, new NodeResources(2, 8, 50)), 1, null);
assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.reserved).size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
index e37a139700f..5d3485ab447 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
@@ -86,7 +86,7 @@ public class RetiredExpirerTest {
// Allocate content cluster of sizes 7 -> 2 -> 3:
// Should end up with 3 nodes in the cluster (one previously retired), and 4 retired
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1, provisioner);
activate(applicationId, cluster, wantedNodes=2, 1, provisioner);
@@ -117,7 +117,7 @@ public class RetiredExpirerTest {
ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz"));
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
activate(applicationId, cluster, 8, 8, provisioner);
activate(applicationId, cluster, 2, 2, provisioner);
assertEquals(8, nodeRepository.getNodes(applicationId, Node.State.active).size());
@@ -148,7 +148,7 @@ public class RetiredExpirerTest {
// Allocate content cluster of sizes 7 -> 2 -> 3:
// Should end up with 3 nodes in the cluster (one previously retired), and 4 retired
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
int wantedNodes;
activate(applicationId, cluster, wantedNodes=7, 1, provisioner);
activate(applicationId, cluster, wantedNodes=2, 1, provisioner);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodesTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodesTest.java
deleted file mode 100644
index b40d091b346..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodesTest.java
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance.retire;
-
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * @author freva
- */
-public class RetireIPv4OnlyNodesTest {
- private final RetireIPv4OnlyNodes policy = new RetireIPv4OnlyNodes(null);
- private final List<Flavor> nodeFlavors = initFlavors();
-
- @Test
- public void testSingleIPv4Address() {
- Node node = createNodeWithAddresses("127.0.0.1");
- assertTrue(policy.shouldRetire(node).isPresent());
- }
-
- @Test
- public void testSingleIPv6Address() {
- Node node = createNodeWithAddresses("::1");
- assertFalse(policy.shouldRetire(node).isPresent());
- }
-
- @Test
- public void testMultipleIPv4Address() {
- Node node = createNodeWithAddresses("127.0.0.1", "10.0.0.1", "192.168.0.1");
- assertTrue(policy.shouldRetire(node).isPresent());
- }
-
- @Test
- public void testMultipleIPv6Address() {
- Node node = createNodeWithAddresses("::1", "::2", "1234:5678:90ab::cdef");
- assertFalse(policy.shouldRetire(node).isPresent());
- }
-
- @Test
- public void testCombinationAddress() {
- Node node = createNodeWithAddresses("127.0.0.1", "::1", "10.0.0.1", "::2");
- assertFalse(policy.shouldRetire(node).isPresent());
- }
-
- @Test
- public void testNeverRetireVMs() {
- Node node = createVMWithAddresses("127.0.0.1", "10.0.0.1", "192.168.0.1");
- assertFalse(policy.shouldRetire(node).isPresent());
-
- node = createNodeWithAddresses("::1", "::2", "1234:5678:90ab::cdef");
- assertFalse(policy.shouldRetire(node).isPresent());
-
- node = createNodeWithAddresses("127.0.0.1", "::1", "10.0.0.1", "::2");
- assertFalse(policy.shouldRetire(node).isPresent());
- }
-
- private Node createNodeWithAddresses(String... addresses) {
- Set<String> ipAddresses = Arrays.stream(addresses).collect(Collectors.toSet());
- return Node.create("openstackid", ipAddresses, Collections.emptySet(), "hostname", Optional.empty(),
- Optional.empty(), nodeFlavors.get(0), NodeType.tenant);
- }
-
- private Node createVMWithAddresses(String... addresses) {
- Set<String> ipAddresses = Arrays.stream(addresses).collect(Collectors.toSet());
- return Node.create("openstackid", ipAddresses, Collections.emptySet(), "hostname", Optional.empty(),
- Optional.empty(), nodeFlavors.get(1), NodeType.tenant);
- }
-
- private List<Flavor> initFlavors() {
- FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
- flavorConfigBuilder.addFlavor("default", 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
- flavorConfigBuilder.addFlavor("vm", 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.VIRTUAL_MACHINE);
- return flavorConfigBuilder.build().flavor().stream().map(Flavor::new).collect(Collectors.toList());
- }
-}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
index 460764b50db..bcb78844666 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
@@ -12,8 +12,10 @@ import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
import com.yahoo.vespa.hosted.provision.lb.Real;
import org.junit.Test;
+import java.time.Instant;
import java.util.Optional;
+import static java.time.temporal.ChronoUnit.MILLIS;
import static org.junit.Assert.assertEquals;
/**
@@ -23,30 +25,33 @@ public class LoadBalancerSerializerTest {
@Test
public void test_serialization() {
- LoadBalancer loadBalancer = new LoadBalancer(new LoadBalancerId(ApplicationId.from("tenant1",
- "application1",
- "default"),
- ClusterSpec.Id.from("qrs")),
- new LoadBalancerInstance(
- HostName.from("lb-host"),
- Optional.of(new DnsZone("zone-id-1")),
- ImmutableSet.of(4080, 4443),
- ImmutableSet.of("10.2.3.4/24"),
- ImmutableSet.of(new Real(HostName.from("real-1"),
- "127.0.0.1",
- 4080),
- new Real(HostName.from("real-2"),
- "127.0.0.2",
- 4080))),
- false);
+ var now = Instant.now();
+ var loadBalancer = new LoadBalancer(new LoadBalancerId(ApplicationId.from("tenant1",
+ "application1",
+ "default"),
+ ClusterSpec.Id.from("qrs")),
+ new LoadBalancerInstance(
+ HostName.from("lb-host"),
+ Optional.of(new DnsZone("zone-id-1")),
+ ImmutableSet.of(4080, 4443),
+ ImmutableSet.of("10.2.3.4/24"),
+ ImmutableSet.of(new Real(HostName.from("real-1"),
+ "127.0.0.1",
+ 4080),
+ new Real(HostName.from("real-2"),
+ "127.0.0.2",
+ 4080))),
+ LoadBalancer.State.active,
+ now);
- LoadBalancer serialized = LoadBalancerSerializer.fromJson(LoadBalancerSerializer.toJson(loadBalancer));
+ var serialized = LoadBalancerSerializer.fromJson(LoadBalancerSerializer.toJson(loadBalancer));
assertEquals(loadBalancer.id(), serialized.id());
assertEquals(loadBalancer.instance().hostname(), serialized.instance().hostname());
assertEquals(loadBalancer.instance().dnsZone(), serialized.instance().dnsZone());
assertEquals(loadBalancer.instance().ports(), serialized.instance().ports());
assertEquals(loadBalancer.instance().networks(), serialized.instance().networks());
- assertEquals(loadBalancer.inactive(), serialized.inactive());
+ assertEquals(loadBalancer.state(), serialized.state());
+ assertEquals(loadBalancer.changedAt().truncatedTo(MILLIS), serialized.changedAt());
assertEquals(loadBalancer.instance().reals(), serialized.instance().reals());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index f5205332e26..be1c138de54 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -200,7 +200,7 @@ public class AclProvisioningTest {
private List<Node> deploy(ApplicationId application, Capacity capacity) {
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"),
- Version.fromString("6.42"), false, Collections.emptySet());
+ Version.fromString("6.42"), false);
List<HostSpec> prepared = tester.prepare(application, cluster, capacity, 1);
tester.activate(application, new HashSet<>(prepared));
return tester.getNodes(application, Node.State.active).asList();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java
index 1275ad0781a..76d988a291b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java
@@ -112,7 +112,7 @@ public class AllocationSimulator {
}
private ClusterSpec cluster() {
- return ClusterSpec.from(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), ClusterSpec.Group.from(1), Version.fromString("6.41"), false, Collections.emptySet());
+ return ClusterSpec.from(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), ClusterSpec.Group.from(1), Version.fromString("6.41"), false);
}
/* ------------ Methods to add events to the system ----------------*/
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
index 173fe31a32c..9b051184885 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
@@ -11,7 +11,6 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.config.provision.ParentHostUnavailableException;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
@@ -19,7 +18,6 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import org.junit.Test;
-import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
@@ -51,7 +49,7 @@ public class DockerProvisioningTest {
Version wantedVespaVersion = Version.fromString("6.39");
int nodeCount = 7;
List<HostSpec> hosts = tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), wantedVespaVersion, false, Collections.emptySet()),
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), wantedVespaVersion, false),
nodeCount, 1, dockerFlavor);
tester.activate(application1, new HashSet<>(hosts));
@@ -62,7 +60,7 @@ public class DockerProvisioningTest {
// Upgrade Vespa version on nodes
Version upgradedWantedVespaVersion = Version.fromString("6.40");
List<HostSpec> upgradedHosts = tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), upgradedWantedVespaVersion, false, Collections.emptySet()),
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), upgradedWantedVespaVersion, false),
nodeCount, 1, dockerFlavor);
tester.activate(application1, new HashSet<>(upgradedHosts));
NodeList upgradedNodes = tester.getNodes(application1, Node.State.active);
@@ -84,7 +82,7 @@ public class DockerProvisioningTest {
Version wantedVespaVersion = Version.fromString("6.39");
int nodeCount = 7;
List<HostSpec> nodes = tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), wantedVespaVersion, false, Collections.emptySet()),
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), wantedVespaVersion, false),
nodeCount, 1, dockerFlavor);
try {
tester.activate(application1, new HashSet<>(nodes));
@@ -93,13 +91,13 @@ public class DockerProvisioningTest {
// Activate the zone-app, thereby allocating the parents
List<HostSpec> hosts = tester.prepare(zoneApplication,
- ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("zone-app"), wantedVespaVersion, false, Collections.emptySet()),
+ ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("zone-app"), wantedVespaVersion, false),
Capacity.fromRequiredNodeType(NodeType.host), 1);
tester.activate(zoneApplication, hosts);
// Try allocating tenants again
nodes = tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), wantedVespaVersion, false, Collections.emptySet()),
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), wantedVespaVersion, false),
nodeCount, 1, dockerFlavor);
tester.activate(application1, new HashSet<>(nodes));
@@ -222,7 +220,7 @@ public class DockerProvisioningTest {
ApplicationId application1 = tester.makeApplicationId();
tester.makeReadyVirtualDockerNodes(1, dockerFlavor, "dockerHost");
- List<HostSpec> hosts = tester.prepare(application1, ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.42"), false, Collections.emptySet()), 1, 1, dockerFlavor);
+ List<HostSpec> hosts = tester.prepare(application1, ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.42"), false), 1, 1, dockerFlavor);
tester.activate(application1, new HashSet<>(hosts));
NodeList nodes = tester.getNodes(application1, Node.State.active);
@@ -236,7 +234,7 @@ public class DockerProvisioningTest {
private void prepareAndActivate(ApplicationId application, int nodeCount, boolean exclusive, ProvisioningTester tester) {
Set<HostSpec> hosts = new HashSet<>(tester.prepare(application,
- ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Version.fromString("6.39"), exclusive, Collections.emptySet()),
+ ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Version.fromString("6.39"), exclusive),
Capacity.fromCount(nodeCount, Optional.of(dockerFlavor), false, true),
1));
tester.activate(application, hosts);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
index 50e19e15da5..aa1763db487 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
@@ -386,7 +386,6 @@ public class DynamicDockerAllocationTest {
tester.activate(application, hosts1);
NodeResources resources = new NodeResources(1.5, 8, 50);
- System.out.println("Redeploying with " + resources);
List<HostSpec> hosts2 = tester.prepare(application, cluster, Capacity.fromCount(2, resources), 1);
tester.activate(application, hosts2);
@@ -444,7 +443,7 @@ public class DynamicDockerAllocationTest {
ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from("node-admin"),
Version.fromString("6.42"),
- false, Collections.emptySet()),
+ false),
Capacity.fromRequiredNodeType(NodeType.host),
1);
tester.activate(applicationId, ImmutableSet.copyOf(list));
@@ -461,6 +460,6 @@ public class DynamicDockerAllocationTest {
}
private ClusterSpec clusterSpec(String clusterId) {
- return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false, Collections.emptySet());
+ return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false);
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
index 7d450018353..a497afc9fc9 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
@@ -18,7 +18,6 @@ import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.testutils.MockNameResolver;
import org.junit.Test;
-import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
@@ -110,7 +109,7 @@ public class DynamicDockerProvisionTest {
ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from("node-admin"),
Version.fromString("6.42"),
- false, Collections.emptySet()),
+ false),
Capacity.fromRequiredNodeType(NodeType.host),
1);
tester.activate(applicationId, ImmutableSet.copyOf(list));
@@ -118,7 +117,7 @@ public class DynamicDockerProvisionTest {
private static ClusterSpec clusterSpec(String clusterId) {
- return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false, Collections.emptySet());
+ return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId), Version.fromString("6.42"), false);
}
@SuppressWarnings("unchecked")
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCheckerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCheckerTest.java
deleted file mode 100644
index c60e1d94cac..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCheckerTest.java
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.provisioning;
-
-import com.yahoo.config.provision.Flavor;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/**
- * @author freva
- */
-public class FlavorSpareCheckerTest {
- /* Creates flavors where 'replaces' graph that looks like this (largest flavor at the bottom):
- * 5
- * |
- * |
- * 3 4 8
- * \ / \ |
- * \ / \ |
- * 1 6 7
- * / \
- * / \
- * 0 2
- */
- private static final List<Flavor> flavors = FlavorSpareCountTest.makeFlavors(
- Collections.singletonList(1), // 0 -> {1}
- Arrays.asList(3, 4), // 1 -> {3, 4}
- Collections.singletonList(1), // 2 -> {1}
- Collections.singletonList(5), // 3 -> {5}
- Collections.emptyList(), // 4 -> {}
- Collections.emptyList(), // 5 -> {}
- Collections.singletonList(4), // 6 -> {4}
- Collections.singletonList(8), // 7 -> {8}
- Collections.emptyList()); // 8 -> {}
-
- private final Map<Flavor, FlavorSpareCount> flavorSpareCountByFlavor = flavors.stream()
- .collect(Collectors.toMap(
- i -> i,
- i -> mock(FlavorSpareCount.class)));
-
- private final FlavorSpareChecker.SpareNodesPolicy spareNodesPolicy = mock(FlavorSpareChecker.SpareNodesPolicy.class);
- private FlavorSpareChecker flavorSpareChecker = new FlavorSpareChecker(spareNodesPolicy, flavorSpareCountByFlavor);
-
-
- @Test
- public void canRetireUnallocated_Successfully() {
- Flavor flavorToRetire = flavors.get(0);
- FlavorSpareCount flavorSpareCount = flavorSpareCountByFlavor.get(flavorToRetire);
- when(flavorSpareCount.hasReady()).thenReturn(true);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(true);
-
- assertTrue(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement(0);
- }
-
- @Test
- public void canRetireUnallocated_NoReadyForFlavor() {
- Flavor flavorToRetire = flavors.get(0);
- FlavorSpareCount flavorSpareCount = flavorSpareCountByFlavor.get(flavorToRetire);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(true);
-
- assertFalse(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement();
- }
-
- @Test
- public void canRetireUnallocated_NoSpareForFlavor() {
- Flavor flavorToRetire = flavors.get(0);
- FlavorSpareCount flavorSpareCount = flavorSpareCountByFlavor.get(flavorToRetire);
- when(flavorSpareCount.hasReady()).thenReturn(true);
-
- assertFalse(flavorSpareChecker.canRetireUnallocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement();
- }
-
- @Test
- public void canRetireAllocated_LeafFlavor_Successfully() {
- Flavor flavorToRetire = flavors.get(0);
-
- // If we want to retire flavor 0, then we must have enough spares & ready of flavor 0 and all
- // other flavor that it replaces transitively
- Stream.of(0, 1, 3, 4, 5)
- .map(flavors::get)
- .map(flavorSpareCountByFlavor::get)
- .forEach(flavorSpareCount -> {
- when(flavorSpareCount.hasReady()).thenReturn(true);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(true);
- });
-
- assertTrue(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement(0, 1, 3, 4, 5);
- }
-
- @Test
- public void canRetireAllocated_LeafFlavor_NoSparesForPossibleWantedFlavor() {
- Flavor flavorToRetire = flavors.get(0);
-
- // Flavor 4 is transitively replaced by flavor 0, even though we have enough spares of flavor 0,
- // we cannot retire it if there are not enough spares of flavor 4
- Stream.of(0, 1, 3, 5)
- .map(flavors::get)
- .map(flavorSpareCountByFlavor::get)
- .forEach(flavorSpareCount -> {
- when(flavorSpareCount.hasReady()).thenReturn(true);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(true);
- });
-
- assertFalse(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement();
- }
-
- @Test
- public void canRetireAllocated_CenterNode_Successfully() {
- Flavor flavorToRetire = flavors.get(1);
-
- Stream.of(1, 3, 4, 5)
- .map(flavors::get)
- .map(flavorSpareCountByFlavor::get)
- .forEach(flavorSpareCount -> {
- when(flavorSpareCount.hasReady()).thenReturn(true);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(true);
- });
-
- assertTrue(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement(1, 3, 4, 5);
- }
-
- @Test
- public void canRetireAllocated_CenterNode_NoNodeRepoFlavorNodes_Successfully() {
- Flavor flavorToRetire = flavors.get(1);
-
- // If we want to retire a node with node-repo flavor 1, but there are no ready nodes of flavor-1,
- // we must ensure there are spare nodes of flavors that replace flavor 1
- Stream.of(0, 1, 2, 3, 4, 5)
- .map(flavors::get)
- .map(flavorSpareCountByFlavor::get)
- .forEach(flavorSpareCount -> {
- when(flavorSpareCount.hasReady()).thenReturn(true);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(true);
- });
- when(flavorSpareCountByFlavor.get(flavorToRetire).hasReady()).thenReturn(false);
- when(flavorSpareCountByFlavor.get(flavors.get(0)).getNumReadyAmongReplacees()).thenReturn(1L);
- when(flavorSpareCountByFlavor.get(flavors.get(2)).getNumReadyAmongReplacees()).thenReturn(1L);
-
- assertTrue(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement(0, 2, 3, 4, 5);
- }
-
- @Test
- public void canRetireAllocated_CenterNode_NoNodeRepoFlavorNodes_NoImmediateSpare() {
- Flavor flavorToRetire = flavors.get(1);
-
- // Same as above, but now one of the flavors that could replace flavor 1 (flavor 2) does not have enough spares
- Stream.of(0, 1, 3, 4, 5)
- .map(flavors::get)
- .map(flavorSpareCountByFlavor::get)
- .forEach(flavorSpareCount -> {
- when(flavorSpareCount.hasReady()).thenReturn(true);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(true);
- });
- when(flavorSpareCountByFlavor.get(flavorToRetire).hasReady()).thenReturn(false);
- when(flavorSpareCountByFlavor.get(flavors.get(0)).getNumReadyAmongReplacees()).thenReturn(1L);
- when(flavorSpareCountByFlavor.get(flavors.get(2)).getNumReadyAmongReplacees()).thenReturn(1L);
-
- assertFalse(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement();
- }
-
- @Test
- public void canRetireAllocated_CenterNode_NoNodeRepoFlavorNodes_SkipEmptyImmediate() {
- Flavor flavorToRetire = flavors.get(1);
-
- // Flavor 2 still has no spares, but also the sum of ready nodes in its replaces tree is 0, so we should
- // be able to continue
- Stream.of(0, 1, 3, 4, 5)
- .map(flavors::get)
- .map(flavorSpareCountByFlavor::get)
- .forEach(flavorSpareCount -> {
- when(flavorSpareCount.hasReady()).thenReturn(true);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(true);
- });
- when(flavorSpareCountByFlavor.get(flavorToRetire).hasReady()).thenReturn(false);
- when(flavorSpareCountByFlavor.get(flavors.get(0)).getNumReadyAmongReplacees()).thenReturn(1L);
- when(flavorSpareCountByFlavor.get(flavors.get(2)).getNumReadyAmongReplacees()).thenReturn(0L);
-
- assertTrue(flavorSpareChecker.canRetireAllocatedNodeWithFlavor(flavorToRetire));
- verifyDecrement(0, 3, 4, 5);
- }
-
- private void verifyDecrement(int... decrementFlavorIds) {
- Set<Flavor> decrementedFlavors = Arrays.stream(decrementFlavorIds).boxed().map(flavors::get).collect(Collectors.toSet());
- for (Flavor flavor : flavors) {
- int times = decrementedFlavors.contains(flavor) ? 1 : 0;
- verify(flavorSpareCountByFlavor.get(flavor), times(times)).decrementNumberOfReady();
- }
- }
-
- @Before
- public void setup() {
- Map<Flavor, FlavorSpareCount> flavorSpareCountGraph = FlavorSpareCount.constructFlavorSpareCountGraph(flavors);
- flavorSpareCountByFlavor.forEach((flavor, flavorSpareCount) -> {
- Set<FlavorSpareCount> possibleWantedFlavors = flavorSpareCountGraph.get(flavor).getPossibleWantedFlavors()
- .stream().map(FlavorSpareCount::getFlavor).map(flavorSpareCountByFlavor::get).collect(Collectors.toSet());
- Set<FlavorSpareCount> immediateReplacees = flavorSpareCountGraph.get(flavor).getImmediateReplacees()
- .stream().map(FlavorSpareCount::getFlavor).map(flavorSpareCountByFlavor::get).collect(Collectors.toSet());
-
- doNothing().when(flavorSpareCount).decrementNumberOfReady();
- when(flavorSpareCount.hasReady()).thenReturn(false);
- when(flavorSpareCount.getPossibleWantedFlavors()).thenReturn(possibleWantedFlavors);
- when(flavorSpareCount.getImmediateReplacees()).thenReturn(immediateReplacees);
- when(spareNodesPolicy.hasSpare(flavorSpareCount)).thenReturn(false);
- });
- }
-}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCountTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCountTest.java
deleted file mode 100644
index cb9c5c02c65..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorSpareCountTest.java
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.provisioning;
-
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeFlavors;
-import com.yahoo.config.provisioning.FlavorsConfig;
-import org.junit.Test;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * @author freva
- */
-public class FlavorSpareCountTest {
- /* Creates flavors where 'replaces' graph that looks like this (largest flavor at the bottom):
- * 5
- * |
- * |
- * 3 4 8
- * \ / \ |
- * \ / \ |
- * 1 6 7
- * / \
- * / \
- * 0 2
- */
- private final List<Flavor> flavors = makeFlavors(
- Collections.singletonList(1), // 0 -> {1}
- Arrays.asList(3, 4), // 1 -> {3, 4}
- Collections.singletonList(1), // 2 -> {1}
- Collections.singletonList(5), // 3 -> {5}
- Collections.emptyList(), // 4 -> {}
- Collections.emptyList(), // 5 -> {}
- Collections.singletonList(4), // 6 -> {4}
- Collections.singletonList(8), // 7 -> {8}
- Collections.emptyList()); // 8 -> {}
-
- private final Map<Flavor, FlavorSpareCount> flavorSpareCountByFlavor =
- FlavorSpareCount.constructFlavorSpareCountGraph(flavors);
-
- @Test
- public void testFlavorSpareCountGraph() {
- List<List<Integer>> expectedPossibleWantedFlavorsByFlavorId = Arrays.asList(
- Arrays.asList(0, 1, 3, 4, 5),
- Arrays.asList(1, 3, 4, 5),
- Arrays.asList(1, 2, 3, 4, 5),
- Arrays.asList(3, 5),
- Collections.singletonList(4),
- Collections.singletonList(5),
- Arrays.asList(4, 6),
- Arrays.asList(7, 8),
- Collections.singletonList(8));
-
- List<List<Integer>> expectedImmediateReplaceesByFlavorId = Arrays.asList(
- Collections.emptyList(),
- Arrays.asList(0, 2),
- Collections.emptyList(),
- Collections.singletonList(1),
- Arrays.asList(1, 6),
- Collections.singletonList(3),
- Collections.emptyList(),
- Collections.emptyList(),
- Collections.singletonList(7));
-
- for (int i = 0; i < flavors.size(); i++) {
- Flavor flavor = flavors.get(i);
- FlavorSpareCount flavorSpareCount = flavorSpareCountByFlavor.get(flavor);
- Set<FlavorSpareCount> expectedPossibleWantedFlavors = expectedPossibleWantedFlavorsByFlavorId.get(i)
- .stream().map(flavors::get).map(flavorSpareCountByFlavor::get).collect(Collectors.toSet());
- Set<FlavorSpareCount> expectedImmediateReplacees = expectedImmediateReplaceesByFlavorId.get(i)
- .stream().map(flavors::get).map(flavorSpareCountByFlavor::get).collect(Collectors.toSet());
-
- assertEquals(expectedPossibleWantedFlavors, flavorSpareCount.getPossibleWantedFlavors());
- assertEquals(expectedImmediateReplacees, flavorSpareCount.getImmediateReplacees());
- }
- }
-
- @Test
- public void testSumOfReadyAmongReplacees() {
- long[] numReadyPerFlavor = {3, 5, 2, 6, 2, 7, 4, 3, 4};
- for (int i = 0; i < numReadyPerFlavor.length; i++) {
- flavorSpareCountByFlavor.get(flavors.get(i))
- .updateReadyAndActiveCounts(numReadyPerFlavor[i], (long) (100 * Math.random()));
- }
-
- long[] expectedSumTrees = {3, 10, 2, 16, 16, 23, 4, 3, 7};
- for (int i = 0; i < expectedSumTrees.length; i++) {
- assertEquals(expectedSumTrees[i], flavorSpareCountByFlavor.get(flavors.get(i)).getNumReadyAmongReplacees());
- }
- }
-
- /**
- * Takes in variable number of List of Integers:
- * For each list a flavor is created
- * For each element, n, in list, the new flavor replace n'th flavor
- */
- @SafeVarargs
- static List<Flavor> makeFlavors(List<Integer>... replaces) {
- FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
- for (int i = 0; i < replaces.length; i++) {
- FlavorsConfig.Flavor.Builder builder = flavorConfigBuilder
- .addFlavor("flavor-" + i, 1. /* cpu*/, 3. /* mem GB*/, 2. /*disk GB*/, Flavor.Type.BARE_METAL);
-
- for (Integer replacesId : replaces[i]) {
- flavorConfigBuilder.addReplaces("flavor-" + replacesId, builder);
- }
- }
- return new NodeFlavors(flavorConfigBuilder.build())
- .getFlavors().stream()
- .sorted(Comparator.comparing(Flavor::name))
- .collect(Collectors.toList());
- }
-}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java
index c6f91b6a0b9..8e1bf4380cc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java
@@ -25,7 +25,6 @@ import org.junit.runners.Parameterized;
import java.util.List;
import java.util.Optional;
-import java.util.Set;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
@@ -225,7 +224,7 @@ public class InfraDeployerImplTest {
private Node addNode(int id, Node.State state, Optional<Version> wantedVespaVersion) {
Node node = tester.addNode("id-" + id, "node-" + id, "default", nodeType);
Optional<Node> nodeWithAllocation = wantedVespaVersion.map(version -> {
- ClusterSpec clusterSpec = ClusterSpec.from(ClusterSpec.Type.admin, new ClusterSpec.Id("clusterid"), ClusterSpec.Group.from(0), version, false, Set.of());
+ ClusterSpec clusterSpec = ClusterSpec.from(ClusterSpec.Type.admin, new ClusterSpec.Id("clusterid"), ClusterSpec.Group.from(0), version, false);
ClusterMembership membership = ClusterMembership.from(clusterSpec, 1);
Allocation allocation = new Allocation(application.getApplicationId(), membership, new Generation(0, 0), false);
return node.with(allocation);
@@ -233,4 +232,4 @@ public class InfraDeployerImplTest {
return nodeRepository.database().writeTo(state, nodeWithAllocation.orElse(node), Agent.system, Optional.empty());
}
-} \ No newline at end of file
+}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index f97460713a5..77273f98f76 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -4,29 +4,34 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.google.common.collect.Iterators;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.HostSpec;
-import com.yahoo.config.provision.RotationName;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.NodeType;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
import com.yahoo.vespa.hosted.provision.lb.Real;
import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.node.IP;
import org.junit.Test;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
@@ -37,30 +42,35 @@ public class LoadBalancerProvisionerTest {
private final ApplicationId app1 = ApplicationId.from("tenant1", "application1", "default");
private final ApplicationId app2 = ApplicationId.from("tenant2", "application2", "default");
+ private final ApplicationId infraApp1 = ApplicationId.from("vespa", "tenant-host", "default");
+
private ProvisioningTester tester = new ProvisioningTester.Builder().build();
@Test
public void provision_load_balancer() {
+ Supplier<List<LoadBalancer>> lbApp1 = () -> tester.nodeRepository().loadBalancers().owner(app1).asList();
+ Supplier<List<LoadBalancer>> lbApp2 = () -> tester.nodeRepository().loadBalancers().owner(app2).asList();
ClusterSpec.Id containerCluster1 = ClusterSpec.Id.from("qrs1");
ClusterSpec.Id contentCluster = ClusterSpec.Id.from("content");
- Set<RotationName> rotationsCluster1 = Set.of(RotationName.from("r1-1"), RotationName.from("r1-2"));
- tester.activate(app1, prepare(app1,
- clusterRequest(ClusterSpec.Type.container, containerCluster1, rotationsCluster1),
- clusterRequest(ClusterSpec.Type.content, contentCluster)));
- tester.activate(app2, prepare(app2,
- clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs"))));
// Provision a load balancer for each application
- Supplier<List<LoadBalancer>> loadBalancers = () -> tester.nodeRepository().loadBalancers().owner(app1).asList();
- assertEquals(1, loadBalancers.get().size());
-
- assertEquals(app1, loadBalancers.get().get(0).id().application());
- assertEquals(containerCluster1, loadBalancers.get().get(0).id().cluster());
- assertEquals(Collections.singleton(4443), loadBalancers.get().get(0).instance().ports());
- assertEquals("127.0.0.1", get(loadBalancers.get().get(0).instance().reals(), 0).ipAddress());
- assertEquals(4080, get(loadBalancers.get().get(0).instance().reals(), 0).port());
- assertEquals("127.0.0.2", get(loadBalancers.get().get(0).instance().reals(), 1).ipAddress());
- assertEquals(4080, get(loadBalancers.get().get(0).instance().reals(), 1).port());
+ var nodes = prepare(app1,
+ clusterRequest(ClusterSpec.Type.container, containerCluster1),
+ clusterRequest(ClusterSpec.Type.content, contentCluster));
+ assertEquals(1, lbApp1.get().size());
+ assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbApp1.get().get(0).instance().reals().size());
+ tester.activate(app1, nodes);
+ tester.activate(app2, prepare(app2, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs"))));
+ assertEquals(1, lbApp2.get().size());
+
+ // Reals are configured after activation
+ assertEquals(app1, lbApp1.get().get(0).id().application());
+ assertEquals(containerCluster1, lbApp1.get().get(0).id().cluster());
+ assertEquals(Collections.singleton(4443), lbApp1.get().get(0).instance().ports());
+ assertEquals("127.0.0.1", get(lbApp1.get().get(0).instance().reals(), 0).ipAddress());
+ assertEquals(4080, get(lbApp1.get().get(0).instance().reals(), 0).port());
+ assertEquals("127.0.0.2", get(lbApp1.get().get(0).instance().reals(), 1).ipAddress());
+ assertEquals(4080, get(lbApp1.get().get(0).instance().reals(), 1).port());
// A container is failed
Supplier<List<Node>> containers = () -> tester.getNodes(app1).type(ClusterSpec.Type.container).asList();
@@ -79,17 +89,17 @@ public class LoadBalancerProvisionerTest {
.noneMatch(hostname -> hostname.equals(toFail.hostname())));
assertEquals(containers.get().get(0).hostname(), get(loadBalancer.instance().reals(), 0).hostname().value());
assertEquals(containers.get().get(1).hostname(), get(loadBalancer.instance().reals(), 1).hostname().value());
+ assertSame("State is unchanged", LoadBalancer.State.active, loadBalancer.state());
// Add another container cluster
- Set<RotationName> rotationsCluster2 = Set.of(RotationName.from("r2-1"), RotationName.from("r2-2"));
ClusterSpec.Id containerCluster2 = ClusterSpec.Id.from("qrs2");
tester.activate(app1, prepare(app1,
- clusterRequest(ClusterSpec.Type.container, containerCluster1, rotationsCluster1),
- clusterRequest(ClusterSpec.Type.container, containerCluster2, rotationsCluster2),
+ clusterRequest(ClusterSpec.Type.container, containerCluster1),
+ clusterRequest(ClusterSpec.Type.container, containerCluster2),
clusterRequest(ClusterSpec.Type.content, contentCluster)));
// Load balancer is provisioned for second container cluster
- assertEquals(2, loadBalancers.get().size());
+ assertEquals(2, lbApp1.get().size());
List<HostName> activeContainers = tester.getNodes(app1, Node.State.active)
.type(ClusterSpec.Type.container).asList()
.stream()
@@ -97,7 +107,7 @@ public class LoadBalancerProvisionerTest {
.map(HostName::from)
.sorted()
.collect(Collectors.toList());
- List<HostName> reals = loadBalancers.get().stream()
+ List<HostName> reals = lbApp1.get().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::reals)
.flatMap(Collection::stream)
@@ -106,43 +116,120 @@ public class LoadBalancerProvisionerTest {
.collect(Collectors.toList());
assertEquals(activeContainers, reals);
- // Application is removed and load balancer is deactivated
+ // Application is removed, nodes and load balancer are deactivated
NestedTransaction removeTransaction = new NestedTransaction();
tester.provisioner().remove(removeTransaction, app1);
removeTransaction.commit();
-
- assertEquals(2, loadBalancers.get().size());
- assertTrue("Deactivated load balancers", loadBalancers.get().stream().allMatch(LoadBalancer::inactive));
+ dirtyNodesOf(app1);
+ assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().getNodes(app1, Node.State.reserved, Node.State.active).isEmpty());
+ assertEquals(2, lbApp1.get().size());
+ assertTrue("Deactivated load balancers", lbApp1.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.inactive));
+ assertTrue("Load balancers for " + app2 + " remain active", lbApp2.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.active));
// Application is redeployed with one cluster and load balancer is re-activated
tester.activate(app1, prepare(app1,
clusterRequest(ClusterSpec.Type.container, containerCluster1),
clusterRequest(ClusterSpec.Type.content, contentCluster)));
- assertFalse("Re-activated load balancer for " + containerCluster1,
- loadBalancers.get().stream()
+ assertSame("Re-activated load balancer for " + containerCluster1, LoadBalancer.State.active,
+ lbApp1.get().stream()
.filter(lb -> lb.id().cluster().equals(containerCluster1))
+ .map(LoadBalancer::state)
.findFirst()
- .orElseThrow()
- .inactive());
+ .orElseThrow());
}
- private ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id) {
- return clusterRequest(type, id, Collections.emptySet());
+ @Test
+ public void provision_load_balancers_with_dynamic_node_provisioning() {
+ var nodes = prepare(app1, Capacity.fromCount(2, new NodeResources(1, 1, 1), false, true),
+ true,
+ clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")));
+ Supplier<LoadBalancer> lb = () -> tester.nodeRepository().loadBalancers().owner(app1).asList().get(0);
+ assertTrue("Load balancer provisioned with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
+ assignIps(tester.nodeRepository().getNodes(app1));
+ tester.activate(app1, nodes);
+ assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
+
+ // Application is removed, nodes are deleted and load balancer is deactivated
+ NestedTransaction removeTransaction = new NestedTransaction();
+ tester.provisioner().remove(removeTransaction, app1);
+ removeTransaction.commit();
+ tester.nodeRepository().database().removeNodes(tester.nodeRepository().getNodes());
+ assertTrue("Nodes are deleted", tester.nodeRepository().getNodes().isEmpty());
+ assertSame("Load balancer is deactivated", LoadBalancer.State.inactive, lb.get().state());
+
+ // Application is redeployed
+ nodes = prepare(app1, Capacity.fromCount(2, new NodeResources(1, 1, 1), false, true),
+ true,
+ clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")));
+ assertTrue("Load balancer is reconfigured with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
+ assignIps(tester.nodeRepository().getNodes(app1));
+ tester.activate(app1, nodes);
+ assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
}
- private ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id, Set<RotationName> rotations) {
- return ClusterSpec.request(type, id, Version.fromString("6.42"), false, rotations);
+ @Test
+ public void does_not_provision_load_balancers_for_non_tenant_node_type() {
+ tester.activate(infraApp1, prepare(infraApp1, Capacity.fromRequiredNodeType(NodeType.host),
+ false,
+ clusterRequest(ClusterSpec.Type.container,
+ ClusterSpec.Id.from("tenant-host"))));
+ assertTrue("No load balancer provisioned", tester.loadBalancerService().instances().isEmpty());
+ assertEquals(List.of(), tester.nodeRepository().loadBalancers().owner(infraApp1).asList());
+ }
+
+ @Test
+ public void does_not_provision_load_balancers_for_non_container_cluster() {
+ tester.activate(app1, prepare(app1, clusterRequest(ClusterSpec.Type.content,
+ ClusterSpec.Id.from("tenant-host"))));
+ assertTrue("No load balancer provisioned", tester.loadBalancerService().instances().isEmpty());
+ assertEquals(List.of(), tester.nodeRepository().loadBalancers().owner(app1).asList());
+ }
+
+ private void dirtyNodesOf(ApplicationId application) {
+ tester.nodeRepository().setDirty(tester.nodeRepository().getNodes(application), Agent.system, this.getClass().getSimpleName());
}
private Set<HostSpec> prepare(ApplicationId application, ClusterSpec... specs) {
- tester.makeReadyNodes(specs.length * 2, "d-1-1-1");
+ return prepare(application, Capacity.fromCount(2, new NodeResources(1, 1, 1), false, true), false, specs);
+ }
+
+ private Set<HostSpec> prepare(ApplicationId application, Capacity capacity, boolean dynamicDockerNodes, ClusterSpec... specs) {
+ if (dynamicDockerNodes) {
+ makeDynamicDockerNodes(specs.length * 2, capacity.type());
+ } else {
+ tester.makeReadyNodes(specs.length * 2, "d-1-1-1", capacity.type());
+ }
Set<HostSpec> allNodes = new LinkedHashSet<>();
for (ClusterSpec spec : specs) {
- allNodes.addAll(tester.prepare(application, spec, 2, 1, new NodeResources(1, 1, 1)));
+ allNodes.addAll(tester.prepare(application, spec, capacity, 1, false));
}
return allNodes;
}
+ private void makeDynamicDockerNodes(int n, NodeType nodeType) {
+ List<Node> nodes = new ArrayList<>(n);
+ for (int i = 1; i <= n; i++) {
+ var node = Node.createDockerNode(Set.of(), Set.of(), "node" + i, Optional.empty(),
+ NodeResources.fromLegacyName("d-1-1-1"), nodeType);
+ nodes.add(node);
+ }
+ nodes = tester.nodeRepository().database().addNodesInState(nodes, Node.State.reserved);
+ nodes = tester.nodeRepository().setDirty(nodes, Agent.system, getClass().getSimpleName());
+ tester.nodeRepository().setReady(nodes, Agent.system, getClass().getSimpleName());
+ }
+
+ private void assignIps(List<Node> nodes) {
+ try (var lock = tester.nodeRepository().lockAllocation()) {
+ for (int i = 0; i < nodes.size(); i++) {
+ tester.nodeRepository().write(nodes.get(i).with(IP.Config.EMPTY.with(Set.of("127.0.0." + i))), lock);
+ }
+ }
+ }
+
+ private static ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id) {
+ return ClusterSpec.request(type, id, Version.fromString("6.42"), false);
+ }
+
private static <T> T get(Set<T> set, int position) {
return Iterators.get(set.iterator(), position, null);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
index 14720d4215c..a5d5fb81147 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
@@ -201,7 +201,7 @@ public class MultigroupProvisioningTest {
assertEquals("No additional groups are retained containing retired nodes", wantedGroups, allGroups.size());
}
- private ClusterSpec cluster() { return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet()); }
+ private ClusterSpec cluster() { return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false); }
private Set<HostSpec> prepare(ApplicationId application, Capacity capacity, int groupCount, ProvisioningTester tester) {
return new HashSet<>(tester.prepare(application, cluster(), capacity, groupCount));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizerTest.java
index b2b80c6f4e4..b65c4d8b4f6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizerTest.java
@@ -50,7 +50,7 @@ public class NodePrioritizerTest {
Assert.assertTrue(NodePrioritizer.isPreferredNodeToBeRelocated(nodes, c, parent));
// Unallocated over allocated
- ClusterSpec spec = ClusterSpec.from(ClusterSpec.Type.content, ClusterSpec.Id.from("mycluster"), ClusterSpec.Group.from(0), Version.fromString("6.142.22"), false, Collections.emptySet());
+ ClusterSpec spec = ClusterSpec.from(ClusterSpec.Type.content, ClusterSpec.Id.from("mycluster"), ClusterSpec.Group.from(0), Version.fromString("6.142.22"), false);
c = c.allocate(ApplicationId.defaultId(), ClusterMembership.from(spec, 0), Instant.now());
nodes.remove(c);
nodes.add(c);
@@ -60,7 +60,7 @@ public class NodePrioritizerTest {
Assert.assertFalse(NodePrioritizer.isPreferredNodeToBeRelocated(nodes, c, parent));
// Container over content
- ClusterSpec spec2 = ClusterSpec.from(ClusterSpec.Type.container, ClusterSpec.Id.from("mycluster"), ClusterSpec.Group.from(0), Version.fromString("6.142.22"), false, Collections.emptySet());
+ ClusterSpec spec2 = ClusterSpec.from(ClusterSpec.Type.container, ClusterSpec.Id.from("mycluster"), ClusterSpec.Group.from(0), Version.fromString("6.142.22"), false);
d = d.allocate(ApplicationId.defaultId(), ClusterMembership.from(spec2, 0), Instant.now());
nodes.remove(d);
nodes.add(d);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
index 43b2657342f..cfad4757446 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
@@ -36,7 +36,7 @@ public class NodeTypeProvisioningTest {
private final ApplicationId application = tester.makeApplicationId(); // application using proxy nodes
private final Capacity capacity = Capacity.fromRequiredNodeType(NodeType.proxy);
private final ClusterSpec clusterSpec = ClusterSpec.request(
- ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false);
@Before
public void setup() {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 0d1f334c66d..e87a41455c1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;
-import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
@@ -14,7 +13,6 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.HostFilter;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.InstanceName;
-import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.config.provision.RegionName;
@@ -497,7 +495,7 @@ public class ProvisioningTest {
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content,
ClusterSpec.Id.from("music"),
new com.yahoo.component.Version(4, 5, 6),
- false, Collections.emptySet());
+ false);
tester.prepare(application, cluster, Capacity.fromNodeCount(5, Optional.empty(), false, false), 1);
// No exception; Success
}
@@ -792,8 +790,8 @@ public class ProvisioningTest {
tester.makeReadyNodes(6, "large-variant-variant"); //cost = 11
ApplicationId applicationId = tester.makeApplicationId();
- ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.42"), false, Collections.emptySet());
- ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Version.fromString("6.42"), false, Collections.emptySet());
+ ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.42"), false);
+ ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Version.fromString("6.42"), false);
List<HostSpec> containerNodes = tester.prepare(applicationId, containerClusterSpec, 5, 1,
NodeResources.fromLegacyName("large"));
@@ -830,10 +828,10 @@ public class ProvisioningTest {
int content1Size, boolean required, NodeResources flavor, Version wantedVersion,
ProvisioningTester tester) {
// "deploy prepare" with a two container clusters and a storage cluster having of two groups
- ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0"), wantedVersion, false, Collections.emptySet());
- ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1"), wantedVersion, false, Collections.emptySet());
- ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0"), wantedVersion, false, Collections.emptySet());
- ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1"), wantedVersion, false, Collections.emptySet());
+ ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0"), wantedVersion, false);
+ ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1"), wantedVersion, false);
+ ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0"), wantedVersion, false);
+ ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1"), wantedVersion, false);
Set<HostSpec> container0 = prepare(application, containerCluster0, container0Size, 1, required, flavor, tester);
Set<HostSpec> container1 = prepare(application, containerCluster1, container1Size, 1, required, flavor, tester);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index c8051c3bdee..294c153f86f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -139,16 +139,21 @@ public class ProvisioningTester {
}
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity, int groups) {
+ return prepare(application, cluster, capacity, groups, true);
+ }
+
+ public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity, int groups, boolean idempotentPrepare) {
Set<String> reservedBefore = toHostNames(nodeRepository.getNodes(application, Node.State.reserved));
Set<String> inactiveBefore = toHostNames(nodeRepository.getNodes(application, Node.State.inactive));
- // prepare twice to ensure idempotence
List<HostSpec> hosts1 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger);
- List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger);
- assertEquals(hosts1, hosts2);
+ if (idempotentPrepare) { // prepare twice to ensure idempotence
+ List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger);
+ assertEquals(hosts1, hosts2);
+ }
Set<String> newlyActivated = toHostNames(nodeRepository.getNodes(application, Node.State.reserved));
newlyActivated.removeAll(reservedBefore);
newlyActivated.removeAll(inactiveBefore);
- return hosts2;
+ return hosts1;
}
public void activate(ApplicationId application, Collection<HostSpec> hosts) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index 1f03db8f7c2..57a9e3c0047 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -15,7 +15,6 @@ import com.yahoo.vespa.hosted.provision.Node;
import org.junit.Test;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
@@ -36,8 +35,8 @@ import static org.junit.Assert.assertNotNull;
public class VirtualNodeProvisioningTest {
private static final NodeResources flavor = new NodeResources(4, 8, 100);
- private static final ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.42"), false, Collections.emptySet());
- private static final ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Version.fromString("6.42"), false, Collections.emptySet());
+ private static final ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.42"), false);
+ private static final ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Version.fromString("6.42"), false);
private ProvisioningTester tester = new ProvisioningTester.Builder().build();
private ApplicationId applicationId = tester.makeApplicationId();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
index 6524292f48c..bfb24d30284 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
@@ -831,7 +831,7 @@ public class RestApiTest {
@Test
public void test_load_balancers() throws Exception {
assertFile(new Request("http://localhost:8080/loadbalancers/v1/"), "load-balancers.json");
- assertFile(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant4.application4.instance4"), "load-balancers.json");
+ assertFile(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant4.application4.instance4"), "load-balancers-single.json");
assertResponse(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant.nonexistent.default"), "{\"loadBalancers\":[]}");
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json
new file mode 100644
index 00000000000..67d2c3bfa4b
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json
@@ -0,0 +1,36 @@
+{
+ "loadBalancers": [
+ {
+ "id": "tenant4:application4:instance4:id4",
+ "state": "active",
+ "changedAt": 123,
+ "application": "application4",
+ "tenant": "tenant4",
+ "instance": "instance4",
+ "cluster": "id4",
+ "hostname": "lb-tenant4.application4.instance4-id4",
+ "dnsZone": "zone-id-1",
+ "networks": [
+ "10.2.3.0/24",
+ "10.4.5.0/24"
+ ],
+ "ports": [
+ 4443
+ ],
+ "reals": [
+ {
+ "hostname": "host13.yahoo.com",
+ "ipAddress": "127.0.13.1",
+ "port": 4080
+ },
+ {
+ "hostname": "host14.yahoo.com",
+ "ipAddress": "127.0.14.1",
+ "port": 4080
+ }
+ ],
+ "rotations": [],
+ "inactive": false
+ }
+ ]
+}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json
index d2c4d0ac857..c9a45a9c3da 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json
@@ -1,7 +1,41 @@
{
"loadBalancers": [
{
+ "id": "tenant1:application1:instance1:id1",
+ "state": "reserved",
+ "changedAt": 123,
+ "application": "application1",
+ "tenant": "tenant1",
+ "instance": "instance1",
+ "cluster": "id1",
+ "hostname": "lb-tenant1.application1.instance1-id1",
+ "dnsZone": "zone-id-1",
+ "networks": [
+ "10.2.3.0/24",
+ "10.4.5.0/24"
+ ],
+ "ports": [
+ 4443
+ ],
+ "reals": [
+ {
+ "hostname": "host1.yahoo.com",
+ "ipAddress": "127.0.1.1",
+ "port": 4080
+ },
+ {
+ "hostname": "host10.yahoo.com",
+ "ipAddress": "127.0.10.1",
+ "port": 4080
+ }
+ ],
+ "rotations": [],
+ "inactive": false
+ },
+ {
"id": "tenant4:application4:instance4:id4",
+ "state": "active",
+ "changedAt": 123,
"application": "application4",
"tenant": "tenant4",
"instance": "instance4",
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/maintenance.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/maintenance.json
index 1432d2f4ea5..b72523963c0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/maintenance.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/maintenance.json
@@ -25,9 +25,6 @@
"name": "NodeRebooter"
},
{
- "name": "NodeRetirer"
- },
- {
"name": "OperatorChangeApplicationMaintainer"
},
{
diff --git a/parent/pom.xml b/parent/pom.xml
index 1855553bc20..e2012214d89 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -515,6 +515,16 @@
<version>${curator.version}</version>
</dependency>
<dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
+ <version>${junit.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.vintage</groupId>
+ <artifactId>junit-vintage-engine</artifactId>
+ <version>${junit.version}</version>
+ </dependency>
+ <dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
@@ -765,7 +775,8 @@
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<test.hide>true</test.hide>
<doclint>all</doclint>
- <surefire.version>2.21.0</surefire.version>
+ <surefire.version>2.22.0</surefire.version>
+ <junit.version>5.4.2</junit.version>
<protobuf.version>3.7.0</protobuf.version>
</properties>
diff --git a/searchcommon/src/tests/schema/load-save-cfg/indexschema.cfg b/searchcommon/src/tests/schema/load-save-cfg/indexschema.cfg
index b6c547c52c9..b9d82b9b569 100644
--- a/searchcommon/src/tests/schema/load-save-cfg/indexschema.cfg
+++ b/searchcommon/src/tests/schema/load-save-cfg/indexschema.cfg
@@ -5,7 +5,7 @@ indexfield[1].name b
indexfield[1].datatype INT64
indexfield[2].name c
indexfield[2].datatype STRING
-indexfield[2].experimentalpostinglistformat true
+indexfield[2].interleavedfeatures true
fieldset[1]
fieldset[0].name default
fieldset[0].field[2]
diff --git a/searchcommon/src/tests/schema/schema_test.cpp b/searchcommon/src/tests/schema/schema_test.cpp
index e360ee1ba7a..3d35b11a51a 100644
--- a/searchcommon/src/tests/schema/schema_test.cpp
+++ b/searchcommon/src/tests/schema/schema_test.cpp
@@ -32,7 +32,7 @@ assertIndexField(const Schema::IndexField& exp,
{
assertField(exp, act);
EXPECT_EQ(exp.getAvgElemLen(), act.getAvgElemLen());
- EXPECT_EQ(exp.use_experimental_posting_list_format(), act.use_experimental_posting_list_format());
+ EXPECT_EQ(exp.use_interleaved_features(), act.use_interleaved_features());
}
void
@@ -183,7 +183,7 @@ TEST(SchemaTest, test_load_and_save)
EXPECT_EQ(3u, s.getNumIndexFields());
assertIndexField(SIF("a", SDT::STRING), s.getIndexField(0));
assertIndexField(SIF("b", SDT::INT64), s.getIndexField(1));
- assertIndexField(SIF("c", SDT::STRING).set_experimental_posting_list_format(true), s.getIndexField(2));
+ assertIndexField(SIF("c", SDT::STRING).set_interleaved_features(true), s.getIndexField(2));
EXPECT_EQ(9u, s.getNumAttributeFields());
assertField(SAF("a", SDT::STRING, SCT::SINGLE),
@@ -448,7 +448,7 @@ TEST(SchemaTest, require_that_index_field_is_loaded_with_default_values_when_pro
ASSERT_EQ(1, index_fields.size());
assertIndexField(SIF("foo", DataType::STRING, CollectionType::SINGLE).
setAvgElemLen(512).
- set_experimental_posting_list_format(false),
+ set_interleaved_features(false),
index_fields[0]);
assertIndexField(SIF("foo", DataType::STRING, CollectionType::SINGLE), index_fields[0]);
}
diff --git a/searchcommon/src/vespa/searchcommon/common/schema.cpp b/searchcommon/src/vespa/searchcommon/common/schema.cpp
index 6d3bae31508..afc023a68d7 100644
--- a/searchcommon/src/vespa/searchcommon/common/schema.cpp
+++ b/searchcommon/src/vespa/searchcommon/common/schema.cpp
@@ -132,7 +132,7 @@ Schema::Field::operator!=(const Field &rhs) const
Schema::IndexField::IndexField(vespalib::stringref name, DataType dt)
: Field(name, dt),
_avgElemLen(512),
- _experimental_posting_list_format(false)
+ _interleaved_features(false)
{
}
@@ -140,14 +140,14 @@ Schema::IndexField::IndexField(vespalib::stringref name, DataType dt,
CollectionType ct)
: Field(name, dt, ct),
_avgElemLen(512),
- _experimental_posting_list_format(false)
+ _interleaved_features(false)
{
}
Schema::IndexField::IndexField(const std::vector<vespalib::string> &lines)
: Field(lines),
_avgElemLen(ConfigParser::parse<int32_t>("averageelementlen", lines, 512)),
- _experimental_posting_list_format(ConfigParser::parse<bool>("experimentalpostinglistformat", lines, false))
+ _interleaved_features(ConfigParser::parse<bool>("interleavedfeatures", lines, false))
{
}
@@ -156,7 +156,7 @@ Schema::IndexField::write(vespalib::asciistream & os, vespalib::stringref prefix
{
Field::write(os, prefix);
os << prefix << "averageelementlen " << static_cast<int32_t>(_avgElemLen) << "\n";
- os << prefix << "experimentalpostinglistformat " << (_experimental_posting_list_format ? "true" : "false") << "\n";
+ os << prefix << "interleavedfeatures " << (_interleaved_features ? "true" : "false") << "\n";
// TODO: Remove prefix, phrases and positions when breaking downgrade is no longer an issue.
os << prefix << "prefix false" << "\n";
@@ -169,7 +169,7 @@ Schema::IndexField::operator==(const IndexField &rhs) const
{
return Field::operator==(rhs) &&
_avgElemLen == rhs._avgElemLen &&
- _experimental_posting_list_format == rhs._experimental_posting_list_format;
+ _interleaved_features == rhs._interleaved_features;
}
bool
@@ -177,7 +177,7 @@ Schema::IndexField::operator!=(const IndexField &rhs) const
{
return Field::operator!=(rhs) ||
_avgElemLen != rhs._avgElemLen ||
- _experimental_posting_list_format != rhs._experimental_posting_list_format;
+ _interleaved_features != rhs._interleaved_features;
}
Schema::FieldSet::FieldSet(const std::vector<vespalib::string> & lines) :
diff --git a/searchcommon/src/vespa/searchcommon/common/schema.h b/searchcommon/src/vespa/searchcommon/common/schema.h
index bb2163e5577..0b675710e8b 100644
--- a/searchcommon/src/vespa/searchcommon/common/schema.h
+++ b/searchcommon/src/vespa/searchcommon/common/schema.h
@@ -77,8 +77,8 @@ public:
class IndexField : public Field {
private:
uint32_t _avgElemLen;
- // TODO: Remove when experimental posting list format is made default
- bool _experimental_posting_list_format;
+ // TODO: Remove when posting list format with interleaved features is made default
+ bool _interleaved_features;
public:
IndexField(vespalib::stringref name, DataType dt);
@@ -89,8 +89,8 @@ public:
IndexField(const std::vector<vespalib::string> &lines);
IndexField &setAvgElemLen(uint32_t avgElemLen) { _avgElemLen = avgElemLen; return *this; }
- IndexField &set_experimental_posting_list_format(bool value) {
- _experimental_posting_list_format = value;
+ IndexField &set_interleaved_features(bool value) {
+ _interleaved_features = value;
return *this;
}
@@ -98,7 +98,7 @@ public:
vespalib::stringref prefix) const override;
uint32_t getAvgElemLen() const { return _avgElemLen; }
- bool use_experimental_posting_list_format() const { return _experimental_posting_list_format; }
+ bool use_interleaved_features() const { return _interleaved_features; }
bool operator==(const IndexField &rhs) const;
bool operator!=(const IndexField &rhs) const;
diff --git a/searchcommon/src/vespa/searchcommon/common/schemaconfigurer.cpp b/searchcommon/src/vespa/searchcommon/common/schemaconfigurer.cpp
index 59ed15eefb0..a46f99d158d 100644
--- a/searchcommon/src/vespa/searchcommon/common/schemaconfigurer.cpp
+++ b/searchcommon/src/vespa/searchcommon/common/schemaconfigurer.cpp
@@ -145,7 +145,7 @@ SchemaBuilder::build(const IndexschemaConfig &cfg, Schema &schema)
schema.addIndexField(Schema::IndexField(f.name, convertIndexDataType(f.datatype),
convertIndexCollectionType(f.collectiontype)).
setAvgElemLen(f.averageelementlen).
- set_experimental_posting_list_format(f.experimentalpostinglistformat));
+ set_interleaved_features(f.interleavedfeatures));
}
for (size_t i = 0; i < cfg.fieldset.size(); ++i) {
const IndexschemaConfig::Fieldset &fs = cfg.fieldset[i];
diff --git a/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp b/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
index f02bc99a645..9ddf1ff8ac9 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
@@ -226,6 +226,8 @@ Matcher::match(const SearchRequest &request, vespalib::ThreadBundle &threadBundl
total_matching_time.start();
MatchingStats my_stats;
SearchReply::UP reply = std::make_unique<SearchReply>();
+ size_t covered = 0;
+ uint32_t numActiveLids = 0;
{ // we want to measure full set-up and tear-down time as part of
// collateral time
GroupingContext groupingContext(_clock, request.getTimeOfDoom(),
@@ -289,7 +291,7 @@ Matcher::match(const SearchRequest &request, vespalib::ThreadBundle &threadBundl
}
reply = std::move(result->_reply);
- uint32_t numActiveLids = metaStore.getNumActiveLids();
+ numActiveLids = metaStore.getNumActiveLids();
// note: this is actually totalSpace+1, since 0 is reserved
uint32_t totalSpace = metaStore.getCommittedDocIdLimit();
LOG(debug, "docid limit = %d", totalSpace);
@@ -302,7 +304,7 @@ Matcher::match(const SearchRequest &request, vespalib::ThreadBundle &threadBundl
// account for docid 0 reserved
spaceEstimate += 1;
}
- size_t covered = (spaceEstimate * numActiveLids) / totalSpace;
+ covered = (spaceEstimate * numActiveLids) / totalSpace;
LOG(debug, "covered = %zd", covered);
SearchReply::Coverage & coverage = reply->coverage;
@@ -336,8 +338,9 @@ Matcher::match(const SearchRequest &request, vespalib::ThreadBundle &threadBundl
adjustedDuration = 0;
}
_stats.updatesoftDoomFactor(request.getTimeout(), softLimit, adjustedDuration);
- LOG(info, "Triggered softtimeout factor adjustment. request=%1.3f, doomOvertime=%1.3f, limit=%1.3f and duration=%1.3f, rankprofile=%s"
+ LOG(info, "Triggered softtimeout factor adjustment. Coverage = %lu of %u documents. request=%1.3f, doomOvertime=%1.3f, limit=%1.3f and duration=%1.3f, rankprofile=%s"
", factor adjusted from %1.3f to %1.3f",
+ covered, numActiveLids,
request.getTimeout().sec(), my_stats.doomOvertime().sec(), softLimit.sec(), duration.sec(),
request.ranking.c_str(), old, _stats.softDoomFactor());
}
diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp
index a174592eb55..083fe70969c 100644
--- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp
+++ b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp
@@ -11,6 +11,7 @@
#include <vespa/fastos/file.h>
#include <vespa/searchcorespi/flush/closureflushtask.h>
#include <vespa/searchlib/common/serialnumfileheadercontext.h>
+#include <vespa/searchlib/index/schemautil.h>
#include <vespa/searchlib/util/dirtraverse.h>
#include <vespa/searchlib/util/filekit.h>
#include <vespa/vespalib/io/fileutil.h>
@@ -28,6 +29,7 @@ using document::Document;
using search::FixedSourceSelector;
using search::TuneFileAttributes;
using search::index::Schema;
+using search::index::SchemaUtil;
using search::common::FileHeaderContext;
using search::queryeval::ISourceSelector;
using search::queryeval::Source;
@@ -466,7 +468,9 @@ IndexMaintainer::doneInitFlush(FlushArgs *args, IMemoryIndex::SP *new_index)
{
LockGuard lock(_index_update_lock);
if (!_current_index->hasReceivedDocumentInsert() &&
- _source_selector_changes == 0) {
+ _source_selector_changes == 0 &&
+ !_flush_empty_current_index)
+ {
args->_skippedEmptyLast = true; // Skip flush of empty memory index
}
@@ -480,6 +484,7 @@ IndexMaintainer::doneInitFlush(FlushArgs *args, IMemoryIndex::SP *new_index)
_source_selector_changes = 0;
}
_current_index = *new_index;
+ _flush_empty_current_index = false;
}
if (args->_skippedEmptyLast) {
replaceSource(_current_index_id, _current_index);
@@ -723,6 +728,23 @@ IndexMaintainer::warmupDone(ISearchableIndexCollection::SP current)
}
}
+namespace {
+
+bool
+has_matching_interleaved_features(const Schema& old_schema, const Schema& new_schema)
+{
+ for (SchemaUtil::IndexIterator itr(new_schema); itr.isValid(); ++itr) {
+ if (itr.hasMatchingOldFields(old_schema) &&
+ !itr.has_matching_use_interleaved_features(old_schema))
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+}
+
void
IndexMaintainer::doneSetSchema(SetSchemaArgs &args, IMemoryIndex::SP &newIndex)
@@ -758,6 +780,12 @@ IndexMaintainer::doneSetSchema(SetSchemaArgs &args, IMemoryIndex::SP &newIndex)
_frozenMemoryIndexes.emplace_back(args._oldIndex, freezeSerialNum, std::move(saveInfo), oldAbsoluteId);
}
_current_index = newIndex;
+ // Non-matching interleaved features in schemas means that we need to
+ // reconstruct or drop interleaved features in posting lists.
+ // If so, we must flush the new index to disk even if it is empty.
+ // This ensures that 2x triggerFlush will run fusion
+ // to reconstruct or drop interleaved features in the posting lists.
+ _flush_empty_current_index = !has_matching_interleaved_features(args._oldSchema, args._newSchema);
}
if (dropEmptyLast) {
replaceSource(_current_index_id, _current_index);
@@ -822,6 +850,7 @@ IndexMaintainer::IndexMaintainer(const IndexMaintainerConfig &config,
_next_id(),
_current_index_id(),
_current_index(),
+ _flush_empty_current_index(false),
_current_serial_num(0),
_flush_serial_num(0),
_lastFlushTime(),
diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h
index c30825cbb3c..e95613017fa 100644
--- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h
+++ b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h
@@ -91,6 +91,7 @@ class IndexMaintainer : public IIndexManager,
uint32_t _next_id; // Protected by SL + IUL
uint32_t _current_index_id; // Protected by SL + IUL
IMemoryIndex::SP _current_index; // Protected by SL + IUL
+ bool _flush_empty_current_index;
SerialNum _current_serial_num;// Protected by IUL
SerialNum _flush_serial_num; // Protected by SL
fastos::TimeStamp _lastFlushTime; // Protected by SL
diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index f032bbe9c30..07ac6bb699c 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -106,6 +106,7 @@ vespa_define_module(
src/tests/diskindex/bitvector
src/tests/diskindex/diskindex
src/tests/diskindex/fieldwriter
+ src/tests/diskindex/field_length_scanner
src/tests/diskindex/fusion
src/tests/diskindex/pagedict4
src/tests/docstore/chunk
diff --git a/searchlib/src/tests/diskindex/field_length_scanner/CMakeLists.txt b/searchlib/src/tests/diskindex/field_length_scanner/CMakeLists.txt
new file mode 100644
index 00000000000..985aaa38401
--- /dev/null
+++ b/searchlib/src/tests/diskindex/field_length_scanner/CMakeLists.txt
@@ -0,0 +1,11 @@
+# Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+find_package(GTest REQUIRED)
+vespa_add_executable(searchlib_field_length_scanner_test_app TEST
+ SOURCES
+ field_length_scanner_test.cpp
+ DEPENDS
+ searchlib
+ searchlib_test
+ GTest::GTest
+)
+vespa_add_test(NAME searchlib_field_length_scanner_test_app COMMAND searchlib_field_length_scanner_test_app)
diff --git a/searchlib/src/tests/diskindex/field_length_scanner/field_length_scanner_test.cpp b/searchlib/src/tests/diskindex/field_length_scanner/field_length_scanner_test.cpp
new file mode 100644
index 00000000000..1b8a4c9655d
--- /dev/null
+++ b/searchlib/src/tests/diskindex/field_length_scanner/field_length_scanner_test.cpp
@@ -0,0 +1,73 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchlib/diskindex/field_length_scanner.h>
+#include <vespa/searchlib/index/docidandfeatures.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using search::index::DocIdAndFeatures;
+
+
+namespace search::diskindex {
+
+
+class FieldLengthScannerTest : public ::testing::Test
+{
+protected:
+ FieldLengthScanner _scanner;
+ FieldLengthScannerTest()
+ : _scanner(3)
+ {
+ }
+};
+
+TEST_F(FieldLengthScannerTest, require_that_no_scan_gives_empty_length)
+{
+ EXPECT_EQ(0, _scanner.get_field_length(1));
+}
+
+TEST_F(FieldLengthScannerTest, require_that_single_length_is_registered)
+{
+ DocIdAndFeatures features;
+ features.set_doc_id(1);
+ features.elements().emplace_back(0, 1, 5);
+ _scanner.scan_features(features);
+ EXPECT_EQ(5u, _scanner.get_field_length(1));
+}
+
+TEST_F(FieldLengthScannerTest, require_that_duplicate_element_is_ignored)
+{
+ DocIdAndFeatures features;
+ features.set_doc_id(1);
+ features.elements().emplace_back(10, 1, 5);
+ features.elements().emplace_back(100, 1, 23);
+ _scanner.scan_features(features);
+ EXPECT_EQ(28u, _scanner.get_field_length(1));
+ _scanner.scan_features(features); // elements 10 and 100 already scanned
+ EXPECT_EQ(28u, _scanner.get_field_length(1));
+ features.elements()[0].setElementId(11);
+ _scanner.scan_features(features); // element 100 already scanned
+ EXPECT_EQ(33u, _scanner.get_field_length(1));
+ features.elements()[1].setElementId(101);
+    _scanner.scan_features(features); // element 10 already scanned
+ EXPECT_EQ(56u, _scanner.get_field_length(1));
+}
+
+TEST_F(FieldLengthScannerTest, require_that_documents_are_not_mixed)
+{
+ DocIdAndFeatures features1;
+ DocIdAndFeatures features2;
+ features1.set_doc_id(1);
+ features1.elements().emplace_back(10, 1, 5);
+ features1.elements().emplace_back(100, 1, 23);
+ features2.set_doc_id(2);
+ features2.elements().emplace_back(10, 1, 7);
+ features2.elements().emplace_back(100, 1, 9);
+ _scanner.scan_features(features1);
+ _scanner.scan_features(features2);
+ EXPECT_EQ(28u, _scanner.get_field_length(1));
+ EXPECT_EQ(16u, _scanner.get_field_length(2));
+}
+
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
index 4779ddcb10d..b77df846ebb 100644
--- a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
+++ b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
@@ -59,8 +59,8 @@ protected:
const Schema & getSchema() const { return _schema; }
void requireThatFusionIsWorking(const vespalib::string &prefix, bool directio, bool readmmap);
- void make_empty_index(const vespalib::string &dump_dir, const IFieldLengthInspector &field_length_inspector);
- void merge_empty_indexes(const vespalib::string &dump_dir, const std::vector<vespalib::string> &sources);
+ void make_simple_index(const vespalib::string &dump_dir, const IFieldLengthInspector &field_length_inspector);
+ void merge_simple_indexes(const vespalib::string &dump_dir, const std::vector<vespalib::string> &sources);
public:
FusionTest();
};
@@ -97,6 +97,72 @@ toString(FieldPositionsIterator posItr, bool hasElements = false, bool hasWeight
return ss.str();
}
+std::unique_ptr<Document>
+make_doc10(DocBuilder &b)
+{
+ b.startDocument("doc::10");
+ b.startIndexField("f0").
+ addStr("a").addStr("b").addStr("c").addStr("d").
+ addStr("e").addStr("f").addStr("z").
+ endField();
+ b.startIndexField("f1").
+ addStr("w").addStr("x").
+ addStr("y").addStr("z").
+ endField();
+ b.startIndexField("f2").
+ startElement(4).addStr("ax").addStr("ay").addStr("z").endElement().
+ startElement(5).addStr("ax").endElement().
+ endField();
+ b.startIndexField("f3").
+ startElement(4).addStr("wx").addStr("z").endElement().
+ endField();
+
+ return b.endDocument();
+}
+
+Schema::IndexField
+make_index_field(vespalib::stringref name, CollectionType collection_type, bool interleaved_features)
+{
+ Schema::IndexField index_field(name, DataType::STRING, collection_type);
+ index_field.set_interleaved_features(interleaved_features);
+ return index_field;
+}
+
+Schema
+make_schema(bool interleaved_features)
+{
+ Schema schema;
+ schema.addIndexField(make_index_field("f0", CollectionType::SINGLE, interleaved_features));
+ schema.addIndexField(make_index_field("f1", CollectionType::SINGLE, interleaved_features));
+ schema.addIndexField(make_index_field("f2", CollectionType::ARRAY, interleaved_features));
+ schema.addIndexField(make_index_field("f3", CollectionType::WEIGHTEDSET, interleaved_features));
+ return schema;
+}
+
+void
+assert_interleaved_features(DiskIndex &d, const vespalib::string &field, const vespalib::string &term, uint32_t doc_id, uint32_t exp_num_occs, uint32_t exp_field_length)
+{
+ using LookupResult = DiskIndex::LookupResult;
+ using PostingListHandle = index::PostingListHandle;
+ using SearchIterator = search::queryeval::SearchIterator;
+
+ const Schema &schema = d.getSchema();
+ uint32_t field_id(schema.getIndexFieldId(field));
+ std::unique_ptr<LookupResult> lookup_result(d.lookup(field_id, term));
+ ASSERT_TRUE(lookup_result);
+ std::unique_ptr<PostingListHandle> handle(d.readPostingList(*lookup_result));
+ ASSERT_TRUE(handle);
+ TermFieldMatchData tfmd;
+ TermFieldMatchDataArray tfmda;
+ tfmda.add(&tfmd);
+ std::unique_ptr<SearchIterator> sbap(handle->createIterator(lookup_result->counts, tfmda));
+ sbap->initFullRange();
+ EXPECT_TRUE(sbap->seek(doc_id));
+ sbap->unpack(doc_id);
+ EXPECT_EQ(exp_num_occs, tfmd.getNumOccs());
+ EXPECT_EQ(exp_field_length, tfmd.getFieldLength());
+}
+
void
validateDiskIndex(DiskIndex &dw, bool f2HasElements, bool f3HasWeights)
{
@@ -253,24 +319,7 @@ FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool dire
DocumentInverter inv(schema, invertThreads, pushThreads, fic);
Document::UP doc;
- b.startDocument("doc::10");
- b.startIndexField("f0").
- addStr("a").addStr("b").addStr("c").addStr("d").
- addStr("e").addStr("f").addStr("z").
- endField();
- b.startIndexField("f1").
- addStr("w").addStr("x").
- addStr("y").addStr("z").
- endField();
- b.startIndexField("f2").
- startElement(4).addStr("ax").addStr("ay").addStr("z").endElement().
- startElement(5).addStr("ax").endElement().
- endField();
- b.startIndexField("f3").
- startElement(4).addStr("wx").addStr("z").endElement().
- endField();
-
- doc = b.endDocument();
+ doc = make_doc10(b);
inv.invertDocument(10, *doc);
invertThreads.sync();
myPushDocument(inv);
@@ -400,11 +449,21 @@ FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool dire
}
void
-FusionTest::make_empty_index(const vespalib::string &dump_dir, const IFieldLengthInspector &field_length_inspector)
+FusionTest::make_simple_index(const vespalib::string &dump_dir, const IFieldLengthInspector &field_length_inspector)
{
FieldIndexCollection fic(_schema, field_length_inspector);
- uint32_t numDocs = 1;
- uint32_t numWords = 1;
+ uint32_t numDocs = 20;
+ uint32_t numWords = 1000;
+ DocBuilder b(_schema);
+ SequencedTaskExecutor invertThreads(2);
+ SequencedTaskExecutor pushThreads(2);
+ DocumentInverter inv(_schema, invertThreads, pushThreads, fic);
+
+ inv.invertDocument(10, *make_doc10(b));
+ invertThreads.sync();
+ myPushDocument(inv);
+ pushThreads.sync();
+
IndexBuilder ib(_schema);
TuneFileIndexing tuneFileIndexing;
DummyFileHeaderContext fileHeaderContext;
@@ -415,12 +474,12 @@ FusionTest::make_empty_index(const vespalib::string &dump_dir, const IFieldLengt
}
void
-FusionTest::merge_empty_indexes(const vespalib::string &dump_dir, const std::vector<vespalib::string> &sources)
+FusionTest::merge_simple_indexes(const vespalib::string &dump_dir, const std::vector<vespalib::string> &sources)
{
vespalib::ThreadStackExecutor executor(4, 0x10000);
TuneFileIndexing tuneFileIndexing;
DummyFileHeaderContext fileHeaderContext;
- SelectorArray selector(1, 0);
+ SelectorArray selector(20, 0);
ASSERT_TRUE(Fusion::merge(_schema, dump_dir, sources, selector,
false,
tuneFileIndexing, fileHeaderContext, executor));
@@ -428,12 +487,8 @@ FusionTest::merge_empty_indexes(const vespalib::string &dump_dir, const std::vec
FusionTest::FusionTest()
: ::testing::Test(),
- _schema()
+ _schema(make_schema(false))
{
- _schema.addIndexField(Schema::IndexField("f0", DataType::STRING));
- _schema.addIndexField(Schema::IndexField("f1", DataType::STRING));
- _schema.addIndexField(Schema::IndexField("f2", DataType::STRING, CollectionType::ARRAY));
- _schema.addIndexField(Schema::IndexField("f3", DataType::STRING, CollectionType::WEIGHTEDSET));
}
TEST_F(FusionTest, require_that_normal_fusion_is_working)
@@ -470,15 +525,31 @@ void clean_field_length_testdirs()
TEST_F(FusionTest, require_that_average_field_length_is_preserved)
{
clean_field_length_testdirs();
- make_empty_index("fldump2", MockFieldLengthInspector());
- make_empty_index("fldump3", MyMockFieldLengthInspector());
- merge_empty_indexes("fldump4", {"fldump2", "fldump3"});
+ make_simple_index("fldump2", MockFieldLengthInspector());
+ make_simple_index("fldump3", MyMockFieldLengthInspector());
+ merge_simple_indexes("fldump4", {"fldump2", "fldump3"});
DiskIndex disk_index("fldump4");
ASSERT_TRUE(disk_index.setup(TuneFileSearch()));
EXPECT_EQ(3.5, disk_index.get_field_length_info("f0").get_average_field_length());
clean_field_length_testdirs();
}
+TEST_F(FusionTest, require_that_interleaved_features_can_be_reconstructed)
+{
+ clean_field_length_testdirs();
+ make_simple_index("fldump2", MockFieldLengthInspector());
+ _schema = make_schema(true); // want interleaved features
+ merge_simple_indexes("fldump4", {"fldump2"});
+ DiskIndex disk_index("fldump4");
+ ASSERT_TRUE(disk_index.setup(TuneFileSearch()));
+ assert_interleaved_features(disk_index, "f0", "a", 10, 1, 7);
+ assert_interleaved_features(disk_index, "f1", "w", 10, 1, 4);
+ assert_interleaved_features(disk_index, "f2", "ax", 10, 2, 4);
+ assert_interleaved_features(disk_index, "f2", "ay", 10, 1, 4);
+ assert_interleaved_features(disk_index, "f3", "wx", 10, 1, 2);
+ clean_field_length_testdirs();
+}
+
}
}
diff --git a/searchlib/src/tests/memoryindex/compact_words_store/CMakeLists.txt b/searchlib/src/tests/memoryindex/compact_words_store/CMakeLists.txt
index ee31ef7c7aa..754ff796690 100644
--- a/searchlib/src/tests/memoryindex/compact_words_store/CMakeLists.txt
+++ b/searchlib/src/tests/memoryindex/compact_words_store/CMakeLists.txt
@@ -4,5 +4,6 @@ vespa_add_executable(searchlib_compact_words_store_test_app TEST
compact_words_store_test.cpp
DEPENDS
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_compact_words_store_test_app COMMAND searchlib_compact_words_store_test_app)
diff --git a/searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp b/searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp
index bda29115db6..52c85a70160 100644
--- a/searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp
+++ b/searchlib/src/tests/memoryindex/compact_words_store/compact_words_store_test.cpp
@@ -1,10 +1,10 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/searchlib/memoryindex/compact_words_store.h>
#include <vespa/vespalib/datastore/entryref.h>
-#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/stllike/string.h>
#include <iostream>
#include <map>
@@ -13,9 +13,9 @@ using namespace search::datastore;
using namespace search::memoryindex;
using vespalib::MemoryUsage;
-typedef CompactWordsStore::Builder Builder;
-typedef CompactWordsStore::Iterator Iterator;
-typedef Builder::WordRefVector WordRefVector;
+using Builder = CompactWordsStore::Builder;
+using Iterator = CompactWordsStore::Iterator;
+using WordRefVector = Builder::WordRefVector;
const EntryRef w1(1);
const EntryRef w2(2);
@@ -52,54 +52,52 @@ toStr(Iterator itr)
return oss.str();
}
-struct SingleFixture
-{
- CompactWordsStore _store;
- SingleFixture() : _store() {
- _store.insert(Builder(d1).insert(w1).insert(w2).insert(w3));
+struct SingleDocumentTest : public ::testing::Test {
+ CompactWordsStore store;
+ SingleDocumentTest() : store() {
+ store.insert(Builder(d1).insert(w1).insert(w2).insert(w3));
}
};
-struct MultiFixture
-{
- CompactWordsStore _store;
- MultiFixture() : _store() {
- _store.insert(Builder(d1).insert(w1));
- _store.insert(Builder(d2).insert(w2));
- _store.insert(Builder(d3).insert(w3));
+struct MultiDocumentTest : public ::testing::Test {
+ CompactWordsStore store;
+ MultiDocumentTest() : store() {
+ store.insert(Builder(d1).insert(w1));
+ store.insert(Builder(d2).insert(w2));
+ store.insert(Builder(d3).insert(w3));
}
};
-TEST_F("require that fields and words can be added for a document", SingleFixture)
+TEST_F(SingleDocumentTest, fields_and_words_can_be_added_for_a_document)
{
- EXPECT_EQUAL("[1,2,3]", toStr(f._store.get(d1)));
+ EXPECT_EQ("[1,2,3]", toStr(store.get(d1)));
}
-TEST_F("require that multiple documents can be added", MultiFixture)
+TEST_F(MultiDocumentTest, multiple_documents_can_be_added)
{
- EXPECT_EQUAL("[1]", toStr(f._store.get(d1)));
- EXPECT_EQUAL("[2]", toStr(f._store.get(d2)));
- EXPECT_EQUAL("[3]", toStr(f._store.get(d3)));
- EXPECT_FALSE(f._store.get(d4).valid());
+ EXPECT_EQ("[1]", toStr(store.get(d1)));
+ EXPECT_EQ("[2]", toStr(store.get(d2)));
+ EXPECT_EQ("[3]", toStr(store.get(d3)));
+ EXPECT_FALSE(store.get(d4).valid());
}
-TEST_F("require that documents can be removed", MultiFixture)
+TEST_F(MultiDocumentTest, documents_can_be_removed)
{
- f._store.remove(d2);
- EXPECT_TRUE(f._store.get(d1).valid());
- EXPECT_FALSE(f._store.get(d2).valid());
- EXPECT_TRUE(f._store.get(d3).valid());
+ store.remove(d2);
+ EXPECT_TRUE(store.get(d1).valid());
+ EXPECT_FALSE(store.get(d2).valid());
+ EXPECT_TRUE(store.get(d3).valid());
}
-TEST_F("require that documents can be removed and re-inserted", MultiFixture)
+TEST_F(MultiDocumentTest, documents_can_be_removed_and_reinserted)
{
- f._store.remove(d2);
- f._store.insert(Builder(d2).insert(w4));
- EXPECT_EQUAL("[4]", toStr(f._store.get(d2)));
+ store.remove(d2);
+ store.insert(Builder(d2).insert(w4));
+ EXPECT_EQ("[4]", toStr(store.get(d2)));
}
-TEST("require that a lot of words can be inserted, retrieved and removed")
+TEST(CompactWordStoreTest, multiple_words_can_be_inserted_retrieved_and_removed)
{
CompactWordsStore store;
for (uint32_t docId = 0; docId < 50; ++docId) {
@@ -113,10 +111,10 @@ TEST("require that a lot of words can be inserted, retrieved and removed")
}
for (uint32_t docId = 0; docId < 50; ++docId) {
WordRefVector words = build(store.get(docId));
- EXPECT_EQUAL(20000u, words.size());
+ EXPECT_EQ(20000u, words.size());
uint32_t wordRef = 0;
for (auto word : words) {
- EXPECT_EQUAL(wordRef++, word.ref());
+ EXPECT_EQ(wordRef++, word.ref());
}
store.remove(docId);
MemoryUsage usage = store.getMemoryUsage();
@@ -124,7 +122,7 @@ TEST("require that a lot of words can be inserted, retrieved and removed")
}
}
-TEST("require that initial memory usage is reported")
+TEST(CompactWordStoreTest, initial_memory_usage_is_reported)
{
CompactWordsStore store;
CompactWordsStore::DocumentWordsMap docs;
@@ -134,24 +132,24 @@ TEST("require that initial memory usage is reported")
initExp.incUsedBytes(docs.getMemoryUsed());
initExp.merge(internalStore.getMemoryUsage());
MemoryUsage init = store.getMemoryUsage();
- EXPECT_EQUAL(initExp.allocatedBytes(), init.allocatedBytes());
- EXPECT_EQUAL(initExp.usedBytes(), init.usedBytes());
- EXPECT_GREATER(init.allocatedBytes(), init.usedBytes());
- EXPECT_GREATER(init.allocatedBytes(), 0u);
- EXPECT_GREATER(init.usedBytes(), 0u);
+ EXPECT_EQ(initExp.allocatedBytes(), init.allocatedBytes());
+ EXPECT_EQ(initExp.usedBytes(), init.usedBytes());
+ EXPECT_GT(init.allocatedBytes(), init.usedBytes());
+ EXPECT_GT(init.allocatedBytes(), 0u);
+ EXPECT_GT(init.usedBytes(), 0u);
}
-TEST("require that memory usage is updated after insert")
+TEST(CompactWordStoreTest, memory_usage_is_updated_after_insert)
{
CompactWordsStore store;
MemoryUsage init = store.getMemoryUsage();
store.insert(Builder(d1).insert(w1));
MemoryUsage after = store.getMemoryUsage();
- EXPECT_GREATER_EQUAL(after.allocatedBytes(), init.allocatedBytes());
- EXPECT_GREATER(after.usedBytes(), init.usedBytes());
+ EXPECT_GE(after.allocatedBytes(), init.allocatedBytes());
+ EXPECT_GT(after.usedBytes(), init.usedBytes());
}
+GTEST_MAIN_RUN_ALL_TESTS()
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/memoryindex/datastore/CMakeLists.txt b/searchlib/src/tests/memoryindex/datastore/CMakeLists.txt
index 45507f3b0ae..be1a193cd3c 100644
--- a/searchlib/src/tests/memoryindex/datastore/CMakeLists.txt
+++ b/searchlib/src/tests/memoryindex/datastore/CMakeLists.txt
@@ -4,6 +4,7 @@ vespa_add_executable(searchlib_feature_store_test_app TEST
feature_store_test.cpp
DEPENDS
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_feature_store_test_app COMMAND searchlib_feature_store_test_app)
vespa_add_executable(searchlib_word_store_test_app TEST
@@ -11,5 +12,6 @@ vespa_add_executable(searchlib_word_store_test_app TEST
word_store_test.cpp
DEPENDS
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_word_store_test_app COMMAND searchlib_word_store_test_app)
diff --git a/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp b/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
index aca83d67a8a..c6368bee6eb 100644
--- a/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
+++ b/searchlib/src/tests/memoryindex/datastore/feature_store_test.cpp
@@ -1,8 +1,10 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchlib/memoryindex/feature_store.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
#include <vespa/log/log.h>
LOG_SETUP("feature_store_test");
-#include <vespa/vespalib/testkit/testapp.h>
-#include <vespa/searchlib/memoryindex/feature_store.h>
using namespace search::btree;
using namespace search::datastore;
@@ -11,62 +13,55 @@ using namespace search::index;
using search::index::schema::CollectionType;
using search::index::schema::DataType;
-namespace search
-{
-
+namespace search::memoryindex {
-namespace memoryindex
-{
+class FeatureStoreTest : public ::testing::Test {
+public:
+ Schema schema;
+ FeatureStore fs;
+ Schema make_schema() const;
+ FeatureStoreTest();
+};
-class Test : public vespalib::TestApp
+Schema
+FeatureStoreTest::make_schema() const
{
-private:
- Schema _schema;
-
- const Schema & getSchema() const { return _schema; }
- bool assertFeatures(const DocIdAndFeatures &exp, const DocIdAndFeatures &act);
- void requireThatFeaturesCanBeAddedAndRetrieved();
- void requireThatNextWordsAreWorking();
- void requireThatAddFeaturesTriggersChangeOfBuffer();
-
-public:
- Test();
- int Main() override;
-};
+ Schema result;
+ result.addIndexField(Schema::IndexField("f0", DataType::STRING));
+ result.addIndexField(Schema::IndexField("f1", DataType::STRING, CollectionType::WEIGHTEDSET));
+ return result;
+}
+FeatureStoreTest::FeatureStoreTest()
+ : schema(make_schema()),
+ fs(schema)
+{
+}
-bool
-Test::assertFeatures(const DocIdAndFeatures &exp,
- const DocIdAndFeatures &act)
+void
+assertFeatures(const DocIdAndFeatures& exp,
+ const DocIdAndFeatures& act)
{
// docid is not encoded as part of features
- if (!EXPECT_EQUAL(exp.elements().size(),
- act.elements().size()))
- return false;
+ ASSERT_EQ(exp.elements().size(),
+ act.elements().size());
for (size_t i = 0; i < exp.elements().size(); ++i) {
- if (!EXPECT_EQUAL(exp.elements()[i].getElementId(),
- act.elements()[i].getElementId()))
- return false;
- if (!EXPECT_EQUAL(exp.elements()[i].getNumOccs(),
- act.elements()[i].getNumOccs()))
- return false;
- if (!EXPECT_EQUAL(exp.elements()[i].getWeight(), act.elements()[i].getWeight()))
- return false;
- if (!EXPECT_EQUAL(exp.elements()[i].getElementLen(),
- act.elements()[i].getElementLen()))
- return false;
+ EXPECT_EQ(exp.elements()[i].getElementId(),
+ act.elements()[i].getElementId());
+ EXPECT_EQ(exp.elements()[i].getNumOccs(),
+ act.elements()[i].getNumOccs());
+ EXPECT_EQ(exp.elements()[i].getWeight(), act.elements()[i].getWeight());
+ EXPECT_EQ(exp.elements()[i].getElementLen(),
+ act.elements()[i].getElementLen());
}
- if (!EXPECT_EQUAL(exp.word_positions().size(), act.word_positions().size()))
- return false;
+ ASSERT_EQ(exp.word_positions().size(), act.word_positions().size());
for (size_t i = 0; i < exp.word_positions().size(); ++i) {
- if (!EXPECT_EQUAL(exp.word_positions()[i].getWordPos(),
- act.word_positions()[i].getWordPos())) return false;
+ EXPECT_EQ(exp.word_positions()[i].getWordPos(),
+ act.word_positions()[i].getWordPos());
}
- return true;
}
-
DocIdAndFeatures
getFeatures(uint32_t numOccs,
int32_t weight,
@@ -84,11 +79,8 @@ getFeatures(uint32_t numOccs,
return f;
}
-
-void
-Test::requireThatFeaturesCanBeAddedAndRetrieved()
+TEST_F(FeatureStoreTest, features_can_be_added_and_retrieved)
{
- FeatureStore fs(getSchema());
DocIdAndFeatures act;
EntryRef r1;
EntryRef r2;
@@ -98,9 +90,9 @@ Test::requireThatFeaturesCanBeAddedAndRetrieved()
r = fs.addFeatures(0, f);
r1 = r.first;
EXPECT_TRUE(r.second > 0);
- EXPECT_EQUAL(FeatureStore::RefType::align(1u),
- FeatureStore::RefType(r1).offset());
- EXPECT_EQUAL(0u, FeatureStore::RefType(r1).bufferId());
+ EXPECT_EQ(FeatureStore::RefType::align(1u),
+ FeatureStore::RefType(r1).offset());
+ EXPECT_EQ(0u, FeatureStore::RefType(r1).bufferId());
LOG(info,
"bits(%" PRIu64 "), ref.offset(%zu), ref.bufferId(%u)",
r.second,
@@ -108,7 +100,7 @@ Test::requireThatFeaturesCanBeAddedAndRetrieved()
FeatureStore::RefType(r1).bufferId());
fs.getFeatures(0, r1, act);
// weight not encoded for single value
- EXPECT_TRUE(assertFeatures(getFeatures(2, 1, 8), act));
+ ASSERT_NO_FATAL_FAILURE(assertFeatures(getFeatures(2, 1, 8), act));
}
{
DocIdAndFeatures f = getFeatures(4, 8, 16);
@@ -117,22 +109,19 @@ Test::requireThatFeaturesCanBeAddedAndRetrieved()
EXPECT_TRUE(r.second > 0);
EXPECT_TRUE(FeatureStore::RefType(r2).offset() >
FeatureStore::RefType(r1).offset());
- EXPECT_EQUAL(0u, FeatureStore::RefType(r1).bufferId());
+ EXPECT_EQ(0u, FeatureStore::RefType(r1).bufferId());
LOG(info,
"bits(%" PRIu64 "), ref.offset(%zu), ref.bufferId(%u)",
r.second,
FeatureStore::RefType(r2).offset(),
FeatureStore::RefType(r2).bufferId());
fs.getFeatures(1, r2, act);
- EXPECT_TRUE(assertFeatures(f, act));
+ ASSERT_NO_FATAL_FAILURE(assertFeatures(f, act));
}
}
-
-void
-Test::requireThatNextWordsAreWorking()
+TEST_F(FeatureStoreTest, next_words_are_working)
{
- FeatureStore fs(getSchema());
DocIdAndFeatures act;
EntryRef r1;
EntryRef r2;
@@ -142,9 +131,9 @@ Test::requireThatNextWordsAreWorking()
r = fs.addFeatures(0, f);
r1 = r.first;
EXPECT_TRUE(r.second > 0);
- EXPECT_EQUAL(FeatureStore::RefType::align(1u),
- FeatureStore::RefType(r1).offset());
- EXPECT_EQUAL(0u, FeatureStore::RefType(r1).bufferId());
+ EXPECT_EQ(FeatureStore::RefType::align(1u),
+ FeatureStore::RefType(r1).offset());
+ EXPECT_EQ(0u, FeatureStore::RefType(r1).bufferId());
LOG(info,
"bits(%" PRIu64 "), ref.offset(%zu), ref.bufferId(%u)",
r.second,
@@ -152,7 +141,7 @@ Test::requireThatNextWordsAreWorking()
FeatureStore::RefType(r1).bufferId());
fs.getFeatures(0, r1, act);
// weight not encoded for single value
- EXPECT_TRUE(assertFeatures(getFeatures(2, 1, 8), act));
+ ASSERT_NO_FATAL_FAILURE(assertFeatures(getFeatures(2, 1, 8), act));
}
{
DocIdAndFeatures f = getFeatures(4, 8, 16);
@@ -161,22 +150,19 @@ Test::requireThatNextWordsAreWorking()
EXPECT_TRUE(r.second > 0);
EXPECT_TRUE(FeatureStore::RefType(r2).offset() >
FeatureStore::RefType(r1).offset());
- EXPECT_EQUAL(0u, FeatureStore::RefType(r1).bufferId());
+ EXPECT_EQ(0u, FeatureStore::RefType(r1).bufferId());
LOG(info,
"bits(%" PRIu64 "), ref.offset(%zu), ref.bufferId(%u)",
r.second,
FeatureStore::RefType(r2).offset(),
FeatureStore::RefType(r2).bufferId());
fs.getFeatures(1, r2, act);
- EXPECT_TRUE(assertFeatures(f, act));
+ ASSERT_NO_FATAL_FAILURE(assertFeatures(f, act));
}
}
-
-void
-Test::requireThatAddFeaturesTriggersChangeOfBuffer()
+TEST_F(FeatureStoreTest, add_features_triggers_change_of_buffer)
{
- FeatureStore fs(getSchema());
size_t cnt = 1;
DocIdAndFeatures act;
uint32_t lastId = 0;
@@ -185,7 +171,7 @@ Test::requireThatAddFeaturesTriggersChangeOfBuffer()
DocIdAndFeatures f = getFeatures(numOccs, 1, numOccs + 1);
std::pair<EntryRef, uint64_t> r = fs.addFeatures(0, f);
fs.getFeatures(0, r.first, act);
- EXPECT_TRUE(assertFeatures(f, act));
+ ASSERT_NO_FATAL_FAILURE(assertFeatures(f, act));
uint32_t bufferId = FeatureStore::RefType(r.first).bufferId();
if (bufferId > lastId) {
LOG(info,
@@ -197,36 +183,10 @@ Test::requireThatAddFeaturesTriggersChangeOfBuffer()
break;
}
}
- EXPECT_EQUAL(1u, lastId);
+ EXPECT_EQ(1u, lastId);
LOG(info, "Added %zu feature sets in 1 buffer", cnt);
}
-
-Test::Test()
- : _schema()
-{
- _schema.addIndexField(Schema::IndexField("f0", DataType::STRING));
- _schema.addIndexField(Schema::IndexField("f1", DataType::STRING, CollectionType::WEIGHTEDSET));
}
-
-int
-Test::Main()
-{
- TEST_INIT("feature_store_test");
-
- requireThatFeaturesCanBeAddedAndRetrieved();
- requireThatNextWordsAreWorking();
- requireThatAddFeaturesTriggersChangeOfBuffer();
-
- TEST_DONE();
-}
-
-
-}
-
-
-}
-
-
-TEST_APPHOOK(search::memoryindex::Test);
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp b/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
index b7f454bfdf7..86365287b29 100644
--- a/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
+++ b/searchlib/src/tests/memoryindex/datastore/word_store_test.cpp
@@ -1,24 +1,15 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/searchlib/memoryindex/word_store.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
#include <vespa/log/log.h>
LOG_SETUP("word_store_test");
-#include <vespa/vespalib/testkit/testapp.h>
-#include <vespa/searchlib/memoryindex/word_store.h>
using namespace search::datastore;
-namespace search {
-namespace memoryindex {
+namespace search::memoryindex {
-class Test : public vespalib::TestApp {
-private:
- void requireThatWordsCanBeAddedAndRetrieved();
- void requireThatAddWordTriggersChangeOfBuffer();
-public:
- int Main() override;
-};
-
-void
-Test::requireThatWordsCanBeAddedAndRetrieved()
+TEST(WordStoreTest, words_can_be_added_and_retrieved)
{
std::string w1 = "require";
std::string w2 = "that";
@@ -32,19 +23,18 @@ Test::requireThatWordsCanBeAddedAndRetrieved()
uint32_t w1p = WordStore::RefType::pad(w1s);
uint32_t w2s = w2.size() + 1;
uint32_t w2p = WordStore::RefType::pad(w2s);
- EXPECT_EQUAL(invp, WordStore::RefType(r1).offset());
- EXPECT_EQUAL(invp + w1s + w1p, WordStore::RefType(r2).offset());
- EXPECT_EQUAL(invp + w1s + w1p + w2s + w2p, WordStore::RefType(r3).offset());
- EXPECT_EQUAL(0u, WordStore::RefType(r1).bufferId());
- EXPECT_EQUAL(0u, WordStore::RefType(r2).bufferId());
- EXPECT_EQUAL(0u, WordStore::RefType(r3).bufferId());
- EXPECT_EQUAL(std::string("require"), ws.getWord(r1));
- EXPECT_EQUAL(std::string("that"), ws.getWord(r2));
- EXPECT_EQUAL(std::string("words"), ws.getWord(r3));
+ EXPECT_EQ(invp, WordStore::RefType(r1).offset());
+ EXPECT_EQ(invp + w1s + w1p, WordStore::RefType(r2).offset());
+ EXPECT_EQ(invp + w1s + w1p + w2s + w2p, WordStore::RefType(r3).offset());
+ EXPECT_EQ(0u, WordStore::RefType(r1).bufferId());
+ EXPECT_EQ(0u, WordStore::RefType(r2).bufferId());
+ EXPECT_EQ(0u, WordStore::RefType(r3).bufferId());
+ EXPECT_EQ(std::string("require"), ws.getWord(r1));
+ EXPECT_EQ(std::string("that"), ws.getWord(r2));
+ EXPECT_EQ(std::string("words"), ws.getWord(r3));
}
-void
-Test::requireThatAddWordTriggersChangeOfBuffer()
+TEST(WordStoreTest, add_word_triggers_change_of_buffer)
{
WordStore ws;
size_t word = 0;
@@ -54,7 +44,7 @@ Test::requireThatAddWordTriggersChangeOfBuffer()
sprintf(wordStr, "%6zu", word);
// all words uses 12 bytes (include padding)
EntryRef r = ws.addWord(std::string(wordStr));
- EXPECT_EQUAL(std::string(wordStr), ws.getWord(r));
+ EXPECT_EQ(std::string(wordStr), ws.getWord(r));
uint32_t bufferId = WordStore::RefType(r).bufferId();
if (bufferId > lastId) {
LOG(info,
@@ -68,23 +58,11 @@ Test::requireThatAddWordTriggersChangeOfBuffer()
}
}
LOG(info, "Added %zu words in 4 buffers", word);
- EXPECT_EQUAL(2047u, word);
- EXPECT_EQUAL(4u, lastId);
-}
-
-int
-Test::Main()
-{
- TEST_INIT("word_store_test");
-
- requireThatWordsCanBeAddedAndRetrieved();
- requireThatAddWordTriggersChangeOfBuffer();
-
- TEST_DONE();
+ EXPECT_EQ(2047u, word);
+ EXPECT_EQ(4u, lastId);
}
}
-}
-TEST_APPHOOK(search::memoryindex::Test);
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/memoryindex/document_inverter/CMakeLists.txt b/searchlib/src/tests/memoryindex/document_inverter/CMakeLists.txt
index 1058a19d0ce..ecf33ee48fd 100644
--- a/searchlib/src/tests/memoryindex/document_inverter/CMakeLists.txt
+++ b/searchlib/src/tests/memoryindex/document_inverter/CMakeLists.txt
@@ -5,5 +5,6 @@ vespa_add_executable(searchlib_document_inverter_test_app TEST
DEPENDS
searchlib_test
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_document_inverter_test_app COMMAND searchlib_document_inverter_test_app)
diff --git a/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp b/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
index 08645f38712..38862dfe94b 100644
--- a/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/document_inverter/document_inverter_test.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/* -*- mode: C++; coding: utf-8; -*- */
-
+#include <vespa/searchlib/common/sequencedtaskexecutor.h>
#include <vespa/searchlib/index/docbuilder.h>
#include <vespa/searchlib/index/field_length_calculator.h>
#include <vespa/searchlib/memoryindex/document_inverter.h>
@@ -10,8 +9,7 @@
#include <vespa/searchlib/memoryindex/i_field_index_collection.h>
#include <vespa/searchlib/memoryindex/word_store.h>
#include <vespa/searchlib/test/memoryindex/ordered_field_index_inserter.h>
-#include <vespa/searchlib/common/sequencedtaskexecutor.h>
-#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace search {
@@ -37,7 +35,6 @@ makeDoc10(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc11(DocBuilder &b)
{
@@ -51,7 +48,6 @@ makeDoc11(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc12(DocBuilder &b)
{
@@ -62,7 +58,6 @@ makeDoc12(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc13(DocBuilder &b)
{
@@ -73,7 +68,6 @@ makeDoc13(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc14(DocBuilder &b)
{
@@ -84,7 +78,6 @@ makeDoc14(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc15(DocBuilder &b)
{
@@ -94,8 +87,7 @@ makeDoc15(DocBuilder &b)
}
-class MockFieldIndexCollection : public IFieldIndexCollection
-{
+class MockFieldIndexCollection : public IFieldIndexCollection {
FieldIndexRemover &_remover;
test::OrderedFieldIndexInserter &_inserter;
FieldLengthCalculator &_calculator;
@@ -122,8 +114,7 @@ public:
};
-struct Fixture
-{
+struct DocumentInverterTest : public ::testing::Test {
Schema _schema;
DocBuilder _b;
SequencedTaskExecutor _invertThreads;
@@ -135,9 +126,7 @@ struct Fixture
MockFieldIndexCollection _fic;
DocumentInverter _inv;
- static Schema
- makeSchema()
- {
+ static Schema makeSchema() {
Schema schema;
schema.addIndexField(Schema::IndexField("f0", DataType::STRING));
schema.addIndexField(Schema::IndexField("f1", DataType::STRING));
@@ -146,7 +135,7 @@ struct Fixture
return schema;
}
- Fixture()
+ DocumentInverterTest()
: _schema(makeSchema()),
_b(_schema),
_invertThreads(2),
@@ -160,9 +149,7 @@ struct Fixture
{
}
- void
- pushDocuments()
- {
+ void pushDocuments() {
_invertThreads.sync();
uint32_t fieldId = 0;
for (auto &inverter : _inv.getInverters()) {
@@ -174,153 +161,143 @@ struct Fixture
}
};
-
-TEST_F("requireThatFreshInsertWorks", Fixture)
+TEST_F(DocumentInverterTest, require_that_fresh_insert_works)
{
- f._inv.invertDocument(10, *makeDoc10(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,"
- "w=b,a=10,"
- "w=c,a=10,"
- "w=d,a=10",
- f._inserter.toStr());
+ _inv.invertDocument(10, *makeDoc10(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,"
+ "w=b,a=10,"
+ "w=c,a=10,"
+ "w=d,a=10",
+ _inserter.toStr());
}
-
-TEST_F("requireThatMultipleDocsWork", Fixture)
+TEST_F(DocumentInverterTest, require_that_multiple_docs_work)
{
- f._inv.invertDocument(10, *makeDoc10(f._b));
- f._inv.invertDocument(11, *makeDoc11(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,a=11,"
- "w=b,a=10,a=11,"
- "w=c,a=10,w=d,a=10,"
- "w=e,a=11,"
- "w=f,a=11,"
- "f=1,w=a,a=11,"
- "w=g,a=11",
- f._inserter.toStr());
+ _inv.invertDocument(10, *makeDoc10(_b));
+ _inv.invertDocument(11, *makeDoc11(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,a=11,"
+ "w=b,a=10,a=11,"
+ "w=c,a=10,w=d,a=10,"
+ "w=e,a=11,"
+ "w=f,a=11,"
+ "f=1,w=a,a=11,"
+ "w=g,a=11",
+ _inserter.toStr());
}
-
-TEST_F("requireThatRemoveWorks", Fixture)
+TEST_F(DocumentInverterTest, require_that_remove_works)
{
- f._inv.getInverter(0)->remove("b", 10);
- f._inv.getInverter(0)->remove("a", 10);
- f._inv.getInverter(0)->remove("b", 11);
- f._inv.getInverter(2)->remove("c", 12);
- f._inv.getInverter(1)->remove("a", 10);
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,r=10,"
- "w=b,r=10,r=11,"
- "f=1,w=a,r=10,"
- "f=2,w=c,r=12",
- f._inserter.toStr());
+ _inv.getInverter(0)->remove("b", 10);
+ _inv.getInverter(0)->remove("a", 10);
+ _inv.getInverter(0)->remove("b", 11);
+ _inv.getInverter(2)->remove("c", 12);
+ _inv.getInverter(1)->remove("a", 10);
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,r=10,"
+ "w=b,r=10,r=11,"
+ "f=1,w=a,r=10,"
+ "f=2,w=c,r=12",
+ _inserter.toStr());
}
-
-TEST_F("requireThatReputWorks", Fixture)
+TEST_F(DocumentInverterTest, require_that_reput_works)
{
- f._inv.invertDocument(10, *makeDoc10(f._b));
- f._inv.invertDocument(10, *makeDoc11(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,"
- "w=b,a=10,"
- "w=e,a=10,"
- "w=f,a=10,"
- "f=1,w=a,a=10,"
- "w=g,a=10",
- f._inserter.toStr());
+ _inv.invertDocument(10, *makeDoc10(_b));
+ _inv.invertDocument(10, *makeDoc11(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,"
+ "w=b,a=10,"
+ "w=e,a=10,"
+ "w=f,a=10,"
+ "f=1,w=a,a=10,"
+ "w=g,a=10",
+ _inserter.toStr());
}
-
-TEST_F("requireThatAbortPendingDocWorks", Fixture)
+TEST_F(DocumentInverterTest, require_that_abort_pending_doc_works)
{
- Document::UP doc10 = makeDoc10(f._b);
- Document::UP doc11 = makeDoc11(f._b);
- Document::UP doc12 = makeDoc12(f._b);
- Document::UP doc13 = makeDoc13(f._b);
- Document::UP doc14 = makeDoc14(f._b);
-
- f._inv.invertDocument(10, *doc10);
- f._inv.invertDocument(11, *doc11);
- f._inv.removeDocument(10);
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=11,"
- "w=b,a=11,"
- "w=e,a=11,"
- "w=f,a=11,"
- "f=1,w=a,a=11,"
- "w=g,a=11",
- f._inserter.toStr());
-
- f._inv.invertDocument(10, *doc10);
- f._inv.invertDocument(11, *doc11);
- f._inv.invertDocument(12, *doc12);
- f._inv.invertDocument(13, *doc13);
- f._inv.invertDocument(14, *doc14);
- f._inv.removeDocument(11);
- f._inv.removeDocument(13);
- f._inserter.reset();
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,"
- "w=b,a=10,"
- "w=c,a=10,"
- "w=d,a=10,"
- "w=doc12,a=12,"
- "w=doc14,a=14,"
- "w=h,a=12,"
- "w=j,a=14",
- f._inserter.toStr());
-
- f._inv.invertDocument(10, *doc10);
- f._inv.invertDocument(11, *doc11);
- f._inv.invertDocument(12, *doc12);
- f._inv.invertDocument(13, *doc13);
- f._inv.invertDocument(14, *doc14);
- f._inv.removeDocument(11);
- f._inv.removeDocument(12);
- f._inv.removeDocument(13);
- f._inv.removeDocument(14);
- f._inserter.reset();
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,"
- "w=b,a=10,"
- "w=c,a=10,"
- "w=d,a=10",
- f._inserter.toStr());
-
-
+ auto doc10 = makeDoc10(_b);
+ auto doc11 = makeDoc11(_b);
+ auto doc12 = makeDoc12(_b);
+ auto doc13 = makeDoc13(_b);
+ auto doc14 = makeDoc14(_b);
+
+ _inv.invertDocument(10, *doc10);
+ _inv.invertDocument(11, *doc11);
+ _inv.removeDocument(10);
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=11,"
+ "w=b,a=11,"
+ "w=e,a=11,"
+ "w=f,a=11,"
+ "f=1,w=a,a=11,"
+ "w=g,a=11",
+ _inserter.toStr());
+
+ _inv.invertDocument(10, *doc10);
+ _inv.invertDocument(11, *doc11);
+ _inv.invertDocument(12, *doc12);
+ _inv.invertDocument(13, *doc13);
+ _inv.invertDocument(14, *doc14);
+ _inv.removeDocument(11);
+ _inv.removeDocument(13);
+ _inserter.reset();
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,"
+ "w=b,a=10,"
+ "w=c,a=10,"
+ "w=d,a=10,"
+ "w=doc12,a=12,"
+ "w=doc14,a=14,"
+ "w=h,a=12,"
+ "w=j,a=14",
+ _inserter.toStr());
+
+ _inv.invertDocument(10, *doc10);
+ _inv.invertDocument(11, *doc11);
+ _inv.invertDocument(12, *doc12);
+ _inv.invertDocument(13, *doc13);
+ _inv.invertDocument(14, *doc14);
+ _inv.removeDocument(11);
+ _inv.removeDocument(12);
+ _inv.removeDocument(13);
+ _inv.removeDocument(14);
+ _inserter.reset();
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,"
+ "w=b,a=10,"
+ "w=c,a=10,"
+ "w=d,a=10",
+ _inserter.toStr());
}
-
-TEST_F("requireThatMixOfAddAndRemoveWorks", Fixture)
+TEST_F(DocumentInverterTest, require_that_mix_of_add_and_remove_works)
{
- f._inv.getInverter(0)->remove("a", 11);
- f._inv.getInverter(0)->remove("c", 9);
- f._inv.getInverter(0)->remove("d", 10);
- f._inv.getInverter(0)->remove("z", 12);
- f._inv.invertDocument(10, *makeDoc10(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,r=11,"
- "w=b,a=10,"
- "w=c,r=9,a=10,"
- "w=d,r=10,a=10,"
- "w=z,r=12",
- f._inserter.toStr());
+ _inv.getInverter(0)->remove("a", 11);
+ _inv.getInverter(0)->remove("c", 9);
+ _inv.getInverter(0)->remove("d", 10);
+ _inv.getInverter(0)->remove("z", 12);
+ _inv.invertDocument(10, *makeDoc10(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,r=11,"
+ "w=b,a=10,"
+ "w=c,r=9,a=10,"
+ "w=d,r=10,a=10,"
+ "w=z,r=12",
+ _inserter.toStr());
}
-
-TEST_F("require that empty document can be inverted", Fixture)
+TEST_F(DocumentInverterTest, require_that_empty_document_can_be_inverted)
{
- f._inv.invertDocument(15, *makeDoc15(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("",
- f._inserter.toStr());
+ _inv.invertDocument(15, *makeDoc15(_b));
+ pushDocuments();
+ EXPECT_EQ("",
+ _inserter.toStr());
}
+}
+}
-} // namespace memoryindex
-} // namespace search
-
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp
index 36e9bde5c9f..54124326507 100644
--- a/searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp
@@ -18,9 +18,7 @@ using namespace search::memoryindex;
using search::index::schema::DataType;
using search::test::SearchIteratorVerifier;
-using FieldIndexType = FieldIndex<false>;
-using PostingIteratorType = PostingIterator<false>;
-
+template <typename FieldIndexType>
class Verifier : public SearchIteratorVerifier {
private:
mutable TermFieldMatchData _tfmd;
@@ -44,8 +42,7 @@ public:
(void) strict;
TermFieldMatchDataArray match_data;
match_data.add(&_tfmd);
- return std::make_unique<PostingIteratorType>(_field_index.find("a"),
- _field_index.getFeatureStore(), 0, match_data);
+ return _field_index.make_search_iterator("a", 0, match_data);
}
};
@@ -57,9 +54,10 @@ get_schema()
return result;
}
+template <typename FieldIndexType>
struct Fixture {
Schema schema;
- Verifier verifier;
+ Verifier<FieldIndexType> verifier;
Fixture()
: schema(get_schema()),
verifier(schema)
@@ -67,7 +65,12 @@ struct Fixture {
}
};
-TEST_F("require that posting iterator conforms", Fixture)
+TEST_F("require that normal posting iterator conforms", Fixture<FieldIndex<false>>)
+{
+ f.verifier.verify();
+}
+
+TEST_F("require that interleaved posting iterator conforms", Fixture<FieldIndex<true>>)
{
f.verifier.verify();
}
diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
index f2cc2580cd8..ac1735e0549 100644
--- a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
@@ -14,6 +14,7 @@
#include <vespa/searchlib/memoryindex/field_inverter.h>
#include <vespa/searchlib/memoryindex/ordered_field_index_inserter.h>
#include <vespa/searchlib/memoryindex/posting_iterator.h>
+#include <vespa/searchlib/queryeval/iterators.h>
#include <vespa/searchlib/test/index/mock_field_length_inspector.h>
#include <vespa/searchlib/test/memoryindex/wrap_inserter.h>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
@@ -31,6 +32,7 @@ using namespace fef;
using namespace index;
using document::Document;
+using queryeval::RankedSearchIteratorBase;
using queryeval::SearchIterator;
using search::index::schema::CollectionType;
using search::index::schema::DataType;
@@ -40,10 +42,7 @@ using vespalib::GenerationHandler;
namespace memoryindex {
using test::WrapInserter;
-using FieldIndexType = FieldIndex<false>;
-using PostingList = FieldIndexType::PostingList;
-using PostingConstItr = PostingList::ConstIterator;
-using PostingIteratorType = PostingIterator<false>;
+using NormalFieldIndex = FieldIndex<false>;
class MyBuilder : public IndexBuilder {
private:
@@ -132,11 +131,21 @@ public:
}
};
+struct SimpleMatchData {
+ TermFieldMatchData term;
+ TermFieldMatchDataArray array;
+ SimpleMatchData() : term(), array() {
+ array.add(&term);
+ }
+ ~SimpleMatchData() {}
+};
+
std::string
-toString(FieldPositionsIterator posItr,
+toString(const SimpleMatchData& match_data,
bool hasElements = false,
bool hasWeights = false)
{
+ auto posItr = match_data.term.getIterator();
std::stringstream ss;
ss << "{";
ss << posItr.getFieldLength() << ":";
@@ -157,16 +166,15 @@ toString(FieldPositionsIterator posItr,
return ss.str();
}
+template <typename PostingIteratorType>
bool
assertPostingList(const std::string &exp,
- PostingConstItr itr,
+ PostingIteratorType itr,
const FeatureStore *store = nullptr)
{
std::stringstream ss;
FeatureStore::DecodeContextCooked decoder(nullptr);
- TermFieldMatchData tfmd;
- TermFieldMatchDataArray matchData;
- matchData.add(&tfmd);
+ SimpleMatchData match_data;
ss << "[";
for (size_t i = 0; itr.valid(); ++itr, ++i) {
if (i > 0) ss << ",";
@@ -176,8 +184,8 @@ assertPostingList(const std::string &exp,
EntryRef ref(itr.getData().get_features());
store->setupForField(0, decoder);
store->setupForUnpackFeatures(ref, decoder);
- decoder.unpackFeatures(matchData, docId);
- ss << toString(tfmd.getIterator());
+ decoder.unpackFeatures(match_data.array, docId);
+ ss << toString(match_data);
}
}
ss << "]";
@@ -186,8 +194,9 @@ assertPostingList(const std::string &exp,
return result;
}
+template <typename PostingIteratorType>
bool
-assertPostingList(std::vector<uint32_t> &exp, PostingConstItr itr)
+assertPostingList(std::vector<uint32_t> &exp, PostingIteratorType itr)
{
std::stringstream ss;
ss << "[";
@@ -199,22 +208,26 @@ assertPostingList(std::vector<uint32_t> &exp, PostingConstItr itr)
return assertPostingList(ss.str(), itr);
}
-FieldIndexType::PostingList::Iterator
+template <bool interleaved_features>
+typename FieldIndex<interleaved_features>::PostingList::Iterator
find_in_field_index(const vespalib::stringref word,
- uint32_t fieldId,
+ uint32_t field_id,
const FieldIndexCollection& fic)
{
- auto* field_index = dynamic_cast<FieldIndexType*>(fic.getFieldIndex(fieldId));
+ using FieldIndexType = FieldIndex<interleaved_features>;
+ auto* field_index = dynamic_cast<FieldIndexType*>(fic.getFieldIndex(field_id));
assert(field_index != nullptr);
return field_index->find(word);
}
-FieldIndexType::PostingList::ConstIterator
+template <bool interleaved_features>
+typename FieldIndex<interleaved_features>::PostingList::ConstIterator
find_frozen_in_field_index(const vespalib::stringref word,
- uint32_t fieldId,
+ uint32_t field_id,
const FieldIndexCollection& fic)
{
- auto* field_index = dynamic_cast<FieldIndexType*>(fic.getFieldIndex(fieldId));
+ using FieldIndexType = FieldIndex<interleaved_features>;
+ auto* field_index = dynamic_cast<FieldIndexType*>(fic.getFieldIndex(field_id));
assert(field_index != nullptr);
return field_index->findFrozen(word);
}
@@ -353,7 +366,7 @@ public:
bool assertPosting(const vespalib::string &word,
uint32_t fieldId) {
std::vector<uint32_t> exp = _mock.find(word, fieldId);
- PostingConstItr itr = find_in_field_index(word, fieldId, _fieldIndexes);
+ auto itr = find_in_field_index<false>(word, fieldId, _fieldIndexes);
bool result = assertPostingList(exp, itr);
EXPECT_TRUE(result);
return result;
@@ -411,7 +424,7 @@ public:
{
}
- MyDrainRemoves(FieldIndexType& field_index)
+ MyDrainRemoves(IFieldIndex& field_index)
: _remover(field_index.getDocumentRemover())
{
}
@@ -487,6 +500,7 @@ make_single_field_schema()
return result;
}
+template <typename FieldIndexType>
struct FieldIndexTest : public ::testing::Test {
Schema schema;
FieldIndexType idx;
@@ -495,8 +509,187 @@ struct FieldIndexTest : public ::testing::Test {
idx(schema, 0)
{
}
+ ~FieldIndexTest() {}
+ SearchIterator::UP search(const vespalib::stringref word,
+ const SimpleMatchData& match_data) {
+ return make_search_iterator<FieldIndexType::has_interleaved_features>(idx.find(word), idx.getFeatureStore(), 0, match_data.array);
+ }
+};
+
+using FieldIndexTestTypes = ::testing::Types<FieldIndex<false>, FieldIndex<true>>;
+TYPED_TEST_CASE(FieldIndexTest, FieldIndexTestTypes);
+
+// Disable warnings emitted by gtest generated files when using typed tests
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
+
+TYPED_TEST(FieldIndexTest, require_that_fresh_insert_works)
+{
+ EXPECT_TRUE(assertPostingList("[]", this->idx.find("a")));
+ EXPECT_TRUE(assertPostingList("[]", this->idx.findFrozen("a")));
+ EXPECT_EQ(0u, this->idx.getNumUniqueWords());
+ WrapInserter(this->idx).word("a").add(10).flush();
+ EXPECT_TRUE(assertPostingList("[10]", this->idx.find("a")));
+ EXPECT_TRUE(assertPostingList("[]", this->idx.findFrozen("a")));
+ this->idx.commit();
+ EXPECT_TRUE(assertPostingList("[10]", this->idx.findFrozen("a")));
+ EXPECT_EQ(1u, this->idx.getNumUniqueWords());
+}
+
+TYPED_TEST(FieldIndexTest, require_that_append_insert_works)
+{
+ WrapInserter(this->idx).word("a").add(10).flush().rewind().
+ word("a").add(5).flush();
+ EXPECT_TRUE(assertPostingList("[5,10]", this->idx.find("a")));
+ EXPECT_TRUE(assertPostingList("[]", this->idx.findFrozen("a")));
+ WrapInserter(this->idx).rewind().word("a").add(20).flush();
+ EXPECT_TRUE(assertPostingList("[5,10,20]", this->idx.find("a")));
+ EXPECT_TRUE(assertPostingList("[]", this->idx.findFrozen("a")));
+ this->idx.commit();
+ EXPECT_TRUE(assertPostingList("[5,10,20]", this->idx.findFrozen("a")));
+}
+
+TYPED_TEST(FieldIndexTest, require_that_remove_works)
+{
+ WrapInserter(this->idx).word("a").remove(10).flush();
+ EXPECT_TRUE(assertPostingList("[]", this->idx.find("a")));
+ WrapInserter(this->idx).add(10).add(20).add(30).flush();
+ EXPECT_TRUE(assertPostingList("[10,20,30]", this->idx.find("a")));
+ WrapInserter(this->idx).rewind().word("a").remove(10).flush();
+ EXPECT_TRUE(assertPostingList("[20,30]", this->idx.find("a")));
+ WrapInserter(this->idx).remove(20).flush();
+ EXPECT_TRUE(assertPostingList("[30]", this->idx.find("a")));
+ WrapInserter(this->idx).remove(30).flush();
+ EXPECT_TRUE(assertPostingList("[]", this->idx.find("a")));
+ EXPECT_EQ(1u, this->idx.getNumUniqueWords());
+ MyDrainRemoves(this->idx).drain(10);
+ WrapInserter(this->idx).rewind().word("a").add(10).flush();
+ EXPECT_TRUE(assertPostingList("[10]", this->idx.find("a")));
+}
+
+void
+addElement(DocIdAndFeatures &f,
+ uint32_t elemLen,
+ uint32_t numOccs,
+ int32_t weight = 1)
+{
+ f.elements().emplace_back(f.elements().size(), weight, elemLen);
+ f.elements().back().setNumOccs(numOccs);
+ for (uint32_t i = 0; i < numOccs; ++i) {
+ f.word_positions().emplace_back(i);
+ }
+}
+
+DocIdAndFeatures
+getFeatures(uint32_t elemLen, uint32_t numOccs, int32_t weight = 1)
+{
+ DocIdAndFeatures f;
+ addElement(f, elemLen, numOccs, weight);
+ f.set_num_occs(numOccs);
+ f.set_field_length(elemLen);
+ return f;
+}
+
+TYPED_TEST(FieldIndexTest, require_that_posting_iterator_is_working)
+{
+ WrapInserter(this->idx).word("a").add(10, getFeatures(4, 1)).
+ add(20, getFeatures(5, 2)).
+ add(30, getFeatures(6, 1)).
+ add(40, getFeatures(7, 2)).flush();
+ SimpleMatchData match_data;
+ {
+ auto itr = this->search("not", match_data);
+ itr->initFullRange();
+ EXPECT_TRUE(itr->isAtEnd());
+ }
+ {
+ auto itr = this->search("a", match_data);
+ itr->initFullRange();
+ EXPECT_EQ(10u, itr->getDocId());
+ itr->unpack(10);
+ EXPECT_EQ("{4:0}", toString(match_data));
+ EXPECT_TRUE(!itr->seek(25));
+ EXPECT_EQ(30u, itr->getDocId());
+ itr->unpack(30);
+ EXPECT_EQ("{6:0}", toString(match_data));
+ EXPECT_TRUE(itr->seek(40));
+ EXPECT_EQ(40u, itr->getDocId());
+ itr->unpack(40);
+ EXPECT_EQ("{7:0,1}", toString(match_data));
+ EXPECT_TRUE(!itr->seek(41));
+ EXPECT_TRUE(itr->isAtEnd());
+ }
+}
+
+#pragma GCC diagnostic pop
+
+struct FieldIndexInterleavedFeaturesTest : public FieldIndexTest<FieldIndex<true>> {
+ SimpleMatchData match_data;
+ FieldIndexInterleavedFeaturesTest()
+ : FieldIndexTest<FieldIndex<true>>()
+ {
+ WrapInserter(idx).word("a").add(10, getFeatures(5, 2)).flush();
+ }
+ void
+ expect_features_unpacked(const std::string& exp_field_positions,
+ uint32_t exp_num_occs,
+ uint32_t exp_field_length) {
+ auto itr = search("a", match_data);
+ itr->initFullRange();
+ EXPECT_EQ(10u, itr->getDocId());
+ itr->unpack(10);
+ EXPECT_EQ(exp_field_positions, toString(match_data));
+ EXPECT_EQ(exp_num_occs, match_data.term.getNumOccs());
+ EXPECT_EQ(exp_field_length, match_data.term.getFieldLength());
+ EXPECT_EQ(10, match_data.term.getDocId());
+ auto& ranked_itr = dynamic_cast<RankedSearchIteratorBase&>(*itr);
+ EXPECT_TRUE(ranked_itr.getUnpacked());
+ EXPECT_TRUE(!itr->seek(11));
+ EXPECT_TRUE(itr->isAtEnd());
+ }
};
+TEST_F(FieldIndexInterleavedFeaturesTest, only_normal_features_are_unpacked)
+{
+ match_data.term.setNeedNormalFeatures(true);
+ match_data.term.setNeedInterleavedFeatures(false);
+ expect_features_unpacked("{5:0,1}", 0, 0);
+}
+
+TEST_F(FieldIndexInterleavedFeaturesTest, only_interleaved_features_are_unpacked)
+{
+ match_data.term.setNeedNormalFeatures(false);
+ match_data.term.setNeedInterleavedFeatures(true);
+ expect_features_unpacked("{1000000:}", 2, 5);
+}
+
+TEST_F(FieldIndexInterleavedFeaturesTest, both_normal_and_interleaved_features_are_unpacked)
+{
+ match_data.term.setNeedNormalFeatures(true);
+ match_data.term.setNeedInterleavedFeatures(true);
+ expect_features_unpacked("{5:0,1}", 2, 5);
+}
+
+TEST_F(FieldIndexInterleavedFeaturesTest, no_features_are_unpacked)
+{
+ match_data.term.setNeedNormalFeatures(false);
+ match_data.term.setNeedInterleavedFeatures(false);
+ expect_features_unpacked("{1000000:}", 0, 0);
+}
+
+TEST_F(FieldIndexInterleavedFeaturesTest, interleaved_features_are_capped)
+{
+ FeatureStore::DecodeContextCooked decoder(nullptr);
+ WrapInserter(idx).word("b").add(11, getFeatures(66001, 66000)).flush();
+ auto itr = this->idx.find("b");
+ EXPECT_EQ(11, itr.getKey());
+ auto &entry = itr.getData();
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(), entry.get_num_occs());
+ EXPECT_EQ(std::numeric_limits<uint16_t>::max(), entry.get_field_length());
+}
+
Schema
make_multi_field_schema()
{
@@ -508,8 +701,6 @@ make_multi_field_schema()
return result;
}
-
-
struct FieldIndexCollectionTest : public ::testing::Test {
Schema schema;
FieldIndexCollection fic;
@@ -520,38 +711,12 @@ struct FieldIndexCollectionTest : public ::testing::Test {
}
~FieldIndexCollectionTest() {}
- FieldIndexType::PostingList::Iterator find(const vespalib::stringref word,
- uint32_t fieldId) const {
- return find_in_field_index(word, fieldId, fic);
+ NormalFieldIndex::PostingList::Iterator find(const vespalib::stringref word,
+ uint32_t field_id) const {
+ return find_in_field_index<false>(word, field_id, fic);
}
};
-TEST_F(FieldIndexTest, require_that_fresh_insert_works)
-{
- EXPECT_TRUE(assertPostingList("[]", idx.find("a")));
- EXPECT_TRUE(assertPostingList("[]", idx.findFrozen("a")));
- EXPECT_EQ(0u, idx.getNumUniqueWords());
- WrapInserter(idx).word("a").add(10).flush();
- EXPECT_TRUE(assertPostingList("[10]", idx.find("a")));
- EXPECT_TRUE(assertPostingList("[]", idx.findFrozen("a")));
- idx.commit();
- EXPECT_TRUE(assertPostingList("[10]", idx.findFrozen("a")));
- EXPECT_EQ(1u, idx.getNumUniqueWords());
-}
-
-TEST_F(FieldIndexTest, require_that_append_insert_works)
-{
- WrapInserter(idx).word("a").add(10).flush().rewind().
- word("a").add(5).flush();
- EXPECT_TRUE(assertPostingList("[5,10]", idx.find("a")));
- EXPECT_TRUE(assertPostingList("[]", idx.findFrozen("a")));
- WrapInserter(idx).rewind().word("a").add(20).flush();
- EXPECT_TRUE(assertPostingList("[5,10,20]", idx.find("a")));
- EXPECT_TRUE(assertPostingList("[]", idx.findFrozen("a")));
- idx.commit();
- EXPECT_TRUE(assertPostingList("[5,10,20]", idx.findFrozen("a")));
-}
-
TEST_F(FieldIndexCollectionTest, require_that_multiple_posting_lists_across_multiple_fields_can_exist)
{
WrapInserter(fic, 0).word("a").add(10).word("b").add(11).add(15).flush();
@@ -565,24 +730,6 @@ TEST_F(FieldIndexCollectionTest, require_that_multiple_posting_lists_across_mult
EXPECT_TRUE(assertPostingList("[]", find("c", 0)));
}
-TEST_F(FieldIndexTest, require_that_remove_works)
-{
- WrapInserter(idx).word("a").remove(10).flush();
- EXPECT_TRUE(assertPostingList("[]", idx.find("a")));
- WrapInserter(idx).add(10).add(20).add(30).flush();
- EXPECT_TRUE(assertPostingList("[10,20,30]", idx.find("a")));
- WrapInserter(idx).rewind().word("a").remove(10).flush();
- EXPECT_TRUE(assertPostingList("[20,30]", idx.find("a")));
- WrapInserter(idx).remove(20).flush();
- EXPECT_TRUE(assertPostingList("[30]", idx.find("a")));
- WrapInserter(idx).remove(30).flush();
- EXPECT_TRUE(assertPostingList("[]", idx.find("a")));
- EXPECT_EQ(1u, idx.getNumUniqueWords());
- MyDrainRemoves(idx).drain(10);
- WrapInserter(idx).rewind().word("a").add(10).flush();
- EXPECT_TRUE(assertPostingList("[10]", idx.find("a")));
-}
-
TEST_F(FieldIndexCollectionTest, require_that_multiple_insert_and_remove_works)
{
MyInserter inserter(schema);
@@ -625,27 +772,6 @@ TEST_F(FieldIndexCollectionTest, require_that_multiple_insert_and_remove_works)
EXPECT_TRUE(inserter.assertPostings());
}
-void
-addElement(DocIdAndFeatures &f,
- uint32_t elemLen,
- uint32_t numOccs,
- int32_t weight = 1)
-{
- f.elements().emplace_back(f.elements().size(), weight, elemLen);
- f.elements().back().setNumOccs(numOccs);
- for (uint32_t i = 0; i < numOccs; ++i) {
- f.word_positions().emplace_back(i);
- }
-}
-
-DocIdAndFeatures
-getFeatures(uint32_t elemLen, uint32_t numOccs, int32_t weight = 1)
-{
- DocIdAndFeatures f;
- addElement(f, elemLen, numOccs, weight);
- return f;
-}
-
TEST_F(FieldIndexCollectionTest, require_that_features_are_in_posting_lists)
{
WrapInserter(fic, 0).word("a").add(1, getFeatures(4, 2)).flush();
@@ -663,43 +789,6 @@ TEST_F(FieldIndexCollectionTest, require_that_features_are_in_posting_lists)
featureStorePtr(fic, 1)));
}
-TEST_F(FieldIndexTest, require_that_posting_iterator_is_working)
-{
- WrapInserter(idx).word("a").add(10, getFeatures(4, 1)).
- add(20, getFeatures(5, 2)).
- add(30, getFeatures(6, 1)).
- add(40, getFeatures(7, 2)).flush();
- TermFieldMatchData tfmd;
- TermFieldMatchDataArray matchData;
- matchData.add(&tfmd);
- {
- PostingIteratorType itr(idx.find("not"),
- idx.getFeatureStore(),
- 0, matchData);
- itr.initFullRange();
- EXPECT_TRUE(itr.isAtEnd());
- }
- {
- PostingIteratorType itr(idx.find("a"),
- idx.getFeatureStore(),
- 0, matchData);
- itr.initFullRange();
- EXPECT_EQ(10u, itr.getDocId());
- itr.unpack(10);
- EXPECT_EQ("{4:0}", toString(tfmd.getIterator()));
- EXPECT_TRUE(!itr.seek(25));
- EXPECT_EQ(30u, itr.getDocId());
- itr.unpack(30);
- EXPECT_EQ("{6:0}", toString(tfmd.getIterator()));
- EXPECT_TRUE(itr.seek(40));
- EXPECT_EQ(40u, itr.getDocId());
- itr.unpack(40);
- EXPECT_EQ("{7:0,1}", toString(tfmd.getIterator()));
- EXPECT_TRUE(!itr.seek(41));
- EXPECT_TRUE(itr.isAtEnd());
- }
-}
-
TEST_F(FieldIndexCollectionTest, require_that_basic_dumping_to_index_builder_is_working)
{
MyBuilder b(schema);
@@ -774,6 +863,40 @@ TEST_F(FieldIndexCollectionTest, require_that_dumping_words_with_no_docs_to_inde
}
}
+
+struct FieldIndexCollectionTypeTest : public ::testing::Test {
+ Schema schema;
+ FieldIndexCollection fic;
+ FieldIndexCollectionTypeTest()
+ : schema(make_schema()),
+ fic(schema, MockFieldLengthInspector())
+ {
+ }
+ Schema make_schema() {
+ Schema result;
+ result.addIndexField(Schema::IndexField("normal", DataType::STRING));
+ Schema::IndexField interleaved("interleaved", DataType::STRING);
+ interleaved.set_interleaved_features(true);
+ result.addIndexField(interleaved);
+ return result;
+ }
+};
+
+template <typename FieldIndexType>
+void
+expect_field_index_type(const IFieldIndex* field_index)
+{
+ auto* other_type = dynamic_cast<const FieldIndexType*>(field_index);
+ EXPECT_TRUE(other_type != nullptr);
+}
+
+TEST_F(FieldIndexCollectionTypeTest, instantiates_field_index_type_based_on_schema_config)
+{
+ expect_field_index_type<FieldIndex<false>>(fic.getFieldIndex(0));
+ expect_field_index_type<FieldIndex<true>>(fic.getFieldIndex(1));
+}
+
+
class InverterTest : public ::testing::Test {
public:
Schema _schema;
@@ -792,11 +915,16 @@ public:
_inv(_schema, _invertThreads, _pushThreads, _fic)
{
}
- PostingList::Iterator find(const vespalib::stringref word, uint32_t fieldId) const {
- return find_in_field_index(word, fieldId, _fic);
+ NormalFieldIndex::PostingList::Iterator find(const vespalib::stringref word, uint32_t field_id) const {
+ return find_in_field_index<false>(word, field_id, _fic);
+ }
+ NormalFieldIndex::PostingList::ConstIterator findFrozen(const vespalib::stringref word, uint32_t field_id) const {
+ return find_frozen_in_field_index<false>(word, field_id, _fic);
}
- PostingList::ConstIterator findFrozen(const vespalib::stringref word, uint32_t fieldId) const {
- return find_frozen_in_field_index(word, fieldId, _fic);
+ SearchIterator::UP search(const vespalib::stringref word, uint32_t field_id,
+ const SimpleMatchData& match_data) {
+ return make_search_iterator<false>(findFrozen(word, field_id), featureStoreRef(_fic, field_id),
+ field_id, match_data.array);
}
};
@@ -952,58 +1080,56 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working)
afterStats._activeBuffers,
afterStats._holdBuffers);
- TermFieldMatchData tfmd;
- TermFieldMatchDataArray matchData;
- matchData.add(&tfmd);
+ SimpleMatchData match_data;
{
- PostingIteratorType itr(findFrozen("not", 0), featureStoreRef(_fic, 0), 0, matchData);
- itr.initFullRange();
- EXPECT_TRUE(itr.isAtEnd());
+ auto itr = search("not", 0, match_data);
+ itr->initFullRange();
+ EXPECT_TRUE(itr->isAtEnd());
}
{
- PostingIteratorType itr(findFrozen("a", 0), featureStoreRef(_fic, 0), 0, matchData);
- itr.initFullRange();
- EXPECT_EQ(10u, itr.getDocId());
- itr.unpack(10);
- EXPECT_EQ("{4:0}", toString(tfmd.getIterator()));
- EXPECT_TRUE(!itr.seek(25));
- EXPECT_EQ(30u, itr.getDocId());
- itr.unpack(30);
- EXPECT_EQ("{6:0}", toString(tfmd.getIterator()));
- EXPECT_TRUE(itr.seek(40));
- EXPECT_EQ(40u, itr.getDocId());
- itr.unpack(40);
- EXPECT_EQ("{7:0,1,4}", toString(tfmd.getIterator()));
- EXPECT_TRUE(!itr.seek(41));
- EXPECT_TRUE(itr.isAtEnd());
+ auto itr = search("a", 0, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(10u, itr->getDocId());
+ itr->unpack(10);
+ EXPECT_EQ("{4:0}", toString(match_data));
+ EXPECT_TRUE(!itr->seek(25));
+ EXPECT_EQ(30u, itr->getDocId());
+ itr->unpack(30);
+ EXPECT_EQ("{6:0}", toString(match_data));
+ EXPECT_TRUE(itr->seek(40));
+ EXPECT_EQ(40u, itr->getDocId());
+ itr->unpack(40);
+ EXPECT_EQ("{7:0,1,4}", toString(match_data));
+ EXPECT_TRUE(!itr->seek(41));
+ EXPECT_TRUE(itr->isAtEnd());
}
{
- PostingIteratorType itr(findFrozen("x", 0), featureStoreRef(_fic, 0), 0, matchData);
- itr.initFullRange();
- EXPECT_TRUE(itr.isAtEnd());
+ auto itr = search("x", 0, match_data);
+ itr->initFullRange();
+ EXPECT_TRUE(itr->isAtEnd());
}
{
- PostingIteratorType itr(findFrozen("x", 1), featureStoreRef(_fic, 1), 1, matchData);
- itr.initFullRange();
- EXPECT_EQ(30u, itr.getDocId());
- itr.unpack(30);
- EXPECT_EQ("{6:2[e=0,w=1,l=6]}", toString(tfmd.getIterator(), true, true));
+ auto itr = search("x", 1, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(30u, itr->getDocId());
+ itr->unpack(30);
+ EXPECT_EQ("{6:2[e=0,w=1,l=6]}", toString(match_data, true, true));
}
{
- PostingIteratorType itr(findFrozen("x", 2), featureStoreRef(_fic, 2), 2, matchData);
- itr.initFullRange();
- EXPECT_EQ(30u, itr.getDocId());
- itr.unpack(30);
+ auto itr = search("x", 2, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(30u, itr->getDocId());
+ itr->unpack(30);
// weight is hardcoded to 1 for new style il doc array field
- EXPECT_EQ("{2:1[e=0,w=1,l=2]}", toString(tfmd.getIterator(), true, true));
+ EXPECT_EQ("{2:1[e=0,w=1,l=2]}", toString(match_data, true, true));
}
{
- PostingIteratorType itr(findFrozen("x", 3), featureStoreRef(_fic, 3), 3, matchData);
- itr.initFullRange();
- EXPECT_EQ(30u, itr.getDocId());
- itr.unpack(30);
+ auto itr = search("x", 3, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(30u, itr->getDocId());
+ itr->unpack(30);
EXPECT_EQ("{2:1[e=0,w=6,l=2]}",
- toString(tfmd.getIterator(), true, true));
+ toString(match_data, true, true));
}
}
@@ -1190,54 +1316,44 @@ TEST_F(UriInverterTest, require_that_uri_indexing_is_working)
_pushThreads.sync();
- TermFieldMatchData tfmd;
- TermFieldMatchDataArray matchData;
- matchData.add(&tfmd);
+ SimpleMatchData match_data;
{
uint32_t fieldId = _schema.getIndexFieldId("iu");
- PostingIteratorType itr(findFrozen("not", fieldId),
- featureStoreRef(_fic, fieldId),
- fieldId, matchData);
- itr.initFullRange();
- EXPECT_TRUE(itr.isAtEnd());
+ auto itr = search("not", fieldId, match_data);
+ itr->initFullRange();
+ EXPECT_TRUE(itr->isAtEnd());
}
{
uint32_t fieldId = _schema.getIndexFieldId("iu");
- PostingIteratorType itr(findFrozen("example", fieldId),
- featureStoreRef(_fic, fieldId),
- fieldId, matchData);
- itr.initFullRange();
- EXPECT_EQ(10u, itr.getDocId());
- itr.unpack(10);
- EXPECT_EQ("{9:2}", toString(tfmd.getIterator()));
- EXPECT_TRUE(!itr.seek(25));
- EXPECT_TRUE(itr.isAtEnd());
+ auto itr = search("example", fieldId, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(10u, itr->getDocId());
+ itr->unpack(10);
+ EXPECT_EQ("{9:2}", toString(match_data));
+ EXPECT_TRUE(!itr->seek(25));
+ EXPECT_TRUE(itr->isAtEnd());
}
{
uint32_t fieldId = _schema.getIndexFieldId("iau");
- PostingIteratorType itr(findFrozen("example", fieldId),
- featureStoreRef(_fic, fieldId),
- fieldId, matchData);
- itr.initFullRange();
- EXPECT_EQ(10u, itr.getDocId());
- itr.unpack(10);
+ auto itr = search("example", fieldId, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(10u, itr->getDocId());
+ itr->unpack(10);
EXPECT_EQ("{9:2[e=0,l=9]}",
- toString(tfmd.getIterator(), true, false));
- EXPECT_TRUE(!itr.seek(25));
- EXPECT_TRUE(itr.isAtEnd());
+ toString(match_data, true, false));
+ EXPECT_TRUE(!itr->seek(25));
+ EXPECT_TRUE(itr->isAtEnd());
}
{
uint32_t fieldId = _schema.getIndexFieldId("iwu");
- PostingIteratorType itr(findFrozen("example", fieldId),
- featureStoreRef(_fic, fieldId),
- fieldId, matchData);
- itr.initFullRange();
- EXPECT_EQ(10u, itr.getDocId());
- itr.unpack(10);
+ auto itr = search("example", fieldId, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(10u, itr->getDocId());
+ itr->unpack(10);
EXPECT_EQ("{9:2[e=0,w=4,l=9]}",
- toString(tfmd.getIterator(), true, true));
- EXPECT_TRUE(!itr.seek(25));
- EXPECT_TRUE(itr.isAtEnd());
+ toString(match_data, true, true));
+ EXPECT_TRUE(!itr->seek(25));
+ EXPECT_TRUE(itr->isAtEnd());
}
{
search::diskindex::IndexBuilder dib(_schema);
@@ -1276,42 +1392,34 @@ TEST_F(CjkInverterTest, require_that_cjk_indexing_is_working)
_pushThreads.sync();
- TermFieldMatchData tfmd;
- TermFieldMatchDataArray matchData;
- matchData.add(&tfmd);
+ SimpleMatchData match_data;
uint32_t fieldId = _schema.getIndexFieldId("f0");
{
- PostingIteratorType itr(findFrozen("not", fieldId),
- featureStoreRef(_fic, fieldId),
- fieldId, matchData);
- itr.initFullRange();
- EXPECT_TRUE(itr.isAtEnd());
+ auto itr = search("not", fieldId, match_data);
+ itr->initFullRange();
+ EXPECT_TRUE(itr->isAtEnd());
}
{
- PostingIteratorType itr(findFrozen("我就"
- "是那个",
- fieldId),
- featureStoreRef(_fic, fieldId),
- fieldId, matchData);
- itr.initFullRange();
- EXPECT_EQ(10u, itr.getDocId());
- itr.unpack(10);
- EXPECT_EQ("{2:0}", toString(tfmd.getIterator()));
- EXPECT_TRUE(!itr.seek(25));
- EXPECT_TRUE(itr.isAtEnd());
+ auto itr = search("我就"
+ "是那个",
+ fieldId, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(10u, itr->getDocId());
+ itr->unpack(10);
+ EXPECT_EQ("{2:0}", toString(match_data));
+ EXPECT_TRUE(!itr->seek(25));
+ EXPECT_TRUE(itr->isAtEnd());
}
{
- PostingIteratorType itr(findFrozen("大灰"
- "狼",
- fieldId),
- featureStoreRef(_fic, fieldId),
- fieldId, matchData);
- itr.initFullRange();
- EXPECT_EQ(10u, itr.getDocId());
- itr.unpack(10);
- EXPECT_EQ("{2:1}", toString(tfmd.getIterator()));
- EXPECT_TRUE(!itr.seek(25));
- EXPECT_TRUE(itr.isAtEnd());
+ auto itr = search("大灰"
+ "狼",
+ fieldId, match_data);
+ itr->initFullRange();
+ EXPECT_EQ(10u, itr->getDocId());
+ itr->unpack(10);
+ EXPECT_EQ("{2:1}", toString(match_data));
+ EXPECT_TRUE(!itr->seek(25));
+ EXPECT_TRUE(itr->isAtEnd());
}
}
diff --git a/searchlib/src/tests/memoryindex/field_index_remover/CMakeLists.txt b/searchlib/src/tests/memoryindex/field_index_remover/CMakeLists.txt
index ef75337c6b6..f18b4ba29cd 100644
--- a/searchlib/src/tests/memoryindex/field_index_remover/CMakeLists.txt
+++ b/searchlib/src/tests/memoryindex/field_index_remover/CMakeLists.txt
@@ -4,5 +4,6 @@ vespa_add_executable(searchlib_field_index_remover_test_app TEST
field_index_remover_test.cpp
DEPENDS
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_field_index_remover_test_app COMMAND searchlib_field_index_remover_test_app)
diff --git a/searchlib/src/tests/memoryindex/field_index_remover/field_index_remover_test.cpp b/searchlib/src/tests/memoryindex/field_index_remover/field_index_remover_test.cpp
index fed6d963b70..c0e8871b80a 100644
--- a/searchlib/src/tests/memoryindex/field_index_remover/field_index_remover_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_index_remover/field_index_remover_test.cpp
@@ -1,10 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/testapp.h>
-
#include <vespa/searchlib/memoryindex/field_index_remover.h>
#include <vespa/searchlib/memoryindex/i_field_index_remove_listener.h>
#include <vespa/searchlib/memoryindex/word_store.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <algorithm>
@@ -14,8 +13,7 @@ LOG_SETUP("document_remover_test");
using namespace search;
using namespace search::memoryindex;
-struct WordFieldPair
-{
+struct WordFieldPair {
vespalib::string _word;
uint32_t _fieldId;
WordFieldPair(vespalib::stringref word, uint32_t fieldId)
@@ -29,7 +27,7 @@ struct WordFieldPair
}
};
-typedef std::vector<WordFieldPair> WordFieldVector;
+using WordFieldVector = std::vector<WordFieldPair>;
std::ostream &
operator<<(std::ostream &os, const WordFieldPair &val)
@@ -38,13 +36,12 @@ operator<<(std::ostream &os, const WordFieldPair &val)
return os;
}
-struct MockRemoveListener : public IFieldIndexRemoveListener
-{
+struct MockRemoveListener : public IFieldIndexRemoveListener {
WordFieldVector _words;
uint32_t _expDocId;
uint32_t _fieldId;
virtual void remove(const vespalib::stringref word, uint32_t docId) override {
- EXPECT_EQUAL(_expDocId, docId);
+ EXPECT_EQ(_expDocId, docId);
_words.emplace_back(word, _fieldId);
}
void reset(uint32_t expDocId) {
@@ -60,13 +57,13 @@ struct MockRemoveListener : public IFieldIndexRemoveListener
void setFieldId(uint32_t fieldId) { _fieldId = fieldId; }
};
-struct Fixture
-{
+struct FieldIndexRemoverTest : public ::testing::Test {
MockRemoveListener _listener;
std::vector<std::unique_ptr<WordStore>> _wordStores;
std::vector<std::map<vespalib::string, datastore::EntryRef>> _wordToRefMaps;
std::vector<std::unique_ptr<FieldIndexRemover>> _removers;
- Fixture()
+
+ FieldIndexRemoverTest()
: _listener(),
_wordStores(),
_wordToRefMaps(),
@@ -91,7 +88,7 @@ struct Fixture
}
return itr->second;
}
- Fixture &insert(const vespalib::string &word, uint32_t fieldId, uint32_t docId) {
+ FieldIndexRemoverTest &insert(const vespalib::string &word, uint32_t fieldId, uint32_t docId) {
assert(fieldId < _wordStores.size());
_removers[fieldId]->insert(getWordRef(word, fieldId), docId);
return *this;
@@ -113,32 +110,31 @@ struct Fixture
}
};
-TEST_F("require that {word,fieldId} pairs for multiple doc ids can be inserted", Fixture)
+TEST_F(FieldIndexRemoverTest, word_field_id_pairs_for_multiple_doc_ids_can_be_inserted)
{
- f.insert("a", 1, 10).insert("a", 1, 20).insert("a", 1, 30);
- f.insert("a", 2, 10).insert("a", 2, 20);
- f.insert("b", 1, 20).insert("b", 1, 30);
- f.insert("b", 2, 10).insert("b", 2, 30);
- f.insert("c", 1, 10);
- f.insert("c", 2, 20);
- f.insert("c", 3, 30);
- f.flush();
+ insert("a", 1, 10).insert("a", 1, 20).insert("a", 1, 30);
+ insert("a", 2, 10).insert("a", 2, 20);
+ insert("b", 1, 20).insert("b", 1, 30);
+ insert("b", 2, 10).insert("b", 2, 30);
+ insert("c", 1, 10);
+ insert("c", 2, 20);
+ insert("c", 3, 30);
+ flush();
- EXPECT_EQUAL("[{a,1},{a,2},{b,2},{c,1}]", f.remove(10));
- EXPECT_EQUAL("[{a,1},{a,2},{b,1},{c,2}]", f.remove(20));
- EXPECT_EQUAL("[{a,1},{b,1},{b,2},{c,3}]", f.remove(30));
+ EXPECT_EQ("[{a,1},{a,2},{b,2},{c,1}]", remove(10));
+ EXPECT_EQ("[{a,1},{a,2},{b,1},{c,2}]", remove(20));
+ EXPECT_EQ("[{a,1},{b,1},{b,2},{c,3}]", remove(30));
}
-TEST_F("require that we can insert after flush", Fixture)
+TEST_F(FieldIndexRemoverTest, we_can_insert_after_flush)
{
- f.insert("a", 1, 10).insert("b", 1, 10);
- f.flush();
- f.insert("b", 1, 20).insert("b", 2, 20);
- f.flush();
+ insert("a", 1, 10).insert("b", 1, 10);
+ flush();
+ insert("b", 1, 20).insert("b", 2, 20);
+ flush();
- EXPECT_EQUAL("[{a,1},{b,1}]", f.remove(10));
- EXPECT_EQUAL("[{b,1},{b,2}]", f.remove(20));
+ EXPECT_EQ("[{a,1},{b,1}]", remove(10));
+ EXPECT_EQ("[{b,1},{b,2}]", remove(20));
}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/memoryindex/field_inverter/CMakeLists.txt b/searchlib/src/tests/memoryindex/field_inverter/CMakeLists.txt
index f39e05d6823..6fefada6570 100644
--- a/searchlib/src/tests/memoryindex/field_inverter/CMakeLists.txt
+++ b/searchlib/src/tests/memoryindex/field_inverter/CMakeLists.txt
@@ -5,5 +5,6 @@ vespa_add_executable(searchlib_field_inverter_test_app TEST
DEPENDS
searchlib_test
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_field_inverter_test_app COMMAND searchlib_field_inverter_test_app)
diff --git a/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp b/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
index ff0629d2172..72a8f6ed239 100644
--- a/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp
@@ -7,7 +7,7 @@
#include <vespa/searchlib/memoryindex/field_inverter.h>
#include <vespa/searchlib/memoryindex/word_store.h>
#include <vespa/searchlib/test/memoryindex/ordered_field_index_inserter.h>
-#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace search {
@@ -21,10 +21,8 @@ using namespace index;
namespace memoryindex {
-
namespace {
-
Document::UP
makeDoc10(DocBuilder &b)
{
@@ -35,7 +33,6 @@ makeDoc10(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc11(DocBuilder &b)
{
@@ -49,7 +46,6 @@ makeDoc11(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc12(DocBuilder &b)
{
@@ -60,7 +56,6 @@ makeDoc12(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc13(DocBuilder &b)
{
@@ -71,7 +66,6 @@ makeDoc13(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc14(DocBuilder &b)
{
@@ -82,7 +76,6 @@ makeDoc14(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc15(DocBuilder &b)
{
@@ -90,7 +83,6 @@ makeDoc15(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc16(DocBuilder &b)
{
@@ -113,8 +105,7 @@ makeDoc17(DocBuilder &b)
}
-struct Fixture
-{
+struct FieldInverterTest : public ::testing::Test {
Schema _schema;
DocBuilder _b;
WordStore _word_store;
@@ -123,9 +114,7 @@ struct Fixture
std::vector<std::unique_ptr<FieldLengthCalculator>> _calculators;
std::vector<std::unique_ptr<FieldInverter> > _inverters;
- static Schema
- makeSchema()
- {
+ static Schema makeSchema() {
Schema schema;
schema.addIndexField(Schema::IndexField("f0", DataType::STRING));
schema.addIndexField(Schema::IndexField("f1", DataType::STRING));
@@ -134,7 +123,7 @@ struct Fixture
return schema;
}
- Fixture()
+ FieldInverterTest()
: _schema(makeSchema()),
_b(_schema),
_word_store(),
@@ -154,9 +143,7 @@ struct Fixture
}
}
- void
- invertDocument(uint32_t docId, const Document &doc)
- {
+ void invertDocument(uint32_t docId, const Document &doc) {
uint32_t fieldId = 0;
for (auto &inverter : _inverters) {
vespalib::stringref fieldName =
@@ -166,9 +153,7 @@ struct Fixture
}
}
- void
- pushDocuments()
- {
+ void pushDocuments() {
uint32_t fieldId = 0;
for (auto &inverter : _inverters) {
_inserter.setFieldId(fieldId);
@@ -177,218 +162,207 @@ struct Fixture
}
}
- void
- removeDocument(uint32_t docId) {
+ void removeDocument(uint32_t docId) {
for (auto &inverter : _inverters) {
inverter->removeDocument(docId);
}
}
void assert_calculator(uint32_t field_id, double exp_avg, uint32_t exp_samples) {
- double epsilon = 0.000000001;
const auto &calc = *_calculators[field_id];
- EXPECT_APPROX(exp_avg, calc.get_average_field_length(), epsilon);
- EXPECT_EQUAL(exp_samples, calc.get_num_samples());
+ EXPECT_DOUBLE_EQ(exp_avg, calc.get_average_field_length());
+ EXPECT_EQ(exp_samples, calc.get_num_samples());
}
};
-
-TEST_F("requireThatFreshInsertWorks", Fixture)
+TEST_F(FieldInverterTest, require_that_fresh_insert_works)
{
- f.invertDocument(10, *makeDoc10(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,"
- "w=b,a=10,"
- "w=c,a=10,"
- "w=d,a=10",
- f._inserter.toStr());
+ invertDocument(10, *makeDoc10(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,"
+ "w=b,a=10,"
+ "w=c,a=10,"
+ "w=d,a=10",
+ _inserter.toStr());
}
-
-TEST_F("requireThatMultipleDocsWork", Fixture)
+TEST_F(FieldInverterTest, require_that_multiple_docs_work)
{
- f.invertDocument(10, *makeDoc10(f._b));
- f.invertDocument(11, *makeDoc11(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,a=11,"
- "w=b,a=10,a=11,"
- "w=c,a=10,w=d,a=10,"
- "w=e,a=11,"
- "w=f,a=11,"
- "f=1,w=a,a=11,"
- "w=g,a=11",
- f._inserter.toStr());
+ invertDocument(10, *makeDoc10(_b));
+ invertDocument(11, *makeDoc11(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,a=11,"
+ "w=b,a=10,a=11,"
+ "w=c,a=10,w=d,a=10,"
+ "w=e,a=11,"
+ "w=f,a=11,"
+ "f=1,w=a,a=11,"
+ "w=g,a=11",
+ _inserter.toStr());
}
-
-TEST_F("requireThatRemoveWorks", Fixture)
+TEST_F(FieldInverterTest, require_that_remove_works)
{
- f._inverters[0]->remove("b", 10);
- f._inverters[0]->remove("a", 10);
- f._inverters[0]->remove("b", 11);
- f._inverters[2]->remove("c", 12);
- f._inverters[1]->remove("a", 10);
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,r=10,"
- "w=b,r=10,r=11,"
- "f=1,w=a,r=10,"
- "f=2,w=c,r=12",
- f._inserter.toStr());
+ _inverters[0]->remove("b", 10);
+ _inverters[0]->remove("a", 10);
+ _inverters[0]->remove("b", 11);
+ _inverters[2]->remove("c", 12);
+ _inverters[1]->remove("a", 10);
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,r=10,"
+ "w=b,r=10,r=11,"
+ "f=1,w=a,r=10,"
+ "f=2,w=c,r=12",
+ _inserter.toStr());
}
-
-TEST_F("requireThatReputWorks", Fixture)
+TEST_F(FieldInverterTest, require_that_reput_works)
{
- f.invertDocument(10, *makeDoc10(f._b));
- f.invertDocument(10, *makeDoc11(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,"
- "w=b,a=10,"
- "w=e,a=10,"
- "w=f,a=10,"
- "f=1,w=a,a=10,"
- "w=g,a=10",
- f._inserter.toStr());
+ invertDocument(10, *makeDoc10(_b));
+ invertDocument(10, *makeDoc11(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,"
+ "w=b,a=10,"
+ "w=e,a=10,"
+ "w=f,a=10,"
+ "f=1,w=a,a=10,"
+ "w=g,a=10",
+ _inserter.toStr());
}
-
-TEST_F("requireThatAbortPendingDocWorks", Fixture)
+TEST_F(FieldInverterTest, require_that_abort_pending_doc_works)
{
- Document::UP doc10 = makeDoc10(f._b);
- Document::UP doc11 = makeDoc11(f._b);
- Document::UP doc12 = makeDoc12(f._b);
- Document::UP doc13 = makeDoc13(f._b);
- Document::UP doc14 = makeDoc14(f._b);
-
- f.invertDocument(10, *doc10);
- f.invertDocument(11, *doc11);
- f.removeDocument(10);
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=11,"
- "w=b,a=11,"
- "w=e,a=11,"
- "w=f,a=11,"
- "f=1,w=a,a=11,"
- "w=g,a=11",
- f._inserter.toStr());
-
- f.invertDocument(10, *doc10);
- f.invertDocument(11, *doc11);
- f.invertDocument(12, *doc12);
- f.invertDocument(13, *doc13);
- f.invertDocument(14, *doc14);
- f.removeDocument(11);
- f.removeDocument(13);
- f._inserter.reset();
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,"
- "w=b,a=10,"
- "w=c,a=10,"
- "w=d,a=10,"
- "w=doc12,a=12,"
- "w=doc14,a=14,"
- "w=h,a=12,"
- "w=j,a=14",
- f._inserter.toStr());
-
- f.invertDocument(10, *doc10);
- f.invertDocument(11, *doc11);
- f.invertDocument(12, *doc12);
- f.invertDocument(13, *doc13);
- f.invertDocument(14, *doc14);
- f.removeDocument(11);
- f.removeDocument(12);
- f.removeDocument(13);
- f.removeDocument(14);
- f._inserter.reset();
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,"
- "w=b,a=10,"
- "w=c,a=10,"
- "w=d,a=10",
- f._inserter.toStr());
-
-
+ auto doc10 = makeDoc10(_b);
+ auto doc11 = makeDoc11(_b);
+ auto doc12 = makeDoc12(_b);
+ auto doc13 = makeDoc13(_b);
+ auto doc14 = makeDoc14(_b);
+
+ invertDocument(10, *doc10);
+ invertDocument(11, *doc11);
+ removeDocument(10);
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=11,"
+ "w=b,a=11,"
+ "w=e,a=11,"
+ "w=f,a=11,"
+ "f=1,w=a,a=11,"
+ "w=g,a=11",
+ _inserter.toStr());
+
+ invertDocument(10, *doc10);
+ invertDocument(11, *doc11);
+ invertDocument(12, *doc12);
+ invertDocument(13, *doc13);
+ invertDocument(14, *doc14);
+ removeDocument(11);
+ removeDocument(13);
+ _inserter.reset();
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,"
+ "w=b,a=10,"
+ "w=c,a=10,"
+ "w=d,a=10,"
+ "w=doc12,a=12,"
+ "w=doc14,a=14,"
+ "w=h,a=12,"
+ "w=j,a=14",
+ _inserter.toStr());
+
+ invertDocument(10, *doc10);
+ invertDocument(11, *doc11);
+ invertDocument(12, *doc12);
+ invertDocument(13, *doc13);
+ invertDocument(14, *doc14);
+ removeDocument(11);
+ removeDocument(12);
+ removeDocument(13);
+ removeDocument(14);
+ _inserter.reset();
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,"
+ "w=b,a=10,"
+ "w=c,a=10,"
+ "w=d,a=10",
+ _inserter.toStr());
}
-
-TEST_F("requireThatMixOfAddAndRemoveWorks", Fixture)
+TEST_F(FieldInverterTest, require_that_mix_of_add_and_remove_works)
{
- f._inverters[0]->remove("a", 11);
- f._inverters[0]->remove("c", 9);
- f._inverters[0]->remove("d", 10);
- f._inverters[0]->remove("z", 12);
- f.invertDocument(10, *makeDoc10(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,w=a,a=10,r=11,"
- "w=b,a=10,"
- "w=c,r=9,a=10,"
- "w=d,r=10,a=10,"
- "w=z,r=12",
- f._inserter.toStr());
+ _inverters[0]->remove("a", 11);
+ _inverters[0]->remove("c", 9);
+ _inverters[0]->remove("d", 10);
+ _inverters[0]->remove("z", 12);
+ invertDocument(10, *makeDoc10(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,w=a,a=10,r=11,"
+ "w=b,a=10,"
+ "w=c,r=9,a=10,"
+ "w=d,r=10,a=10,"
+ "w=z,r=12",
+ _inserter.toStr());
}
-
-TEST_F("require that empty document can be inverted", Fixture)
+TEST_F(FieldInverterTest, require_that_empty_document_can_be_inverted)
{
- f.invertDocument(15, *makeDoc15(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("",
- f._inserter.toStr());
+ invertDocument(15, *makeDoc15(_b));
+ pushDocuments();
+ EXPECT_EQ("",
+ _inserter.toStr());
}
-TEST_F("require that multiple words at same position works", Fixture)
+TEST_F(FieldInverterTest, require_that_multiple_words_at_same_position_works)
{
- f.invertDocument(16, *makeDoc16(f._b));
- f._inserter.setVerbose();
- f.pushDocuments();
- EXPECT_EQUAL("f=0,"
- "w=altbaz,a=16(e=0,w=1,l=5[2]),"
- "w=alty,a=16(e=0,w=1,l=5[3]),"
- "w=bar,a=16(e=0,w=1,l=5[1]),"
- "w=baz,a=16(e=0,w=1,l=5[2]),"
- "w=foo,a=16(e=0,w=1,l=5[0]),"
- "w=y,a=16(e=0,w=1,l=5[3]),"
- "w=z,a=16(e=0,w=1,l=5[4])",
- f._inserter.toStr());
+ invertDocument(16, *makeDoc16(_b));
+ _inserter.setVerbose();
+ pushDocuments();
+ EXPECT_EQ("f=0,"
+ "w=altbaz,a=16(e=0,w=1,l=5[2]),"
+ "w=alty,a=16(e=0,w=1,l=5[3]),"
+ "w=bar,a=16(e=0,w=1,l=5[1]),"
+ "w=baz,a=16(e=0,w=1,l=5[2]),"
+ "w=foo,a=16(e=0,w=1,l=5[0]),"
+ "w=y,a=16(e=0,w=1,l=5[3]),"
+ "w=z,a=16(e=0,w=1,l=5[4])",
+ _inserter.toStr());
}
-TEST_F("require that interleaved features are calculated", Fixture)
+TEST_F(FieldInverterTest, require_that_interleaved_features_are_calculated)
{
- f.invertDocument(17, *makeDoc17(f._b));
- f._inserter.setVerbose();
- f._inserter.set_show_interleaved_features();
- f.pushDocuments();
- EXPECT_EQUAL("f=1,"
- "w=bar0,a=17(fl=2,occs=1,e=0,w=1,l=2[1]),"
- "w=foo0,a=17(fl=2,occs=1,e=0,w=1,l=2[0]),"
- "f=2,"
- "w=bar,a=17(fl=3,occs=2,e=0,w=1,l=2[1],e=1,w=1,l=1[0]),"
- "w=foo,a=17(fl=3,occs=1,e=0,w=1,l=2[0]),"
- "f=3,"
- "w=bar2,a=17(fl=3,occs=2,e=0,w=3,l=2[1],e=1,w=4,l=1[0]),"
- "w=foo2,a=17(fl=3,occs=1,e=0,w=3,l=2[0])",
- f._inserter.toStr());
+ invertDocument(17, *makeDoc17(_b));
+ _inserter.setVerbose();
+ _inserter.set_show_interleaved_features();
+ pushDocuments();
+ EXPECT_EQ("f=1,"
+ "w=bar0,a=17(fl=2,occs=1,e=0,w=1,l=2[1]),"
+ "w=foo0,a=17(fl=2,occs=1,e=0,w=1,l=2[0]),"
+ "f=2,"
+ "w=bar,a=17(fl=3,occs=2,e=0,w=1,l=2[1],e=1,w=1,l=1[0]),"
+ "w=foo,a=17(fl=3,occs=1,e=0,w=1,l=2[0]),"
+ "f=3,"
+ "w=bar2,a=17(fl=3,occs=2,e=0,w=3,l=2[1],e=1,w=4,l=1[0]),"
+ "w=foo2,a=17(fl=3,occs=1,e=0,w=3,l=2[0])",
+ _inserter.toStr());
}
-TEST_F("require that average field length is calculated", Fixture)
+TEST_F(FieldInverterTest, require_that_average_field_length_is_calculated)
{
- f.invertDocument(10, *makeDoc10(f._b));
- f.pushDocuments();
- TEST_DO(f.assert_calculator(0, 4.0, 1));
- TEST_DO(f.assert_calculator(1, 0.0, 0));
- f.invertDocument(11, *makeDoc11(f._b));
- f.pushDocuments();
- TEST_DO(f.assert_calculator(0, (4.0 + 4.0)/2, 2));
- TEST_DO(f.assert_calculator(1, 2.0, 1));
- f.invertDocument(12, *makeDoc12(f._b));
- f.pushDocuments();
- TEST_DO(f.assert_calculator(0, (4.0 + 4.0 + 2.0)/3, 3));
- TEST_DO(f.assert_calculator(1, 2.0, 1));
+ invertDocument(10, *makeDoc10(_b));
+ pushDocuments();
+ assert_calculator(0, 4.0, 1);
+ assert_calculator(1, 0.0, 0);
+ invertDocument(11, *makeDoc11(_b));
+ pushDocuments();
+ assert_calculator(0, (4.0 + 4.0)/2, 2);
+ assert_calculator(1, 2.0, 1);
+ invertDocument(12, *makeDoc12(_b));
+ pushDocuments();
+ assert_calculator(0, (4.0 + 4.0 + 2.0)/3, 3);
+ assert_calculator(1, 2.0, 1);
}
-} // namespace memoryindex
-} // namespace search
+}
+}
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/memoryindex/url_field_inverter/CMakeLists.txt b/searchlib/src/tests/memoryindex/url_field_inverter/CMakeLists.txt
index 28efc8a861e..db9418b7190 100644
--- a/searchlib/src/tests/memoryindex/url_field_inverter/CMakeLists.txt
+++ b/searchlib/src/tests/memoryindex/url_field_inverter/CMakeLists.txt
@@ -5,5 +5,6 @@ vespa_add_executable(searchlib_url_field_inverter_test_app TEST
DEPENDS
searchlib_test
searchlib
+ gtest
)
vespa_add_test(NAME searchlib_url_field_inverter_test_app COMMAND searchlib_url_field_inverter_test_app)
diff --git a/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp b/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
index 2ea13a20063..2151a44a66d 100644
--- a/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/url_field_inverter/url_field_inverter_test.cpp
@@ -1,15 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/* -*- mode: C++; coding: utf-8; -*- */
#include <vespa/document/repo/fixedtyperepo.h>
#include <vespa/searchlib/index/docbuilder.h>
#include <vespa/searchlib/index/field_length_calculator.h>
#include <vespa/searchlib/memoryindex/field_index_remover.h>
#include <vespa/searchlib/memoryindex/field_inverter.h>
-#include <vespa/searchlib/memoryindex/word_store.h>
#include <vespa/searchlib/memoryindex/url_field_inverter.h>
+#include <vespa/searchlib/memoryindex/word_store.h>
#include <vespa/searchlib/test/memoryindex/ordered_field_index_inserter.h>
-#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace search {
@@ -56,7 +55,6 @@ makeDoc10Single(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc10Array(DocBuilder &b)
{
@@ -169,7 +167,6 @@ makeDoc10WeightedSet(DocBuilder &b)
return b.endDocument();
}
-
Document::UP
makeDoc10Empty(DocBuilder &b)
{
@@ -179,8 +176,7 @@ makeDoc10Empty(DocBuilder &b)
}
-struct Fixture
-{
+struct UrlFieldInverterTest : public ::testing::Test {
Schema _schema;
DocBuilder _b;
WordStore _word_store;
@@ -191,15 +187,13 @@ struct Fixture
std::unique_ptr<UrlFieldInverter> _urlInverter;
index::SchemaIndexFields _schemaIndexFields;
- static Schema
- makeSchema(Schema::CollectionType collectionType)
- {
+ static Schema makeSchema(Schema::CollectionType collectionType) {
Schema schema;
schema.addUriIndexFields(Schema::IndexField("url", DataType::STRING, collectionType));
return schema;
}
- Fixture(Schema::CollectionType collectionType)
+ UrlFieldInverterTest(Schema::CollectionType collectionType)
: _schema(makeSchema(collectionType)),
_b(_schema),
_word_store(),
@@ -233,15 +227,11 @@ struct Fixture
_inverters[urlField._hostname].get());
}
- void
- invertDocument(uint32_t docId, const Document &doc)
- {
+ void invertDocument(uint32_t docId, const Document &doc) {
_urlInverter->invertField(docId, doc.getValue(url));
}
- void
- pushDocuments()
- {
+ void pushDocuments() {
uint32_t fieldId = 0;
for (auto &inverter : _inverters) {
_inserter.setFieldId(fieldId);
@@ -250,324 +240,330 @@ struct Fixture
}
}
- void
- enableAnnotations()
- {
+ void enableAnnotations() {
_urlInverter->setUseAnnotations(true);
}
};
+struct SingleInverterTest : public UrlFieldInverterTest {
+ SingleInverterTest() : UrlFieldInverterTest(CollectionType::SINGLE) {}
+};
+
+struct ArrayInverterTest : public UrlFieldInverterTest {
+ ArrayInverterTest() : UrlFieldInverterTest(CollectionType::ARRAY) {}
+};
-TEST_F("requireThatSingleUrlFieldWorks", Fixture(CollectionType::SINGLE))
-{
- f.invertDocument(10, *makeDoc10Single(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,"
- "w=2,a=10,"
- "w=4,a=10,"
- "w=81,a=10,"
- "w=ab,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=fluke,a=10,"
- "w=http,a=10,"
- "w=www,a=10,"
- "f=1,"
- "w=http,a=10,"
- "f=2,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=www,a=10,"
- "f=3,"
- "w=81,a=10,"
- "f=4,"
- "w=fluke,a=10,"
- "f=5,"
- "w=2,a=10,"
- "w=ab,a=10,"
- "f=6,"
- "w=4,a=10,"
- "f=7,"
- "w=EnDhOsT,a=10,"
- "w=StArThOsT,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=www,a=10",
- f._inserter.toStr());
-}
+struct WeightedSetInverterTest : public UrlFieldInverterTest {
+ WeightedSetInverterTest() : UrlFieldInverterTest(CollectionType::WEIGHTEDSET) {}
+};
-TEST_F("requireThatArrayUrlFieldWorks", Fixture(CollectionType::ARRAY))
+TEST_F(SingleInverterTest, require_that_single_url_field_works)
{
- f.invertDocument(10, *makeDoc10Array(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,"
- "w=2,a=10,"
- "w=8,a=10,"
- "w=82,a=10,"
- "w=9,a=10,"
- "w=ab,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=fluke,a=10,"
- "w=http,a=10,"
- "w=www,a=10,"
- "f=1,"
- "w=http,a=10,"
- "f=2,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=www,a=10,"
- "f=3,"
- "w=82,a=10,"
- "f=4,"
- "w=fluke,a=10,"
- "f=5,"
- "w=2,a=10,"
- "w=ab,a=10,"
- "f=6,"
- "w=8,a=10,"
- "w=9,a=10,"
- "f=7,"
- "w=EnDhOsT,a=10,"
- "w=StArThOsT,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=www,a=10",
- f._inserter.toStr());
+ invertDocument(10, *makeDoc10Single(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,"
+ "w=2,a=10,"
+ "w=4,a=10,"
+ "w=81,a=10,"
+ "w=ab,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=fluke,a=10,"
+ "w=http,a=10,"
+ "w=www,a=10,"
+ "f=1,"
+ "w=http,a=10,"
+ "f=2,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=www,a=10,"
+ "f=3,"
+ "w=81,a=10,"
+ "f=4,"
+ "w=fluke,a=10,"
+ "f=5,"
+ "w=2,a=10,"
+ "w=ab,a=10,"
+ "f=6,"
+ "w=4,a=10,"
+ "f=7,"
+ "w=EnDhOsT,a=10,"
+ "w=StArThOsT,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=www,a=10",
+ _inserter.toStr());
}
-TEST_F("requireThatWeightedSetFieldWorks", Fixture(CollectionType::WEIGHTEDSET))
+TEST_F(ArrayInverterTest, require_that_array_url_field_works)
{
- f.invertDocument(10, *makeDoc10WeightedSet(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,"
- "w=12,a=10,"
- "w=13,a=10,"
- "w=2,a=10,"
- "w=83,a=10,"
- "w=85,a=10,"
- "w=ab,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=fluke,a=10,"
- "w=http,a=10,"
- "w=www,a=10,"
- "f=1,"
- "w=http,a=10,"
- "f=2,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=www,a=10,"
- "f=3,"
- "w=83,a=10,"
- "w=85,a=10,"
- "f=4,"
- "w=fluke,a=10,"
- "f=5,"
- "w=2,a=10,"
- "w=ab,a=10,"
- "f=6,"
- "w=12,a=10,"
- "w=13,a=10,"
- "f=7,"
- "w=EnDhOsT,a=10,"
- "w=StArThOsT,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=www,a=10",
- f._inserter.toStr());
+ invertDocument(10, *makeDoc10Array(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,"
+ "w=2,a=10,"
+ "w=8,a=10,"
+ "w=82,a=10,"
+ "w=9,a=10,"
+ "w=ab,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=fluke,a=10,"
+ "w=http,a=10,"
+ "w=www,a=10,"
+ "f=1,"
+ "w=http,a=10,"
+ "f=2,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=www,a=10,"
+ "f=3,"
+ "w=82,a=10,"
+ "f=4,"
+ "w=fluke,a=10,"
+ "f=5,"
+ "w=2,a=10,"
+ "w=ab,a=10,"
+ "f=6,"
+ "w=8,a=10,"
+ "w=9,a=10,"
+ "f=7,"
+ "w=EnDhOsT,a=10,"
+ "w=StArThOsT,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=www,a=10",
+ _inserter.toStr());
}
-TEST_F("requireThatAnnotatedSingleUrlFieldWorks", Fixture(CollectionType::SINGLE))
+TEST_F(WeightedSetInverterTest, require_that_weighted_set_field_works)
{
- f.enableAnnotations();
- f.invertDocument(10, *makeDoc10Single(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,"
- "w=2,a=10,"
- "w=4,a=10,"
- "w=81,a=10,"
- "w=ab,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=fluke,a=10,"
- "w=http,a=10,"
- "w=www,a=10,"
- "f=1,"
- "w=http,a=10,"
- "f=2,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=www,a=10,"
- "f=3,"
- "w=81,a=10,"
- "f=4,"
- "w=altfluke,a=10,"
- "w=fluke,a=10,"
- "f=5,"
- "w=2,a=10,"
- "w=ab,a=10,"
- "f=6,"
- "w=4,a=10,"
- "f=7,"
- "w=EnDhOsT,a=10,"
- "w=StArThOsT,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=www,a=10",
- f._inserter.toStr());
+ invertDocument(10, *makeDoc10WeightedSet(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,"
+ "w=12,a=10,"
+ "w=13,a=10,"
+ "w=2,a=10,"
+ "w=83,a=10,"
+ "w=85,a=10,"
+ "w=ab,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=fluke,a=10,"
+ "w=http,a=10,"
+ "w=www,a=10,"
+ "f=1,"
+ "w=http,a=10,"
+ "f=2,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=www,a=10,"
+ "f=3,"
+ "w=83,a=10,"
+ "w=85,a=10,"
+ "f=4,"
+ "w=fluke,a=10,"
+ "f=5,"
+ "w=2,a=10,"
+ "w=ab,a=10,"
+ "f=6,"
+ "w=12,a=10,"
+ "w=13,a=10,"
+ "f=7,"
+ "w=EnDhOsT,a=10,"
+ "w=StArThOsT,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=www,a=10",
+ _inserter.toStr());
}
-
-TEST_F("requireThatAnnotatedArrayUrlFieldWorks", Fixture(CollectionType::ARRAY))
+TEST_F(SingleInverterTest, require_that_annotated_single_url_field_works)
{
- f.enableAnnotations();
- f.invertDocument(10, *makeDoc10Array(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,"
- "w=2,a=10,"
- "w=8,a=10,"
- "w=82,a=10,"
- "w=9,a=10,"
- "w=ab,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=fluke,a=10,"
- "w=http,a=10,"
- "w=www,a=10,"
- "f=1,"
- "w=http,a=10,"
- "f=2,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=www,a=10,"
- "f=3,"
- "w=82,a=10,"
- "f=4,"
- "w=altfluke,a=10,"
- "w=fluke,a=10,"
- "f=5,"
- "w=2,a=10,"
- "w=ab,a=10,"
- "f=6,"
- "w=8,a=10,"
- "w=9,a=10,"
- "f=7,"
- "w=EnDhOsT,a=10,"
- "w=StArThOsT,a=10,"
- "w=com,a=10,"
- "w=example,a=10,"
- "w=flickr,a=10,"
- "w=www,a=10",
- f._inserter.toStr());
+ enableAnnotations();
+ invertDocument(10, *makeDoc10Single(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,"
+ "w=2,a=10,"
+ "w=4,a=10,"
+ "w=81,a=10,"
+ "w=ab,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=fluke,a=10,"
+ "w=http,a=10,"
+ "w=www,a=10,"
+ "f=1,"
+ "w=http,a=10,"
+ "f=2,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=www,a=10,"
+ "f=3,"
+ "w=81,a=10,"
+ "f=4,"
+ "w=altfluke,a=10,"
+ "w=fluke,a=10,"
+ "f=5,"
+ "w=2,a=10,"
+ "w=ab,a=10,"
+ "f=6,"
+ "w=4,a=10,"
+ "f=7,"
+ "w=EnDhOsT,a=10,"
+ "w=StArThOsT,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=www,a=10",
+ _inserter.toStr());
}
-TEST_F("requireThatAnnotatedWeightedSetFieldWorks",
- Fixture(CollectionType::WEIGHTEDSET))
+TEST_F(ArrayInverterTest, require_that_annotated_array_url_field_works)
{
- f.enableAnnotations();
- f._inserter.setVerbose();
- f.invertDocument(10, *makeDoc10WeightedSet(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("f=0,"
- "w=12,a=10(e=0,w=4,l=9[8]),"
- "w=13,a=10(e=1,w=7,l=9[8]),"
- "w=2,a=10(e=0,w=4,l=9[7],e=1,w=7,l=9[7]),"
- "w=83,a=10(e=0,w=4,l=9[4]),"
- "w=85,a=10(e=1,w=7,l=9[4]),"
- "w=ab,a=10(e=0,w=4,l=9[6],e=1,w=7,l=9[6]),"
- "w=com,a=10(e=0,w=4,l=9[3],e=1,w=7,l=9[3]),"
- "w=example,a=10(e=0,w=4,l=9[2]),"
- "w=flickr,a=10(e=1,w=7,l=9[2]),"
- "w=fluke,a=10(e=0,w=4,l=9[5],e=1,w=7,l=9[5]),"
- "w=http,a=10(e=0,w=4,l=9[0],e=1,w=7,l=9[0]),"
- "w=www,a=10(e=0,w=4,l=9[1],e=1,w=7,l=9[1]),"
- "f=1,"
- "w=http,a=10(e=0,w=4,l=1[0],e=1,w=7,l=1[0]),"
- "f=2,"
- "w=com,a=10(e=0,w=4,l=3[2],e=1,w=7,l=3[2]),"
- "w=example,a=10(e=0,w=4,l=3[1]),"
- "w=flickr,a=10(e=1,w=7,l=3[1]),"
- "w=www,a=10(e=0,w=4,l=3[0],e=1,w=7,l=3[0]),"
- "f=3,"
- "w=83,a=10(e=0,w=4,l=1[0]),"
- "w=85,a=10(e=1,w=7,l=1[0]),"
- "f=4,"
- "w=altfluke,a=10(e=0,w=4,l=1[0]),"
- "w=fluke,a=10(e=0,w=4,l=1[0],e=1,w=7,l=1[0]),"
- "f=5,"
- "w=2,a=10(e=0,w=4,l=2[1],e=1,w=7,l=2[1]),"
- "w=ab,a=10(e=0,w=4,l=2[0],e=1,w=7,l=2[0]),"
- "f=6,"
- "w=12,a=10(e=0,w=4,l=1[0]),"
- "w=13,a=10(e=1,w=7,l=1[0]),"
- "f=7,"
- "w=EnDhOsT,a=10(e=0,w=4,l=5[4],e=1,w=7,l=5[4]),"
- "w=StArThOsT,a=10(e=0,w=4,l=5[0],e=1,w=7,l=5[0]),"
- "w=com,a=10(e=0,w=4,l=5[3],e=1,w=7,l=5[3]),"
- "w=example,a=10(e=0,w=4,l=5[2]),"
- "w=flickr,a=10(e=1,w=7,l=5[2]),"
- "w=www,a=10(e=0,w=4,l=5[1],e=1,w=7,l=5[1])",
- f._inserter.toStr());
+ enableAnnotations();
+ invertDocument(10, *makeDoc10Array(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,"
+ "w=2,a=10,"
+ "w=8,a=10,"
+ "w=82,a=10,"
+ "w=9,a=10,"
+ "w=ab,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=fluke,a=10,"
+ "w=http,a=10,"
+ "w=www,a=10,"
+ "f=1,"
+ "w=http,a=10,"
+ "f=2,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=www,a=10,"
+ "f=3,"
+ "w=82,a=10,"
+ "f=4,"
+ "w=altfluke,a=10,"
+ "w=fluke,a=10,"
+ "f=5,"
+ "w=2,a=10,"
+ "w=ab,a=10,"
+ "f=6,"
+ "w=8,a=10,"
+ "w=9,a=10,"
+ "f=7,"
+ "w=EnDhOsT,a=10,"
+ "w=StArThOsT,a=10,"
+ "w=com,a=10,"
+ "w=example,a=10,"
+ "w=flickr,a=10,"
+ "w=www,a=10",
+ _inserter.toStr());
}
+TEST_F(WeightedSetInverterTest, require_that_annotated_weighted_set_field_works)
+{
+ enableAnnotations();
+ _inserter.setVerbose();
+ invertDocument(10, *makeDoc10WeightedSet(_b));
+ pushDocuments();
+ EXPECT_EQ("f=0,"
+ "w=12,a=10(e=0,w=4,l=9[8]),"
+ "w=13,a=10(e=1,w=7,l=9[8]),"
+ "w=2,a=10(e=0,w=4,l=9[7],e=1,w=7,l=9[7]),"
+ "w=83,a=10(e=0,w=4,l=9[4]),"
+ "w=85,a=10(e=1,w=7,l=9[4]),"
+ "w=ab,a=10(e=0,w=4,l=9[6],e=1,w=7,l=9[6]),"
+ "w=com,a=10(e=0,w=4,l=9[3],e=1,w=7,l=9[3]),"
+ "w=example,a=10(e=0,w=4,l=9[2]),"
+ "w=flickr,a=10(e=1,w=7,l=9[2]),"
+ "w=fluke,a=10(e=0,w=4,l=9[5],e=1,w=7,l=9[5]),"
+ "w=http,a=10(e=0,w=4,l=9[0],e=1,w=7,l=9[0]),"
+ "w=www,a=10(e=0,w=4,l=9[1],e=1,w=7,l=9[1]),"
+ "f=1,"
+ "w=http,a=10(e=0,w=4,l=1[0],e=1,w=7,l=1[0]),"
+ "f=2,"
+ "w=com,a=10(e=0,w=4,l=3[2],e=1,w=7,l=3[2]),"
+ "w=example,a=10(e=0,w=4,l=3[1]),"
+ "w=flickr,a=10(e=1,w=7,l=3[1]),"
+ "w=www,a=10(e=0,w=4,l=3[0],e=1,w=7,l=3[0]),"
+ "f=3,"
+ "w=83,a=10(e=0,w=4,l=1[0]),"
+ "w=85,a=10(e=1,w=7,l=1[0]),"
+ "f=4,"
+ "w=altfluke,a=10(e=0,w=4,l=1[0]),"
+ "w=fluke,a=10(e=0,w=4,l=1[0],e=1,w=7,l=1[0]),"
+ "f=5,"
+ "w=2,a=10(e=0,w=4,l=2[1],e=1,w=7,l=2[1]),"
+ "w=ab,a=10(e=0,w=4,l=2[0],e=1,w=7,l=2[0]),"
+ "f=6,"
+ "w=12,a=10(e=0,w=4,l=1[0]),"
+ "w=13,a=10(e=1,w=7,l=1[0]),"
+ "f=7,"
+ "w=EnDhOsT,a=10(e=0,w=4,l=5[4],e=1,w=7,l=5[4]),"
+ "w=StArThOsT,a=10(e=0,w=4,l=5[0],e=1,w=7,l=5[0]),"
+ "w=com,a=10(e=0,w=4,l=5[3],e=1,w=7,l=5[3]),"
+ "w=example,a=10(e=0,w=4,l=5[2]),"
+ "w=flickr,a=10(e=1,w=7,l=5[2]),"
+ "w=www,a=10(e=0,w=4,l=5[1],e=1,w=7,l=5[1])",
+ _inserter.toStr());
+}
-TEST_F("requireThatEmptySingleFieldWorks", Fixture(CollectionType::SINGLE))
+TEST_F(SingleInverterTest, require_that_empty_single_field_works)
{
- f.invertDocument(10, *makeDoc10Empty(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("", f._inserter.toStr());
+ invertDocument(10, *makeDoc10Empty(_b));
+ pushDocuments();
+ EXPECT_EQ("", _inserter.toStr());
}
-TEST_F("requireThatEmptyArrayFieldWorks", Fixture(CollectionType::ARRAY))
+TEST_F(ArrayInverterTest, require_that_empty_array_field_works)
{
- f.invertDocument(10, *makeDoc10Empty(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("",
- f._inserter.toStr());
+ invertDocument(10, *makeDoc10Empty(_b));
+ pushDocuments();
+ EXPECT_EQ("",
+ _inserter.toStr());
}
-TEST_F("requireThatEmptyWeightedSetFieldWorks", Fixture(CollectionType::WEIGHTEDSET))
+TEST_F(WeightedSetInverterTest, require_that_empty_weighted_set_field_works)
{
- f.invertDocument(10, *makeDoc10Empty(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("", f._inserter.toStr());
+ invertDocument(10, *makeDoc10Empty(_b));
+ pushDocuments();
+ EXPECT_EQ("", _inserter.toStr());
}
-TEST_F("requireThatAnnotatedEmptySingleFieldWorks", Fixture(CollectionType::SINGLE))
+TEST_F(SingleInverterTest, require_that_annotated_empty_single_field_works)
{
- f.enableAnnotations();
- f.invertDocument(10, *makeDoc10Empty(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("", f._inserter.toStr());
+ enableAnnotations();
+ invertDocument(10, *makeDoc10Empty(_b));
+ pushDocuments();
+ EXPECT_EQ("", _inserter.toStr());
}
-TEST_F("requireThatAnnotatedEmptyArrayFieldWorks", Fixture(CollectionType::ARRAY))
+TEST_F(ArrayInverterTest, require_that_annotated_empty_array_field_works)
{
- f.enableAnnotations();
- f.invertDocument(10, *makeDoc10Empty(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("", f._inserter.toStr());
+ enableAnnotations();
+ invertDocument(10, *makeDoc10Empty(_b));
+ pushDocuments();
+ EXPECT_EQ("", _inserter.toStr());
}
-TEST_F("requireThatAnnotatedEmptyWeightedSetFieldWorks", Fixture(CollectionType::WEIGHTEDSET))
+TEST_F(WeightedSetInverterTest, require_that_annotated_empty_weighted_set_field_works)
{
- f.enableAnnotations();
- f.invertDocument(10, *makeDoc10Empty(f._b));
- f.pushDocuments();
- EXPECT_EQUAL("", f._inserter.toStr());
+ enableAnnotations();
+ invertDocument(10, *makeDoc10Empty(_b));
+ pushDocuments();
+ EXPECT_EQ("", _inserter.toStr());
}
-} // namespace memoryindex
-} // namespace search
+}
+}
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt b/searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt
index ba608467c8a..15678382741 100644
--- a/searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt
@@ -12,6 +12,7 @@ vespa_add_library(searchlib_diskindex OBJECT
extposocc.cpp
fieldreader.cpp
fieldwriter.cpp
+ field_length_scanner.cpp
fileheader.cpp
fusion.cpp
indexbuilder.cpp
diff --git a/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.cpp b/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.cpp
new file mode 100644
index 00000000000..b7aad2d1996
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.cpp
@@ -0,0 +1,51 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "field_length_scanner.h"
+#include <vespa/searchlib/index/docidandfeatures.h>
+
+namespace search::diskindex {
+
+FieldLengthScanner::FieldLengthScanner(uint32_t doc_id_limit)
+ : _field_length_vector(doc_id_limit),
+ _scanned_elements_map()
+{
+}
+
+FieldLengthScanner::~FieldLengthScanner() = default;
+
+void
+FieldLengthScanner::scan_features(const index::DocIdAndFeatures &features)
+{
+ if (features.elements().empty()) {
+ return;
+ }
+ auto &entry = _field_length_vector[features.doc_id()];
+ if (features.elements().back().getElementId() < element_id_bias) {
+ for (const auto &element : features.elements()) {
+ entry.add_element_length(element.getElementLen(), element.getElementId());
+ }
+ } else {
+ auto element = features.elements().cbegin();
+ while (element->getElementId() < element_id_bias) {
+ entry.add_element_length(element->getElementLen(), element->getElementId());
+ ++element;
+ }
+ auto &scanned_elements = _scanned_elements_map[features.doc_id()];
+ auto size_needed = features.elements().back().getElementId() + 1 - element_id_bias;
+ if (size_needed > scanned_elements.size()) {
+ if (size_needed > scanned_elements.capacity()) {
+ scanned_elements.reserve(std::max(size_needed + (size_needed / 4), 32u));
+ }
+ scanned_elements.resize(size_needed);
+ }
+ while (element != features.elements().cend()) {
+ if (!scanned_elements[element->getElementId() - element_id_bias]) {
+ scanned_elements[element->getElementId() - element_id_bias] = true;
+ entry.add_element_length(element->getElementLen());
+ }
+ ++element;
+ }
+ }
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.h b/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.h
new file mode 100644
index 00000000000..e282a85b64f
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/diskindex/field_length_scanner.h
@@ -0,0 +1,63 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vector>
+#include <unordered_map>
+#include <limits>
+
+namespace search::index { class DocIdAndFeatures; }
+
+namespace search::diskindex {
+
+/*
+ * Class used to reconstruct field lengths based on element lengths in
+ * posting list file.
+ */
+class FieldLengthScanner {
+ class FieldLengthEntry {
+ uint16_t _field_length;
+ uint16_t _elements; // first 16 elements
+
+ static uint16_t make_element_mask(uint32_t element_id) { return (1u << element_id); }
+
+ public:
+ FieldLengthEntry()
+ : _field_length(0),
+ _elements(0)
+ {
+ }
+
+ void add_element_length(uint32_t element_length) {
+ // Cap field length
+ if (element_length < std::numeric_limits<uint16_t>::max()) {
+ uint32_t field_length32 = _field_length + element_length;
+ _field_length = std::min(field_length32, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max()));
+ } else {
+ _field_length = std::numeric_limits<uint16_t>::max();
+ }
+ }
+
+ void add_element_length(uint32_t element_length, uint32_t element_id) {
+ uint16_t element_mask = make_element_mask(element_id);
+ if (!(_elements & element_mask)) {
+ _elements |= element_mask;
+ add_element_length(element_length);
+ }
+ }
+
+ uint16_t get_field_length() const { return _field_length; }
+ };
+ std::vector<FieldLengthEntry> _field_length_vector;
+ static constexpr uint32_t element_id_bias = 16;
+ // bit vectors for element >= element_id_bias
+ std::unordered_map<uint32_t, std::vector<bool>> _scanned_elements_map;
+
+public:
+ FieldLengthScanner(uint32_t doc_id_limit);
+ ~FieldLengthScanner();
+ void scan_features(const index::DocIdAndFeatures &features);
+ uint16_t get_field_length(uint32_t doc_id) const { return _field_length_vector[doc_id].get_field_length(); }
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp b/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
index d3696e2f31c..ab77a6bfd08 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
@@ -4,6 +4,7 @@
#include "zcposocc.h"
#include "extposocc.h"
#include "pagedict4file.h"
+#include "field_length_scanner.h"
#include <vespa/vespalib/util/error.h>
#include <vespa/log/log.h>
@@ -14,6 +15,9 @@ LOG_SETUP(".diskindex.fieldreader");
namespace {
vespalib::string PosOccIdCooked = "PosOcc.3.Cooked";
+vespalib::string interleaved_features("interleaved_features");
+
+uint16_t cap_u16(uint32_t val) { return std::min(val, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max())); }
}
@@ -188,17 +192,22 @@ FieldReader::get_field_length_info() const
std::unique_ptr<FieldReader>
FieldReader::allocFieldReader(const SchemaUtil::IndexIterator &index,
- const Schema &oldSchema)
+ const Schema &oldSchema,
+ std::shared_ptr<FieldLengthScanner> field_length_scanner)
{
assert(index.isValid());
if (index.hasMatchingOldFields(oldSchema)) {
- return std::make_unique<FieldReader>(); // The common case
+ if (!index.use_interleaved_features() ||
+ index.has_matching_use_interleaved_features(oldSchema)) {
+ return std::make_unique<FieldReader>(); // The common case
+ }
}
if (!index.hasOldFields(oldSchema)) {
return std::make_unique<FieldReaderEmpty>(index); // drop data
}
// field exists in old schema with different collection type setting
- return std::make_unique<FieldReaderStripInfo>(index); // degraded
+ // or old field is missing wanted interleaved features.
+ return std::make_unique<FieldReaderStripInfo>(index, field_length_scanner); // degraded
}
@@ -228,9 +237,12 @@ FieldReaderEmpty::getFeatureParams(PostingListParams &params)
}
-FieldReaderStripInfo::FieldReaderStripInfo(const IndexIterator &index)
+FieldReaderStripInfo::FieldReaderStripInfo(const IndexIterator &index, std::shared_ptr<FieldLengthScanner> field_length_scanner)
: _hasElements(false),
- _hasElementWeights(false)
+ _hasElementWeights(false),
+ _want_interleaved_features(index.use_interleaved_features()),
+ _regenerate_interleaved_features(false),
+ _field_length_scanner(std::move(field_length_scanner))
{
PosOccFieldsParams fieldsParams;
fieldsParams.setSchemaParams(index.getSchema(), index.getIndex());
@@ -247,6 +259,47 @@ FieldReaderStripInfo::allowRawFeatures()
return false;
}
+bool
+FieldReaderStripInfo::open(const vespalib::string &prefix, const TuneFileSeqRead &tuneFileRead)
+{
+ if (!FieldReader::open(prefix, tuneFileRead)) {
+ return false;
+ }
+ if (_want_interleaved_features) {
+ PostingListParams params;
+ bool decode_interleaved_features = false;
+ _oldposoccfile->getParams(params);
+ params.get(interleaved_features, decode_interleaved_features);
+ if (!decode_interleaved_features) {
+ _regenerate_interleaved_features = true;
+ }
+ if (!_hasElements) {
+ _regenerate_interleaved_features = true;
+ }
+ }
+ if (_regenerate_interleaved_features && _hasElements && _field_length_scanner) {
+ scan_element_lengths();
+ close();
+ if (!FieldReader::open(prefix, tuneFileRead)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void
+FieldReaderStripInfo::scan_element_lengths()
+{
+ for (;;) {
+ FieldReader::read();
+ if (_wordNum == noWordNumHigh()) {
+ break;
+ }
+ DocIdAndFeatures &features = _docIdAndFeatures;
+ assert(!features.has_raw_data());
+ _field_length_scanner->scan_features(features);
+ }
+}
void
FieldReaderStripInfo::read()
@@ -283,6 +336,22 @@ FieldReaderStripInfo::read()
}
break;
}
+ if (_regenerate_interleaved_features) {
+ // Regenerate interleaved features from normal features.
+ uint32_t field_length = 0;
+ uint32_t num_occs = 0;
+ DocIdAndFeatures &features = _docIdAndFeatures;
+ for (const auto &element : features.elements()) {
+ field_length += element.getElementLen();
+ num_occs += element.getNumOccs();
+ }
+ if (_hasElements && _field_length_scanner) {
+ field_length = _field_length_scanner->get_field_length(features.doc_id());
+ }
+ // cap interleaved features to 16 bits each, to match memory index
+ features.set_field_length(cap_u16(field_length));
+ features.set_num_occs(cap_u16(num_occs));
+ }
}
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldreader.h b/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
index ee237f5cc69..899b3708bf9 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
@@ -13,6 +13,8 @@
namespace search::diskindex {
+class FieldLengthScanner;
+
/*
* FieldReader is used to read a dictionary and posting list file
* together, and get a sequential view of the stored data.
@@ -95,7 +97,7 @@ public:
uint32_t getDocIdLimit() const { return _docIdLimit; }
const index::FieldLengthInfo &get_field_length_info() const;
- static std::unique_ptr<FieldReader> allocFieldReader(const IndexIterator &index, const Schema &oldSchema);
+ static std::unique_ptr<FieldReader> allocFieldReader(const IndexIterator &index, const Schema &oldSchema, std::shared_ptr<FieldLengthScanner> field_length_scanner);
};
@@ -117,16 +119,22 @@ public:
/*
* Field reader that strips information from source, e.g. remove
* weights or discard nonzero elements, due to collection type change.
+ * It is also used to regenerate interleaved features from normal features.
*/
class FieldReaderStripInfo : public FieldReader
{
private:
bool _hasElements;
bool _hasElementWeights;
+ bool _want_interleaved_features;
+ bool _regenerate_interleaved_features;
+ std::shared_ptr<FieldLengthScanner> _field_length_scanner;
public:
- FieldReaderStripInfo(const IndexIterator &index);
+ FieldReaderStripInfo(const IndexIterator &index, std::shared_ptr<FieldLengthScanner>);
bool allowRawFeatures() override;
+ bool open(const vespalib::string &prefix, const TuneFileSeqRead &tuneFileRead) override;
void read() override;
+ void scan_element_lengths();
void getFeatureParams(PostingListParams &params) override;
};
diff --git a/searchlib/src/vespa/searchlib/diskindex/fusion.cpp b/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
index 1ace5969b6b..0ad178d14b3 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
@@ -3,7 +3,9 @@
#include "fusion.h"
#include "fieldreader.h"
#include "dictionarywordreader.h"
+#include "field_length_scanner.h"
#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/searchlib/bitcompression/posocc_fields_params.h>
#include <vespa/searchlib/index/field_length_info.h>
#include <vespa/searchlib/util/filekit.h>
#include <vespa/searchlib/util/dirtraverse.h>
@@ -28,6 +30,8 @@ using search::diskindex::DocIdMapping;
using search::diskindex::WordNumMapping;
using search::docsummary::DocumentSummary;
using search::index::FieldLengthInfo;
+using search::bitcompression::PosOccFieldParams;
+using search::bitcompression::PosOccFieldsParams;
using search::index::PostingListParams;
using search::index::Schema;
using search::index::SchemaUtil;
@@ -304,17 +308,39 @@ Fusion::selectCookedOrRawFeatures(Reader &reader, Writer &writer)
}
+std::shared_ptr<FieldLengthScanner>
+Fusion::allocate_field_length_scanner(const SchemaUtil::IndexIterator &index)
+{
+ if (index.use_interleaved_features()) {
+ PosOccFieldsParams fieldsParams;
+ fieldsParams.setSchemaParams(index.getSchema(), index.getIndex());
+ assert(fieldsParams.getNumFields() > 0);
+ const PosOccFieldParams &fieldParams = fieldsParams.getFieldParams()[0];
+ if (fieldParams._hasElements) {
+ for (const auto &old_index : _oldIndexes) {
+ const Schema &old_schema = old_index.getSchema();
+ if (index.hasOldFields(old_schema) &&
+ !index.has_matching_use_interleaved_features(old_schema)) {
+ return std::make_shared<FieldLengthScanner>(_docIdLimit);
+ }
+ }
+ }
+ }
+ return std::shared_ptr<FieldLengthScanner>();
+}
+
bool
Fusion::openInputFieldReaders(const SchemaUtil::IndexIterator &index, const WordNumMappingList & list,
std::vector<std::unique_ptr<FieldReader> > & readers)
{
+ auto field_length_scanner = allocate_field_length_scanner(index);
vespalib::string indexName = index.getName();
for (const auto &oi : _oldIndexes) {
const Schema &oldSchema = oi.getSchema();
if (!index.hasOldFields(oldSchema)) {
continue; // drop data
}
- auto reader = FieldReader::allocFieldReader(index, oldSchema);
+ auto reader = FieldReader::allocFieldReader(index, oldSchema, field_length_scanner);
reader->setup(list[oi.getIndex()], oi.getDocIdMapping());
if (!reader->open(oi.getPath() + "/" + indexName + "/", _tuneFileIndexing._read)) {
return false;
@@ -331,7 +357,7 @@ Fusion::openFieldWriter(const SchemaUtil::IndexIterator &index, FieldWriter &wri
vespalib::string dir = _outDir + "/" + index.getName();
if (!writer.open(dir + "/", 64, 262144, _dynamicKPosIndexFormat,
- index.use_experimental_posting_list_format(), index.getSchema(),
+ index.use_interleaved_features(), index.getSchema(),
index.getIndex(),
field_length_info,
_tuneFileIndexing._write, _fileHeaderContext)) {
diff --git a/searchlib/src/vespa/searchlib/diskindex/fusion.h b/searchlib/src/vespa/searchlib/diskindex/fusion.h
index 28060a9c4be..d532384f6e9 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fusion.h
+++ b/searchlib/src/vespa/searchlib/diskindex/fusion.h
@@ -15,6 +15,7 @@ namespace search::index { class FieldLengthInfo; }
namespace search::diskindex {
+class FieldLengthScanner;
class FieldReader;
class FieldWriter;
class DictionaryWordReader;
@@ -49,6 +50,7 @@ private:
bool mergeFields(vespalib::ThreadExecutor & executor);
bool mergeField(uint32_t id);
+ std::shared_ptr<FieldLengthScanner> allocate_field_length_scanner(const SchemaUtil::IndexIterator &index);
bool openInputFieldReaders(const SchemaUtil::IndexIterator &index, const WordNumMappingList & list,
std::vector<std::unique_ptr<FieldReader> > & readers);
bool openFieldWriter(const SchemaUtil::IndexIterator &index, FieldWriter & writer, const index::FieldLengthInfo &field_length_info);
diff --git a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
index c2e311f18a6..0b43bcf6b8c 100644
--- a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
@@ -105,7 +105,7 @@ FileHandle::open(vespalib::stringref dir,
_fieldWriter = std::make_shared<FieldWriter>(docIdLimit, numWordIds);
if (!_fieldWriter->open(dir + "/", 64, 262144u, false,
- index.use_experimental_posting_list_format(),
+ index.use_interleaved_features(),
index.getSchema(), index.getIndex(),
field_length_info,
tuneFileWrite, fileHeaderContext)) {
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
index 3f154c44cb9..b8a0813e33c 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
@@ -123,6 +123,7 @@ Zc4PostingSeqRead::getParams(PostingListParams &params)
params.set("minChunkDocs", _reader.get_posting_params()._min_chunk_docs);
}
params.set("minSkipDocs", _reader.get_posting_params()._min_skip_docs);
+ params.set(interleaved_features, _reader.get_posting_params()._encode_interleaved_features);
}
@@ -357,6 +358,7 @@ getParams(PostingListParams &params)
params.set("minChunkDocs", _writer.get_min_chunk_docs());
}
params.set("minSkipDocs", _writer.get_min_skip_docs());
+ params.set(interleaved_features, _writer.get_encode_interleaved_features());
}
diff --git a/searchlib/src/vespa/searchlib/index/schemautil.cpp b/searchlib/src/vespa/searchlib/index/schemautil.cpp
index 7f3b7c8c2a9..1fce4a1fe99 100644
--- a/searchlib/src/vespa/searchlib/index/schemautil.cpp
+++ b/searchlib/src/vespa/searchlib/index/schemautil.cpp
@@ -69,6 +69,20 @@ SchemaUtil::IndexIterator::hasMatchingOldFields(const Schema &oldSchema) const
}
bool
+SchemaUtil::IndexIterator::has_matching_use_interleaved_features(const Schema &oldSchema) const
+{
+ assert(isValid());
+ const Schema::IndexField &newField = getSchema().getIndexField(getIndex());
+ const vespalib::string &fieldName = newField.getName();
+ uint32_t oldFieldId = oldSchema.getIndexFieldId(fieldName);
+ if (oldFieldId == Schema::UNKNOWN_FIELD_ID) {
+ return false;
+ }
+ const Schema::IndexField &oldField = oldSchema.getIndexField(oldFieldId);
+ return (oldField.use_interleaved_features() == newField.use_interleaved_features());
+}
+
+bool
SchemaUtil::validateIndexField(const Schema::IndexField &field)
{
bool ok = true;
diff --git a/searchlib/src/vespa/searchlib/index/schemautil.h b/searchlib/src/vespa/searchlib/index/schemautil.h
index 69b79ecfedd..2e77aa67ad4 100644
--- a/searchlib/src/vespa/searchlib/index/schemautil.h
+++ b/searchlib/src/vespa/searchlib/index/schemautil.h
@@ -83,8 +83,8 @@ public:
return _schema.getIndexField(_index).getName();
}
- bool use_experimental_posting_list_format() const {
- return _schema.getIndexField(_index).use_experimental_posting_list_format();
+ bool use_interleaved_features() const {
+ return _schema.getIndexField(_index).use_interleaved_features();
}
IndexIterator &operator++() {
@@ -119,6 +119,8 @@ public:
* @param oldSchema old schema, present in an input index
*/
bool hasMatchingOldFields(const Schema &oldSchema) const;
+
+ bool has_matching_use_interleaved_features(const Schema &oldSchema) const;
};
static IndexSettings getIndexSettings(const Schema &schema, const uint32_t index);
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
index a5dc921cfdf..352fd860642 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp
@@ -32,20 +32,6 @@ using vespalib::GenerationHandler;
namespace search::memoryindex {
-namespace {
-
-void set_interleaved_features(DocIdAndFeatures &features)
-{
- // Set cheap features based on normal features.
- // TODO: Update when proper cheap features are present in memory index.
- assert(!features.elements().empty());
- const auto &element = features.elements().front();
- features.set_field_length(element.getElementLen());
- features.set_num_occs(element.getNumOccs());
-}
-
-}
-
using datastore::EntryRef;
template <bool interleaved_features>
@@ -194,12 +180,12 @@ FieldIndex<interleaved_features>::dump(search::index::IndexBuilder & indexBuilde
auto pitr = tree->begin(_postingListStore.getAllocator());
assert(pitr.valid());
for (; pitr.valid(); ++pitr) {
- uint32_t docId = pitr.getKey();
- EntryRef featureRef(pitr.getData().get_features());
- _featureStore.setupForReadFeatures(featureRef, decoder);
+ features.set_doc_id(pitr.getKey());
+ const PostingListEntryType &entry(pitr.getData());
+ features.set_num_occs(entry.get_num_occs());
+ features.set_field_length(entry.get_field_length());
+ _featureStore.setupForReadFeatures(entry.get_features(), decoder);
decoder.readFeatures(features);
- features.set_doc_id(docId);
- set_interleaved_features(features);
indexBuilder.add_document(features);
}
} else {
@@ -207,12 +193,12 @@ FieldIndex<interleaved_features>::dump(search::index::IndexBuilder & indexBuilde
_postingListStore.getKeyDataEntry(plist, clusterSize);
const PostingListKeyDataType *kde = kd + clusterSize;
for (; kd != kde; ++kd) {
- uint32_t docId = kd->_key;
- EntryRef featureRef(kd->getData().get_features());
- _featureStore.setupForReadFeatures(featureRef, decoder);
+ features.set_doc_id(kd->_key);
+ const PostingListEntryType &entry(kd->getData());
+ features.set_num_occs(entry.get_num_occs());
+ features.set_field_length(entry.get_field_length());
+ _featureStore.setupForReadFeatures(entry.get_features(), decoder);
decoder.readFeatures(features);
- features.set_doc_id(docId);
- set_interleaved_features(features);
indexBuilder.add_document(features);
}
}
@@ -233,6 +219,16 @@ FieldIndex<interleaved_features>::getMemoryUsage() const
return usage;
}
+template <bool interleaved_features>
+queryeval::SearchIterator::UP
+FieldIndex<interleaved_features>::make_search_iterator(const vespalib::string& term,
+ uint32_t field_id,
+ const fef::TermFieldMatchDataArray& match_data) const
+{
+ return search::memoryindex::make_search_iterator<interleaved_features>
+ (find(term), getFeatureStore(), field_id, match_data);
+}
+
namespace {
template <bool interleaved_features>
@@ -266,8 +262,7 @@ public:
}
SearchIterator::UP createLeafSearch(const TermFieldMatchDataArray& tfmda, bool) const override {
- using PostingIteratorType = PostingIterator<interleaved_features>;
- auto result = std::make_unique<PostingIteratorType>(_posting_itr, _feature_store, _field_id, tfmda);
+ auto result = make_search_iterator<interleaved_features>(_posting_itr, _feature_store, _field_id, tfmda);
if (_use_bit_vector) {
LOG(debug, "Return BooleanMatchIteratorWrapper: field_id(%u), doc_count(%zu)",
_field_id, _posting_itr.size());
@@ -294,8 +289,8 @@ FieldIndex<interleaved_features>::make_term_blueprint(const vespalib::string& te
(std::move(guard), posting_itr, getFeatureStore(), field, field_id, use_bit_vector);
}
-template
-class FieldIndex<false>;
+template class FieldIndex<false>;
+template class FieldIndex<true>;
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.h b/searchlib/src/vespa/searchlib/memoryindex/field_index.h
index 05665945800..cb60b4a78d9 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.h
@@ -5,6 +5,7 @@
#include "field_index_base.h"
#include "posting_list_entry.h"
#include <vespa/searchlib/index/indexbuilder.h>
+#include <vespa/searchlib/queryeval/searchiterator.h>
#include <vespa/vespalib/btree/btree.h>
#include <vespa/vespalib/btree/btreenodeallocator.h>
#include <vespa/vespalib/btree/btreeroot.h>
@@ -32,6 +33,8 @@ class IOrderedFieldIndexInserter;
template <bool interleaved_features>
class FieldIndex : public FieldIndexBase {
public:
+ static constexpr bool has_interleaved_features = interleaved_features;
+
// Mapping from docid -> feature ref
using PostingListEntryType = PostingListEntry<interleaved_features>;
using PostingList = btree::BTreeRoot<uint32_t, PostingListEntryType, search::btree::NoAggregated>;
@@ -92,6 +95,13 @@ public:
trimHoldLists();
}
+ /**
+ * Should only by used by unit tests.
+ */
+ queryeval::SearchIterator::UP make_search_iterator(const vespalib::string& term,
+ uint32_t field_id,
+ const fef::TermFieldMatchDataArray& match_data) const;
+
std::unique_ptr<queryeval::SimpleLeafBlueprint> make_term_blueprint(const vespalib::string& term,
const queryeval::FieldSpecBase& field,
uint32_t field_id) override;
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_base.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.cpp
index ee1fee3d935..7bf20151b11 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index_base.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.cpp
@@ -32,5 +32,7 @@ FieldIndexBase::FieldIndexBase(const index::Schema& schema, uint32_t fieldId,
{
}
+FieldIndexBase::~FieldIndexBase() = default;
+
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h
index 7efec1f2ae8..9c6bf823023 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h
@@ -99,6 +99,7 @@ public:
FieldIndexBase(const index::Schema& schema, uint32_t fieldId);
FieldIndexBase(const index::Schema& schema, uint32_t fieldId, const index::FieldLengthInfo& info);
+ ~FieldIndexBase();
uint64_t getNumUniqueWords() const override { return _numUniqueWords; }
const FeatureStore& getFeatureStore() const override { return _featureStore; }
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp
index dc7d35a755d..fcbe406e9ac 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp
@@ -34,8 +34,13 @@ FieldIndexCollection::FieldIndexCollection(const Schema& schema, const IFieldLen
{
for (uint32_t fieldId = 0; fieldId < _numFields; ++fieldId) {
const auto& field = schema.getIndexField(fieldId);
- auto fieldIndex = std::make_unique<FieldIndex<false>>(schema, fieldId, inspector.get_field_length_info(field.getName()));
- _fieldIndexes.push_back(std::move(fieldIndex));
+ if (field.use_interleaved_features()) {
+ _fieldIndexes.push_back(std::make_unique<FieldIndex<true>>(schema, fieldId,
+ inspector.get_field_length_info(field.getName())));
+ } else {
+ _fieldIndexes.push_back(std::make_unique<FieldIndex<false>>(schema, fieldId,
+ inspector.get_field_length_info(field.getName())));
+ }
}
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp b/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp
index c6524a2fc64..4eda7cf48bd 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp
@@ -25,6 +25,8 @@ namespace {
const vespalib::string emptyWord = "";
+uint16_t cap_u16(uint32_t val) { return std::min(val, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max())); }
+
}
template <bool interleaved_features>
@@ -118,7 +120,9 @@ OrderedFieldIndexInserter<interleaved_features>::add(uint32_t docId,
assert(_prevDocId == noDocId || _prevDocId < docId ||
(_prevDocId == docId && !_prevAdd));
datastore::EntryRef featureRef = _fieldIndex.addFeatures(features);
- _adds.push_back(PostingListKeyDataType(docId, PostingListEntryType(featureRef)));
+ _adds.push_back(PostingListKeyDataType(docId, PostingListEntryType(featureRef,
+ cap_u16(features.num_occs()),
+ cap_u16(features.field_length()))));
_listener.insert(_dItr.getKey()._wordRef, docId);
_prevDocId = docId;
_prevAdd = true;
@@ -153,7 +157,7 @@ OrderedFieldIndexInserter<interleaved_features>::getWordRef() const
return _dItr.getKey()._wordRef;
}
-template
-class OrderedFieldIndexInserter<false>;
+template class OrderedFieldIndexInserter<false>;
+template class OrderedFieldIndexInserter<true>;
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp
index 0e84c2b7968..735fec2bb5f 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "posting_iterator.h"
+#include <vespa/searchlib/queryeval/iterators.h>
#include <vespa/vespalib/btree/btreeiterator.hpp>
#include <vespa/vespalib/btree/btreenode.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
@@ -13,25 +14,51 @@ LOG_SETUP(".searchlib.memoryindex.posting_iterator");
namespace search::memoryindex {
+/**
+ * Base search iterator over memory field index posting list.
+ *
+ * The template parameter specifies whether the wrapped posting list has interleaved features or not.
+ */
template <bool interleaved_features>
-PostingIterator<interleaved_features>::PostingIterator(PostingListIteratorType itr,
- const FeatureStore& featureStore,
- uint32_t packedIndex,
- const fef::TermFieldMatchDataArray& matchData) :
- queryeval::RankedSearchIteratorBase(matchData),
+class PostingIteratorBase : public queryeval::RankedSearchIteratorBase {
+protected:
+ using FieldIndexType = FieldIndex<interleaved_features>;
+ using PostingListIteratorType = typename FieldIndexType::PostingList::ConstIterator;
+ PostingListIteratorType _itr;
+ const FeatureStore& _feature_store;
+ FeatureStore::DecodeContextCooked _feature_decoder;
+
+public:
+ PostingIteratorBase(PostingListIteratorType itr,
+ const FeatureStore& feature_store,
+ uint32_t field_id,
+ const fef::TermFieldMatchDataArray& match_data);
+ ~PostingIteratorBase();
+
+ void doSeek(uint32_t docId) override;
+ void initRange(uint32_t begin, uint32_t end) override;
+ Trinary is_strict() const override { return Trinary::True; }
+};
+
+template <bool interleaved_features>
+PostingIteratorBase<interleaved_features>::PostingIteratorBase(PostingListIteratorType itr,
+ const FeatureStore& feature_store,
+ uint32_t field_id,
+ const fef::TermFieldMatchDataArray& match_data) :
+ queryeval::RankedSearchIteratorBase(match_data),
_itr(itr),
- _featureStore(featureStore),
- _featureDecoder(nullptr)
+ _feature_store(feature_store),
+ _feature_decoder(nullptr)
{
- _featureStore.setupForField(packedIndex, _featureDecoder);
+ _feature_store.setupForField(field_id, _feature_decoder);
}
template <bool interleaved_features>
-PostingIterator<interleaved_features>::~PostingIterator() = default;
+PostingIteratorBase<interleaved_features>::~PostingIteratorBase() = default;
template <bool interleaved_features>
void
-PostingIterator<interleaved_features>::initRange(uint32_t begin, uint32_t end)
+PostingIteratorBase<interleaved_features>::initRange(uint32_t begin, uint32_t end)
{
SearchIterator::initRange(begin, end);
_itr.lower_bound(begin);
@@ -45,7 +72,7 @@ PostingIterator<interleaved_features>::initRange(uint32_t begin, uint32_t end)
template <bool interleaved_features>
void
-PostingIterator<interleaved_features>::doSeek(uint32_t docId)
+PostingIteratorBase<interleaved_features>::doSeek(uint32_t docId)
{
if (getUnpacked()) {
clearUnpacked();
@@ -58,9 +85,34 @@ PostingIterator<interleaved_features>::doSeek(uint32_t docId)
}
}
-template <bool interleaved_features>
+/**
+ * Search iterator over memory field index posting list.
+ *
+ * Template parameters:
+ * - interleaved_features: specifies whether the wrapped posting list has interleaved features or not.
+ * - unpack_normal_features: specifies whether to unpack normal features or not.
+ * - unpack_interleaved_features: specifies whether to unpack interleaved features or not.
+ */
+template <bool interleaved_features, bool unpack_normal_features, bool unpack_interleaved_features>
+class PostingIterator : public PostingIteratorBase<interleaved_features> {
+public:
+ using ParentType = PostingIteratorBase<interleaved_features>;
+
+ using ParentType::ParentType;
+ using ParentType::_feature_decoder;
+ using ParentType::_feature_store;
+ using ParentType::_itr;
+ using ParentType::_matchData;
+ using ParentType::getDocId;
+ using ParentType::getUnpacked;
+ using ParentType::setUnpacked;
+
+ void doUnpack(uint32_t docId) override;
+};
+
+template <bool interleaved_features, bool unpack_normal_features, bool unpack_interleaved_features>
void
-PostingIterator<interleaved_features>::doUnpack(uint32_t docId)
+PostingIterator<interleaved_features, unpack_normal_features, unpack_interleaved_features>::doUnpack(uint32_t docId)
{
if (!_matchData.valid() || getUnpacked()) {
return;
@@ -68,70 +120,75 @@ PostingIterator<interleaved_features>::doUnpack(uint32_t docId)
assert(docId == getDocId());
assert(_itr.valid());
assert(docId == _itr.getKey());
- datastore::EntryRef featureRef(_itr.getData().get_features());
- _featureStore.setupForUnpackFeatures(featureRef, _featureDecoder);
- _featureDecoder.unpackFeatures(_matchData, docId);
+ if (unpack_normal_features) {
+ datastore::EntryRef featureRef(_itr.getData().get_features());
+ _feature_store.setupForUnpackFeatures(featureRef, _feature_decoder);
+ _feature_decoder.unpackFeatures(_matchData, docId);
+ } else {
+ _matchData[0]->reset(docId);
+ }
+ if (interleaved_features && unpack_interleaved_features) {
+ auto* tfmd = _matchData[0];
+ tfmd->setNumOccs(_itr.getData().get_num_occs());
+ tfmd->setFieldLength(_itr.getData().get_field_length());
+ }
setUnpacked();
}
+template <bool interleaved_features>
+queryeval::SearchIterator::UP
+make_search_iterator(typename FieldIndex<interleaved_features>::PostingList::ConstIterator itr,
+ const FeatureStore& feature_store,
+ uint32_t field_id,
+ const fef::TermFieldMatchDataArray& match_data)
+{
+ assert(match_data.size() == 1);
+ auto* tfmd = match_data[0];
+ if (tfmd->needs_normal_features()) {
+ if (tfmd->needs_interleaved_features()) {
+ return std::make_unique<PostingIterator<interleaved_features, true, true>>
+ (itr, feature_store, field_id, match_data);
+ } else {
+ return std::make_unique<PostingIterator<interleaved_features, true, false>>
+ (itr, feature_store, field_id, match_data);
+ }
+ } else {
+ if (tfmd->needs_interleaved_features()) {
+ return std::make_unique<PostingIterator<interleaved_features, false, true>>
+ (itr, feature_store, field_id, match_data);
+ } else {
+ return std::make_unique<PostingIterator<interleaved_features, false, false>>
+ (itr, feature_store, field_id, match_data);
+ }
+ }
+}
+
template
-class PostingIterator<false>;
+queryeval::SearchIterator::UP
+make_search_iterator<false>(typename FieldIndex<false>::PostingList::ConstIterator,
+ const FeatureStore&,
+ uint32_t,
+ const fef::TermFieldMatchDataArray&);
+
+template
+queryeval::SearchIterator::UP
+make_search_iterator<true>(typename FieldIndex<true>::PostingList::ConstIterator,
+ const FeatureStore&,
+ uint32_t,
+ const fef::TermFieldMatchDataArray&);
+
+template class PostingIteratorBase<false>;
+template class PostingIteratorBase<true>;
+
+template class PostingIterator<false, false, false>;
+template class PostingIterator<false, false, true>;
+template class PostingIterator<false, true, false>;
+template class PostingIterator<false, true, true>;
+template class PostingIterator<true, false, false>;
+template class PostingIterator<true, false, true>;
+template class PostingIterator<true, true, false>;
+template class PostingIterator<true, true, true>;
}
-namespace search::btree {
-
-template class BTreeNodeTT<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- BTreeDefaultTraits::INTERNAL_SLOTS>;
-
-template class BTreeLeafNode<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- BTreeDefaultTraits::LEAF_SLOTS>;
-
-template class BTreeNodeStore<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- BTreeDefaultTraits::INTERNAL_SLOTS,
- BTreeDefaultTraits::LEAF_SLOTS>;
-
-template class BTreeIteratorBase<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- BTreeDefaultTraits::INTERNAL_SLOTS,
- BTreeDefaultTraits::LEAF_SLOTS,
- BTreeDefaultTraits::PATH_SIZE>;
-
-template class BTreeIterator<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- std::less<uint32_t>,
- BTreeDefaultTraits>;
-
-template class BTree<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- std::less<uint32_t>,
- BTreeDefaultTraits>;
-
-template class BTreeRoot<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- std::less<uint32_t>,
- BTreeDefaultTraits>;
-
-template class BTreeRootBase<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- BTreeDefaultTraits::INTERNAL_SLOTS,
- BTreeDefaultTraits::LEAF_SLOTS>;
-
-template class BTreeNodeAllocator<uint32_t,
- search::memoryindex::PostingListEntry<false>,
- search::btree::NoAggregated,
- BTreeDefaultTraits::INTERNAL_SLOTS,
- BTreeDefaultTraits::LEAF_SLOTS>;
-}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h
index f029c837cf7..056cad90c85 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h
@@ -3,44 +3,26 @@
#pragma once
#include "field_index.h"
-#include <vespa/searchlib/queryeval/iterators.h>
+#include <vespa/searchlib/queryeval/searchiterator.h>
namespace search::memoryindex {
/**
- * Search iterator for memory field index posting list.
+ * Factory for creating search iterator over memory field index posting list.
*
* The template parameter specifies whether the wrapped posting list has interleaved features or not.
+ *
+ * @param itr the posting list iterator to base the search iterator upon.
+ * @param feature_store reference to store for features.
+ * @param field_id the id of the field searched.
+ * @param match_data the match data to unpack features into.
*/
template <bool interleaved_features>
-class PostingIterator : public queryeval::RankedSearchIteratorBase {
-private:
- using FieldIndexType = FieldIndex<interleaved_features>;
- using PostingListIteratorType = typename FieldIndexType::PostingList::ConstIterator;
- PostingListIteratorType _itr;
- const FeatureStore &_featureStore;
- FeatureStore::DecodeContextCooked _featureDecoder;
-
-public:
- /**
- * Creates a search iterator for the given posting list iterator.
- *
- * @param itr the posting list iterator to base the search iterator upon.
- * @param featureStore reference to store for features.
- * @param packedIndex the field or field collection owning features.
- * @param matchData the match data to unpack features into.
- **/
- PostingIterator(PostingListIteratorType itr,
- const FeatureStore &featureStore,
- uint32_t packedIndex,
- const fef::TermFieldMatchDataArray &matchData);
- ~PostingIterator();
-
- void doSeek(uint32_t docId) override;
- void doUnpack(uint32_t docId) override;
- void initRange(uint32_t begin, uint32_t end) override;
- Trinary is_strict() const override { return Trinary::True; }
-};
+queryeval::SearchIterator::UP
+make_search_iterator(typename FieldIndex<interleaved_features>::PostingList::ConstIterator itr,
+ const FeatureStore& feature_store,
+ uint32_t field_id,
+ const fef::TermFieldMatchDataArray& match_data);
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h b/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h
index 373de21e836..f58a62ddef5 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h
@@ -7,20 +7,60 @@
namespace search::memoryindex {
/**
+ * Class storing interleaved features for a posting list entry.
+ */
+class InterleavedFeatures {
+protected:
+ uint16_t _num_occs;
+ uint16_t _field_length;
+
+public:
+ InterleavedFeatures()
+ : _num_occs(0),
+ _field_length(1)
+ {
+ }
+ InterleavedFeatures(uint16_t num_occs, uint16_t field_length)
+ : _num_occs(num_occs),
+ _field_length(field_length)
+ {
+ }
+ uint16_t get_num_occs() const { return _num_occs; }
+ uint16_t get_field_length() const { return _field_length; }
+};
+
+/**
+ * Empty class used when posting list entry does not have interleaved features.
+ */
+class NoInterleavedFeatures {
+public:
+ NoInterleavedFeatures() {}
+ NoInterleavedFeatures(uint16_t num_occs, uint16_t field_length) {
+ (void) num_occs;
+ (void) field_length;
+ }
+ uint16_t get_num_occs() const { return 0; }
+ uint16_t get_field_length() const { return 1; }
+};
+
+/**
* Entry per document in memory index posting list.
*/
template <bool interleaved_features>
-class PostingListEntry {
+class PostingListEntry : public std::conditional_t<interleaved_features, InterleavedFeatures, NoInterleavedFeatures> {
+ using ParentType = std::conditional_t<interleaved_features, InterleavedFeatures, NoInterleavedFeatures>;
mutable datastore::EntryRef _features; // reference to compressed features
public:
- explicit PostingListEntry(datastore::EntryRef features)
- : _features(features)
+ explicit PostingListEntry(datastore::EntryRef features, uint16_t num_occs, uint16_t field_length)
+ : ParentType(num_occs, field_length),
+ _features(features)
{
}
PostingListEntry()
- : _features()
+ : ParentType(),
+ _features()
{
}
@@ -34,4 +74,7 @@ public:
void update_features(datastore::EntryRef features) const { _features = features; }
};
+template class PostingListEntry<false>;
+template class PostingListEntry<true>;
+
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.cpp b/searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.cpp
index 0a11203c390..bf7b659dea7 100644
--- a/searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.cpp
@@ -68,6 +68,20 @@ void optimize_source_blenders(IntermediateBlueprint &self, size_t begin_idx) {
}
}
+void
+need_normal_features_for_children(const IntermediateBlueprint &blueprint, fef::MatchData &md)
+{
+ for (size_t i = 0; i < blueprint.childCnt(); ++i) {
+ const Blueprint::State &cs = blueprint.getChild(i).getState();
+ for (size_t j = 0; j < cs.numFields(); ++j) {
+ auto *tfmd = cs.field(j).resolve(md);
+ if (tfmd != nullptr) {
+ tfmd->setNeedNormalFeatures(true);
+ }
+ }
+ }
+}
+
} // namespace search::queryeval::<unnamed>
//-----------------------------------------------------------------------------
@@ -375,6 +389,13 @@ NearBlueprint::inheritStrict(size_t i) const
}
SearchIterator::UP
+NearBlueprint::createSearch(fef::MatchData &md, bool strict) const
+{
+ need_normal_features_for_children(*this, md);
+ return IntermediateBlueprint::createSearch(md, strict);
+}
+
+SearchIterator::UP
NearBlueprint::createIntermediateSearch(const MultiSearch::Children &subSearches,
bool strict, search::fef::MatchData &md) const
{
@@ -416,6 +437,13 @@ ONearBlueprint::inheritStrict(size_t i) const
}
SearchIterator::UP
+ONearBlueprint::createSearch(fef::MatchData &md, bool strict) const
+{
+ need_normal_features_for_children(*this, md);
+ return IntermediateBlueprint::createSearch(md, strict);
+}
+
+SearchIterator::UP
ONearBlueprint::createIntermediateSearch(const MultiSearch::Children &subSearches,
bool strict, search::fef::MatchData &md) const
{
diff --git a/searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.h b/searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.h
index 440794c25d8..a217c8f303d 100644
--- a/searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.h
+++ b/searchlib/src/vespa/searchlib/queryeval/intermediate_blueprints.h
@@ -102,6 +102,7 @@ public:
bool should_optimize_children() const override { return false; }
void sort(std::vector<Blueprint*> &children) const override;
bool inheritStrict(size_t i) const override;
+ SearchIteratorUP createSearch(fef::MatchData &md, bool strict) const override;
SearchIterator::UP
createIntermediateSearch(const MultiSearch::Children &subSearches,
bool strict, fef::MatchData &md) const override;
@@ -122,6 +123,7 @@ public:
bool should_optimize_children() const override { return false; }
void sort(std::vector<Blueprint*> &children) const override;
bool inheritStrict(size_t i) const override;
+ SearchIteratorUP createSearch(fef::MatchData &md, bool strict) const override;
SearchIterator::UP
createIntermediateSearch(const MultiSearch::Children &subSearches,
bool strict, fef::MatchData &md) const override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/iterators.h b/searchlib/src/vespa/searchlib/queryeval/iterators.h
index 43d046a78b7..0e44a6f6ff6 100644
--- a/searchlib/src/vespa/searchlib/queryeval/iterators.h
+++ b/searchlib/src/vespa/searchlib/queryeval/iterators.h
@@ -16,14 +16,13 @@ public:
private:
uint32_t _needUnpack;
protected:
- bool getUnpacked() const { return _needUnpack == 0; }
void setUnpacked() { _needUnpack = 0; }
void clearUnpacked() { _needUnpack = 1; }
uint32_t getNeedUnpack() const { return _needUnpack; }
void incNeedUnpack() { ++_needUnpack; }
-
public:
RankedSearchIteratorBase(const fef::TermFieldMatchDataArray &matchData);
+ bool getUnpacked() const { return _needUnpack == 0; }
};
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
index a4996c931e2..b7d444a0fca 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
@@ -129,10 +129,10 @@ search::queryeval::SearchIterator *
FakeMemTreeOcc::
createIterator(const fef::TermFieldMatchDataArray &matchData) const
{
- return new search::memoryindex::PostingIterator<false>(_tree.begin(_allocator),
- _mgr._featureStore,
- _packedIndex,
- matchData);
+ return memoryindex::make_search_iterator<false>(_tree.begin(_allocator),
+ _mgr._featureStore,
+ _packedIndex,
+ matchData).release();
}
@@ -267,7 +267,7 @@ FakeMemTreeOccMgr::flush()
}
} else {
if (!itr.valid() || docId < itr.getKey()) {
- tree.insert(itr, docId, PostingListEntryType(i->getFeatureRef()));
+ tree.insert(itr, docId, PostingListEntryType(i->getFeatureRef(), 0, 1));
}
}
}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java
index b9010efb223..09140423010 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/InfraApplication.java
@@ -74,7 +74,7 @@ public abstract class InfraApplication implements InfraApplicationApi {
@Override
public ClusterSpec getClusterSpecWithVersion(Version version) {
- return ClusterSpec.request(clusterSpecType, clusterSpecId, version, true, Collections.emptySet());
+ return ClusterSpec.request(clusterSpecType, clusterSpecId, version, true);
}
public ClusterSpec.Type getClusterSpecType() {
diff --git a/standalone-container/src/main/java/com/yahoo/container/standalone/StandaloneContainerApplication.java b/standalone-container/src/main/java/com/yahoo/container/standalone/StandaloneContainerApplication.java
index 07a23d98095..e2de3929d9b 100644
--- a/standalone-container/src/main/java/com/yahoo/container/standalone/StandaloneContainerApplication.java
+++ b/standalone-container/src/main/java/com/yahoo/container/standalone/StandaloneContainerApplication.java
@@ -93,7 +93,7 @@ public class StandaloneContainerApplication implements Application {
try {
Pair<VespaModel, Container> tpl = withTempDir(preprocessedApplicationDir -> createContainerModel(applicationPath,
- distributedFiles, preprocessedApplicationDir, networkingOption, configModelRepo));
+ distributedFiles, preprocessedApplicationDir, networkingOption, configModelRepo));
this.modelRoot = tpl.getFirst();
this.container = tpl.getSecond();
} catch (RuntimeException r) {
diff --git a/storage/CMakeLists.txt b/storage/CMakeLists.txt
index f77c11eb350..418d8dbe430 100644
--- a/storage/CMakeLists.txt
+++ b/storage/CMakeLists.txt
@@ -49,9 +49,6 @@ vespa_define_module(
vdstestlib
atomic
- TEST_EXTERNAL_DEPENDS
- cppunit
-
TESTS
src/tests
src/tests/bucketdb
diff --git a/storage/src/tests/CMakeLists.txt b/storage/src/tests/CMakeLists.txt
index ae55c80c148..e92d15c14e3 100644
--- a/storage/src/tests/CMakeLists.txt
+++ b/storage/src/tests/CMakeLists.txt
@@ -1,17 +1,3 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-# Runner for unit tests written in CppUnit (DEPRECATED).
-# NOTE: All new tests should be written in gtest. Each test sub-module has a gtest runner.
-vespa_add_executable(storage_testrunner_app TEST
- SOURCES
- testrunner.cpp
- DEPENDS
- storage_testcommon
- storage_testhostreporter
- storage_testdistributor
-)
-
-vespa_add_test(
- NAME storage_testrunner_app
- COMMAND storage_testrunner_app
-)
+# Currently empty; test executables are in library subdirs
diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp
index 1f72347b7ed..e05648c62a2 100644
--- a/storage/src/tests/bucketdb/bucketmanagertest.cpp
+++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp
@@ -200,7 +200,7 @@ void BucketManagerTest::addBucketsToDB(uint32_t count)
// Make sure we have at least one empty bucket
TestBucketInfo& info = (++_bucketInfo.begin())->second;
- CPPUNIT_ASSERT(info.size != 0);
+ assert(info.size != 0);
info.size = 0;
info.count = 0;
info.crc = 0;
diff --git a/storage/src/tests/bucketdb/initializertest.cpp b/storage/src/tests/bucketdb/initializertest.cpp
index 57bb3a865d5..63f990f7cc1 100644
--- a/storage/src/tests/bucketdb/initializertest.cpp
+++ b/storage/src/tests/bucketdb/initializertest.cpp
@@ -12,8 +12,8 @@
#include <vespa/storageapi/message/state.h>
#include <tests/common/teststorageapp.h>
#include <tests/common/dummystoragelink.h>
-#include <tests/common/testhelper.h> // TODO decouple from CppUnit
-#include <vespa/vdstestlib/cppunit/dirconfig.hpp> // TODO decouple from CppUnit
+#include <tests/common/testhelper.h>
+#include <vespa/vdstestlib/config/dirconfig.hpp>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/log/log.h>
diff --git a/storage/src/tests/common/CMakeLists.txt b/storage/src/tests/common/CMakeLists.txt
index 075dc263be9..1922d13ca61 100644
--- a/storage/src/tests/common/CMakeLists.txt
+++ b/storage/src/tests/common/CMakeLists.txt
@@ -2,6 +2,7 @@
vespa_add_library(storage_testcommon TEST
SOURCES
dummystoragelink.cpp
+ message_sender_stub.cpp
testhelper.cpp
testnodestateupdater.cpp
teststorageapp.cpp
diff --git a/storage/src/tests/common/hostreporter/CMakeLists.txt b/storage/src/tests/common/hostreporter/CMakeLists.txt
index 7a4a23ba7aa..2fcb159cb08 100644
--- a/storage/src/tests/common/hostreporter/CMakeLists.txt
+++ b/storage/src/tests/common/hostreporter/CMakeLists.txt
@@ -1,9 +1,7 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(storage_testhostreporter TEST
SOURCES
- hostinfotest.cpp
util.cpp
- versionreportertest.cpp
DEPENDS
storage
)
@@ -11,8 +9,11 @@ vespa_add_library(storage_testhostreporter TEST
vespa_add_executable(storage_hostreporter_gtest_runner_app TEST
SOURCES
gtest_runner.cpp
+ hostinfotest.cpp
+ versionreportertest.cpp
DEPENDS
storage
+ storage_testhostreporter
gtest
)
diff --git a/storage/src/tests/common/hostreporter/hostinfotest.cpp b/storage/src/tests/common/hostreporter/hostinfotest.cpp
index 418884c2a38..467149154ee 100644
--- a/storage/src/tests/common/hostreporter/hostinfotest.cpp
+++ b/storage/src/tests/common/hostreporter/hostinfotest.cpp
@@ -1,15 +1,18 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "util.h"
#include <vespa/storage/common/hostreporter/hostinfo.h>
#include <vespa/storage/common/hostreporter/hostreporter.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/util/jsonstream.h>
-#include "util.h"
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace ::testing;
namespace storage {
namespace {
+
using Object = vespalib::JsonStream::Object;
using End = vespalib::JsonStream::End;
using JsonFormat = vespalib::slime::JsonFormat;
@@ -21,22 +24,10 @@ public:
jsonreport << "dummy" << Object() << "foo" << "bar" << End();
}
};
-}
-
-struct HostInfoReporterTest : public CppUnit::TestFixture
-{
- void testHostInfoReporter();
- CPPUNIT_TEST_SUITE(HostInfoReporterTest);
- CPPUNIT_TEST(testHostInfoReporter);
- CPPUNIT_TEST_SUITE_END();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(HostInfoReporterTest);
+}
-void
-HostInfoReporterTest::testHostInfoReporter()
-{
+TEST(HostInfoReporterTest, host_info_reporter) {
HostInfo hostinfo;
DummyReporter dummyReporter;
hostinfo.registerReporter(&dummyReporter);
@@ -50,8 +41,8 @@ HostInfoReporterTest::testHostInfoReporter()
std::string jsonData = json.str();
vespalib::Slime slime;
JsonFormat::decode(Memory(jsonData), slime);
- CPPUNIT_ASSERT(slime.get()["dummy"]["foo"].asString() == "bar");
- CPPUNIT_ASSERT(!slime.get()["vtag"]["version"].asString().make_string().empty());
+ EXPECT_EQ(slime.get()["dummy"]["foo"].asString(), "bar");
+ EXPECT_FALSE(slime.get()["vtag"]["version"].asString().make_string().empty());
}
-} // storage
+} // storage
diff --git a/storage/src/tests/common/hostreporter/util.cpp b/storage/src/tests/common/hostreporter/util.cpp
index e0563a431e6..02e66b1dcc7 100644
--- a/storage/src/tests/common/hostreporter/util.cpp
+++ b/storage/src/tests/common/hostreporter/util.cpp
@@ -2,12 +2,11 @@
#include "util.h"
#include <vespa/storage/common/hostreporter/hostreporter.h>
#include <vespa/vespalib/data/slime/slime.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/vespalib/util/jsonstream.h>
#include <vespa/vespalib/stllike/asciistream.h>
-namespace storage {
-namespace util {
+namespace storage::util {
+
namespace {
using Object = vespalib::JsonStream::Object;
using End = vespalib::JsonStream::End;
@@ -27,8 +26,8 @@ reporterToSlime(HostReporter &hostReporter, vespalib::Slime &slime) {
size_t parsed = JsonFormat::decode(Memory(jsonData), slime);
if (parsed == 0) {
- CPPUNIT_FAIL("jsonData is not json:\n" + jsonData);
+ throw std::runtime_error("jsonData is not json:\n" + jsonData);
}
}
-}
+
}
diff --git a/storage/src/tests/common/hostreporter/versionreportertest.cpp b/storage/src/tests/common/hostreporter/versionreportertest.cpp
index dd58493f540..ee8fe2a5ff3 100644
--- a/storage/src/tests/common/hostreporter/versionreportertest.cpp
+++ b/storage/src/tests/common/hostreporter/versionreportertest.cpp
@@ -1,38 +1,29 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/log/log.h>
+
+#include "util.h"
#include <vespa/storage/common/hostreporter/versionreporter.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/util/jsonstream.h>
-#include "util.h"
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
-LOG_SETUP(".test.versionreporter");
+using namespace ::testing;
namespace storage {
namespace {
+
using Object = vespalib::JsonStream::Object;
using End = vespalib::JsonStream::End;
-}
-struct VersionReporterTest : public CppUnit::TestFixture
-{
- void testVersionReporter();
-
- CPPUNIT_TEST_SUITE(VersionReporterTest);
- CPPUNIT_TEST(testVersionReporter);
- CPPUNIT_TEST_SUITE_END();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(VersionReporterTest);
+}
-void
-VersionReporterTest::testVersionReporter()
-{
+TEST(VersionReporterTest, version_reporter) {
VersionReporter versionReporter;
vespalib::Slime slime;
util::reporterToSlime(versionReporter, slime);
std::string version = slime.get()["vtag"]["version"].asString().make_string().c_str();
- CPPUNIT_ASSERT(version.length() > 2);
- CPPUNIT_ASSERT(version.find(".") > 0);
+ EXPECT_GT(version.size(), 2);
+ EXPECT_THAT(version, HasSubstr("."));
}
+
} // storage
diff --git a/storage/src/tests/distributor/messagesenderstub.cpp b/storage/src/tests/common/message_sender_stub.cpp
index 55eaf344e71..c127f9071e5 100644
--- a/storage/src/tests/distributor/messagesenderstub.cpp
+++ b/storage/src/tests/common/message_sender_stub.cpp
@@ -1,15 +1,16 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "messagesenderstub.h"
-#include "distributortestutil.h"
+#include "message_sender_stub.h"
+#include <vespa/storageapi/messageapi/storagecommand.h>
+#include <vespa/storageapi/messageapi/storagereply.h>
+#include <string>
+#include <sstream>
+#include <stdexcept>
namespace storage {
-MessageSenderStub::MessageSenderStub()
- : _clusterName("storage"),
- _pendingMessageTracker(0)
-{}
-MessageSenderStub::~MessageSenderStub() {}
+MessageSenderStub::MessageSenderStub() = default;
+MessageSenderStub::~MessageSenderStub() = default;
std::string
MessageSenderStub::getLastCommand(bool verbose) const
@@ -22,8 +23,8 @@ MessageSenderStub::getLastCommand(bool verbose) const
std::string
MessageSenderStub::dumpMessage(const api::StorageMessage& msg,
- bool includeAddress,
- bool verbose) const
+ bool includeAddress,
+ bool verbose) const
{
std::ostringstream ost;
diff --git a/storage/src/tests/common/message_sender_stub.h b/storage/src/tests/common/message_sender_stub.h
new file mode 100644
index 00000000000..73b1fcff9f4
--- /dev/null
+++ b/storage/src/tests/common/message_sender_stub.h
@@ -0,0 +1,47 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/storage/common/messagesender.h>
+#include <vector>
+
+namespace storage {
+
+struct MessageSenderStub : MessageSender {
+ std::vector<std::shared_ptr<api::StorageCommand>> commands;
+ std::vector<std::shared_ptr<api::StorageReply>> replies;
+
+ MessageSenderStub();
+ ~MessageSenderStub() override;
+
+ void clear() {
+ commands.clear();
+ replies.clear();
+ }
+
+ void sendCommand(const std::shared_ptr<api::StorageCommand>& cmd) override {
+ commands.push_back(cmd);
+ }
+
+ void sendReply(const std::shared_ptr<api::StorageReply>& reply) override {
+ replies.push_back(reply);
+ }
+
+ std::string getLastCommand(bool verbose = true) const;
+
+ std::string getCommands(bool includeAddress = false,
+ bool verbose = false,
+ uint32_t fromIndex = 0) const;
+
+ std::string getLastReply(bool verbose = true) const;
+
+ std::string getReplies(bool includeAddress = false,
+ bool verbose = false) const;
+
+ std::string dumpMessage(const api::StorageMessage& msg,
+ bool includeAddress,
+ bool verbose) const;
+};
+
+
+}
diff --git a/storage/src/tests/common/metricstest.cpp b/storage/src/tests/common/metricstest.cpp
index 9a9f05d500e..d1421845b81 100644
--- a/storage/src/tests/common/metricstest.cpp
+++ b/storage/src/tests/common/metricstest.cpp
@@ -14,6 +14,7 @@
#include <vespa/config/common/exceptions.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
#include <thread>
#include <vespa/log/log.h>
@@ -211,16 +212,12 @@ TEST_F(MetricsTest, filestor_metrics) {
std::ostringstream ost;
framework::HttpUrlPath path("metrics?interval=-1&format=text");
bool retVal = _metricsConsumer->reportStatus(ost, path);
- CPPUNIT_ASSERT_MESSAGE("_metricsConsumer->reportStatus failed", retVal);
+ ASSERT_TRUE(retVal) << "_metricsConsumer->reportStatus failed";
std::string s = ost.str();
- CPPUNIT_ASSERT_MESSAGE("No get statistics in:\n" + s,
- s.find("vds.filestor.alldisks.allthreads.get.sum.count count=240") != std::string::npos);
- CPPUNIT_ASSERT_MESSAGE("No put statistics in:\n" + s,
- s.find("vds.filestor.alldisks.allthreads.put.sum.count count=200") != std::string::npos);
- CPPUNIT_ASSERT_MESSAGE("No remove statistics in:\n" + s,
- s.find("vds.filestor.alldisks.allthreads.remove.sum.count count=120") != std::string::npos);
- CPPUNIT_ASSERT_MESSAGE("No removenotfound stats in:\n" + s,
- s.find("vds.filestor.alldisks.allthreads.remove.sum.not_found count=20") != std::string::npos);
+ EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.get.sum.count count=240"));
+ EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.put.sum.count count=200"));
+ EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.remove.sum.count count=120"));
+ EXPECT_THAT(s, HasSubstr("vds.filestor.alldisks.allthreads.remove.sum.not_found count=20"));
}
#define ASSERT_METRIC(interval, metric, count) \
diff --git a/storage/src/tests/common/testhelper.h b/storage/src/tests/common/testhelper.h
index 1bcc53dfe12..8c553ccce40 100644
--- a/storage/src/tests/common/testhelper.h
+++ b/storage/src/tests/common/testhelper.h
@@ -1,25 +1,10 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/vdstestlib/cppunit/dirconfig.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
-
-
-#include <fstream>
#include <vespa/messagebus/testlib/slobrok.h>
+#include <vespa/vdstestlib/config/dirconfig.h>
+#include <fstream>
#include <sstream>
-#define ASSERT_REPLY_COUNT(count, dummylink) \
- { \
- std::ostringstream msgost; \
- if ((dummylink).getNumReplies() != count) { \
- for (uint32_t ijx=0; ijx<(dummylink).getNumReplies(); ++ijx) { \
- msgost << (dummylink).getReply(ijx)->toString(true) << "\n"; \
- } \
- } \
- CPPUNIT_ASSERT_EQUAL_MSG(msgost.str(), size_t(count), \
- (dummylink).getNumReplies()); \
- }
-
namespace storage {
void addFileConfig(vdstestlib::DirConfig& dc,
diff --git a/storage/src/tests/common/teststorageapp.cpp b/storage/src/tests/common/teststorageapp.cpp
index a720cd191e4..dd89082d3e7 100644
--- a/storage/src/tests/common/teststorageapp.cpp
+++ b/storage/src/tests/common/teststorageapp.cpp
@@ -7,7 +7,6 @@
#include <vespa/config-load-type.h>
#include <vespa/config-fleetcontroller.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/vespalib/io/fileutil.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/config/config.h>
@@ -122,7 +121,7 @@ TestStorageApp::waitUntilInitialized(
error << " ";
initializer->reportStatus(error, framework::HttpUrlPath(""));
LOG(error, "%s", error.str().c_str());
- CPPUNIT_FAIL(error.str().c_str());
+ throw std::runtime_error(error.str());
}
}
}
@@ -170,9 +169,9 @@ TestServiceLayerApp::TestServiceLayerApp(DiskCount dc, NodeIndex index,
lib::NodeState ns(*_nodeStateUpdater.getReportedNodeState());
ns.setDiskCount(dc);
_nodeStateUpdater.setReportedNodeState(ns);
- // Tests should know how many disks they want to use. If testing auto
- // detection, you should not need this utility.
- CPPUNIT_ASSERT(dc > 0);
+ // Tests should know how many disks they want to use. If testing auto
+ // detection, you should not need this utility.
+ assert(dc > 0);
}
TestServiceLayerApp::~TestServiceLayerApp() {}
@@ -190,8 +189,7 @@ TestServiceLayerApp::setPersistenceProvider(
spi::PersistenceProvider::UP provider)
{
_partitions = provider->getPartitionStates().getList();
- CPPUNIT_ASSERT_EQUAL(spi::PartitionId(_compReg.getDiskCount()),
- _partitions.size());
+ assert(spi::PartitionId(_compReg.getDiskCount()) == _partitions.size());
_persistenceProvider = std::move(provider);
}
diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt
index c5d23badfd6..3148540d86d 100644
--- a/storage/src/tests/distributor/CMakeLists.txt
+++ b/storage/src/tests/distributor/CMakeLists.txt
@@ -1,22 +1,29 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_library(storage_testdistributor TEST
+
+vespa_add_executable(storage_distributor_gtest_runner_app TEST
SOURCES
blockingoperationstartertest.cpp
+ btree_bucket_database_test.cpp
+ bucket_db_prune_elision_test.cpp
+ bucketdatabasetest.cpp
bucketdbmetricupdatertest.cpp
+ bucketdbupdatertest.cpp
bucketgctimecalculatortest.cpp
bucketstateoperationtest.cpp
distributor_host_info_reporter_test.cpp
+ distributor_message_sender_stub.cpp
distributortest.cpp
distributortestutil.cpp
externaloperationhandlertest.cpp
garbagecollectiontest.cpp
getoperationtest.cpp
+ gtest_runner.cpp
idealstatemanagertest.cpp
joinbuckettest.cpp
maintenanceschedulertest.cpp
+ mapbucketdatabasetest.cpp
mergelimitertest.cpp
mergeoperationtest.cpp
- messagesenderstub.cpp
nodeinfotest.cpp
nodemaintenancestatstrackertest.cpp
operation_sequencer_test.cpp
@@ -24,6 +31,7 @@ vespa_add_library(storage_testdistributor TEST
ownership_transfer_safe_time_point_calculator_test.cpp
pendingmessagetrackertest.cpp
persistence_metrics_set_test.cpp
+ putoperationtest.cpp
removebucketoperationtest.cpp
removelocationtest.cpp
removeoperationtest.cpp
@@ -38,28 +46,9 @@ vespa_add_library(storage_testdistributor TEST
updateoperationtest.cpp
visitoroperationtest.cpp
DEPENDS
- storage_distributor
storage_testcommon
storage_testhostreporter
-)
-
-vespa_add_executable(storage_distributor_gtest_runner_app TEST
- SOURCES
- btree_bucket_database_test.cpp
- bucketdatabasetest.cpp
- bucketdbupdatertest.cpp
- mapbucketdatabasetest.cpp
- putoperationtest.cpp
- # TODO: Depend on storage_testdistributor when all tests have been migrated
- # Fixture etc. dupes with non-gtest runner :
- distributortestutil.cpp
- bucket_db_prune_elision_test.cpp
- messagesenderstub.cpp
- gtest_runner.cpp
- DEPENDS
storage_distributor
- storage_testcommon
- storage_testhostreporter
gtest
)
diff --git a/storage/src/tests/distributor/blockingoperationstartertest.cpp b/storage/src/tests/distributor/blockingoperationstartertest.cpp
index 0160f5c9e51..9a9e04f0f33 100644
--- a/storage/src/tests/distributor/blockingoperationstartertest.cpp
+++ b/storage/src/tests/distributor/blockingoperationstartertest.cpp
@@ -1,30 +1,23 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
#include <vespa/storage/distributor/blockingoperationstarter.h>
#include <vespa/storage/distributor/pendingmessagetracker.h>
#include <tests/distributor/maintenancemocks.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
-
-namespace storage {
-
-namespace distributor {
-
using document::BucketId;
+using namespace ::testing;
-class BlockingOperationStarterTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(BlockingOperationStarterTest);
- CPPUNIT_TEST(testOperationNotBlockedWhenNoMessagesPending);
- CPPUNIT_TEST(testOperationBlockedWhenMessagesPending);
- CPPUNIT_TEST_SUITE_END();
+namespace storage::distributor {
+struct BlockingOperationStarterTest : Test {
std::shared_ptr<Operation> createMockOperation() {
- return std::shared_ptr<Operation>(new MockOperation(makeDocumentBucket(BucketId(16, 1))));
+ return std::make_shared<MockOperation>(makeDocumentBucket(BucketId(16, 1)));
}
std::shared_ptr<Operation> createBlockingMockOperation() {
- std::shared_ptr<MockOperation> op(new MockOperation(makeDocumentBucket(BucketId(16, 1))));
+ auto op = std::make_shared<MockOperation>(makeDocumentBucket(BucketId(16, 1)));
op->setShouldBlock(true);
return op;
}
@@ -35,43 +28,30 @@ class BlockingOperationStarterTest : public CppUnit::TestFixture {
std::unique_ptr<PendingMessageTracker> _messageTracker;
std::unique_ptr<BlockingOperationStarter> _operationStarter;
-public:
- void testOperationNotBlockedWhenNoMessagesPending();
- void testOperationBlockedWhenMessagesPending();
-
- void setUp() override;
+ void SetUp() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(BlockingOperationStarterTest);
-
void
-BlockingOperationStarterTest::setUp()
+BlockingOperationStarterTest::SetUp()
{
- _starterImpl.reset(new MockOperationStarter());
- _compReg.reset(new StorageComponentRegisterImpl());
+ _starterImpl = std::make_unique<MockOperationStarter>();
+ _compReg = std::make_unique<StorageComponentRegisterImpl>();
_compReg->setClock(_clock);
_clock.setAbsoluteTimeInSeconds(1);
- _messageTracker.reset(new PendingMessageTracker(*_compReg));
- _operationStarter.reset(new BlockingOperationStarter(*_messageTracker, *_starterImpl));
+ _messageTracker = std::make_unique<PendingMessageTracker>(*_compReg);
+ _operationStarter = std::make_unique<BlockingOperationStarter>(*_messageTracker, *_starterImpl);
}
-void
-BlockingOperationStarterTest::testOperationNotBlockedWhenNoMessagesPending()
-{
- CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(0)));
- CPPUNIT_ASSERT_EQUAL(std::string("Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri 0\n"),
- _starterImpl->toString());
+TEST_F(BlockingOperationStarterTest, operation_not_blocked_when_no_messages_pending) {
+ ASSERT_TRUE(_operationStarter->start(createMockOperation(), OperationStarter::Priority(0)));
+ EXPECT_EQ("Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri 0\n",
+ _starterImpl->toString());
}
-void
-BlockingOperationStarterTest::testOperationBlockedWhenMessagesPending()
-{
+TEST_F(BlockingOperationStarterTest, operation_blocked_when_messages_pending) {
// start should return true but not forward message to underlying starter.
- CPPUNIT_ASSERT(_operationStarter->start(createBlockingMockOperation(),
- OperationStarter::Priority(0)));
- CPPUNIT_ASSERT_EQUAL(std::string(""), _starterImpl->toString());
+ ASSERT_TRUE(_operationStarter->start(createBlockingMockOperation(), OperationStarter::Priority(0)));
+ EXPECT_EQ("", _starterImpl->toString());
}
}
-}
diff --git a/storage/src/tests/distributor/bucketdatabasetest.cpp b/storage/src/tests/distributor/bucketdatabasetest.cpp
index cfb54edbe78..6225865b153 100644
--- a/storage/src/tests/distributor/bucketdatabasetest.cpp
+++ b/storage/src/tests/distributor/bucketdatabasetest.cpp
@@ -113,7 +113,7 @@ struct StoppingProcessor : public BucketDatabase::EntryProcessor {
}
-TEST_P(BucketDatabaseTest, testIterating) {
+TEST_P(BucketDatabaseTest, iterating) {
// Do some insertions
db().update(BucketDatabase::Entry(document::BucketId(16, 0x10), BI(1)));
db().update(BucketDatabase::Entry(document::BucketId(16, 0x0b), BI(2)));
@@ -186,7 +186,7 @@ BucketDatabaseTest::doFindParents(const std::vector<document::BucketId>& ids,
return ost.str();
}
-TEST_P(BucketDatabaseTest, testFindParents) {
+TEST_P(BucketDatabaseTest, find_parents) {
// test what parents in the DB (specified in vector) are parents of the
// specified bucket. Result is a list of indexes into the vector.
@@ -287,7 +287,7 @@ BucketDatabaseTest::doFindAll(const std::vector<document::BucketId>& ids,
return ost.str();
}
-TEST_P(BucketDatabaseTest, testFindAll) {
+TEST_P(BucketDatabaseTest, find_all) {
std::vector<document::BucketId> buckets;
EXPECT_EQ(
std::string(""),
@@ -389,7 +389,7 @@ BucketDatabaseTest::doCreate(const std::vector<document::BucketId>& ids,
}
// TODO rewrite in terms of bucket getter, not creator
-TEST_P(BucketDatabaseTest, testCreateAppropriateBucket) {
+TEST_P(BucketDatabaseTest, create_appropriate_bucket) {
// Use min split bits when no relevant bucket exist.
EXPECT_EQ(
document::BucketId(36,0x0000004d2),
@@ -439,7 +439,7 @@ TEST_P(BucketDatabaseTest, testCreateAppropriateBucket) {
document::BucketId(58, 0x00000000010004d2)));
}
-TEST_P(BucketDatabaseTest, testGetNext) {
+TEST_P(BucketDatabaseTest, get_next) {
db().update(BucketDatabase::Entry(document::BucketId(16, 16), BI(1)));
db().update(BucketDatabase::Entry(document::BucketId(16, 11), BI(2)));
db().update(BucketDatabase::Entry(document::BucketId(16, 42), BI(3)));
@@ -492,7 +492,7 @@ BucketDatabaseTest::doTestUpperBound(const UBoundFunc& f)
EXPECT_EQ(BucketId(8, 0xff), f(db(), BucketId(8, 0)));
}
-TEST_P(BucketDatabaseTest, testUpperBoundReturnsNextInOrderGreaterBucket) {
+TEST_P(BucketDatabaseTest, upper_bound_returns_next_in_order_greater_bucket) {
doTestUpperBound([](const BucketDatabase& bucketDb,
const document::BucketId& id)
{
@@ -500,7 +500,7 @@ TEST_P(BucketDatabaseTest, testUpperBoundReturnsNextInOrderGreaterBucket) {
});
}
-TEST_P(BucketDatabaseTest, testGetNextReturnsUpperBoundBucket) {
+TEST_P(BucketDatabaseTest, get_next_returns_upper_bound_bucket) {
// getNext() would generally be implemented in terms of upperBound(), but
// make sure it conforms to the same contract in case this changes.
doTestUpperBound([](const BucketDatabase& bucketDb,
@@ -510,7 +510,7 @@ TEST_P(BucketDatabaseTest, testGetNextReturnsUpperBoundBucket) {
});
}
-TEST_P(BucketDatabaseTest, testChildCount) {
+TEST_P(BucketDatabaseTest, child_count) {
// Empty tree; inserts cannot create inconsistencies.
EXPECT_EQ(0u, db().childCount(BucketId(3, 1)));
diff --git a/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp b/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
index 152a213a4f4..1008d3ee4f2 100644
--- a/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
+++ b/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
@@ -1,57 +1,32 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <string>
-#include <sstream>
#include <vespa/storage/bucketdb/bucketdatabase.h>
#include <vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h>
#include <vespa/storage/distributor/distributormetricsset.h>
#include <vespa/storage/distributor/idealstatemetricsset.h>
#include <vespa/storage/config/config-stor-distributormanager.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <string>
+#include <sstream>
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
using document::BucketId;
+using namespace ::testing;
-class BucketDBMetricUpdaterTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(BucketDBMetricUpdaterTest);
- CPPUNIT_TEST(testDocAndByteCountsAreUpdated);
- CPPUNIT_TEST(testBucketsWithTooFewAndTooManyCopies);
- CPPUNIT_TEST(testBucketsWithVaryingTrustedness);
- CPPUNIT_TEST(testPickCountsFromTrustedCopy);
- CPPUNIT_TEST(testPickLargestCopyIfNoTrusted);
- CPPUNIT_TEST(testCompleteRoundClearsWorkingState);
- CPPUNIT_TEST(testMinBucketReplicaTrackedAndReportedPerNode);
- CPPUNIT_TEST(nonTrustedReplicasAlsoCountedInModeAny);
- CPPUNIT_TEST(minimumReplicaCountReturnedForNodeInModeAny);
- CPPUNIT_TEST_SUITE_END();
-
+struct BucketDBMetricUpdaterTest : Test {
void visitBucketWith2Copies1Trusted(BucketDBMetricUpdater& metricUpdater);
void visitBucketWith2CopiesBothTrusted(
BucketDBMetricUpdater& metricUpdater);
void visitBucketWith1Copy(BucketDBMetricUpdater& metricUpdater);
-
using NodeToReplicasMap = std::unordered_map<uint16_t, uint32_t>;
NodeToReplicasMap replicaStatsOf(BucketDBMetricUpdater& metricUpdater);
metrics::LoadTypeSet _loadTypes;
-public:
- BucketDBMetricUpdaterTest();
- void testDocAndByteCountsAreUpdated();
- void testBucketsWithTooFewAndTooManyCopies();
- void testBucketsWithVaryingTrustedness();
- void testPickCountsFromTrustedCopy();
- void testPickLargestCopyIfNoTrusted();
- void testCompleteRoundClearsWorkingState();
- void testMinBucketReplicaTrackedAndReportedPerNode();
- void nonTrustedReplicasAlsoCountedInModeAny();
- void minimumReplicaCountReturnedForNodeInModeAny();
+ BucketDBMetricUpdaterTest();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(BucketDBMetricUpdaterTest);
-
BucketDBMetricUpdaterTest::BucketDBMetricUpdaterTest()
{
_loadTypes.push_back(metrics::LoadType(0, "foo"));
@@ -65,7 +40,7 @@ void addNode(BucketInfo& info, uint16_t node, uint32_t crc) {
info.addNode(BucketCopy(1234, node, apiInfo), order);
}
-typedef bool Trusted;
+using Trusted = bool;
BucketInfo
makeInfo(uint32_t copy0Crc)
@@ -86,22 +61,20 @@ makeInfo(uint32_t copy0Crc, uint32_t copy1Crc)
} // anonymous namespace
-void
-BucketDBMetricUpdaterTest::testDocAndByteCountsAreUpdated()
-{
+TEST_F(BucketDBMetricUpdaterTest, doc_and_byte_counts_are_updated) {
BucketDBMetricUpdater metricUpdater;
IdealStateMetricSet ims;
DistributorMetricSet dms(_loadTypes);
- CPPUNIT_ASSERT_EQUAL(false, metricUpdater.hasCompletedRound());
+ EXPECT_FALSE(metricUpdater.hasCompletedRound());
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
metricUpdater.completeRound(false);
- CPPUNIT_ASSERT_EQUAL(true, metricUpdater.hasCompletedRound());
+ EXPECT_TRUE(metricUpdater.hasCompletedRound());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), dms.docsStored.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), dms.bytesStored.getLast());
+ EXPECT_EQ(0, dms.docsStored.getLast());
+ EXPECT_EQ(0, dms.bytesStored.getLast());
{
BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(10));
metricUpdater.visit(e, 1);
@@ -110,10 +83,10 @@ BucketDBMetricUpdaterTest::testDocAndByteCountsAreUpdated()
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(true, metricUpdater.hasCompletedRound());
+ EXPECT_TRUE(metricUpdater.hasCompletedRound());
- CPPUNIT_ASSERT_EQUAL(int64_t(11), dms.docsStored.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(12), dms.bytesStored.getLast());
+ EXPECT_EQ(11, dms.docsStored.getLast());
+ EXPECT_EQ(12, dms.bytesStored.getLast());
{
BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(20));
@@ -123,22 +96,20 @@ BucketDBMetricUpdaterTest::testDocAndByteCountsAreUpdated()
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(32), dms.docsStored.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(34), dms.bytesStored.getLast());
+ EXPECT_EQ(32, dms.docsStored.getLast());
+ EXPECT_EQ(34, dms.bytesStored.getLast());
}
-void
-BucketDBMetricUpdaterTest::testBucketsWithTooFewAndTooManyCopies()
-{
+TEST_F(BucketDBMetricUpdaterTest, buckets_with_too_few_and_too_many_copies) {
BucketDBMetricUpdater metricUpdater;
IdealStateMetricSet ims;
DistributorMetricSet dms(_loadTypes);
metricUpdater.completeRound();
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_toofewcopies.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_toomanycopies.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets.getLast());
+ EXPECT_EQ(0, ims.buckets_toofewcopies.getLast());
+ EXPECT_EQ(0, ims.buckets_toomanycopies.getLast());
+ EXPECT_EQ(0, ims.buckets.getLast());
// 1 copy too little
{
@@ -148,9 +119,9 @@ BucketDBMetricUpdaterTest::testBucketsWithTooFewAndTooManyCopies()
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toofewcopies.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_toomanycopies.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets.getLast());
+ EXPECT_EQ(1, ims.buckets_toofewcopies.getLast());
+ EXPECT_EQ(0, ims.buckets_toomanycopies.getLast());
+ EXPECT_EQ(1, ims.buckets.getLast());
// 1 copy too many
{
@@ -160,9 +131,9 @@ BucketDBMetricUpdaterTest::testBucketsWithTooFewAndTooManyCopies()
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toofewcopies.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toomanycopies.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(2), ims.buckets.getLast());
+ EXPECT_EQ(1, ims.buckets_toofewcopies.getLast());
+ EXPECT_EQ(1, ims.buckets_toomanycopies.getLast());
+ EXPECT_EQ(2, ims.buckets.getLast());
// Right amount of copies, just inc bucket counter.
{
@@ -172,21 +143,19 @@ BucketDBMetricUpdaterTest::testBucketsWithTooFewAndTooManyCopies()
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toofewcopies.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_toomanycopies.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(3), ims.buckets.getLast());
+ EXPECT_EQ(1, ims.buckets_toofewcopies.getLast());
+ EXPECT_EQ(1, ims.buckets_toomanycopies.getLast());
+ EXPECT_EQ(3, ims.buckets.getLast());
}
-void
-BucketDBMetricUpdaterTest::testBucketsWithVaryingTrustedness()
-{
+TEST_F(BucketDBMetricUpdaterTest, buckets_with_varying_trustedness) {
BucketDBMetricUpdater metricUpdater;
IdealStateMetricSet ims;
DistributorMetricSet dms(_loadTypes);
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_notrusted.getLast());
+ EXPECT_EQ(0, ims.buckets_notrusted.getLast());
// Has only trusted (implicit for first added)
{
BucketDatabase::Entry e(document::BucketId(16, 1), makeInfo(100));
@@ -194,7 +163,7 @@ BucketDBMetricUpdaterTest::testBucketsWithVaryingTrustedness()
}
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_notrusted.getLast());
+ EXPECT_EQ(0, ims.buckets_notrusted.getLast());
// Has at least one trusted (implicit for first added)
{
BucketDatabase::Entry e(document::BucketId(16, 2), makeInfo(100, 200));
@@ -202,7 +171,7 @@ BucketDBMetricUpdaterTest::testBucketsWithVaryingTrustedness()
}
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(0), ims.buckets_notrusted.getLast());
+ EXPECT_EQ(0, ims.buckets_notrusted.getLast());
// Has no trusted
{
BucketInfo info(makeInfo(100, 200));
@@ -212,12 +181,10 @@ BucketDBMetricUpdaterTest::testBucketsWithVaryingTrustedness()
}
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(1), ims.buckets_notrusted.getLast());
+ EXPECT_EQ(1, ims.buckets_notrusted.getLast());
}
-void
-BucketDBMetricUpdaterTest::testPickCountsFromTrustedCopy()
-{
+TEST_F(BucketDBMetricUpdaterTest, pick_counts_from_trusted_copy) {
BucketDBMetricUpdater metricUpdater;
IdealStateMetricSet ims;
DistributorMetricSet dms(_loadTypes);
@@ -228,13 +195,11 @@ BucketDBMetricUpdaterTest::testPickCountsFromTrustedCopy()
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(101), dms.docsStored.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(102), dms.bytesStored.getLast());
+ EXPECT_EQ(101, dms.docsStored.getLast());
+ EXPECT_EQ(102, dms.bytesStored.getLast());
}
-void
-BucketDBMetricUpdaterTest::testPickLargestCopyIfNoTrusted()
-{
+TEST_F(BucketDBMetricUpdaterTest, pick_largest_copy_if_no_trusted) {
BucketDBMetricUpdater metricUpdater;
IdealStateMetricSet ims;
DistributorMetricSet dms(_loadTypes);
@@ -247,13 +212,11 @@ BucketDBMetricUpdaterTest::testPickLargestCopyIfNoTrusted()
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(201), dms.docsStored.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(202), dms.bytesStored.getLast());
+ EXPECT_EQ(201, dms.docsStored.getLast());
+ EXPECT_EQ(202, dms.bytesStored.getLast());
}
-void
-BucketDBMetricUpdaterTest::testCompleteRoundClearsWorkingState()
-{
+TEST_F(BucketDBMetricUpdaterTest, complete_round_clears_working_state) {
BucketDBMetricUpdater metricUpdater;
IdealStateMetricSet ims;
DistributorMetricSet dms(_loadTypes);
@@ -265,13 +228,13 @@ BucketDBMetricUpdaterTest::testCompleteRoundClearsWorkingState()
metricUpdater.completeRound();
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(11), dms.docsStored.getLast());
+ EXPECT_EQ(11, dms.docsStored.getLast());
// Completing the round again with no visits having been done will
// propagate an empty working state to the complete state.
metricUpdater.completeRound();
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
- CPPUNIT_ASSERT_EQUAL(int64_t(0), dms.docsStored.getLast());
+ EXPECT_EQ(0, dms.docsStored.getLast());
}
// Replicas on nodes 0 and 1.
@@ -316,8 +279,7 @@ BucketDBMetricUpdaterTest::replicaStatsOf(BucketDBMetricUpdater& metricUpdater)
return metricUpdater.getLastCompleteStats()._minBucketReplica;
}
-void BucketDBMetricUpdaterTest::testMinBucketReplicaTrackedAndReportedPerNode()
-{
+TEST_F(BucketDBMetricUpdaterTest, min_bucket_replica_tracked_and_reported_per_node) {
BucketDBMetricUpdater metricUpdater;
// Node 0 and 1 should have min replica 1, while node 2 should have min
@@ -325,26 +287,22 @@ void BucketDBMetricUpdaterTest::testMinBucketReplicaTrackedAndReportedPerNode()
visitBucketWith2Copies1Trusted(metricUpdater);
visitBucketWith2CopiesBothTrusted(metricUpdater);
- CPPUNIT_ASSERT_EQUAL(NodeToReplicasMap({{0, 1}, {1, 1}, {2, 2}}),
- replicaStatsOf(metricUpdater));
+ EXPECT_EQ(NodeToReplicasMap({{0, 1}, {1, 1}, {2, 2}}),
+ replicaStatsOf(metricUpdater));
}
-void
-BucketDBMetricUpdaterTest::nonTrustedReplicasAlsoCountedInModeAny()
-{
+TEST_F(BucketDBMetricUpdaterTest, non_trusted_replicas_also_counted_in_mode_any) {
BucketDBMetricUpdater metricUpdater;
using CountingMode = BucketDBMetricUpdater::ReplicaCountingMode;
metricUpdater.setMinimumReplicaCountingMode(CountingMode::ANY);
visitBucketWith2Copies1Trusted(metricUpdater);
visitBucketWith2CopiesBothTrusted(metricUpdater);
- CPPUNIT_ASSERT_EQUAL(NodeToReplicasMap({{0, 2}, {1, 2}, {2, 2}}),
- replicaStatsOf(metricUpdater));
+ EXPECT_EQ(NodeToReplicasMap({{0, 2}, {1, 2}, {2, 2}}),
+ replicaStatsOf(metricUpdater));
}
-void
-BucketDBMetricUpdaterTest::minimumReplicaCountReturnedForNodeInModeAny()
-{
+TEST_F(BucketDBMetricUpdaterTest, minimum_replica_count_returned_for_node_in_mode_any) {
BucketDBMetricUpdater metricUpdater;
using CountingMode = BucketDBMetricUpdater::ReplicaCountingMode;
metricUpdater.setMinimumReplicaCountingMode(CountingMode::ANY);
@@ -352,9 +310,8 @@ BucketDBMetricUpdaterTest::minimumReplicaCountReturnedForNodeInModeAny()
visitBucketWith1Copy(metricUpdater);
// Node 2 has a bucket with only 1 replica.
- CPPUNIT_ASSERT_EQUAL(NodeToReplicasMap({{0, 2}, {2, 1}}),
- replicaStatsOf(metricUpdater));
+ EXPECT_EQ(NodeToReplicasMap({{0, 2}, {2, 1}}),
+ replicaStatsOf(metricUpdater));
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/bucketdbupdatertest.cpp
index 2ddf41236c9..321b0cc3bba 100644
--- a/storage/src/tests/distributor/bucketdbupdatertest.cpp
+++ b/storage/src/tests/distributor/bucketdbupdatertest.cpp
@@ -270,16 +270,16 @@ public:
}
};
- void sortSentMessagesByIndex(MessageSenderStub& sender,
+ void sortSentMessagesByIndex(DistributorMessageSenderStub& sender,
size_t sortFromOffset = 0)
{
- std::sort(sender.commands.begin() + sortFromOffset,
- sender.commands.end(),
+ std::sort(sender.commands().begin() + sortFromOffset,
+ sender.commands().end(),
OrderByIncreasingNodeIndex());
}
void setSystemState(const lib::ClusterState& state) {
- const size_t sizeBeforeState = _sender.commands.size();
+ const size_t sizeBeforeState = _sender.commands().size();
getBucketDBUpdater().onSetSystemState(
std::make_shared<api::SetSystemStateCommand>(state));
// A lot of test logic has the assumption that all messages sent as a
@@ -291,7 +291,7 @@ public:
}
void set_cluster_state_bundle(const lib::ClusterStateBundle& state) {
- const size_t sizeBeforeState = _sender.commands.size();
+ const size_t sizeBeforeState = _sender.commands().size();
getBucketDBUpdater().onSetSystemState(
std::make_shared<api::SetSystemStateCommand>(state));
sortSentMessagesByIndex(_sender, sizeBeforeState);
@@ -303,8 +303,8 @@ public:
}
void assert_has_activate_cluster_state_reply_with_actual_version(uint32_t version) {
- ASSERT_EQ(size_t(1), _sender.replies.size());
- auto* response = dynamic_cast<api::ActivateClusterStateVersionReply*>(_sender.replies.back().get());
+ ASSERT_EQ(size_t(1), _sender.replies().size());
+ auto* response = dynamic_cast<api::ActivateClusterStateVersionReply*>(_sender.replies().back().get());
ASSERT_TRUE(response != nullptr);
ASSERT_EQ(version, response->actualVersion());
_sender.clear();
@@ -315,10 +315,10 @@ public:
uint32_t bucketCount = 1,
uint32_t invalidBucketCount = 0)
{
- ASSERT_EQ(expectedMsgs, _sender.commands.size());
+ ASSERT_EQ(expectedMsgs, _sender.commands().size());
- for (uint32_t i = 0; i < _sender.commands.size(); i++) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(state, *_sender.commands[i],
+ for (uint32_t i = 0; i < _sender.commands().size(); i++) {
+ ASSERT_NO_FATAL_FAILURE(fakeBucketReply(state, *_sender.command(i),
bucketCount, invalidBucketCount));
}
}
@@ -356,9 +356,9 @@ public:
setSystemState(newState);
for (uint32_t i=0; i< messageCount(numStorageNodes); i++) {
- ASSERT_EQ(_sender.commands[i]->getType(), MessageType::REQUESTBUCKETINFO);
+ ASSERT_EQ(_sender.command(i)->getType(), MessageType::REQUESTBUCKETINFO);
- const api::StorageMessageAddress *address = _sender.commands[i]->getAddress();
+ const api::StorageMessageAddress *address = _sender.command(i)->getAddress();
ASSERT_EQ((uint32_t)(i / _bucketSpaces.size()), (uint32_t)address->getIndex());
}
}
@@ -373,7 +373,7 @@ public:
lib::ClusterState newState(state);
for (uint32_t i=0; i< messageCount(numStorageNodes); i++) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(newState, *_sender.commands[i], numBuckets));
+ ASSERT_NO_FATAL_FAILURE(fakeBucketReply(newState, *_sender.command(i), numBuckets));
}
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, state));
}
@@ -495,7 +495,7 @@ public:
}
struct PendingClusterStateFixture {
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
framework::defaultimplementation::FakeClock clock;
std::unique_ptr<PendingClusterState> state;
@@ -556,20 +556,20 @@ BucketDBUpdaterTest::BucketDBUpdaterTest()
{
}
-TEST_F(BucketDBUpdaterTest, testNormalUsage) {
+TEST_F(BucketDBUpdaterTest, normal_usage) {
setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
- ASSERT_EQ(messageCount(3), _sender.commands.size());
+ ASSERT_EQ(messageCount(3), _sender.commands().size());
// Ensure distribution hash is set correctly
ASSERT_EQ(
defaultDistributorBucketSpace().getDistribution()
.getNodeGraph().getDistributionConfigHash(),
dynamic_cast<const RequestBucketInfoCommand&>(
- *_sender.commands[0]).getDistributionHash());
+ *_sender.command(0)).getDistributionHash());
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
- *_sender.commands[0], 10));
+ *_sender.command(0), 10));
_sender.clear();
@@ -577,9 +577,9 @@ TEST_F(BucketDBUpdaterTest, testNormalUsage) {
// change is only implemented after completion of previous cluster state
setSystemState(lib::ClusterState("distributor:2 .0.s:i storage:3"));
- ASSERT_EQ(messageCount(3), _sender.commands.size());
+ ASSERT_EQ(messageCount(3), _sender.commands().size());
// Expect reply of first set SystemState request.
- ASSERT_EQ(size_t(1), _sender.replies.size());
+ ASSERT_EQ(size_t(1), _sender.replies().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(
lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
@@ -587,74 +587,74 @@ TEST_F(BucketDBUpdaterTest, testNormalUsage) {
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(10, "distributor:2 storage:3"));
}
-TEST_F(BucketDBUpdaterTest, testDistributorChange) {
+TEST_F(BucketDBUpdaterTest, distributor_change) {
int numBuckets = 100;
// First sends request
setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
- ASSERT_EQ(messageCount(3), _sender.commands.size());
+ ASSERT_EQ(messageCount(3), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
messageCount(3), numBuckets));
_sender.clear();
// No change from initializing to up (when done with last job)
setSystemState(lib::ClusterState("distributor:2 storage:3"));
- ASSERT_EQ(size_t(0), _sender.commands.size());
+ ASSERT_EQ(size_t(0), _sender.commands().size());
_sender.clear();
// Adding node. No new read requests, but buckets thrown
setSystemState(lib::ClusterState("distributor:3 storage:3"));
- ASSERT_EQ(size_t(0), _sender.commands.size());
+ ASSERT_EQ(size_t(0), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:3 storage:3"));
_sender.clear();
// Removing distributor. Need to refetch new data from all nodes.
setSystemState(lib::ClusterState("distributor:2 storage:3"));
- ASSERT_EQ(messageCount(3), _sender.commands.size());
+ ASSERT_EQ(messageCount(3), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:2 storage:3"),
messageCount(3), numBuckets));
_sender.clear();
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:2 storage:3"));
}
-TEST_F(BucketDBUpdaterTest, testDistributorChangeWithGrouping) {
+TEST_F(BucketDBUpdaterTest, distributor_change_with_grouping) {
std::string distConfig(getDistConfig6Nodes2Groups());
setDistribution(distConfig);
int numBuckets = 100;
setSystemState(lib::ClusterState("distributor:6 storage:6"));
- ASSERT_EQ(messageCount(6), _sender.commands.size());
+ ASSERT_EQ(messageCount(6), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:6 storage:6"),
messageCount(6), numBuckets));
_sender.clear();
// Distributor going down in other group, no change
setSystemState(lib::ClusterState("distributor:6 .5.s:d storage:6"));
- ASSERT_EQ(size_t(0), _sender.commands.size());
+ ASSERT_EQ(size_t(0), _sender.commands().size());
_sender.clear();
setSystemState(lib::ClusterState("distributor:6 storage:6"));
- ASSERT_EQ(size_t(0), _sender.commands.size());
+ ASSERT_EQ(size_t(0), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:6 storage:6"));
_sender.clear();
// Unchanged grouping cause no change.
setDistribution(distConfig);
- ASSERT_EQ(size_t(0), _sender.commands.size());
+ ASSERT_EQ(size_t(0), _sender.commands().size());
// Changed grouping cause change
setDistribution(getDistConfig6Nodes4Groups());
- ASSERT_EQ(messageCount(6), _sender.commands.size());
+ ASSERT_EQ(messageCount(6), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, testNormalUsageInitializing) {
+TEST_F(BucketDBUpdaterTest, normal_usage_initializing) {
setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1 .0.s:i"));
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
// Not yet passing on system state.
- ASSERT_EQ(size_t(0), _senderDown.commands.size());
+ ASSERT_EQ(size_t(0), _senderDown.commands().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:1 .0.s:i storage:1"),
_bucketSpaces.size(), 10, 10));
@@ -666,7 +666,7 @@ TEST_F(BucketDBUpdaterTest, testNormalUsageInitializing) {
}
// Pass on cluster state and recheck buckets now.
- ASSERT_EQ(size_t(1), _senderDown.commands.size());
+ ASSERT_EQ(size_t(1), _senderDown.commands().size());
_sender.clear();
_senderDown.clear();
@@ -674,28 +674,28 @@ TEST_F(BucketDBUpdaterTest, testNormalUsageInitializing) {
setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1"));
// Send a new request bucket info up.
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:1 .0.s:i storage:1"),
_bucketSpaces.size(), 20));
// Pass on cluster state and recheck buckets now.
- ASSERT_EQ(size_t(1), _senderDown.commands.size());
+ ASSERT_EQ(size_t(1), _senderDown.commands().size());
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(20, "distributor:1 storage:1"));
}
-TEST_F(BucketDBUpdaterTest, testFailedRequestBucketInfo) {
+TEST_F(BucketDBUpdaterTest, failed_request_bucket_info) {
setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1"));
// 2 messages sent up: 1 to the nodes, and one reply to the setsystemstate.
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
{
for (uint32_t i = 0; i < _bucketSpaces.size(); ++i) {
std::shared_ptr<api::RequestBucketInfoReply> reply =
getFakeBucketReply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
- *((RequestBucketInfoCommand*)_sender.commands[i].get()),
+ *((RequestBucketInfoCommand*)_sender.command(i).get()),
0,
10);
reply->setResult(api::ReturnCode::NOT_CONNECTED);
@@ -710,11 +710,11 @@ TEST_F(BucketDBUpdaterTest, testFailedRequestBucketInfo) {
// Should be resent.
ASSERT_EQ(getRequestBucketInfoStrings(messageCount(2)), _sender.getCommands());
- ASSERT_EQ(size_t(0), _senderDown.commands.size());
+ ASSERT_EQ(size_t(0), _senderDown.commands().size());
for (uint32_t i = 0; i < _bucketSpaces.size(); ++i) {
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
- *_sender.commands[_bucketSpaces.size() + i], 10));
+ *_sender.command(_bucketSpaces.size() + i), 10));
}
for (int i=0; i<10; i++) {
@@ -727,19 +727,19 @@ TEST_F(BucketDBUpdaterTest, testFailedRequestBucketInfo) {
EXPECT_EQ(std::string("Set system state"), _senderDown.getCommands());
}
-TEST_F(BucketDBUpdaterTest, testDownWhileInit) {
+TEST_F(BucketDBUpdaterTest, down_while_init) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
- *_sender.commands[0], 5));
+ *_sender.command(0), 5));
setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:d"));
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
- *_sender.commands[2], 5));
+ *_sender.command(2), 5));
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
- *_sender.commands[1], 5));
+ *_sender.command(1), 5));
}
bool
@@ -790,7 +790,7 @@ BucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes)
return res;
}
-TEST_F(BucketDBUpdaterTest, testNodeDown) {
+TEST_F(BucketDBUpdaterTest, node_down) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
enableDistributorClusterState("distributor:1 storage:3");
@@ -805,7 +805,7 @@ TEST_F(BucketDBUpdaterTest, testNodeDown) {
EXPECT_FALSE(bucketExistsThatHasNode(100, 1));
}
-TEST_F(BucketDBUpdaterTest, testStorageNodeInMaintenanceClearsBucketsForNode) {
+TEST_F(BucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
enableDistributorClusterState("distributor:1 storage:3");
@@ -820,7 +820,7 @@ TEST_F(BucketDBUpdaterTest, testStorageNodeInMaintenanceClearsBucketsForNode) {
EXPECT_FALSE(bucketExistsThatHasNode(100, 1));
}
-TEST_F(BucketDBUpdaterTest, testNodeDownCopiesGetInSync) {
+TEST_F(BucketDBUpdaterTest, node_down_copies_get_in_sync) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
lib::ClusterState systemState("distributor:1 storage:3");
@@ -837,35 +837,35 @@ TEST_F(BucketDBUpdaterTest, testNodeDownCopiesGetInSync) {
dumpBucket(bid));
}
-TEST_F(BucketDBUpdaterTest, testInitializingWhileRecheck) {
+TEST_F(BucketDBUpdaterTest, initializing_while_recheck) {
lib::ClusterState systemState("distributor:1 storage:2 .0.s:i .0.i:0.1");
setSystemState(systemState);
- ASSERT_EQ(messageCount(2), _sender.commands.size());
- ASSERT_EQ(size_t(0), _senderDown.commands.size());
+ ASSERT_EQ(messageCount(2), _sender.commands().size());
+ ASSERT_EQ(size_t(0), _senderDown.commands().size());
getBucketDBUpdater().recheckBucketInfo(1, makeDocumentBucket(document::BucketId(16, 3)));
for (uint32_t i = 0; i < messageCount(2); ++i) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(systemState, *_sender.commands[i], 100));
+ ASSERT_NO_FATAL_FAILURE(fakeBucketReply(systemState, *_sender.command(i), 100));
}
// Now we can pass on system state.
- ASSERT_EQ(size_t(1), _senderDown.commands.size());
- EXPECT_EQ(MessageType::SETSYSTEMSTATE, _senderDown.commands[0]->getType());
+ ASSERT_EQ(size_t(1), _senderDown.commands().size());
+ EXPECT_EQ(MessageType::SETSYSTEMSTATE, _senderDown.command(0)->getType());
}
-TEST_F(BucketDBUpdaterTest, testBitChange) {
+TEST_F(BucketDBUpdaterTest, bit_change) {
std::vector<document::BucketId> bucketlist;
{
setSystemState(lib::ClusterState("bits:14 storage:1 distributor:2"));
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
- ASSERT_EQ(_sender.commands[bsi]->getType(), MessageType::REQUESTBUCKETINFO);
- const auto &req = dynamic_cast<const RequestBucketInfoCommand &>(*_sender.commands[bsi]);
+ ASSERT_EQ(_sender.command(bsi)->getType(), MessageType::REQUESTBUCKETINFO);
+ const auto &req = dynamic_cast<const RequestBucketInfoCommand &>(*_sender.command(bsi));
auto sreply = std::make_shared<RequestBucketInfoReply>(req);
sreply->setAddress(storageAddress(0));
auto& vec = sreply->getBucketInfo();
@@ -904,11 +904,11 @@ TEST_F(BucketDBUpdaterTest, testBitChange) {
_sender.clear();
setSystemState(lib::ClusterState("bits:16 storage:1 distributor:2"));
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
- ASSERT_EQ(_sender.commands[bsi]->getType(), MessageType::REQUESTBUCKETINFO);
- const auto &req = dynamic_cast<const RequestBucketInfoCommand &>(*_sender.commands[bsi]);
+ ASSERT_EQ(_sender.command(bsi)->getType(), MessageType::REQUESTBUCKETINFO);
+ const auto &req = dynamic_cast<const RequestBucketInfoCommand &>(*_sender.command(bsi));
auto sreply = std::make_shared<RequestBucketInfoReply>(req);
sreply->setAddress(storageAddress(0));
sreply->setResult(api::ReturnCode::OK);
@@ -954,22 +954,22 @@ TEST_F(BucketDBUpdaterTest, testBitChange) {
}
};
-TEST_F(BucketDBUpdaterTest, testRecheckNodeWithFailure) {
+TEST_F(BucketDBUpdaterTest, recheck_node_with_failure) {
ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5));
_sender.clear();
getBucketDBUpdater().recheckBucketInfo(1, makeDocumentBucket(document::BucketId(16, 3)));
- ASSERT_EQ(size_t(1), _sender.commands.size());
+ ASSERT_EQ(size_t(1), _sender.commands().size());
uint16_t index = 0;
{
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]);
+ auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
ASSERT_EQ(size_t(1), rbi.getBuckets().size());
EXPECT_EQ(document::BucketId(16, 3), rbi.getBuckets()[0]);
auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
- const api::StorageMessageAddress *address = _sender.commands[0]->getAddress();
+ const api::StorageMessageAddress *address = _sender.command(0)->getAddress();
index = address->getIndex();
reply->setResult(api::ReturnCode::NOT_CONNECTED);
getBucketDBUpdater().onRequestBucketInfoReply(reply);
@@ -978,14 +978,14 @@ TEST_F(BucketDBUpdaterTest, testRecheckNodeWithFailure) {
getBucketDBUpdater().resendDelayedMessages();
}
- ASSERT_EQ(size_t(2), _sender.commands.size());
+ ASSERT_EQ(size_t(2), _sender.commands().size());
setSystemState(
lib::ClusterState(vespalib::make_string("distributor:1 storage:3 .%d.s:d", index)));
// Recheck bucket.
{
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[1]);
+ auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(1));
ASSERT_EQ(size_t(1), rbi.getBuckets().size());
EXPECT_EQ(document::BucketId(16, 3), rbi.getBuckets()[0]);
auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
@@ -994,19 +994,19 @@ TEST_F(BucketDBUpdaterTest, testRecheckNodeWithFailure) {
}
// Should not retry since node is down.
- EXPECT_EQ(size_t(2), _sender.commands.size());
+ EXPECT_EQ(size_t(2), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, testRecheckNode) {
+TEST_F(BucketDBUpdaterTest, recheck_node) {
ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5));
_sender.clear();
getBucketDBUpdater().recheckBucketInfo(1, makeDocumentBucket(document::BucketId(16, 3)));
- ASSERT_EQ(size_t(1), _sender.commands.size());
+ ASSERT_EQ(size_t(1), _sender.commands().size());
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]);
+ auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
ASSERT_EQ(size_t(1), rbi.getBuckets().size());
EXPECT_EQ(document::BucketId(16, 3), rbi.getBuckets()[0]);
@@ -1035,11 +1035,11 @@ TEST_F(BucketDBUpdaterTest, testRecheckNode) {
EXPECT_EQ(api::BucketInfo(20,10,12, 50, 60, true, true), copy->getBucketInfo());
}
-TEST_F(BucketDBUpdaterTest, testNotifyBucketChange) {
+TEST_F(BucketDBUpdaterTest, notify_bucket_change) {
enableDistributorClusterState("distributor:1 storage:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1234");
- _sender.replies.clear();
+ _sender.replies().clear();
{
api::BucketInfo info(1, 2, 3, 4, 5, true, true);
@@ -1058,11 +1058,11 @@ TEST_F(BucketDBUpdaterTest, testNotifyBucketChange) {
}
// Must receive reply
- ASSERT_EQ(size_t(2), _sender.replies.size());
+ ASSERT_EQ(size_t(2), _sender.replies().size());
for (int i = 0; i < 2; ++i) {
ASSERT_EQ(MessageType::NOTIFYBUCKETCHANGE_REPLY,
- _sender.replies[i]->getType());
+ _sender.reply(i)->getType());
}
// No database update until request bucket info replies have been received.
@@ -1072,14 +1072,14 @@ TEST_F(BucketDBUpdaterTest, testNotifyBucketChange) {
dumpBucket(document::BucketId(16, 1)));
EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(document::BucketId(16, 2)));
- ASSERT_EQ(size_t(2), _sender.commands.size());
+ ASSERT_EQ(size_t(2), _sender.commands().size());
std::vector<api::BucketInfo> infos;
infos.push_back(api::BucketInfo(4567, 200, 2000, 400, 4000, true, true));
infos.push_back(api::BucketInfo(8999, 300, 3000, 500, 5000, false, false));
for (int i = 0; i < 2; ++i) {
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[i]);
+ auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(i));
ASSERT_EQ(size_t(1), rbi.getBuckets().size());
EXPECT_EQ(document::BucketId(16, i + 1), rbi.getBuckets()[0]);
@@ -1098,12 +1098,12 @@ TEST_F(BucketDBUpdaterTest, testNotifyBucketChange) {
dumpBucket(document::BucketId(16, 2)));
}
-TEST_F(BucketDBUpdaterTest, testNotifyBucketChangeFromNodeDown) {
+TEST_F(BucketDBUpdaterTest, notify_bucket_change_from_node_down) {
enableDistributorClusterState("distributor:1 storage:2");
addNodesToBucketDB(document::BucketId(16, 1), "1=1234");
- _sender.replies.clear();
+ _sender.replies().clear();
{
api::BucketInfo info(8999, 300, 3000, 500, 5000, false, false);
@@ -1120,14 +1120,14 @@ TEST_F(BucketDBUpdaterTest, testNotifyBucketChangeFromNodeDown) {
"node(idx=1,crc=0x4d2,docs=1234/1234,bytes=1234/1234,trusted=false,active=false,ready=false)"),
dumpBucket(document::BucketId(16, 1)));
- ASSERT_EQ(size_t(1), _sender.replies.size());
- ASSERT_EQ(MessageType::NOTIFYBUCKETCHANGE_REPLY, _sender.replies[0]->getType());
+ ASSERT_EQ(size_t(1), _sender.replies().size());
+ ASSERT_EQ(MessageType::NOTIFYBUCKETCHANGE_REPLY, _sender.reply(0)->getType());
// Currently, this pending operation will be auto-flushed when the cluster state
// changes so the behavior is still correct. Keep this test around to prevent
// regressions here.
- ASSERT_EQ(size_t(1), _sender.commands.size());
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]);
+ ASSERT_EQ(size_t(1), _sender.commands().size());
+ auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
ASSERT_EQ(size_t(1), rbi.getBuckets().size());
EXPECT_EQ(document::BucketId(16, 1), rbi.getBuckets()[0]);
@@ -1152,9 +1152,9 @@ TEST_F(BucketDBUpdaterTest, testNotifyBucketChangeFromNodeDown) {
* distributor in the pending state but not by the current state would be
* discarded when attempted inserted into the bucket database.
*/
-TEST_F(BucketDBUpdaterTest, testNotifyChangeWithPendingStateQueuesBucketInfoRequests) {
+TEST_F(BucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) {
setSystemState(lib::ClusterState("distributor:1 storage:1"));
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
{
api::BucketInfo info(8999, 300, 3000, 500, 5000, false, false);
@@ -1164,15 +1164,15 @@ TEST_F(BucketDBUpdaterTest, testNotifyChangeWithPendingStateQueuesBucketInfoRequ
getBucketDBUpdater().onNotifyBucketChange(cmd);
}
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:1 storage:1"),
_bucketSpaces.size(), 10));
- ASSERT_EQ(_bucketSpaces.size() + 1, _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size() + 1, _sender.commands().size());
{
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[_bucketSpaces.size()]);
+ auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(_bucketSpaces.size()));
ASSERT_EQ(size_t(1), rbi.getBuckets().size());
EXPECT_EQ(document::BucketId(16, 1), rbi.getBuckets()[0]);
}
@@ -1184,14 +1184,14 @@ TEST_F(BucketDBUpdaterTest, testNotifyChangeWithPendingStateQueuesBucketInfoRequ
uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 1;
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(state, expectedMsgs, dummyBucketsToReturn));
}
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
{
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.commands[0]);
+ auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
EXPECT_EQ(size_t(0), rbi.getBuckets().size());
}
}
-TEST_F(BucketDBUpdaterTest, testMergeReply) {
+TEST_F(BucketDBUpdaterTest, merge_reply) {
enableDistributorClusterState("distributor:1 storage:3");
addNodesToBucketDB(document::BucketId(16, 1234),
@@ -1209,10 +1209,10 @@ TEST_F(BucketDBUpdaterTest, testMergeReply) {
_sender.clear();
getBucketDBUpdater().onMergeBucketReply(reply);
- ASSERT_EQ(size_t(3), _sender.commands.size());
+ ASSERT_EQ(size_t(3), _sender.commands().size());
for (uint32_t i = 0; i < 3; i++) {
- auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.commands[i]);
+ auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
ASSERT_TRUE(req.get() != nullptr);
ASSERT_EQ(size_t(1), req->getBuckets().size());
@@ -1233,7 +1233,7 @@ TEST_F(BucketDBUpdaterTest, testMergeReply) {
dumpBucket(document::BucketId(16, 1234)));
};
-TEST_F(BucketDBUpdaterTest, testMergeReplyNodeDown) {
+TEST_F(BucketDBUpdaterTest, merge_reply_node_down) {
enableDistributorClusterState("distributor:1 storage:3");
std::vector<api::MergeBucketCommand::Node> nodes;
@@ -1252,10 +1252,10 @@ TEST_F(BucketDBUpdaterTest, testMergeReplyNodeDown) {
_sender.clear();
getBucketDBUpdater().onMergeBucketReply(reply);
- ASSERT_EQ(size_t(2), _sender.commands.size());
+ ASSERT_EQ(size_t(2), _sender.commands().size());
for (uint32_t i = 0; i < 2; i++) {
- auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.commands[i]);
+ auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
ASSERT_TRUE(req.get() != nullptr);
ASSERT_EQ(size_t(1), req->getBuckets().size());
@@ -1275,7 +1275,7 @@ TEST_F(BucketDBUpdaterTest, testMergeReplyNodeDown) {
dumpBucket(document::BucketId(16, 1234)));
};
-TEST_F(BucketDBUpdaterTest, testMergeReplyNodeDownAfterRequestSent) {
+TEST_F(BucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
enableDistributorClusterState("distributor:1 storage:3");
std::vector<api::MergeBucketCommand::Node> nodes;
@@ -1292,12 +1292,12 @@ TEST_F(BucketDBUpdaterTest, testMergeReplyNodeDownAfterRequestSent) {
_sender.clear();
getBucketDBUpdater().onMergeBucketReply(reply);
- ASSERT_EQ(size_t(3), _sender.commands.size());
+ ASSERT_EQ(size_t(3), _sender.commands().size());
setSystemState(lib::ClusterState("distributor:1 storage:2"));
for (uint32_t i = 0; i < 3; i++) {
- auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.commands[i]);
+ auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
ASSERT_TRUE(req.get() != nullptr);
ASSERT_EQ(size_t(1), req->getBuckets().size());
@@ -1318,7 +1318,7 @@ TEST_F(BucketDBUpdaterTest, testMergeReplyNodeDownAfterRequestSent) {
};
-TEST_F(BucketDBUpdaterTest, testFlush) {
+TEST_F(BucketDBUpdaterTest, flush) {
enableDistributorClusterState("distributor:1 storage:3");
_sender.clear();
@@ -1336,12 +1336,12 @@ TEST_F(BucketDBUpdaterTest, testFlush) {
_sender.clear();
getBucketDBUpdater().onMergeBucketReply(reply);
- ASSERT_EQ(size_t(3), _sender.commands.size());
- ASSERT_EQ(size_t(0), _senderDown.replies.size());
+ ASSERT_EQ(size_t(3), _sender.commands().size());
+ ASSERT_EQ(size_t(0), _senderDown.replies().size());
getBucketDBUpdater().flush();
// Flushing should drop all merge bucket replies
- EXPECT_EQ(size_t(0), _senderDown.commands.size());
+ EXPECT_EQ(size_t(0), _senderDown.commands().size());
}
std::string
@@ -1355,8 +1355,8 @@ BucketDBUpdaterTest::getSentNodes(
sortSentMessagesByIndex(fixture->sender);
std::ostringstream ost;
- for (uint32_t i = 0; i < fixture->sender.commands.size(); i++) {
- auto& req = dynamic_cast<RequestBucketInfoCommand&>(*fixture->sender.commands[i]);
+ for (uint32_t i = 0; i < fixture->sender.commands().size(); i++) {
+ auto& req = dynamic_cast<RequestBucketInfoCommand&>(*fixture->sender.command(i));
if (i > 0) {
ost << ",";
@@ -1372,7 +1372,7 @@ std::string
BucketDBUpdaterTest::getSentNodesDistributionChanged(
const std::string& oldClusterState)
{
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
framework::defaultimplementation::FakeClock clock;
ClusterInformation::CSP clusterInfo(createClusterInfo(oldClusterState));
@@ -1383,8 +1383,8 @@ BucketDBUpdaterTest::getSentNodesDistributionChanged(
sortSentMessagesByIndex(sender);
std::ostringstream ost;
- for (uint32_t i = 0; i < sender.commands.size(); i++) {
- auto& req = dynamic_cast<RequestBucketInfoCommand&>(*sender.commands[i]);
+ for (uint32_t i = 0; i < sender.commands().size(); i++) {
+ auto& req = dynamic_cast<RequestBucketInfoCommand&>(*sender.command(i));
if (i > 0) {
ost << ",";
@@ -1396,7 +1396,7 @@ BucketDBUpdaterTest::getSentNodesDistributionChanged(
return ost.str();
}
-TEST_F(BucketDBUpdaterTest, testPendingClusterStateSendMessages) {
+TEST_F(BucketDBUpdaterTest, pending_cluster_state_send_messages) {
EXPECT_EQ(getNodeList({0, 1, 2}),
getSentNodes("cluster:d",
"distributor:1 storage:3"));
@@ -1501,8 +1501,8 @@ TEST_F(BucketDBUpdaterTest, testPendingClusterStateSendMessages) {
"distributor:3 storage:3 .1.s:m"));
};
-TEST_F(BucketDBUpdaterTest, testPendingClusterStateReceive) {
- MessageSenderStub sender;
+TEST_F(BucketDBUpdaterTest, pending_cluster_state_receive) {
+ DistributorMessageSenderStub sender;
auto cmd(std::make_shared<api::SetSystemStateCommand>(
lib::ClusterState("distributor:1 storage:3")));
@@ -1515,13 +1515,13 @@ TEST_F(BucketDBUpdaterTest, testPendingClusterStateReceive) {
clock, clusterInfo, sender, getBucketSpaceRepo(),
cmd, outdatedNodesMap, api::Timestamp(1)));
- ASSERT_EQ(messageCount(3), sender.commands.size());
+ ASSERT_EQ(messageCount(3), sender.commands().size());
sortSentMessagesByIndex(sender);
std::ostringstream ost;
- for (uint32_t i = 0; i < sender.commands.size(); i++) {
- auto* req = dynamic_cast<RequestBucketInfoCommand*>(sender.commands[i].get());
+ for (uint32_t i = 0; i < sender.commands().size(); i++) {
+ auto* req = dynamic_cast<RequestBucketInfoCommand*>(sender.command(i).get());
ASSERT_TRUE(req != nullptr);
auto rep = std::make_shared<RequestBucketInfoReply>(*req);
@@ -1532,14 +1532,14 @@ TEST_F(BucketDBUpdaterTest, testPendingClusterStateReceive) {
api::BucketInfo(i, i, i, i, i)));
ASSERT_TRUE(state->onRequestBucketInfoReply(rep));
- ASSERT_EQ((i == (sender.commands.size() - 1)), state->done());
+ ASSERT_EQ((i == (sender.commands().size() - 1)), state->done());
}
auto& pendingTransition = state->getPendingBucketSpaceDbTransition(makeBucketSpace());
EXPECT_EQ(3, (int)pendingTransition.results().size());
}
-TEST_F(BucketDBUpdaterTest, testPendingClusterStateWithGroupDown) {
+TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down) {
std::string config(getDistConfig6Nodes4Groups());
config += "distributor_auto_ownership_transfer_on_whole_group_down true\n";
setDistribution(config);
@@ -1558,7 +1558,7 @@ TEST_F(BucketDBUpdaterTest, testPendingClusterStateWithGroupDown) {
"distributor:6 .2.s:d storage:6"));
}
-TEST_F(BucketDBUpdaterTest, testPendingClusterStateWithGroupDownAndNoHandover) {
+TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) {
std::string config(getDistConfig6Nodes4Groups());
config += "distributor_auto_ownership_transfer_on_whole_group_down false\n";
setDistribution(config);
@@ -1654,7 +1654,7 @@ BucketDBUpdaterTest::mergeBucketLists(
framework::defaultimplementation::FakeClock clock;
framework::MilliSecTimer timer(clock);
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
OutdatedNodesMap outdatedNodesMap;
{
@@ -1711,7 +1711,7 @@ BucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
includeBucketInfo);
}
-TEST_F(BucketDBUpdaterTest, testPendingClusterStateMerge) {
+TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) {
// Simple initializing case - ask all nodes for info
EXPECT_EQ(
// Result is on the form: [bucket w/o count bits]:[node indexes]|..
@@ -1781,7 +1781,7 @@ TEST_F(BucketDBUpdaterTest, testPendingClusterStateMerge) {
mergeBucketLists("", "0:5/0/0/0|1:5/2/3/4", true));
}
-TEST_F(BucketDBUpdaterTest, testPendingClusterStateMergeReplicaChanged) {
+TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
// Node went from initializing to up and non-invalid bucket changed.
EXPECT_EQ(
std::string("2:0/2/3/4/t|3:0/2/4/6/t|"),
@@ -1793,7 +1793,7 @@ TEST_F(BucketDBUpdaterTest, testPendingClusterStateMergeReplicaChanged) {
true));
}
-TEST_F(BucketDBUpdaterTest, testNoDbResurrectionForBucketNotOwnedInCurrentState) {
+TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) {
document::BucketId bucket(16, 3);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1804,10 +1804,10 @@ TEST_F(BucketDBUpdaterTest, testNoDbResurrectionForBucketNotOwnedInCurrentState)
getBucketDBUpdater().recheckBucketInfo(0, makeDocumentBucket(bucket));
- ASSERT_EQ(size_t(1), _sender.commands.size());
+ ASSERT_EQ(size_t(1), _sender.commands().size());
std::shared_ptr<api::RequestBucketInfoCommand> rbi(
std::dynamic_pointer_cast<RequestBucketInfoCommand>(
- _sender.commands[0]));
+ _sender.command(0)));
lib::ClusterState stateAfter("distributor:3 storage:3");
@@ -1823,7 +1823,7 @@ TEST_F(BucketDBUpdaterTest, testNoDbResurrectionForBucketNotOwnedInCurrentState)
EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket));
}
-TEST_F(BucketDBUpdaterTest, testNoDbResurrectionForBucketNotOwnedInPendingState) {
+TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
document::BucketId bucket(16, 3);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1834,10 +1834,10 @@ TEST_F(BucketDBUpdaterTest, testNoDbResurrectionForBucketNotOwnedInPendingState)
getBucketDBUpdater().recheckBucketInfo(0, makeDocumentBucket(bucket));
- ASSERT_EQ(size_t(1), _sender.commands.size());
+ ASSERT_EQ(size_t(1), _sender.commands().size());
std::shared_ptr<api::RequestBucketInfoCommand> rbi(
std::dynamic_pointer_cast<RequestBucketInfoCommand>(
- _sender.commands[0]));
+ _sender.command(0)));
lib::ClusterState stateAfter("distributor:3 storage:3");
// Set, but _don't_ enable cluster state. We want it to be pending.
@@ -1859,7 +1859,7 @@ TEST_F(BucketDBUpdaterTest, testNoDbResurrectionForBucketNotOwnedInPendingState)
* will with a high likelihood end up not getting the complete view of the buckets in
* the cluster.
*/
-TEST_F(BucketDBUpdaterTest, testClusterStateAlwaysSendsFullFetchWhenDistributionChangePending) {
+TEST_F(BucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) {
lib::ClusterState stateBefore("distributor:6 storage:6");
{
uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 1;
@@ -1870,19 +1870,19 @@ TEST_F(BucketDBUpdaterTest, testClusterStateAlwaysSendsFullFetchWhenDistribution
setDistribution(distConfig);
sortSentMessagesByIndex(_sender);
- ASSERT_EQ(messageCount(6), _sender.commands.size());
+ ASSERT_EQ(messageCount(6), _sender.commands().size());
// Suddenly, a wild cluster state change appears! Even though this state
// does not in itself imply any bucket changes, it will still overwrite the
// pending cluster state and thus its state of pending bucket info requests.
setSystemState(lib::ClusterState("distributor:6 .2.t:12345 storage:6"));
- ASSERT_EQ(messageCount(12), _sender.commands.size());
+ ASSERT_EQ(messageCount(12), _sender.commands().size());
// Send replies for first messageCount(6) (outdated requests).
int numBuckets = 10;
for (uint32_t i = 0; i < messageCount(6); ++i) {
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:6 storage:6"),
- *_sender.commands[i], numBuckets));
+ *_sender.command(i), numBuckets));
}
// No change from these.
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(1, "distributor:6 storage:6"));
@@ -1890,7 +1890,7 @@ TEST_F(BucketDBUpdaterTest, testClusterStateAlwaysSendsFullFetchWhenDistribution
// Send for current pending.
for (uint32_t i = 0; i < messageCount(6); ++i) {
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:6 .2.t:12345 storage:6"),
- *_sender.commands[i + messageCount(6)],
+ *_sender.command(i + messageCount(6)),
numBuckets));
}
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:6 storage:6"));
@@ -1898,10 +1898,10 @@ TEST_F(BucketDBUpdaterTest, testClusterStateAlwaysSendsFullFetchWhenDistribution
// No more pending global fetch; this should be a no-op state.
setSystemState(lib::ClusterState("distributor:6 .3.t:12345 storage:6"));
- EXPECT_EQ(size_t(0), _sender.commands.size());
+ EXPECT_EQ(size_t(0), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, testChangedDistributionConfigTriggersRecoveryMode) {
+TEST_F(BucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), 20));
_sender.clear();
EXPECT_TRUE(_distributor->isInRecoveryMode());
@@ -1914,11 +1914,11 @@ TEST_F(BucketDBUpdaterTest, testChangedDistributionConfigTriggersRecoveryMode) {
// No replies received yet, still no recovery mode.
EXPECT_FALSE(_distributor->isInRecoveryMode());
- ASSERT_EQ(messageCount(6), _sender.commands.size());
+ ASSERT_EQ(messageCount(6), _sender.commands().size());
uint32_t numBuckets = 10;
for (uint32_t i = 0; i < messageCount(6); ++i) {
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:6 storage:6"),
- *_sender.commands[i], numBuckets));
+ *_sender.command(i), numBuckets));
}
// Pending cluster state (i.e. distribution) has been enabled, which should
@@ -1970,7 +1970,7 @@ TEST_F(BucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db
}));
}
-TEST_F(BucketDBUpdaterTest, testNewlyAddedBucketsHaveCurrentTimeAsGcTimestamp) {
+TEST_F(BucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) {
getClock().setAbsoluteTimeInSeconds(101234);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1985,7 +1985,7 @@ TEST_F(BucketDBUpdaterTest, testNewlyAddedBucketsHaveCurrentTimeAsGcTimestamp) {
EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime());
}
-TEST_F(BucketDBUpdaterTest, testNewerMutationsNotOverwrittenByEarlierBucketFetch) {
+TEST_F(BucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) {
{
lib::ClusterState stateBefore("distributor:1 storage:1 .0.s:i");
uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 0;
@@ -1998,7 +1998,7 @@ TEST_F(BucketDBUpdaterTest, testNewerMutationsNotOverwrittenByEarlierBucketFetch
getClock().setAbsoluteTimeInSeconds(1000);
lib::ClusterState state("distributor:1 storage:1");
setSystemState(state);
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
// Before replying with the bucket info, simulate the arrival of a mutation
// reply that alters the state of the bucket with information that will be
@@ -2023,7 +2023,7 @@ TEST_F(BucketDBUpdaterTest, testNewerMutationsNotOverwrittenByEarlierBucketFetch
// correctness, as this should contain the same bucket info as that
// contained in the full bucket reply and the DB update is thus idempotent.
for (uint32_t i = 0; i < _bucketSpaces.size(); ++i) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(state, *_sender.commands[i], bucketsReturned));
+ ASSERT_NO_FATAL_FAILURE(fakeBucketReply(state, *_sender.command(i), bucketsReturned));
}
BucketDatabase::Entry e(getBucket(bucket));
@@ -2035,8 +2035,8 @@ std::vector<uint16_t>
BucketDBUpdaterTest::getSendSet() const
{
std::vector<uint16_t> nodes;
- std::transform(_sender.commands.begin(),
- _sender.commands.end(),
+ std::transform(_sender.commands().begin(),
+ _sender.commands().end(),
std::back_inserter(nodes),
[](auto& cmd)
{
@@ -2080,7 +2080,7 @@ using nodeVec = std::vector<uint16_t>;
* database modifications caused by intermediate states will not be
* accounted for (basically the ABA problem in a distributed setting).
*/
-TEST_F(BucketDBUpdaterTest, preemptedDistrChangeCarriesNodeSetOverToNextStateFetch) {
+TEST_F(BucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) {
EXPECT_EQ(
expandNodeVec({0, 1, 2, 3, 4, 5}),
getSentNodesWithPreemption("version:1 distributor:6 storage:6",
@@ -2089,7 +2089,7 @@ TEST_F(BucketDBUpdaterTest, preemptedDistrChangeCarriesNodeSetOverToNextStateFet
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, preemptedStorChangeCarriesNodeSetOverToNextStateFetch) {
+TEST_F(BucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) {
EXPECT_EQ(
expandNodeVec({2, 3}),
getSentNodesWithPreemption(
@@ -2099,7 +2099,7 @@ TEST_F(BucketDBUpdaterTest, preemptedStorChangeCarriesNodeSetOverToNextStateFetc
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, preemptedStorageNodeDownMustBeReFetched) {
+TEST_F(BucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
EXPECT_EQ(
expandNodeVec({2}),
getSentNodesWithPreemption(
@@ -2109,7 +2109,7 @@ TEST_F(BucketDBUpdaterTest, preemptedStorageNodeDownMustBeReFetched) {
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, doNotSendToPreemptedNodeNowInDownState) {
+TEST_F(BucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
EXPECT_EQ(
nodeVec{},
getSentNodesWithPreemption(
@@ -2131,7 +2131,7 @@ TEST_F(BucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, outdatedNodeSetClearedAfterSuccessfulStateCompletion) {
+TEST_F(BucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) {
lib::ClusterState stateBefore(
"version:1 distributor:6 storage:6 .1.t:1234");
uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 10;
@@ -2142,7 +2142,7 @@ TEST_F(BucketDBUpdaterTest, outdatedNodeSetClearedAfterSuccessfulStateCompletion
// (completed) cluster state has been set.
lib::ClusterState stateAfter("version:3 distributor:6 storage:6");
setSystemState(stateAfter);
- EXPECT_EQ(size_t(0), _sender.commands.size());
+ EXPECT_EQ(size_t(0), _sender.commands().size());
}
// XXX test currently disabled since distribution config currently isn't used
@@ -2151,7 +2151,7 @@ TEST_F(BucketDBUpdaterTest, outdatedNodeSetClearedAfterSuccessfulStateCompletion
// distribution config will follow very shortly after the config has been
// applied to the node. The new cluster state will then send out requests to
// the correct node set.
-TEST_F(BucketDBUpdaterTest, DISABLED_clusterConfigDownsizeOnlySendsToAvailableNodes) {
+TEST_F(BucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) {
uint32_t expectedMsgs = 6, dummyBucketsToReturn = 20;
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"),
expectedMsgs, dummyBucketsToReturn));
@@ -2166,7 +2166,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_clusterConfigDownsizeOnlySendsToAvailableNo
EXPECT_EQ((nodeVec{0, 1, 2}), getSendSet());
}
-TEST_F(BucketDBUpdaterTest, changedDiskSetTriggersReFetch) {
+TEST_F(BucketDBUpdaterTest, changed_disk_set_triggers_re_fetch) {
// Same number of online disks, but the set of disks has changed.
EXPECT_EQ(
getNodeList({1}),
@@ -2182,7 +2182,7 @@ TEST_F(BucketDBUpdaterTest, changedDiskSetTriggersReFetch) {
*
* See VESPA-790 for details.
*/
-TEST_F(BucketDBUpdaterTest, nodeMissingFromConfigIsTreatedAsNeedingOwnershipTransfer) {
+TEST_F(BucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) {
uint32_t expectedMsgs = messageCount(3), dummyBucketsToReturn = 1;
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:3 storage:3"),
expectedMsgs, dummyBucketsToReturn));
@@ -2354,10 +2354,10 @@ TEST_F(BucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format
const vespalib::string legacy_hash = "(0d3|3|*(0;0;1;2)(1;3;4;5))";
setSystemState(lib::ClusterState("distributor:6 storage:6"));
- ASSERT_EQ(messageCount(6), _sender.commands.size());
+ ASSERT_EQ(messageCount(6), _sender.commands().size());
api::RequestBucketInfoCommand* global_req = nullptr;
- for (auto& cmd : _sender.commands) {
+ for (auto& cmd : _sender.commands()) {
auto& req_cmd = dynamic_cast<api::RequestBucketInfoCommand&>(*cmd);
if (req_cmd.getBucketSpace() == document::FixedBucketSpaces::global_space()) {
global_req = &req_cmd;
@@ -2375,8 +2375,8 @@ TEST_F(BucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format
getBucketDBUpdater().resendDelayedMessages();
// Should now be a resent request with legacy distribution hash
- ASSERT_EQ(messageCount(6) + 1, _sender.commands.size());
- auto& legacy_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands.back());
+ ASSERT_EQ(messageCount(6) + 1, _sender.commands().size());
+ auto& legacy_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands().back());
ASSERT_EQ(legacy_hash, legacy_req.getDistributionHash());
// Now if we reject it _again_ we should cycle back to the current hash
@@ -2388,8 +2388,8 @@ TEST_F(BucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format
getClock().addSecondsToTime(10);
getBucketDBUpdater().resendDelayedMessages();
- ASSERT_EQ(messageCount(6) + 2, _sender.commands.size());
- auto& new_current_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands.back());
+ ASSERT_EQ(messageCount(6) + 2, _sender.commands().size());
+ auto& new_current_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands().back());
ASSERT_EQ(current_hash, new_current_req.getDistributionHash());
}
@@ -2420,7 +2420,7 @@ TEST_F(BucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership
lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
set_cluster_state_bundle(lib::ClusterStateBundle(initial_state, {}, false)); // Skip activation step for simplicity
- ASSERT_EQ(messageCount(4), _sender.commands.size());
+ ASSERT_EQ(messageCount(4), _sender.commands().size());
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(initial_state, messageCount(4), n_buckets));
_sender.clear();
@@ -2473,7 +2473,7 @@ TEST_F(BucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_c
lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
set_cluster_state_bundle(lib::ClusterStateBundle(initial_state, {}, false)); // Skip activation step for simplicity
- ASSERT_EQ(messageCount(4), _sender.commands.size());
+ ASSERT_EQ(messageCount(4), _sender.commands().size());
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(initial_state, messageCount(4), n_buckets));
_sender.clear();
@@ -2500,14 +2500,14 @@ void BucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
getConfig().setAllowStaleReadsDuringClusterStateTransitions(true);
lib::ClusterState initial_state(initial_state_str);
setSystemState(initial_state);
- ASSERT_EQ(messageCount(initial_expected_msgs), _sender.commands.size());
+ ASSERT_EQ(messageCount(initial_expected_msgs), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(
initial_state, messageCount(initial_expected_msgs), initial_buckets));
_sender.clear();
lib::ClusterState pending_state(pending_state_str); // Ownership change
set_cluster_state_bundle(lib::ClusterStateBundle(pending_state, {}, true));
- ASSERT_EQ(messageCount(pending_expected_msgs), _sender.commands.size());
+ ASSERT_EQ(messageCount(pending_expected_msgs), _sender.commands().size());
ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(
pending_state, messageCount(pending_expected_msgs), pending_buckets));
_sender.clear();
@@ -2581,7 +2581,7 @@ TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_without_pending_trans
// Note: state manager is not modelled in this test, so we just check that the message handler returns
// false (meaning "didn't take message ownership") and there's no auto-generated reply.
EXPECT_FALSE(activate_cluster_state_version(3));
- EXPECT_EQ(size_t(0), _sender.replies.size());
+ EXPECT_EQ(size_t(0), _sender.replies().size());
}
TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
@@ -2597,10 +2597,10 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
constexpr uint32_t sub_buckets = 14;
constexpr uint32_t n_buckets = superbuckets * sub_buckets;
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands.size());
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
- ASSERT_EQ(_sender.commands[bsi]->getType(), MessageType::REQUESTBUCKETINFO);
- const auto& req = dynamic_cast<const RequestBucketInfoCommand&>(*_sender.commands[bsi]);
+ ASSERT_EQ(_sender.command(bsi)->getType(), MessageType::REQUESTBUCKETINFO);
+ const auto& req = dynamic_cast<const RequestBucketInfoCommand&>(*_sender.command(bsi));
auto sreply = std::make_shared<RequestBucketInfoReply>(req);
sreply->setAddress(storageAddress(0));
@@ -2643,10 +2643,10 @@ uint32_t BucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_ben
constexpr uint32_t sub_buckets = 14;
constexpr uint32_t n_buckets = superbuckets * sub_buckets;
- assert(_bucketSpaces.size() == _sender.commands.size());
+ assert(_bucketSpaces.size() == _sender.commands().size());
for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
- assert(_sender.commands[bsi]->getType() == MessageType::REQUESTBUCKETINFO);
- const auto& req = dynamic_cast<const RequestBucketInfoCommand&>(*_sender.commands[bsi]);
+ assert(_sender.command(bsi)->getType() == MessageType::REQUESTBUCKETINFO);
+ const auto& req = dynamic_cast<const RequestBucketInfoCommand&>(*_sender.command(bsi));
auto sreply = std::make_shared<RequestBucketInfoReply>(req);
sreply->setAddress(storageAddress(0));
diff --git a/storage/src/tests/distributor/bucketgctimecalculatortest.cpp b/storage/src/tests/distributor/bucketgctimecalculatortest.cpp
index 2d2c2e48e1a..d9d5c498735 100644
--- a/storage/src/tests/distributor/bucketgctimecalculatortest.cpp
+++ b/storage/src/tests/distributor/bucketgctimecalculatortest.cpp
@@ -1,11 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <chrono>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/distributor/bucketgctimecalculator.h>
+#include <vespa/vespalib/gtest/gtest.h>
-namespace storage {
-namespace distributor {
+using namespace ::testing;
+
+namespace storage::distributor {
struct MockBucketIdHasher : public BucketGcTimeCalculator::BucketIdHasher
{
@@ -16,25 +17,9 @@ struct MockBucketIdHasher : public BucketGcTimeCalculator::BucketIdHasher
}
};
-struct BucketGcTimeCalculatorTest : public CppUnit::TestFixture
-{
- void noGcIfAlreadyCheckedAfterStartPoint();
- void gcIfNotRunInCurrentPeriodAndCheckPeriodPassed();
- void noGcIfNotRunInCurrentPeriodAndCheckPeriodNotPassed();
- void noGcIfCheckIntervalIsZero();
- void identityHasherReturnsBucketId();
-
+struct BucketGcTimeCalculatorTest : Test {
BucketGcTimeCalculatorTest();
- CPPUNIT_TEST_SUITE(BucketGcTimeCalculatorTest);
- CPPUNIT_TEST(noGcIfAlreadyCheckedAfterStartPoint);
- CPPUNIT_TEST(gcIfNotRunInCurrentPeriodAndCheckPeriodPassed);
- CPPUNIT_TEST(noGcIfNotRunInCurrentPeriodAndCheckPeriodNotPassed);
- CPPUNIT_TEST(noGcIfCheckIntervalIsZero);
- CPPUNIT_TEST(identityHasherReturnsBucketId);
- CPPUNIT_TEST_SUITE_END();
-
-private:
// Ease of reading aliases
using CurrentTime = std::chrono::seconds;
using LastRunAt = std::chrono::seconds;
@@ -53,61 +38,47 @@ BucketGcTimeCalculatorTest::BucketGcTimeCalculatorTest()
hasher.nextGeneratedHash = 500;
}
-CPPUNIT_TEST_SUITE_REGISTRATION(BucketGcTimeCalculatorTest);
-
-void
-BucketGcTimeCalculatorTest::noGcIfAlreadyCheckedAfterStartPoint()
-{
+TEST_F(BucketGcTimeCalculatorTest, no_gc_if_already_checked_after_start_point) {
// Note: LastRun(0) is considered to be within the current period.
- CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(0), LastRunAt(0)));
- CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(499), LastRunAt(0)));
- CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(999), LastRunAt(500)));
+ EXPECT_FALSE(calc.shouldGc(b, CurrentTime(0), LastRunAt(0)));
+ EXPECT_FALSE(calc.shouldGc(b, CurrentTime(499), LastRunAt(0)));
+ EXPECT_FALSE(calc.shouldGc(b, CurrentTime(999), LastRunAt(500)));
- CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(1000), LastRunAt(1000)));
- CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(1234), LastRunAt(1100)));
- CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(1600), LastRunAt(1500)));
+ EXPECT_FALSE(calc.shouldGc(b, CurrentTime(1000), LastRunAt(1000)));
+ EXPECT_FALSE(calc.shouldGc(b, CurrentTime(1234), LastRunAt(1100)));
+ EXPECT_FALSE(calc.shouldGc(b, CurrentTime(1600), LastRunAt(1500)));
}
-void
-BucketGcTimeCalculatorTest::gcIfNotRunInCurrentPeriodAndCheckPeriodPassed()
-{
- CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(500), LastRunAt(0)));
- CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(1600), LastRunAt(500)));
+TEST_F(BucketGcTimeCalculatorTest, gc_if_not_run_in_current_period_and_check_period_passed) {
+ EXPECT_TRUE(calc.shouldGc(b, CurrentTime(500), LastRunAt(0)));
+ EXPECT_TRUE(calc.shouldGc(b, CurrentTime(1600), LastRunAt(500)));
// Note: this may look wrong, but is correct since GC should have been
// scheduled _after_ 1499 so this is most likely the case where a bucket
// has been added to the database at this point in time. Not treating
// this as a valid GC scenario would mean newly added buckets would have to
// wait until the next period to be considered. If the period is long and
// the system is unstable (causing many bucket handoffs), we'd risk not
- // being able to scheduled many buckets at all.
- CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(1600), LastRunAt(1499)));
+ // being able to schedule many buckets at all.
+ EXPECT_TRUE(calc.shouldGc(b, CurrentTime(1600), LastRunAt(1499)));
- CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(2000), LastRunAt(500)));
- CPPUNIT_ASSERT(calc.shouldGc(b, CurrentTime(2600), LastRunAt(1500)));
+ EXPECT_TRUE(calc.shouldGc(b, CurrentTime(2000), LastRunAt(500)));
+ EXPECT_TRUE(calc.shouldGc(b, CurrentTime(2600), LastRunAt(1500)));
}
-void
-BucketGcTimeCalculatorTest::noGcIfNotRunInCurrentPeriodAndCheckPeriodNotPassed()
-{
- CPPUNIT_ASSERT(!calc.shouldGc(b, CurrentTime(1000), LastRunAt(500)));
+TEST_F(BucketGcTimeCalculatorTest, no_gc_if_not_run_in_current_period_and_check_period_not_passed) {
+ EXPECT_FALSE(calc.shouldGc(b, CurrentTime(1000), LastRunAt(500)));
}
-void
-BucketGcTimeCalculatorTest::noGcIfCheckIntervalIsZero()
-{
+TEST_F(BucketGcTimeCalculatorTest, no_gc_if_check_interval_is_zero) {
BucketGcTimeCalculator calc2(hasher, std::chrono::seconds(0));
- CPPUNIT_ASSERT(!calc2.shouldGc(b, CurrentTime(5000), LastRunAt(0)));
+ EXPECT_FALSE(calc2.shouldGc(b, CurrentTime(5000), LastRunAt(0)));
}
-void
-BucketGcTimeCalculatorTest::identityHasherReturnsBucketId()
-{
+TEST_F(BucketGcTimeCalculatorTest, identity_hasher_returns_bucket_id) {
BucketGcTimeCalculator::BucketIdIdentityHasher hasher2;
document::BucketId bucket(36, 1234);
- CPPUNIT_ASSERT_EQUAL(bucket.getId(), static_cast<uint64_t>(hasher2.hash(bucket)));
+ EXPECT_EQ(bucket.getId(), static_cast<uint64_t>(hasher2.hash(bucket)));
}
-} // distributor
-} // storage
-
+} // storage::distributor
diff --git a/storage/src/tests/distributor/bucketstateoperationtest.cpp b/storage/src/tests/distributor/bucketstateoperationtest.cpp
index 216a051be15..c62d0a62ed3 100644
--- a/storage/src/tests/distributor/bucketstateoperationtest.cpp
+++ b/storage/src/tests/distributor/bucketstateoperationtest.cpp
@@ -1,64 +1,38 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
#include <tests/distributor/distributortestutil.h>
#include <vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h>
#include <vespa/storage/distributor/distributor.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-
-namespace distributor {
-
-class BucketStateOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(BucketStateOperationTest);
- CPPUNIT_TEST(testActiveStateSupportedInBucketDb);
- CPPUNIT_TEST(testActivateSingleNode);
- CPPUNIT_TEST(testActivateAndDeactivateNodes);
- CPPUNIT_TEST(testDoNotDeactivateIfActivateFails);
- CPPUNIT_TEST(testBucketDbNotUpdatedOnFailure);
- CPPUNIT_TEST_SUITE_END();
-
-private:
- void testActiveStateSupportedInBucketDb();
- void testActivateSingleNode();
- void testActivateAndDeactivateNodes();
- void testDoNotDeactivateIfActivateFails();
- void testBucketDbNotUpdatedOnFailure();
-
-public:
- void setUp() override {
+namespace storage::distributor {
+
+struct BucketStateOperationTest : Test, DistributorTestUtil {
+ void SetUp() override {
createLinks();
}
- void tearDown() override {
+ void TearDown() override {
close();
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(BucketStateOperationTest);
-
-void
-BucketStateOperationTest::testActiveStateSupportedInBucketDb()
-{
+TEST_F(BucketStateOperationTest, active_state_supported_in_bucket_db) {
document::BucketId bid(16, 1);
insertBucketInfo(bid, 0, 0xabc, 10, 1100, true, true);
BucketDatabase::Entry entry = getBucket(bid);
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT(entry->getNode(0)->active());
- CPPUNIT_ASSERT_EQUAL(
- std::string("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
- "trusted=true,active=true,ready=false)"),
- entry->getNode(0)->toString());
+ ASSERT_TRUE(entry.valid());
+ EXPECT_TRUE(entry->getNode(0)->active());
+ EXPECT_EQ("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
+ "trusted=true,active=true,ready=false)",
+ entry->getNode(0)->toString());
}
-void
-BucketStateOperationTest::testActivateSingleNode()
-{
+TEST_F(BucketStateOperationTest, activate_single_node) {
document::BucketId bid(16, 1);
insertBucketInfo(bid, 0, 0xabc, 10, 1100, true, false);
@@ -70,35 +44,31 @@ BucketStateOperationTest::testActivateSingleNode()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+ ASSERT_EQ(1, _sender.commands().size());
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
- CPPUNIT_ASSERT_EQUAL(
- api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 0).toString(),
- msg->getAddress()->toString());
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
+ ASSERT_EQ(msg->getType(), api::MessageType::SETBUCKETSTATE);
+ EXPECT_EQ(api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
- const api::SetBucketStateCommand& cmd(
- dynamic_cast<const api::SetBucketStateCommand&>(*msg));
- CPPUNIT_ASSERT_EQUAL(bid, cmd.getBucketId());
- CPPUNIT_ASSERT_EQUAL(api::SetBucketStateCommand::ACTIVE, cmd.getState());
+ auto& cmd = dynamic_cast<const api::SetBucketStateCommand&>(*msg);
+ EXPECT_EQ(bid, cmd.getBucketId());
+ EXPECT_EQ(api::SetBucketStateCommand::ACTIVE, cmd.getState());
std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
op.receive(_sender, reply);
BucketDatabase::Entry entry = getBucket(bid);
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT(entry->getNodeRef(0).active());
+ ASSERT_TRUE(entry.valid());
+ EXPECT_TRUE(entry->getNodeRef(0).active());
- CPPUNIT_ASSERT(op.ok());
+ EXPECT_TRUE(op.ok());
// TODO: check that it's done
}
-void
-BucketStateOperationTest::testActivateAndDeactivateNodes()
-{
+TEST_F(BucketStateOperationTest, activate_and_deactivate_nodes) {
document::BucketId bid(16, 1);
insertBucketInfo(bid, 0, 0xabc, 10, 1100, false, true);
insertBucketInfo(bid, 1, 0xdef, 15, 1500, false, false);
@@ -111,59 +81,51 @@ BucketStateOperationTest::testActivateAndDeactivateNodes()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+ ASSERT_EQ(1, _sender.commands().size());
{
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
- CPPUNIT_ASSERT_EQUAL(
- api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 1).toString(),
- msg->getAddress()->toString());
-
- const api::SetBucketStateCommand& cmd(
- dynamic_cast<const api::SetBucketStateCommand&>(*msg));
- CPPUNIT_ASSERT_EQUAL(bid, cmd.getBucketId());
- CPPUNIT_ASSERT_EQUAL(api::SetBucketStateCommand::ACTIVE, cmd.getState());
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
+ ASSERT_EQ(msg->getType(), api::MessageType::SETBUCKETSTATE);
+ EXPECT_EQ(api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 1).toString(),
+ msg->getAddress()->toString());
+
+ auto& cmd = dynamic_cast<const api::SetBucketStateCommand&>(*msg);
+ EXPECT_EQ(bid, cmd.getBucketId());
+ EXPECT_EQ(api::SetBucketStateCommand::ACTIVE, cmd.getState());
std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
op.receive(_sender, reply);
}
- CPPUNIT_ASSERT_EQUAL((size_t)2, _sender.commands.size());
+ ASSERT_EQ(2, _sender.commands().size());
{
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[1];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
- CPPUNIT_ASSERT_EQUAL(
- api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 0).toString(),
- msg->getAddress()->toString());
-
- const api::SetBucketStateCommand& cmd(
- dynamic_cast<const api::SetBucketStateCommand&>(*msg));
- CPPUNIT_ASSERT_EQUAL(bid, cmd.getBucketId());
- CPPUNIT_ASSERT_EQUAL(api::SetBucketStateCommand::INACTIVE, cmd.getState());
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(1);
+ ASSERT_EQ(msg->getType(), api::MessageType::SETBUCKETSTATE);
+ EXPECT_EQ(api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
+
+ auto& cmd = dynamic_cast<const api::SetBucketStateCommand&>(*msg);
+ EXPECT_EQ(bid, cmd.getBucketId());
+ EXPECT_EQ(api::SetBucketStateCommand::INACTIVE, cmd.getState());
std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
op.receive(_sender, reply);
}
BucketDatabase::Entry entry = getBucket(bid);
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL(
- std::string("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
- "trusted=true,active=false,ready=false)"),
- entry->getNodeRef(0).toString());
- CPPUNIT_ASSERT_EQUAL(
- std::string("node(idx=1,crc=0xdef,docs=15/15,bytes=1500/1500,"
- "trusted=false,active=true,ready=false)"),
- entry->getNodeRef(1).toString());
-
- CPPUNIT_ASSERT(op.ok());
+ ASSERT_TRUE(entry.valid());
+ EXPECT_EQ("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
+ "trusted=true,active=false,ready=false)",
+ entry->getNodeRef(0).toString());
+ EXPECT_EQ("node(idx=1,crc=0xdef,docs=15/15,bytes=1500/1500,"
+ "trusted=false,active=true,ready=false)",
+ entry->getNodeRef(1).toString());
+
+ EXPECT_TRUE(op.ok());
}
-void
-BucketStateOperationTest::testDoNotDeactivateIfActivateFails()
-{
+TEST_F(BucketStateOperationTest, do_not_deactivate_if_activate_fails) {
document::BucketId bid(16, 1);
insertBucketInfo(bid, 0, 0xabc, 10, 1100, false, true);
insertBucketInfo(bid, 1, 0xdef, 15, 1500, false, false);
@@ -176,44 +138,38 @@ BucketStateOperationTest::testDoNotDeactivateIfActivateFails()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+ ASSERT_EQ(1, _sender.commands().size());
{
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
- CPPUNIT_ASSERT_EQUAL(
- api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 1).toString(),
- msg->getAddress()->toString());
-
- const api::SetBucketStateCommand& cmd(
- dynamic_cast<const api::SetBucketStateCommand&>(*msg));
- CPPUNIT_ASSERT_EQUAL(bid, cmd.getBucketId());
- CPPUNIT_ASSERT_EQUAL(api::SetBucketStateCommand::ACTIVE, cmd.getState());
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
+ ASSERT_EQ(msg->getType(), api::MessageType::SETBUCKETSTATE);
+ EXPECT_EQ(api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 1).toString(),
+ msg->getAddress()->toString());
+
+ auto& cmd = dynamic_cast<const api::SetBucketStateCommand&>(*msg);
+ EXPECT_EQ(bid, cmd.getBucketId());
+ EXPECT_EQ(api::SetBucketStateCommand::ACTIVE, cmd.getState());
std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
reply->setResult(api::ReturnCode(api::ReturnCode::ABORTED, "aaarg!"));
op.receive(_sender, reply);
}
- CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+ ASSERT_EQ(1, _sender.commands().size());
BucketDatabase::Entry entry = getBucket(bid);
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL(
- std::string("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
- "trusted=true,active=true,ready=false)"),
- entry->getNodeRef(0).toString());
- CPPUNIT_ASSERT_EQUAL(
- std::string("node(idx=1,crc=0xdef,docs=15/15,bytes=1500/1500,"
- "trusted=false,active=false,ready=false)"),
- entry->getNodeRef(1).toString());
-
- CPPUNIT_ASSERT(!op.ok());
+ ASSERT_TRUE(entry.valid());
+ EXPECT_EQ("node(idx=0,crc=0xabc,docs=10/10,bytes=1100/1100,"
+ "trusted=true,active=true,ready=false)",
+ entry->getNodeRef(0).toString());
+ EXPECT_EQ("node(idx=1,crc=0xdef,docs=15/15,bytes=1500/1500,"
+ "trusted=false,active=false,ready=false)",
+ entry->getNodeRef(1).toString());
+
+ EXPECT_FALSE(op.ok());
}
-void
-BucketStateOperationTest::testBucketDbNotUpdatedOnFailure()
-{
+TEST_F(BucketStateOperationTest, bucket_db_not_updated_on_failure) {
document::BucketId bid(16, 1);
insertBucketInfo(bid, 0, 0xabc, 10, 1100, true, false);
@@ -225,27 +181,24 @@ BucketStateOperationTest::testBucketDbNotUpdatedOnFailure()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL((size_t)1, _sender.commands.size());
+ ASSERT_EQ(1, _sender.commands().size());
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::SETBUCKETSTATE);
- CPPUNIT_ASSERT_EQUAL(
- api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 0).toString(),
- msg->getAddress()->toString());
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
+ ASSERT_EQ(msg->getType(), api::MessageType::SETBUCKETSTATE);
+ EXPECT_EQ(api::StorageMessageAddress(
+ "storage", lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
reply->setResult(api::ReturnCode(api::ReturnCode::ABORTED, "aaarg!"));
op.receive(_sender, reply);
BucketDatabase::Entry entry = getBucket(bid);
- CPPUNIT_ASSERT(entry.valid());
+ ASSERT_TRUE(entry.valid());
// Should not be updated
- CPPUNIT_ASSERT(!entry->getNodeRef(0).active());
+ EXPECT_FALSE(entry->getNodeRef(0).active());
- CPPUNIT_ASSERT(!op.ok());
+ EXPECT_FALSE(op.ok());
}
-} // namespace distributor
-
-} // namespace storage
+} // namespace storage::distributor
diff --git a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
index c2e8367fa30..e1010285dba 100644
--- a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
+++ b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
@@ -1,14 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/storage/distributor/bucket_spaces_stats_provider.h>
#include <vespa/storage/distributor/distributor_host_info_reporter.h>
#include <vespa/storage/distributor/min_replica_provider.h>
+#include <tests/common/hostreporter/util.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/io/fileutil.h>
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <tests/common/hostreporter/util.h>
#include <vespa/vespalib/stllike/asciistream.h>
-#include <vespa/storage/distributor/bucket_spaces_stats_provider.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace storage::distributor {
@@ -16,17 +15,9 @@ using PerNodeBucketSpacesStats = BucketSpacesStatsProvider::PerNodeBucketSpacesS
using End = vespalib::JsonStream::End;
using File = vespalib::File;
using Object = vespalib::JsonStream::Object;
+using namespace ::testing;
-class DistributorHostInfoReporterTest : public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE(DistributorHostInfoReporterTest);
- CPPUNIT_TEST(min_replica_stats_are_reported);
- CPPUNIT_TEST(generate_example_json);
- CPPUNIT_TEST(no_report_generated_if_disabled);
- CPPUNIT_TEST(bucket_spaces_stats_are_reported);
- CPPUNIT_TEST_SUITE_END();
-
- void min_replica_stats_are_reported();
+struct DistributorHostInfoReporterTest : Test {
void verifyBucketSpaceStats(const vespalib::Slime& root,
uint16_t nodeIndex,
const vespalib::string& bucketSpaceName,
@@ -35,13 +26,8 @@ class DistributorHostInfoReporterTest : public CppUnit::TestFixture
void verifyBucketSpaceStats(const vespalib::Slime& root,
uint16_t nodeIndex,
const vespalib::string& bucketSpaceName);
- void generate_example_json();
- void no_report_generated_if_disabled();
- void bucket_spaces_stats_are_reported();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(DistributorHostInfoReporterTest);
-
using ms = std::chrono::milliseconds;
namespace {
@@ -107,8 +93,8 @@ DistributorHostInfoReporterTest::verifyBucketSpaceStats(const vespalib::Slime& r
{
const auto &stats = getBucketSpaceStats(root, nodeIndex, bucketSpaceName);
const auto &buckets = stats["buckets"];
- CPPUNIT_ASSERT_EQUAL(bucketsTotal, static_cast<size_t>(buckets["total"].asLong()));
- CPPUNIT_ASSERT_EQUAL(bucketsPending, static_cast<size_t>(buckets["pending"].asLong()));
+ EXPECT_EQ(bucketsTotal, static_cast<size_t>(buckets["total"].asLong()));
+ EXPECT_EQ(bucketsPending, static_cast<size_t>(buckets["pending"].asLong()));
}
void
@@ -117,7 +103,7 @@ DistributorHostInfoReporterTest::verifyBucketSpaceStats(const vespalib::Slime& r
const vespalib::string& bucketSpaceName)
{
const auto &stats = getBucketSpaceStats(root, nodeIndex, bucketSpaceName);
- CPPUNIT_ASSERT(!stats["buckets"].valid());
+ EXPECT_FALSE(stats["buckets"].valid());
}
struct Fixture {
@@ -129,12 +115,10 @@ struct Fixture {
bucketSpacesStatsProvider(),
reporter(minReplicaProvider, bucketSpacesStatsProvider)
{}
- ~Fixture() {}
+ ~Fixture() = default;
};
-void
-DistributorHostInfoReporterTest::min_replica_stats_are_reported()
-{
+TEST_F(DistributorHostInfoReporterTest, min_replica_stats_are_reported) {
Fixture f;
std::unordered_map<uint16_t, uint32_t> minReplica;
@@ -145,13 +129,11 @@ DistributorHostInfoReporterTest::min_replica_stats_are_reported()
vespalib::Slime root;
util::reporterToSlime(f.reporter, root);
- CPPUNIT_ASSERT_EQUAL(2, getMinReplica(root, 0));
- CPPUNIT_ASSERT_EQUAL(9, getMinReplica(root, 5));
+ EXPECT_EQ(2, getMinReplica(root, 0));
+ EXPECT_EQ(9, getMinReplica(root, 5));
}
-void
-DistributorHostInfoReporterTest::generate_example_json()
-{
+TEST_F(DistributorHostInfoReporterTest, generate_example_json) {
Fixture f;
std::unordered_map<uint16_t, uint32_t> minReplica;
@@ -175,7 +157,7 @@ DistributorHostInfoReporterTest::generate_example_json()
std::string jsonString = json.str();
- std::string path = TEST_PATH("../../../protocols/getnodestate/distributor.json");
+ std::string path = "../../../../protocols/getnodestate/distributor.json";
std::string goldenString = File::readAll(path);
vespalib::Memory goldenMemory(goldenString);
@@ -186,12 +168,10 @@ DistributorHostInfoReporterTest::generate_example_json()
vespalib::Slime jsonSlime;
vespalib::slime::JsonFormat::decode(jsonMemory, jsonSlime);
- CPPUNIT_ASSERT_EQUAL(goldenSlime, jsonSlime);
+ EXPECT_EQ(goldenSlime, jsonSlime);
}
-void
-DistributorHostInfoReporterTest::no_report_generated_if_disabled()
-{
+TEST_F(DistributorHostInfoReporterTest, no_report_generated_if_disabled) {
Fixture f;
f.reporter.enableReporting(false);
@@ -202,12 +182,10 @@ DistributorHostInfoReporterTest::no_report_generated_if_disabled()
vespalib::Slime root;
util::reporterToSlime(f.reporter, root);
- CPPUNIT_ASSERT_EQUAL(size_t(0), root.get().children());
+ EXPECT_EQ(0, root.get().children());
}
-void
-DistributorHostInfoReporterTest::bucket_spaces_stats_are_reported()
-{
+TEST_F(DistributorHostInfoReporterTest, bucket_spaces_stats_are_reported) {
Fixture f;
PerNodeBucketSpacesStats stats;
stats[1]["default"] = BucketSpaceStats(11, 3);
@@ -226,9 +204,9 @@ DistributorHostInfoReporterTest::bucket_spaces_stats_are_reported()
verifyBucketSpaceStats(root, 3, "default", 19, 11);
try {
verifyBucketSpaceStats(root, 3, "global");
- CPPUNIT_ASSERT(false);
- } catch (const std::runtime_error &ex) {
- CPPUNIT_ASSERT("No bucket space found with name global" == vespalib::string(ex.what()));
+ FAIL() << "No exception thrown";
+ } catch (const std::runtime_error& ex) {
+ EXPECT_EQ("No bucket space found with name global", vespalib::string(ex.what()));
}
}
diff --git a/storage/src/tests/distributor/distributor_message_sender_stub.cpp b/storage/src/tests/distributor/distributor_message_sender_stub.cpp
new file mode 100644
index 00000000000..df894f1bb2c
--- /dev/null
+++ b/storage/src/tests/distributor/distributor_message_sender_stub.cpp
@@ -0,0 +1,20 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "distributor_message_sender_stub.h"
+#include <vespa/storageapi/messageapi/storagecommand.h>
+#include <vespa/storageapi/messageapi/storagereply.h>
+#include <string>
+#include <sstream>
+#include <stdexcept>
+
+namespace storage {
+
+DistributorMessageSenderStub::DistributorMessageSenderStub()
+ : _stub_impl(),
+ _cluster_name("storage"),
+ _pending_message_tracker(nullptr)
+{}
+
+DistributorMessageSenderStub::~DistributorMessageSenderStub() = default;
+
+}
diff --git a/storage/src/tests/distributor/distributor_message_sender_stub.h b/storage/src/tests/distributor/distributor_message_sender_stub.h
new file mode 100644
index 00000000000..7ebd4dee1ae
--- /dev/null
+++ b/storage/src/tests/distributor/distributor_message_sender_stub.h
@@ -0,0 +1,98 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/storage/distributor/distributormessagesender.h>
+#include <tests/common/message_sender_stub.h>
+#include <cassert>
+
+namespace storage {
+
+class DistributorMessageSenderStub : public distributor::DistributorMessageSender {
+ MessageSenderStub _stub_impl;
+ std::string _cluster_name;
+ distributor::PendingMessageTracker* _pending_message_tracker;
+public:
+
+ DistributorMessageSenderStub();
+ ~DistributorMessageSenderStub() override;
+
+ std::vector<std::shared_ptr<api::StorageCommand>>& commands() noexcept {
+ return _stub_impl.commands;
+ }
+ std::vector<std::shared_ptr<api::StorageReply>>& replies() noexcept {
+ return _stub_impl.replies;
+ }
+ const std::vector<std::shared_ptr<api::StorageCommand>>& commands() const noexcept {
+ return _stub_impl.commands;
+ }
+ const std::vector<std::shared_ptr<api::StorageReply>>& replies() const noexcept {
+ return _stub_impl.replies;
+ };
+
+ const std::shared_ptr<api::StorageCommand>& command(size_t idx) noexcept {
+ assert(idx < commands().size());
+ return commands()[idx];
+ }
+
+ const std::shared_ptr<api::StorageReply>& reply(size_t idx) noexcept {
+ assert(idx < replies().size());
+ return replies()[idx];
+ }
+
+ void clear() {
+ _stub_impl.clear();
+ }
+
+ void sendCommand(const std::shared_ptr<api::StorageCommand>& cmd) override {
+ _stub_impl.sendCommand(cmd);
+ }
+
+ void sendReply(const std::shared_ptr<api::StorageReply>& reply) override {
+ _stub_impl.sendReply(reply);
+ }
+
+ std::string getLastCommand(bool verbose = true) const {
+ return _stub_impl.getLastCommand(verbose);
+ }
+
+ std::string getCommands(bool includeAddress = false,
+ bool verbose = false,
+ uint32_t fromIndex = 0) const {
+ return _stub_impl.getCommands(includeAddress, verbose, fromIndex);
+ }
+
+ std::string getLastReply(bool verbose = true) const {
+ return _stub_impl.getLastReply(verbose);
+ }
+
+ std::string getReplies(bool includeAddress = false,
+ bool verbose = false) const {
+ return _stub_impl.getReplies(includeAddress, verbose);
+ }
+
+ std::string dumpMessage(const api::StorageMessage& msg,
+ bool includeAddress,
+ bool verbose) const {
+ return _stub_impl.dumpMessage(msg, includeAddress, verbose);
+ }
+
+ int getDistributorIndex() const override {
+ return 0;
+ }
+
+ const std::string& getClusterName() const override {
+ return _cluster_name;
+ }
+
+ const distributor::PendingMessageTracker& getPendingMessageTracker() const override {
+ assert(_pending_message_tracker);
+ return *_pending_message_tracker;
+ }
+
+ void setPendingMessageTracker(distributor::PendingMessageTracker& tracker) {
+ _pending_message_tracker = &tracker;
+ }
+};
+
+}
diff --git a/storage/src/tests/distributor/distributortest.cpp b/storage/src/tests/distributor/distributortest.cpp
index c519ef0713b..2710ed67717 100644
--- a/storage/src/tests/distributor/distributortest.cpp
+++ b/storage/src/tests/distributor/distributortest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/distributor/idealstatemetricsset.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/bucketsplitting.h>
@@ -15,6 +14,8 @@
#include <tests/common/dummystoragelink.h>
#include <vespa/storage/distributor/distributor.h>
#include <vespa/vespalib/text/stringtokenizer.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
using document::test::makeDocumentBucket;
using document::test::makeBucketSpace;
@@ -22,87 +23,13 @@ using document::FixedBucketSpaces;
using document::BucketSpace;
using document::Bucket;
using document::BucketId;
+using namespace ::testing;
-namespace storage {
+namespace storage::distributor {
-namespace distributor {
+struct DistributorTest : Test, DistributorTestUtil {
+ DistributorTest();
-class Distributor_Test : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(Distributor_Test);
- CPPUNIT_TEST(testOperationGeneration);
- CPPUNIT_TEST(testOperationsGeneratedAndStartedWithoutDuplicates);
- CPPUNIT_TEST(testRecoveryModeOnClusterStateChange);
- CPPUNIT_TEST(testOperationsAreThrottled);
- CPPUNIT_TEST_IGNORED(testRecoveryModeEntryResetsScanner);
- CPPUNIT_TEST_IGNORED(testReprioritizeBucketOnMaintenanceReply);
- CPPUNIT_TEST(testHandleUnknownMaintenanceReply);
- CPPUNIT_TEST(testContainsTimeStatement);
- CPPUNIT_TEST(testUpdateBucketDatabase);
- CPPUNIT_TEST(testTickProcessesStatusRequests);
- CPPUNIT_TEST(testMetricUpdateHookUpdatesPendingMaintenanceMetrics);
- CPPUNIT_TEST(testPriorityConfigIsPropagatedToDistributorConfiguration);
- CPPUNIT_TEST(testNoDbResurrectionForBucketNotOwnedInPendingState);
- CPPUNIT_TEST(testAddedDbBucketsWithoutGcTimestampImplicitlyGetCurrentTime);
- CPPUNIT_TEST(mergeStatsAreAccumulatedDuringDatabaseIteration);
- CPPUNIT_TEST(statsGeneratedForPreemptedOperations);
- CPPUNIT_TEST(hostInfoReporterConfigIsPropagatedToReporter);
- CPPUNIT_TEST(replicaCountingModeIsConfiguredToTrustedByDefault);
- CPPUNIT_TEST(replicaCountingModeConfigIsPropagatedToMetricUpdater);
- CPPUNIT_TEST(bucketActivationIsEnabledByDefault);
- CPPUNIT_TEST(bucketActivationConfigIsPropagatedToDistributorConfiguration);
- CPPUNIT_TEST(max_clock_skew_config_is_propagated_to_distributor_config);
- CPPUNIT_TEST(configured_safe_time_point_rejection_works_end_to_end);
- CPPUNIT_TEST(sequencing_config_is_propagated_to_distributor_config);
- CPPUNIT_TEST(merge_busy_inhibit_duration_config_is_propagated_to_distributor_config);
- CPPUNIT_TEST(merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker);
- CPPUNIT_TEST(external_client_requests_are_handled_individually_in_priority_order);
- CPPUNIT_TEST(internal_messages_are_started_in_fifo_order_batch);
- CPPUNIT_TEST(closing_aborts_priority_queued_client_requests);
- CPPUNIT_TEST(entering_recovery_mode_resets_bucket_space_stats);
- CPPUNIT_TEST(leaving_recovery_mode_immediately_sends_getnodestate_replies);
- CPPUNIT_TEST(pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies);
- CPPUNIT_TEST(pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies);
- CPPUNIT_TEST_SUITE_END();
-
-public:
- Distributor_Test();
-
-protected:
- void testOperationGeneration();
- void testOperationsGeneratedAndStartedWithoutDuplicates();
- void testRecoveryModeOnClusterStateChange();
- void testOperationsAreThrottled();
- void testRecoveryModeEntryResetsScanner();
- void testReprioritizeBucketOnMaintenanceReply();
- void testHandleUnknownMaintenanceReply();
- void testContainsTimeStatement();
- void testUpdateBucketDatabase();
- void testTickProcessesStatusRequests();
- void testMetricUpdateHookUpdatesPendingMaintenanceMetrics();
- void testPriorityConfigIsPropagatedToDistributorConfiguration();
- void testNoDbResurrectionForBucketNotOwnedInPendingState();
- void testAddedDbBucketsWithoutGcTimestampImplicitlyGetCurrentTime();
- void mergeStatsAreAccumulatedDuringDatabaseIteration();
- void statsGeneratedForPreemptedOperations();
- void hostInfoReporterConfigIsPropagatedToReporter();
- void replicaCountingModeIsConfiguredToTrustedByDefault();
- void replicaCountingModeConfigIsPropagatedToMetricUpdater();
- void bucketActivationIsEnabledByDefault();
- void bucketActivationConfigIsPropagatedToDistributorConfiguration();
- void max_clock_skew_config_is_propagated_to_distributor_config();
- void configured_safe_time_point_rejection_works_end_to_end();
- void sequencing_config_is_propagated_to_distributor_config();
- void merge_busy_inhibit_duration_config_is_propagated_to_distributor_config();
- void merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker();
- void external_client_requests_are_handled_individually_in_priority_order();
- void internal_messages_are_started_in_fifo_order_batch();
- void closing_aborts_priority_queued_client_requests();
- void entering_recovery_mode_resets_bucket_space_stats();
- void leaving_recovery_mode_immediately_sends_getnodestate_replies();
- void pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies();
- void pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies();
// TODO handle edge case for window between getnodestate reply already
// sent and new request not yet received
@@ -110,17 +37,15 @@ protected:
const BucketSpacesStatsProvider::PerNodeBucketSpacesStats &stats);
std::vector<document::BucketSpace> _bucketSpaces;
-public:
- void setUp() override {
+ void SetUp() override {
createLinks();
_bucketSpaces = getBucketSpaces();
};
- void tearDown() override {
+ void TearDown() override {
close();
}
-private:
// Simple type aliases to make interfacing with certain utility functions
// easier. Note that this is only for readability and does not provide any
// added type safety.
@@ -139,10 +64,9 @@ private:
.getMinimumReplicaCountingMode();
}
- std::string testOp(api::StorageMessage* msg)
+ std::string testOp(std::shared_ptr<api::StorageMessage> msg)
{
- api::StorageMessage::SP msgPtr(msg);
- _distributor->handleMessage(msgPtr);
+ _distributor->handleMessage(msg);
std::string tmp = _sender.getCommands();
_sender.clear();
@@ -211,6 +135,38 @@ private:
return _node->getNodeStateUpdater().explicit_node_state_reply_send_invocations();
}
+ StatusReporterDelegate& distributor_status_delegate() {
+ return _distributor->_distributorStatusDelegate;
+ }
+
+ framework::TickingThreadPool& distributor_thread_pool() {
+ return _distributor->_threadPool;
+ }
+
+ const std::vector<std::shared_ptr<Distributor::Status>>& distributor_status_todos() {
+ return _distributor->_statusToDo;
+ }
+
+ Distributor::MetricUpdateHook distributor_metric_update_hook() {
+ return _distributor->_metricUpdateHook;
+ }
+
+ SimpleMaintenanceScanner::PendingMaintenanceStats& distributor_maintenance_stats() {
+ return _distributor->_maintenanceStats;
+ }
+
+ BucketSpacesStatsProvider::PerNodeBucketSpacesStats distributor_bucket_spaces_stats() {
+ return _distributor->getBucketSpacesStats();
+ }
+
+ DistributorHostInfoReporter& distributor_host_info_reporter() {
+ return _distributor->_hostInfoReporter;
+ }
+
+ bool distributor_handle_message(const std::shared_ptr<api::StorageMessage>& msg) {
+ return _distributor->handleMessage(msg);
+ }
+
void configureMaxClusterClockSkew(int seconds);
void sendDownClusterStateCommand();
void replyToSingleRequestBucketInfoCommandWith1Bucket();
@@ -222,39 +178,32 @@ private:
void do_test_pending_merge_getnodestate_reply_edge(BucketSpace space);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(Distributor_Test);
-
-Distributor_Test::Distributor_Test()
- : CppUnit::TestFixture(),
- DistributorTestUtil(),
- _bucketSpaces()
+DistributorTest::DistributorTest()
+ : Test(),
+ DistributorTestUtil(),
+ _bucketSpaces()
{
}
-void
-Distributor_Test::testOperationGeneration()
-{
+TEST_F(DistributorTest, operation_generation) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
document::BucketId bid;
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
- CPPUNIT_ASSERT_EQUAL(std::string("Remove"),
- testOp(new api::RemoveCommand(
- makeDocumentBucket(bid),
- document::DocumentId("userdoc:m:1:foo"),
- api::Timestamp(1234))));
+ EXPECT_EQ("Remove", testOp(std::make_shared<api::RemoveCommand>(
+ makeDocumentBucket(bid),
+ document::DocumentId("userdoc:m:1:foo"),
+ api::Timestamp(1234))));
- api::CreateVisitorCommand* cmd = new api::CreateVisitorCommand(makeBucketSpace(), "foo", "bar", "");
+ auto cmd = std::make_shared<api::CreateVisitorCommand>(makeBucketSpace(), "foo", "bar", "");
cmd->addBucketToBeVisited(document::BucketId(16, 1));
cmd->addBucketToBeVisited(document::BucketId());
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create"), testOp(cmd));
+ EXPECT_EQ("Visitor Create", testOp(cmd));
}
-void
-Distributor_Test::testOperationsGeneratedAndStartedWithoutDuplicates()
-{
+TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
for (uint32_t i = 0; i < 6; ++i) {
@@ -263,36 +212,32 @@ Distributor_Test::testOperationsGeneratedAndStartedWithoutDuplicates()
tickDistributorNTimes(20);
- CPPUNIT_ASSERT(!tick());
+ ASSERT_FALSE(tick());
- CPPUNIT_ASSERT_EQUAL(6, (int)_sender.commands.size());
+ ASSERT_EQ(6, _sender.commands().size());
}
-void
-Distributor_Test::testRecoveryModeOnClusterStateChange()
-{
+TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) {
setupDistributor(Redundancy(1), NodeCount(2),
"storage:1 .0.s:d distributor:1");
enableDistributorClusterState("storage:1 distributor:1");
- CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(_distributor->isInRecoveryMode());
for (uint32_t i = 0; i < 3; ++i) {
addNodesToBucketDB(document::BucketId(16, i), "0=1");
}
for (int i = 0; i < 3; ++i) {
tick();
- CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(_distributor->isInRecoveryMode());
}
tick();
- CPPUNIT_ASSERT(!_distributor->isInRecoveryMode());
+ EXPECT_FALSE(_distributor->isInRecoveryMode());
enableDistributorClusterState("storage:2 distributor:1");
- CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(_distributor->isInRecoveryMode());
}
-void
-Distributor_Test::testOperationsAreThrottled()
-{
+TEST_F(DistributorTest, operations_are_throttled) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
getConfig().setMinPendingMaintenanceOps(1);
getConfig().setMaxPendingMaintenanceOps(1);
@@ -301,32 +246,16 @@ Distributor_Test::testOperationsAreThrottled()
addNodesToBucketDB(document::BucketId(16, i), "0=1");
}
tickDistributorNTimes(20);
- CPPUNIT_ASSERT_EQUAL(1, (int)_sender.commands.size());
+ ASSERT_EQ(1, _sender.commands().size());
}
-void
-Distributor_Test::testRecoveryModeEntryResetsScanner()
-{
- CPPUNIT_FAIL("TODO: refactor so this can be mocked and tested easily");
-}
-
-void
-Distributor_Test::testReprioritizeBucketOnMaintenanceReply()
-{
- CPPUNIT_FAIL("TODO: refactor so this can be mocked and tested easily");
-}
-
-void
-Distributor_Test::testHandleUnknownMaintenanceReply()
-{
+TEST_F(DistributorTest, handle_unknown_maintenance_reply) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
{
- api::SplitBucketCommand::SP cmd(
- new api::SplitBucketCommand(makeDocumentBucket(document::BucketId(16, 1234))));
- api::SplitBucketReply::SP reply(new api::SplitBucketReply(*cmd));
-
- CPPUNIT_ASSERT(_distributor->handleReply(reply));
+ auto cmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(document::BucketId(16, 1234)));
+ auto reply = std::make_shared<api::SplitBucketReply>(*cmd);
+ ASSERT_TRUE(_distributor->handleReply(reply));
}
{
@@ -335,94 +264,74 @@ Distributor_Test::testHandleUnknownMaintenanceReply()
auto cmd = std::make_shared<api::RemoveLocationCommand>(
"false", makeDocumentBucket(document::BucketId(30, 1234)));
auto reply = std::shared_ptr<api::StorageReply>(cmd->makeReply());
- CPPUNIT_ASSERT(_distributor->handleReply(reply));
+ ASSERT_TRUE(_distributor->handleReply(reply));
}
}
-void
-Distributor_Test::testContainsTimeStatement()
-{
+TEST_F(DistributorTest, contains_time_statement) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
- CPPUNIT_ASSERT_EQUAL(false, getConfig().containsTimeStatement(""));
- CPPUNIT_ASSERT_EQUAL(false, getConfig().containsTimeStatement("testdoctype1"));
- CPPUNIT_ASSERT_EQUAL(false, getConfig().containsTimeStatement("testdoctype1.headerfield > 42"));
- CPPUNIT_ASSERT_EQUAL(true, getConfig().containsTimeStatement("testdoctype1.headerfield > now()"));
- CPPUNIT_ASSERT_EQUAL(true, getConfig().containsTimeStatement("testdoctype1.headerfield > now() - 3600"));
- CPPUNIT_ASSERT_EQUAL(true, getConfig().containsTimeStatement("testdoctype1.headerfield == now() - 3600"));
+ EXPECT_FALSE(getConfig().containsTimeStatement(""));
+ EXPECT_FALSE(getConfig().containsTimeStatement("testdoctype1"));
+ EXPECT_FALSE(getConfig().containsTimeStatement("testdoctype1.headerfield > 42"));
+ EXPECT_TRUE(getConfig().containsTimeStatement("testdoctype1.headerfield > now()"));
+ EXPECT_TRUE(getConfig().containsTimeStatement("testdoctype1.headerfield > now() - 3600"));
+ EXPECT_TRUE(getConfig().containsTimeStatement("testdoctype1.headerfield == now() - 3600"));
}
-void
-Distributor_Test::testUpdateBucketDatabase()
-{
+TEST_F(DistributorTest, update_bucket_database) {
enableDistributorClusterState("distributor:1 storage:3");
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)"
- ),
- updateBucketDB("0:456,1:456,2:789", "2:r"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=2,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)"
- ),
- updateBucketDB("0:456,1:456", "2:456"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x315,docs=394/394,bytes=197/197,trusted=false,active=false,ready=false), "
- "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false), "
- "node(idx=1,crc=0x34a,docs=421/421,bytes=210/210,trusted=false,active=false,ready=false)"
- ),
- updateBucketDB("0:456:t,1:456:t,2:123", "0:789,1:842,2:333"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x315,docs=394/394,bytes=197/197,trusted=true,active=false,ready=false), "
- "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false), "
- "node(idx=1,crc=0x315,docs=394/394,bytes=197/197,trusted=true,active=false,ready=false)"
- ),
- updateBucketDB("0:456:t,1:456:t,2:123", "0:789,1:789,2:333"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001) : "
- "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=true,active=false,ready=false)"),
- updateBucketDB("0:456:t,1:456:t", "0:r,1:r,2:333"));
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
+ "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)",
+ updateBucketDB("0:456,1:456,2:789", "2:r"));
+
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
+ "node(idx=2,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
+ "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)",
+ updateBucketDB("0:456,1:456", "2:456"));
+
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x315,docs=394/394,bytes=197/197,trusted=false,active=false,ready=false), "
+ "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false), "
+ "node(idx=1,crc=0x34a,docs=421/421,bytes=210/210,trusted=false,active=false,ready=false)",
+ updateBucketDB("0:456:t,1:456:t,2:123", "0:789,1:842,2:333"));
+
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x315,docs=394/394,bytes=197/197,trusted=true,active=false,ready=false), "
+ "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false), "
+ "node(idx=1,crc=0x315,docs=394/394,bytes=197/197,trusted=true,active=false,ready=false)",
+ updateBucketDB("0:456:t,1:456:t,2:123", "0:789,1:789,2:333"));
+
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=true,active=false,ready=false)",
+ updateBucketDB("0:456:t,1:456:t", "0:r,1:r,2:333"));
// Copies are in sync so should still be trusted even if explicitly reset.
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=2,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)"
- ),
- updateBucketDB("0:456,1:456", "2:456", ResetTrusted(true)));
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
+ "node(idx=2,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
+ "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)",
+ updateBucketDB("0:456,1:456", "2:456", ResetTrusted(true)));
// When resetting, first inserted copy should not end up as implicitly trusted.
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=false,active=false,ready=false), "
- "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false)"
- ),
- updateBucketDB("0:456",
- "2:333",
- ResetTrusted(true)));
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=false,active=false,ready=false), "
+ "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false)",
+ updateBucketDB("0:456", "2:333", ResetTrusted(true)));
}
namespace {
using namespace framework::defaultimplementation;
-class StatusRequestThread : public framework::Runnable
-{
+class StatusRequestThread : public framework::Runnable {
StatusReporterDelegate& _reporter;
std::string _result;
public:
- StatusRequestThread(StatusReporterDelegate& reporter)
+ explicit StatusRequestThread(StatusReporterDelegate& reporter)
: _reporter(reporter)
{}
void run(framework::ThreadHandle&) override {
@@ -439,16 +348,14 @@ public:
}
-void
-Distributor_Test::testTickProcessesStatusRequests()
-{
+TEST_F(DistributorTest, tick_processes_status_requests) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
// Must go via delegate since reportStatus is now just a rendering
// function and not a request enqueuer (see Distributor::handleStatusRequest).
- StatusRequestThread thread(_distributor->_distributorStatusDelegate);
+ StatusRequestThread thread(distributor_status_delegate());
FakeClock clock;
ThreadPoolImpl pool(clock);
@@ -461,20 +368,20 @@ Distributor_Test::testTickProcessesStatusRequests()
while (true) {
FastOS_Thread::Sleep(1);
framework::TickingLockGuard guard(
- _distributor->_threadPool.freezeCriticalTicks());
- if (!_distributor->_statusToDo.empty()) break;
+ distributor_thread_pool().freezeCriticalTicks());
+ if (!distributor_status_todos().empty()) {
+ break;
+ }
}
- CPPUNIT_ASSERT(tick());
+ ASSERT_TRUE(tick());
- tp->interruptAndJoin(0);
+ tp->interruptAndJoin(nullptr);
- CPPUNIT_ASSERT_CONTAIN("BucketId(0x4000000000000001)", thread.getResult());
+ EXPECT_THAT(thread.getResult(), HasSubstr("BucketId(0x4000000000000001)"));
}
-void
-Distributor_Test::testMetricUpdateHookUpdatesPendingMaintenanceMetrics()
-{
+TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// To ensure we count all operations, not just those fitting within the
// pending window.
@@ -494,53 +401,33 @@ Distributor_Test::testMetricUpdateHookUpdatesPendingMaintenanceMetrics()
// By this point, no hook has been called so the metrics have not been
// set.
- typedef MaintenanceOperation MO;
+ using MO = MaintenanceOperation;
{
const IdealStateMetricSet& metrics(getIdealStateManager().getMetrics());
- CPPUNIT_ASSERT_EQUAL(int64_t(0),
- metrics.operations[MO::MERGE_BUCKET]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::SPLIT_BUCKET]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0),
- metrics.operations[MO::SET_BUCKET_STATE]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::DELETE_BUCKET]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::JOIN_BUCKET]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0),
- metrics.operations[MO::GARBAGE_COLLECTION]
- ->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::MERGE_BUCKET]->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::SPLIT_BUCKET]->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::SET_BUCKET_STATE]->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::DELETE_BUCKET]->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::JOIN_BUCKET]->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::GARBAGE_COLLECTION]->pending.getLast());
}
// Force trigger update hook
vespalib::Monitor l;
- _distributor->_metricUpdateHook.updateMetrics(vespalib::MonitorGuard(l));
+ distributor_metric_update_hook().updateMetrics(vespalib::MonitorGuard(l));
// Metrics should now be updated to the last complete working state
{
const IdealStateMetricSet& metrics(getIdealStateManager().getMetrics());
- CPPUNIT_ASSERT_EQUAL(int64_t(1),
- metrics.operations[MO::MERGE_BUCKET]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), metrics.operations[MO::SPLIT_BUCKET]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1),
- metrics.operations[MO::SET_BUCKET_STATE]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::DELETE_BUCKET]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.operations[MO::JOIN_BUCKET]
- ->pending.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0),
- metrics.operations[MO::GARBAGE_COLLECTION]
- ->pending.getLast());
+ EXPECT_EQ(1, metrics.operations[MO::MERGE_BUCKET]->pending.getLast());
+ EXPECT_EQ(1, metrics.operations[MO::SPLIT_BUCKET]->pending.getLast());
+ EXPECT_EQ(1, metrics.operations[MO::SET_BUCKET_STATE]->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::DELETE_BUCKET]->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::JOIN_BUCKET]->pending.getLast());
+ EXPECT_EQ(0, metrics.operations[MO::GARBAGE_COLLECTION]->pending.getLast());
}
}
-void
-Distributor_Test::testPriorityConfigIsPropagatedToDistributorConfiguration()
-{
+TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configuration) {
using namespace vespa::config::content::core;
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -560,24 +447,21 @@ Distributor_Test::testPriorityConfigIsPropagatedToDistributorConfiguration()
getConfig().configure(builder);
- const DistributorConfiguration::MaintenancePriorities& mp(
- getConfig().getMaintenancePriorities());
- CPPUNIT_ASSERT_EQUAL(1, static_cast<int>(mp.mergeMoveToIdealNode));
- CPPUNIT_ASSERT_EQUAL(2, static_cast<int>(mp.mergeOutOfSyncCopies));
- CPPUNIT_ASSERT_EQUAL(3, static_cast<int>(mp.mergeTooFewCopies));
- CPPUNIT_ASSERT_EQUAL(4, static_cast<int>(mp.activateNoExistingActive));
- CPPUNIT_ASSERT_EQUAL(5, static_cast<int>(mp.activateWithExistingActive));
- CPPUNIT_ASSERT_EQUAL(6, static_cast<int>(mp.deleteBucketCopy));
- CPPUNIT_ASSERT_EQUAL(7, static_cast<int>(mp.joinBuckets));
- CPPUNIT_ASSERT_EQUAL(8, static_cast<int>(mp.splitDistributionBits));
- CPPUNIT_ASSERT_EQUAL(9, static_cast<int>(mp.splitLargeBucket));
- CPPUNIT_ASSERT_EQUAL(10, static_cast<int>(mp.splitInconsistentBucket));
- CPPUNIT_ASSERT_EQUAL(11, static_cast<int>(mp.garbageCollection));
-}
-
-void
-Distributor_Test::testNoDbResurrectionForBucketNotOwnedInPendingState()
-{
+ const auto& mp = getConfig().getMaintenancePriorities();
+ EXPECT_EQ(1, static_cast<int>(mp.mergeMoveToIdealNode));
+ EXPECT_EQ(2, static_cast<int>(mp.mergeOutOfSyncCopies));
+ EXPECT_EQ(3, static_cast<int>(mp.mergeTooFewCopies));
+ EXPECT_EQ(4, static_cast<int>(mp.activateNoExistingActive));
+ EXPECT_EQ(5, static_cast<int>(mp.activateWithExistingActive));
+ EXPECT_EQ(6, static_cast<int>(mp.deleteBucketCopy));
+ EXPECT_EQ(7, static_cast<int>(mp.joinBuckets));
+ EXPECT_EQ(8, static_cast<int>(mp.splitDistributionBits));
+ EXPECT_EQ(9, static_cast<int>(mp.splitLargeBucket));
+ EXPECT_EQ(10, static_cast<int>(mp.splitInconsistentBucket));
+ EXPECT_EQ(11, static_cast<int>(mp.garbageCollection));
+}
+
+TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
lib::ClusterState newState("storage:10 distributor:10");
auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState);
@@ -587,24 +471,20 @@ Distributor_Test::testNoDbResurrectionForBucketNotOwnedInPendingState()
getBucketDBUpdater().onSetSystemState(stateCmd);
document::BucketId nonOwnedBucket(16, 3);
- CPPUNIT_ASSERT(!getBucketDBUpdater()
- .checkOwnershipInPendingState(makeDocumentBucket(nonOwnedBucket)).isOwned());
- CPPUNIT_ASSERT(!getBucketDBUpdater().getDistributorComponent()
- .checkOwnershipInPendingAndCurrentState(makeDocumentBucket(nonOwnedBucket))
- .isOwned());
+ EXPECT_FALSE(getBucketDBUpdater().checkOwnershipInPendingState(makeDocumentBucket(nonOwnedBucket)).isOwned());
+ EXPECT_FALSE(getBucketDBUpdater().getDistributorComponent()
+ .checkOwnershipInPendingAndCurrentState(makeDocumentBucket(nonOwnedBucket))
+ .isOwned());
std::vector<BucketCopy> copies;
copies.emplace_back(1234, 0, api::BucketInfo(0x567, 1, 2));
getExternalOperationHandler().updateBucketDatabase(makeDocumentBucket(nonOwnedBucket), copies,
DatabaseUpdate::CREATE_IF_NONEXISTING);
- CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"),
- dumpBucket(nonOwnedBucket));
+ EXPECT_EQ("NONEXISTING", dumpBucket(nonOwnedBucket));
}
-void
-Distributor_Test::testAddedDbBucketsWithoutGcTimestampImplicitlyGetCurrentTime()
-{
+TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) {
setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
getClock().setAbsoluteTimeInSeconds(101234);
document::BucketId bucket(16, 7654);
@@ -614,13 +494,10 @@ Distributor_Test::testAddedDbBucketsWithoutGcTimestampImplicitlyGetCurrentTime()
getExternalOperationHandler().updateBucketDatabase(makeDocumentBucket(bucket), copies,
DatabaseUpdate::CREATE_IF_NONEXISTING);
BucketDatabase::Entry e(getBucket(bucket));
- CPPUNIT_ASSERT_EQUAL(uint32_t(101234), e->getLastGarbageCollectionTime());
+ EXPECT_EQ(101234, e->getLastGarbageCollectionTime());
}
-
-void
-Distributor_Test::mergeStatsAreAccumulatedDuringDatabaseIteration()
-{
+TEST_F(DistributorTest, merge_stats_are_accumulated_during_database_iteration) {
setupDistributor(Redundancy(2), NodeCount(3), "storage:3 distributor:1");
// Copies out of sync. Not possible for distributor to _reliably_ tell
// which direction(s) data will flow, so for simplicity assume that we
@@ -642,46 +519,47 @@ Distributor_Test::mergeStatsAreAccumulatedDuringDatabaseIteration()
// added to existing.
tickDistributorNTimes(50);
- const auto& stats(_distributor->_maintenanceStats);
+ const auto& stats = distributor_maintenance_stats();
{
NodeMaintenanceStats wanted;
wanted.syncing = 1;
wanted.copyingOut = 2;
wanted.total = 3;
- CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(0, makeBucketSpace()));
+ EXPECT_EQ(wanted, stats.perNodeStats.forNode(0, makeBucketSpace()));
}
{
NodeMaintenanceStats wanted;
wanted.movingOut = 1;
wanted.total = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(1, makeBucketSpace()));
+ EXPECT_EQ(wanted, stats.perNodeStats.forNode(1, makeBucketSpace()));
}
{
NodeMaintenanceStats wanted;
wanted.syncing = 1;
wanted.copyingIn = 2;
wanted.total = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(2, makeBucketSpace()));
+ EXPECT_EQ(wanted, stats.perNodeStats.forNode(2, makeBucketSpace()));
}
- auto bucketStats = _distributor->getBucketSpacesStats();
- CPPUNIT_ASSERT_EQUAL(static_cast<size_t>(3), bucketStats.size());
+ auto bucketStats = distributor_bucket_spaces_stats();
+ ASSERT_EQ(3, bucketStats.size());
assertBucketSpaceStats(1, 3, 0, "default", bucketStats);
assertBucketSpaceStats(0, 1, 1, "default", bucketStats);
assertBucketSpaceStats(3, 1, 2, "default", bucketStats);
}
void
-Distributor_Test::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node, const vespalib::string &bucketSpace,
- const BucketSpacesStatsProvider::PerNodeBucketSpacesStats &stats)
+DistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node,
+ const vespalib::string& bucketSpace,
+ const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats)
{
auto nodeItr = stats.find(node);
- CPPUNIT_ASSERT(nodeItr != stats.end());
- CPPUNIT_ASSERT_EQUAL(static_cast<size_t>(1), nodeItr->second.size());
+ ASSERT_TRUE(nodeItr != stats.end());
+ ASSERT_EQ(1, nodeItr->second.size());
auto bucketSpaceItr = nodeItr->second.find(bucketSpace);
- CPPUNIT_ASSERT(bucketSpaceItr != nodeItr->second.end());
- CPPUNIT_ASSERT(bucketSpaceItr->second.valid());
- CPPUNIT_ASSERT_EQUAL(expBucketTotal, bucketSpaceItr->second.bucketsTotal());
- CPPUNIT_ASSERT_EQUAL(expBucketPending, bucketSpaceItr->second.bucketsPending());
+ ASSERT_TRUE(bucketSpaceItr != nodeItr->second.end());
+ ASSERT_TRUE(bucketSpaceItr->second.valid());
+ ASSERT_EQ(expBucketTotal, bucketSpaceItr->second.bucketsTotal());
+ ASSERT_EQ(expBucketPending, bucketSpaceItr->second.bucketsPending());
}
/**
@@ -690,9 +568,7 @@ Distributor_Test::assertBucketSpaceStats(size_t expBucketPending, size_t expBuck
* their state checkers at all, we won't get any statistics from any other
* operations for the bucket.
*/
-void
-Distributor_Test::statsGeneratedForPreemptedOperations()
-{
+TEST_F(DistributorTest, stats_generated_for_preempted_operations) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// For this test it suffices to have a single bucket with multiple aspects
// wrong about it. In this case, let a bucket be both out of sync _and_
@@ -701,63 +577,53 @@ Distributor_Test::statsGeneratedForPreemptedOperations()
// by activation, we'll see no merge stats at all.
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1,1=2/2/2");
tickDistributorNTimes(50);
- const auto& stats(_distributor->_maintenanceStats);
+ const auto& stats = distributor_maintenance_stats();
{
NodeMaintenanceStats wanted;
wanted.syncing = 1;
wanted.total = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(0, makeBucketSpace()));
+ EXPECT_EQ(wanted, stats.perNodeStats.forNode(0, makeBucketSpace()));
}
{
NodeMaintenanceStats wanted;
wanted.syncing = 1;
wanted.total = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, stats.perNodeStats.forNode(1, makeBucketSpace()));
+ EXPECT_EQ(wanted, stats.perNodeStats.forNode(1, makeBucketSpace()));
}
}
-void
-Distributor_Test::hostInfoReporterConfigIsPropagatedToReporter()
-{
+TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// Default is enabled=true.
- CPPUNIT_ASSERT(_distributor->_hostInfoReporter.isReportingEnabled());
+ EXPECT_TRUE(distributor_host_info_reporter().isReportingEnabled());
ConfigBuilder builder;
builder.enableHostInfoReporting = false;
configureDistributor(builder);
- CPPUNIT_ASSERT(!_distributor->_hostInfoReporter.isReportingEnabled());
+ EXPECT_FALSE(distributor_host_info_reporter().isReportingEnabled());
}
-void
-Distributor_Test::replicaCountingModeIsConfiguredToTrustedByDefault()
-{
+TEST_F(DistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- CPPUNIT_ASSERT_EQUAL(ConfigBuilder::TRUSTED, currentReplicaCountingMode());
+ EXPECT_EQ(ConfigBuilder::TRUSTED, currentReplicaCountingMode());
}
-void
-Distributor_Test::replicaCountingModeConfigIsPropagatedToMetricUpdater()
-{
+TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
ConfigBuilder builder;
builder.minimumReplicaCountingMode = ConfigBuilder::ANY;
configureDistributor(builder);
- CPPUNIT_ASSERT_EQUAL(ConfigBuilder::ANY, currentReplicaCountingMode());
+ EXPECT_EQ(ConfigBuilder::ANY, currentReplicaCountingMode());
}
-void
-Distributor_Test::bucketActivationIsEnabledByDefault()
-{
+TEST_F(DistributorTest, bucket_activation_is_enabled_by_default) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- CPPUNIT_ASSERT(getConfig().isBucketActivationDisabled() == false);
+ EXPECT_FALSE(getConfig().isBucketActivationDisabled());
}
-void
-Distributor_Test::bucketActivationConfigIsPropagatedToDistributorConfiguration()
-{
+TEST_F(DistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) {
using namespace vespa::config::content::core;
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -766,11 +632,11 @@ Distributor_Test::bucketActivationConfigIsPropagatedToDistributorConfiguration()
builder.disableBucketActivation = true;
getConfig().configure(builder);
- CPPUNIT_ASSERT(getConfig().isBucketActivationDisabled());
+ EXPECT_TRUE(getConfig().isBucketActivationDisabled());
}
void
-Distributor_Test::configureMaxClusterClockSkew(int seconds) {
+DistributorTest::configureMaxClusterClockSkew(int seconds) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -779,12 +645,11 @@ Distributor_Test::configureMaxClusterClockSkew(int seconds) {
_distributor->enableNextConfig();
}
-void
-Distributor_Test::max_clock_skew_config_is_propagated_to_distributor_config() {
+TEST_F(DistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
configureMaxClusterClockSkew(5);
- CPPUNIT_ASSERT(getConfig().getMaxClusterClockSkew() == std::chrono::seconds(5));
+ EXPECT_EQ(getConfig().getMaxClusterClockSkew(), std::chrono::seconds(5));
}
namespace {
@@ -798,19 +663,18 @@ auto makeDummyRemoveCommand() {
}
-void Distributor_Test::sendDownClusterStateCommand() {
+void DistributorTest::sendDownClusterStateCommand() {
lib::ClusterState newState("bits:1 storage:1 distributor:1");
auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState);
_distributor->handleMessage(stateCmd);
}
-void Distributor_Test::replyToSingleRequestBucketInfoCommandWith1Bucket() {
- CPPUNIT_ASSERT_EQUAL(_bucketSpaces.size(), _sender.commands.size());
- for (uint32_t i = 0; i < _sender.commands.size(); ++i) {
- CPPUNIT_ASSERT_EQUAL(api::MessageType::REQUESTBUCKETINFO,
- _sender.commands[i]->getType());
+void DistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() {
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
+ for (uint32_t i = 0; i < _sender.commands().size(); ++i) {
+ ASSERT_EQ(api::MessageType::REQUESTBUCKETINFO, _sender.command(i)->getType());
auto& bucketReq(static_cast<api::RequestBucketInfoCommand&>
- (*_sender.commands[i]));
+ (*_sender.command(i)));
auto bucketReply = bucketReq.makeReply();
if (bucketReq.getBucketSpace() == FixedBucketSpaces::default_space()) {
// Make sure we have a bucket to route our remove op to, or we'd get
@@ -822,52 +686,49 @@ void Distributor_Test::replyToSingleRequestBucketInfoCommandWith1Bucket() {
}
_distributor->handleMessage(std::move(bucketReply));
}
- _sender.commands.clear();
+ _sender.commands().clear();
}
-void Distributor_Test::sendDownDummyRemoveCommand() {
+void DistributorTest::sendDownDummyRemoveCommand() {
_distributor->handleMessage(makeDummyRemoveCommand());
}
-void Distributor_Test::assertSingleBouncedRemoveReplyPresent() {
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size()); // Rejected remove
- CPPUNIT_ASSERT_EQUAL(api::MessageType::REMOVE_REPLY,
- _sender.replies[0]->getType());
- auto& reply(static_cast<api::RemoveReply&>(*_sender.replies[0]));
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::STALE_TIMESTAMP,
- reply.getResult().getResult());
- _sender.replies.clear();
+void DistributorTest::assertSingleBouncedRemoveReplyPresent() {
+ ASSERT_EQ(1, _sender.replies().size()); // Rejected remove
+ ASSERT_EQ(api::MessageType::REMOVE_REPLY, _sender.reply(0)->getType());
+ auto& reply(static_cast<api::RemoveReply&>(*_sender.reply(0)));
+ ASSERT_EQ(api::ReturnCode::STALE_TIMESTAMP, reply.getResult().getResult());
+ _sender.replies().clear();
}
-void Distributor_Test::assertNoMessageBounced() {
- CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.replies.size());
+void DistributorTest::assertNoMessageBounced() {
+ ASSERT_EQ(0, _sender.replies().size());
}
// TODO refactor this to set proper highest timestamp as part of bucket info
// reply once we have the "highest timestamp across all owned buckets" feature
// in place.
-void
-Distributor_Test::configured_safe_time_point_rejection_works_end_to_end() {
+TEST_F(DistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
setupDistributor(Redundancy(2), NodeCount(2),
"bits:1 storage:1 distributor:2");
getClock().setAbsoluteTimeInSeconds(1000);
configureMaxClusterClockSkew(10);
sendDownClusterStateCommand();
- replyToSingleRequestBucketInfoCommandWith1Bucket();
+ ASSERT_NO_FATAL_FAILURE(replyToSingleRequestBucketInfoCommandWith1Bucket());
// SetSystemStateCommand sent down chain at this point.
sendDownDummyRemoveCommand();
- assertSingleBouncedRemoveReplyPresent();
+ ASSERT_NO_FATAL_FAILURE(assertSingleBouncedRemoveReplyPresent());
// Increment time to first whole second of clock + 10 seconds of skew.
// Should now not get any feed rejections.
getClock().setAbsoluteTimeInSeconds(1011);
sendDownDummyRemoveCommand();
- assertNoMessageBounced();
+ ASSERT_NO_FATAL_FAILURE(assertNoMessageBounced());
}
-void Distributor_Test::configure_mutation_sequencing(bool enabled) {
+void DistributorTest::configure_mutation_sequencing(bool enabled) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -876,23 +737,23 @@ void Distributor_Test::configure_mutation_sequencing(bool enabled) {
_distributor->enableNextConfig();
}
-void Distributor_Test::sequencing_config_is_propagated_to_distributor_config() {
+TEST_F(DistributorTest, sequencing_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// Should be enabled by default
- CPPUNIT_ASSERT(getConfig().getSequenceMutatingOperations());
+ EXPECT_TRUE(getConfig().getSequenceMutatingOperations());
// Explicitly disabled.
configure_mutation_sequencing(false);
- CPPUNIT_ASSERT(!getConfig().getSequenceMutatingOperations());
+ EXPECT_FALSE(getConfig().getSequenceMutatingOperations());
// Explicitly enabled.
configure_mutation_sequencing(true);
- CPPUNIT_ASSERT(getConfig().getSequenceMutatingOperations());
+ EXPECT_TRUE(getConfig().getSequenceMutatingOperations());
}
void
-Distributor_Test::configure_merge_busy_inhibit_duration(int seconds) {
+DistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -901,39 +762,39 @@ Distributor_Test::configure_merge_busy_inhibit_duration(int seconds) {
_distributor->enableNextConfig();
}
-void Distributor_Test::merge_busy_inhibit_duration_config_is_propagated_to_distributor_config() {
+TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
configure_merge_busy_inhibit_duration(7);
- CPPUNIT_ASSERT(getConfig().getInhibitMergesOnBusyNodeDuration() == std::chrono::seconds(7));
+ EXPECT_EQ(getConfig().getInhibitMergesOnBusyNodeDuration(), std::chrono::seconds(7));
}
-void Distributor_Test::merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker() {
+TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
configure_merge_busy_inhibit_duration(100);
auto cmd = makeDummyRemoveCommand(); // Remove is for bucket 1
- _distributor->handleMessage(cmd);
+ distributor_handle_message(cmd);
// Should send to content node 0
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::REMOVE, _sender.commands[0]->getType());
- auto& fwd_cmd = dynamic_cast<api::RemoveCommand&>(*_sender.commands[0]);
+ ASSERT_EQ(1, _sender.commands().size());
+ ASSERT_EQ(api::MessageType::REMOVE, _sender.command(0)->getType());
+ auto& fwd_cmd = dynamic_cast<api::RemoveCommand&>(*_sender.command(0));
auto reply = fwd_cmd.makeReply();
reply->setResult(api::ReturnCode(api::ReturnCode::BUSY));
_distributor->handleReply(std::shared_ptr<api::StorageReply>(std::move(reply)));
auto& node_info = _distributor->getPendingMessageTracker().getNodeInfo();
- CPPUNIT_ASSERT(node_info.isBusy(0));
+ EXPECT_TRUE(node_info.isBusy(0));
getClock().addSecondsToTime(99);
- CPPUNIT_ASSERT(node_info.isBusy(0));
+ EXPECT_TRUE(node_info.isBusy(0));
getClock().addSecondsToTime(2);
- CPPUNIT_ASSERT(!node_info.isBusy(0));
+ EXPECT_FALSE(node_info.isBusy(0));
}
-void Distributor_Test::external_client_requests_are_handled_individually_in_priority_order() {
+TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_priority_order) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
@@ -950,18 +811,18 @@ void Distributor_Test::external_client_requests_are_handled_individually_in_prio
// For each tick, a priority-order client request is processed and sent off.
for (size_t i = 1; i <= priorities.size(); ++i) {
tickDistributorNTimes(1);
- CPPUNIT_ASSERT_EQUAL(size_t(i), _sender.commands.size());
+ ASSERT_EQ(i, _sender.commands().size());
}
std::vector<int> expected({0, 10, 40, 50, 255});
std::vector<int> actual;
- for (auto& msg : _sender.commands) {
+ for (auto& msg : _sender.commands()) {
actual.emplace_back(static_cast<int>(msg->getPriority()));
}
- CPPUNIT_ASSERT_EQUAL(expected, actual);
+ EXPECT_THAT(actual, ContainerEq(expected));
}
-void Distributor_Test::internal_messages_are_started_in_fifo_order_batch() {
+TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) {
// To test internal request ordering, we use NotifyBucketChangeCommand
// for the reason that it explicitly updates the bucket database for
// each individual invocation.
@@ -980,16 +841,16 @@ void Distributor_Test::internal_messages_are_started_in_fifo_order_batch() {
// Doing a single tick should process all internal requests in one batch
tickDistributorNTimes(1);
- CPPUNIT_ASSERT_EQUAL(size_t(5), _sender.replies.size());
+ ASSERT_EQ(5, _sender.replies().size());
// The bucket info for priority 1 (last FIFO-order change command received, but
// highest priority) should be the end-state of the bucket database, _not_ that
// of lowest priority 255.
BucketDatabase::Entry e(getBucket(bucket));
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(1, 1, 1), e.getBucketInfo().getNode(0)->getBucketInfo());
+ EXPECT_EQ(api::BucketInfo(1, 1, 1), e.getBucketInfo().getNode(0)->getBucketInfo());
}
-void Distributor_Test::closing_aborts_priority_queued_client_requests() {
+TEST_F(DistributorTest, closing_aborts_priority_queued_client_requests) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
document::BucketId bucket(16, 1);
addNodesToBucketDB(bucket, "0=1/1/1/t");
@@ -1003,10 +864,9 @@ void Distributor_Test::closing_aborts_priority_queued_client_requests() {
tickDistributorNTimes(1);
// Closing should trigger 1 abort via startet GetOperation and 9 aborts from pri queue
_distributor->close();
- CPPUNIT_ASSERT_EQUAL(size_t(10), _sender.replies.size());
- for (auto& msg : _sender.replies) {
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED,
- dynamic_cast<api::StorageReply&>(*msg).getResult().getResult());
+ ASSERT_EQ(10, _sender.replies().size());
+ for (auto& msg : _sender.replies()) {
+ EXPECT_EQ(api::ReturnCode::ABORTED, dynamic_cast<api::StorageReply&>(*msg).getResult().getResult());
}
}
@@ -1016,19 +876,19 @@ void assert_invalid_stats_for_all_spaces(
const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats,
uint16_t node_index) {
auto stats_iter = stats.find(node_index);
- CPPUNIT_ASSERT(stats_iter != stats.cend());
- CPPUNIT_ASSERT_EQUAL(size_t(2), stats_iter->second.size());
+ ASSERT_TRUE(stats_iter != stats.cend());
+ ASSERT_EQ(2, stats_iter->second.size());
auto space_iter = stats_iter->second.find(document::FixedBucketSpaces::default_space_name());
- CPPUNIT_ASSERT(space_iter != stats_iter->second.cend());
- CPPUNIT_ASSERT(!space_iter->second.valid());
+ ASSERT_TRUE(space_iter != stats_iter->second.cend());
+ ASSERT_FALSE(space_iter->second.valid());
space_iter = stats_iter->second.find(document::FixedBucketSpaces::global_space_name());
- CPPUNIT_ASSERT(space_iter != stats_iter->second.cend());
- CPPUNIT_ASSERT(!space_iter->second.valid());
+ ASSERT_TRUE(space_iter != stats_iter->second.cend());
+ ASSERT_FALSE(space_iter->second.valid());
}
}
-void Distributor_Test::entering_recovery_mode_resets_bucket_space_stats() {
+TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
// Set up a cluster state + DB contents which implies merge maintenance ops
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
@@ -1038,82 +898,80 @@ void Distributor_Test::entering_recovery_mode_resets_bucket_space_stats() {
tickDistributorNTimes(5); // 1/3rds into second round through database
enableDistributorClusterState("version:2 distributor:1 storage:3 .1.s:d");
- CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(_distributor->isInRecoveryMode());
// Bucket space stats should now be invalid per space per node, pending stats
// from state version 2. Exposing stats from version 1 risks reporting stale
// information back to the cluster controller.
- const auto stats = _distributor->getBucketSpacesStats();
- CPPUNIT_ASSERT_EQUAL(size_t(2), stats.size());
+ const auto stats = distributor_bucket_spaces_stats();
+ ASSERT_EQ(2, stats.size());
assert_invalid_stats_for_all_spaces(stats, 0);
assert_invalid_stats_for_all_spaces(stats, 2);
}
-void Distributor_Test::leaving_recovery_mode_immediately_sends_getnodestate_replies() {
+TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) {
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
// Should not send explicit replies during init stage
- CPPUNIT_ASSERT_EQUAL(size_t(0), explicit_node_state_reply_send_invocations());
+ ASSERT_EQ(0, explicit_node_state_reply_send_invocations());
// Add a couple of buckets so we have something to iterate over
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
addNodesToBucketDB(document::BucketId(16, 2), "0=1/1/1/t/a");
enableDistributorClusterState("version:2 distributor:1 storage:3 .1.s:d");
- CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
- CPPUNIT_ASSERT_EQUAL(size_t(0), explicit_node_state_reply_send_invocations());
+ EXPECT_TRUE(_distributor->isInRecoveryMode());
+ EXPECT_EQ(0, explicit_node_state_reply_send_invocations());
tickDistributorNTimes(1); // DB round not yet complete
- CPPUNIT_ASSERT_EQUAL(size_t(0), explicit_node_state_reply_send_invocations());
+ EXPECT_EQ(0, explicit_node_state_reply_send_invocations());
tickDistributorNTimes(2); // DB round complete after 2nd bucket + "scan done" discovery tick
- CPPUNIT_ASSERT_EQUAL(size_t(1), explicit_node_state_reply_send_invocations());
- CPPUNIT_ASSERT(!_distributor->isInRecoveryMode());
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
+ EXPECT_FALSE(_distributor->isInRecoveryMode());
// Now out of recovery mode, subsequent round completions should not send replies
tickDistributorNTimes(10);
- CPPUNIT_ASSERT_EQUAL(size_t(1), explicit_node_state_reply_send_invocations());
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
}
-void Distributor_Test::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) {
+void DistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) {
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
- CPPUNIT_ASSERT(_distributor->isInRecoveryMode());
+ EXPECT_TRUE(_distributor->isInRecoveryMode());
// 2 buckets with missing replicas triggering merge pending stats
addNodesToBucketDB(Bucket(space, BucketId(16, 1)), "0=1/1/1/t/a");
addNodesToBucketDB(Bucket(space, BucketId(16, 2)), "0=1/1/1/t/a");
tickDistributorNTimes(3);
- CPPUNIT_ASSERT(!_distributor->isInRecoveryMode());
+ EXPECT_FALSE(_distributor->isInRecoveryMode());
const auto space_name = FixedBucketSpaces::to_string(space);
assertBucketSpaceStats(2, 0, 1, space_name, _distributor->getBucketSpacesStats());
// First completed scan sends off merge stats et al to cluster controller
- CPPUNIT_ASSERT_EQUAL(size_t(1), explicit_node_state_reply_send_invocations());
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
// Edge not triggered when 1 bucket with missing replica left
addNodesToBucketDB(Bucket(space, BucketId(16, 1)), "0=1/1/1/t/a,1=1/1/1/t");
tickDistributorNTimes(3);
assertBucketSpaceStats(1, 1, 1, space_name, _distributor->getBucketSpacesStats());
- CPPUNIT_ASSERT_EQUAL(size_t(1), explicit_node_state_reply_send_invocations());
+ EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
// Edge triggered when no more buckets with requiring merge
addNodesToBucketDB(Bucket(space, BucketId(16, 2)), "0=1/1/1/t/a,1=1/1/1/t");
tickDistributorNTimes(3);
assertBucketSpaceStats(0, 2, 1, space_name, _distributor->getBucketSpacesStats());
- CPPUNIT_ASSERT_EQUAL(size_t(2), explicit_node_state_reply_send_invocations());
+ EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
// Should only send when edge happens, not in subsequent DB iterations
tickDistributorNTimes(10);
- CPPUNIT_ASSERT_EQUAL(size_t(2), explicit_node_state_reply_send_invocations());
+ EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
// Going back to merges pending should _not_ send a getnodestate reply (at least for now)
addNodesToBucketDB(Bucket(space, BucketId(16, 1)), "0=1/1/1/t/a");
tickDistributorNTimes(3);
assertBucketSpaceStats(1, 1, 1, space_name, _distributor->getBucketSpacesStats());
- CPPUNIT_ASSERT_EQUAL(size_t(2), explicit_node_state_reply_send_invocations());
+ EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
}
-void Distributor_Test::pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies() {
+TEST_F(DistributorTest, pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies) {
do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::default_space());
}
-void Distributor_Test::pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies() {
+TEST_F(DistributorTest, pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies) {
do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::global_space());
}
}
-
-}
diff --git a/storage/src/tests/distributor/distributortestutil.cpp b/storage/src/tests/distributor/distributortestutil.cpp
index 3f7f2eac63a..91af37e0f30 100644
--- a/storage/src/tests/distributor/distributortestutil.cpp
+++ b/storage/src/tests/distributor/distributortestutil.cpp
@@ -295,11 +295,11 @@ DistributorTestUtil::sendReply(Operation& op,
api::ReturnCode::Result result)
{
if (idx == -1) {
- idx = _sender.commands.size() - 1;
+ idx = _sender.commands().size() - 1;
}
- assert(idx >= 0 && idx < static_cast<int>(_sender.commands.size()));
+ assert(idx >= 0 && idx < static_cast<int>(_sender.commands().size()));
- std::shared_ptr<api::StorageCommand> cmd = _sender.commands[idx];
+ std::shared_ptr<api::StorageCommand> cmd = _sender.command(idx);
api::StorageReply::SP reply(cmd->makeReply().release());
reply->setResult(result);
op.receive(_sender, reply);
diff --git a/storage/src/tests/distributor/distributortestutil.h b/storage/src/tests/distributor/distributortestutil.h
index 420111437d2..3dc71bcb433 100644
--- a/storage/src/tests/distributor/distributortestutil.h
+++ b/storage/src/tests/distributor/distributortestutil.h
@@ -1,7 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include "messagesenderstub.h"
+#include "distributor_message_sender_stub.h"
#include <tests/common/teststorageapp.h>
#include <tests/common/testhelper.h>
#include <tests/common/dummystoragelink.h>
@@ -181,14 +181,14 @@ protected:
std::unique_ptr<framework::TickingThreadPool> _threadPool;
std::unique_ptr<Distributor> _distributor;
std::unique_ptr<storage::DistributorComponent> _component;
- MessageSenderStub _sender;
- MessageSenderStub _senderDown;
+ DistributorMessageSenderStub _sender;
+ DistributorMessageSenderStub _senderDown;
HostInfo _hostInfo;
struct MessageSenderImpl : public ChainedMessageSender {
- MessageSenderStub& _sender;
- MessageSenderStub& _senderDown;
- MessageSenderImpl(MessageSenderStub& up, MessageSenderStub& down)
+ DistributorMessageSenderStub& _sender;
+ DistributorMessageSenderStub& _senderDown;
+ MessageSenderImpl(DistributorMessageSenderStub& up, DistributorMessageSenderStub& down)
: _sender(up), _senderDown(down) {}
void sendUp(const std::shared_ptr<api::StorageMessage>& msg) override {
diff --git a/storage/src/tests/distributor/externaloperationhandlertest.cpp b/storage/src/tests/distributor/externaloperationhandlertest.cpp
index 40fe885dcb1..88e133cc010 100644
--- a/storage/src/tests/distributor/externaloperationhandlertest.cpp
+++ b/storage/src/tests/distributor/externaloperationhandlertest.cpp
@@ -9,43 +9,17 @@
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/update/documentupdate.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
+using document::DocumentId;
+using namespace ::testing;
namespace storage::distributor {
-class ExternalOperationHandlerTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
+struct ExternalOperationHandlerTest : Test, DistributorTestUtil {
document::TestDocMan _testDocMan;
- CPPUNIT_TEST_SUITE(ExternalOperationHandlerTest);
- CPPUNIT_TEST(testBucketSplitMask);
- CPPUNIT_TEST(mutating_operation_wdr_bounced_on_wrong_current_distribution);
- CPPUNIT_TEST(mutating_operation_busy_bounced_on_wrong_pending_distribution);
- CPPUNIT_TEST(mutating_operation_busy_bounced_if_no_cluster_state_received_yet);
- CPPUNIT_TEST(read_only_operation_wdr_bounced_on_wrong_current_distribution);
- CPPUNIT_TEST(read_only_operation_busy_bounced_if_no_cluster_state_received_yet);
- CPPUNIT_TEST(reject_put_if_not_past_safe_time_point);
- CPPUNIT_TEST(reject_remove_if_not_past_safe_time_point);
- CPPUNIT_TEST(reject_update_if_not_past_safe_time_point);
- CPPUNIT_TEST(get_not_rejected_by_unsafe_time_point);
- CPPUNIT_TEST(mutation_not_rejected_when_safe_point_reached);
- CPPUNIT_TEST(reject_put_with_concurrent_mutation_to_same_id);
- CPPUNIT_TEST(do_not_reject_put_operations_to_different_ids);
- CPPUNIT_TEST(reject_remove_with_concurrent_mutation_to_same_id);
- CPPUNIT_TEST(do_not_reject_remove_operations_to_different_ids);
- CPPUNIT_TEST(reject_update_with_concurrent_mutation_to_same_id);
- CPPUNIT_TEST(do_not_reject_update_operations_to_different_ids);
- CPPUNIT_TEST(operation_destruction_allows_new_mutations_for_id);
- CPPUNIT_TEST(concurrent_get_and_mutation_do_not_conflict);
- CPPUNIT_TEST(sequencing_works_across_mutation_types);
- CPPUNIT_TEST(sequencing_can_be_explicitly_config_disabled);
- CPPUNIT_TEST(gets_are_started_with_mutable_db_outside_transition_period);
- CPPUNIT_TEST(gets_are_started_with_read_only_db_during_transition_period);
- CPPUNIT_TEST(gets_are_busy_bounced_during_transition_period_if_stale_reads_disabled);
- CPPUNIT_TEST_SUITE_END();
-
document::BucketId findNonOwnedUserBucketInState(vespalib::stringref state);
document::BucketId findOwned1stNotOwned2ndInStates(
vespalib::stringref state1,
@@ -63,7 +37,8 @@ class ExternalOperationHandlerTest : public CppUnit::TestFixture,
void verify_busy_bounced_due_to_no_active_state(std::shared_ptr<api::StorageCommand> cmd);
- Operation::SP start_operation_verify_not_rejected(std::shared_ptr<api::StorageCommand> cmd);
+ void start_operation_verify_not_rejected(std::shared_ptr<api::StorageCommand> cmd,
+ Operation::SP& out_generated);
void start_operation_verify_rejected(std::shared_ptr<api::StorageCommand> cmd);
int64_t safe_time_not_reached_metric_count(
@@ -93,32 +68,6 @@ class ExternalOperationHandlerTest : public CppUnit::TestFixture,
// Returns an arbitrary bucket not owned in the pending state
document::BucketId set_up_pending_cluster_state_transition(bool read_only_enabled);
-protected:
- void testBucketSplitMask();
- void mutating_operation_wdr_bounced_on_wrong_current_distribution();
- void mutating_operation_busy_bounced_on_wrong_pending_distribution();
- void mutating_operation_busy_bounced_if_no_cluster_state_received_yet();
- void read_only_operation_wdr_bounced_on_wrong_current_distribution();
- void read_only_operation_busy_bounced_if_no_cluster_state_received_yet();
- void reject_put_if_not_past_safe_time_point();
- void reject_remove_if_not_past_safe_time_point();
- void reject_update_if_not_past_safe_time_point();
- void get_not_rejected_by_unsafe_time_point();
- void mutation_not_rejected_when_safe_point_reached();
- void reject_put_with_concurrent_mutation_to_same_id();
- void do_not_reject_put_operations_to_different_ids();
- void reject_remove_with_concurrent_mutation_to_same_id();
- void do_not_reject_remove_operations_to_different_ids();
- void reject_update_with_concurrent_mutation_to_same_id();
- void do_not_reject_update_operations_to_different_ids();
- void operation_destruction_allows_new_mutations_for_id();
- void concurrent_get_and_mutation_do_not_conflict();
- void sequencing_works_across_mutation_types();
- void sequencing_can_be_explicitly_config_disabled();
- void gets_are_started_with_mutable_db_outside_transition_period();
- void gets_are_started_with_read_only_db_during_transition_period();
- void gets_are_busy_bounced_during_transition_period_if_stale_reads_disabled();
-
void assert_rejection_due_to_unsafe_time(
std::shared_ptr<api::StorageCommand> cmd);
@@ -130,37 +79,30 @@ protected:
std::shared_ptr<api::StorageCommand> cmd1,
std::shared_ptr<api::StorageCommand> cmd2);
-public:
- void tearDown() override {
+ void TearDown() override {
close();
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(ExternalOperationHandlerTest);
-
-using document::DocumentId;
-
-void
-ExternalOperationHandlerTest::testBucketSplitMask()
-{
+TEST_F(ExternalOperationHandlerTest, bucket_split_mask) {
{
createLinks();
getDirConfig().getConfig("stor-distributormanager").set("minsplitcount", "16");
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0xffff),
+ EXPECT_EQ(document::BucketId(16, 0xffff),
getExternalOperationHandler().getBucketId(document::DocumentId(
vespalib::make_string("userdoc:ns:%d::", 0xffff))
).stripUnused());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0),
+ EXPECT_EQ(document::BucketId(16, 0),
getExternalOperationHandler().getBucketId(document::DocumentId(
vespalib::make_string("userdoc:ns:%d::", 0x10000))
).stripUnused());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0xffff),
+ EXPECT_EQ(document::BucketId(16, 0xffff),
getExternalOperationHandler().getBucketId(document::DocumentId(
vespalib::make_string("userdoc:ns:%d::", 0xffff))
).stripUnused());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x100),
+ EXPECT_EQ(document::BucketId(16, 0x100),
getExternalOperationHandler().getBucketId(document::DocumentId(
vespalib::make_string("userdoc:ns:%d::", 0x100))
).stripUnused());
@@ -169,11 +111,11 @@ ExternalOperationHandlerTest::testBucketSplitMask()
{
getDirConfig().getConfig("stor-distributormanager").set("minsplitcount", "20");
createLinks();
- CPPUNIT_ASSERT_EQUAL(document::BucketId(20, 0x11111),
+ EXPECT_EQ(document::BucketId(20, 0x11111),
getExternalOperationHandler().getBucketId(document::DocumentId(
vespalib::make_string("userdoc:ns:%d::", 0x111111))
).stripUnused());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(20, 0x22222),
+ EXPECT_EQ(document::BucketId(20, 0x22222),
getExternalOperationHandler().getBucketId(document::DocumentId(
vespalib::make_string("userdoc:ns:%d::", 0x222222))
).stripUnused());
@@ -256,9 +198,7 @@ std::shared_ptr<api::RemoveCommand> ExternalOperationHandlerTest::makeRemoveComm
return std::make_shared<api::RemoveCommand>(makeDocumentBucket(document::BucketId(0)), DocumentId(id), api::Timestamp(0));
}
-void
-ExternalOperationHandlerTest::mutating_operation_wdr_bounced_on_wrong_current_distribution()
-{
+TEST_F(ExternalOperationHandlerTest, mutating_operation_wdr_bounced_on_wrong_current_distribution) {
createLinks();
std::string state("version:1 distributor:2 storage:2");
setupDistributor(1, 2, state);
@@ -267,18 +207,15 @@ ExternalOperationHandlerTest::mutating_operation_wdr_bounced_on_wrong_current_di
auto cmd = makeUpdateCommandForUser(bucket.withoutCountBits());
Operation::SP genOp;
- CPPUNIT_ASSERT(getExternalOperationHandler().handleMessage(cmd, genOp));
- CPPUNIT_ASSERT(!genOp.get());
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
- CPPUNIT_ASSERT_EQUAL(
- std::string("ReturnCode(WRONG_DISTRIBUTION, "
- "version:1 distributor:2 storage:2)"),
- _sender.replies[0]->getResult().toString());
+ ASSERT_TRUE(getExternalOperationHandler().handleMessage(cmd, genOp));
+ ASSERT_FALSE(genOp.get());
+ ASSERT_EQ(1, _sender.replies().size());
+ EXPECT_EQ("ReturnCode(WRONG_DISTRIBUTION, "
+ "version:1 distributor:2 storage:2)",
+ _sender.reply(0)->getResult().toString());
}
-void
-ExternalOperationHandlerTest::read_only_operation_wdr_bounced_on_wrong_current_distribution()
-{
+TEST_F(ExternalOperationHandlerTest, read_only_operation_wdr_bounced_on_wrong_current_distribution) {
createLinks();
std::string state("version:1 distributor:2 storage:2");
setupDistributor(1, 2, state);
@@ -287,18 +224,15 @@ ExternalOperationHandlerTest::read_only_operation_wdr_bounced_on_wrong_current_d
auto cmd = makeGetCommandForUser(bucket.withoutCountBits());
Operation::SP genOp;
- CPPUNIT_ASSERT(getExternalOperationHandler().handleMessage(cmd, genOp));
- CPPUNIT_ASSERT(!genOp.get());
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
- CPPUNIT_ASSERT_EQUAL(
- std::string("ReturnCode(WRONG_DISTRIBUTION, "
- "version:1 distributor:2 storage:2)"),
- _sender.replies[0]->getResult().toString());
+ ASSERT_TRUE(getExternalOperationHandler().handleMessage(cmd, genOp));
+ ASSERT_FALSE(genOp.get());
+ ASSERT_EQ(1, _sender.replies().size());
+ EXPECT_EQ("ReturnCode(WRONG_DISTRIBUTION, "
+ "version:1 distributor:2 storage:2)",
+ _sender.reply(0)->getResult().toString());
}
-void
-ExternalOperationHandlerTest::mutating_operation_busy_bounced_on_wrong_pending_distribution()
-{
+TEST_F(ExternalOperationHandlerTest, mutating_operation_busy_bounced_on_wrong_pending_distribution) {
createLinks();
std::string current("version:10 distributor:2 storage:2");
std::string pending("version:11 distributor:3 storage:3");
@@ -313,12 +247,11 @@ ExternalOperationHandlerTest::mutating_operation_busy_bounced_on_wrong_pending_d
auto cmd = makeUpdateCommandForUser(b.withoutCountBits());
Operation::SP genOp;
- CPPUNIT_ASSERT(getExternalOperationHandler().handleMessage(cmd, genOp));
- CPPUNIT_ASSERT(!genOp.get());
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
- CPPUNIT_ASSERT_EQUAL(
- std::string("ReturnCode(BUSY, Currently pending cluster state transition from version 10 to 11)"),
- _sender.replies[0]->getResult().toString());
+ ASSERT_TRUE(getExternalOperationHandler().handleMessage(cmd, genOp));
+ ASSERT_FALSE(genOp.get());
+ ASSERT_EQ(1, _sender.replies().size());
+ EXPECT_EQ("ReturnCode(BUSY, Currently pending cluster state transition from version 10 to 11)",
+ _sender.reply(0)->getResult().toString());
}
void
@@ -329,25 +262,20 @@ ExternalOperationHandlerTest::verify_busy_bounced_due_to_no_active_state(std::sh
setupDistributor(1, 2, state);
Operation::SP genOp;
- CPPUNIT_ASSERT(getExternalOperationHandler().handleMessage(cmd, genOp));
- CPPUNIT_ASSERT(!genOp.get());
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
- CPPUNIT_ASSERT_EQUAL(
- std::string("ReturnCode(BUSY, No cluster state activated yet)"),
- _sender.replies[0]->getResult().toString());
+ ASSERT_TRUE(getExternalOperationHandler().handleMessage(cmd, genOp));
+ ASSERT_FALSE(genOp.get());
+ ASSERT_EQ(1, _sender.replies().size());
+ EXPECT_EQ("ReturnCode(BUSY, No cluster state activated yet)",
+ _sender.reply(0)->getResult().toString());
}
// TODO NOT_READY is a more appropriate return code for this case, but must ensure it's
// handled gracefully and silently through the stack. BUSY is a safe bet until then.
-void
-ExternalOperationHandlerTest::mutating_operation_busy_bounced_if_no_cluster_state_received_yet()
-{
+TEST_F(ExternalOperationHandlerTest, mutating_operation_busy_bounced_if_no_cluster_state_received_yet) {
verify_busy_bounced_due_to_no_active_state(makeUpdateCommandForUser(12345));
}
-void
-ExternalOperationHandlerTest::read_only_operation_busy_bounced_if_no_cluster_state_received_yet()
-{
+TEST_F(ExternalOperationHandlerTest, read_only_operation_busy_bounced_if_no_cluster_state_received_yet) {
verify_busy_bounced_due_to_no_active_state(makeGetCommandForUser(12345));
}
@@ -364,34 +292,30 @@ void ExternalOperationHandlerTest::assert_rejection_due_to_unsafe_time(
Operation::SP generated;
getExternalOperationHandler().handleMessage(cmd, generated);
- CPPUNIT_ASSERT(generated.get() == nullptr);
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
- CPPUNIT_ASSERT_EQUAL(
- std::string("ReturnCode(STALE_TIMESTAMP, "
- "Operation received at time 9, which is before "
- "bucket ownership transfer safe time of 10)"),
- _sender.replies[0]->getResult().toString());
+ ASSERT_EQ(generated.get(), nullptr);
+ ASSERT_EQ(1, _sender.replies().size());
+ EXPECT_EQ("ReturnCode(STALE_TIMESTAMP, "
+ "Operation received at time 9, which is before "
+ "bucket ownership transfer safe time of 10)",
+ _sender.reply(0)->getResult().toString());
}
-void ExternalOperationHandlerTest::reject_put_if_not_past_safe_time_point() {
+TEST_F(ExternalOperationHandlerTest, reject_put_if_not_past_safe_time_point) {
assert_rejection_due_to_unsafe_time(makePutCommand("foo", "id:foo:testdoctype1::bar"));
- CPPUNIT_ASSERT_EQUAL(int64_t(1), safe_time_not_reached_metric_count(
- getDistributor().getMetrics().puts));
+ EXPECT_EQ(1, safe_time_not_reached_metric_count(getDistributor().getMetrics().puts));
}
-void ExternalOperationHandlerTest::reject_remove_if_not_past_safe_time_point() {
+TEST_F(ExternalOperationHandlerTest, reject_remove_if_not_past_safe_time_point) {
assert_rejection_due_to_unsafe_time(makeRemoveCommand("id:foo:testdoctype1::bar"));
- CPPUNIT_ASSERT_EQUAL(int64_t(1), safe_time_not_reached_metric_count(
- getDistributor().getMetrics().removes));
+ EXPECT_EQ(1, safe_time_not_reached_metric_count(getDistributor().getMetrics().removes));
}
-void ExternalOperationHandlerTest::reject_update_if_not_past_safe_time_point() {
+TEST_F(ExternalOperationHandlerTest, reject_update_if_not_past_safe_time_point) {
assert_rejection_due_to_unsafe_time(makeUpdateCommand());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), safe_time_not_reached_metric_count(
- getDistributor().getMetrics().updates));
+ EXPECT_EQ(1, safe_time_not_reached_metric_count(getDistributor().getMetrics().updates));
}
-void ExternalOperationHandlerTest::get_not_rejected_by_unsafe_time_point() {
+TEST_F(ExternalOperationHandlerTest, get_not_rejected_by_unsafe_time_point) {
createLinks();
setupDistributor(1, 2, "version:1 distributor:1 storage:1");
getClock().setAbsoluteTimeInSeconds(9);
@@ -400,13 +324,12 @@ void ExternalOperationHandlerTest::get_not_rejected_by_unsafe_time_point() {
Operation::SP generated;
getExternalOperationHandler().handleMessage(
makeGetCommandForUser(0), generated);
- CPPUNIT_ASSERT(generated.get() != nullptr);
- CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.replies.size());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), safe_time_not_reached_metric_count(
- getDistributor().getMetrics().gets));
+ ASSERT_NE(generated.get(), nullptr);
+ ASSERT_EQ(0, _sender.replies().size());
+ EXPECT_EQ(0, safe_time_not_reached_metric_count(getDistributor().getMetrics().gets));
}
-void ExternalOperationHandlerTest::mutation_not_rejected_when_safe_point_reached() {
+TEST_F(ExternalOperationHandlerTest, mutation_not_rejected_when_safe_point_reached) {
createLinks();
setupDistributor(1, 2, "version:1 distributor:1 storage:1");
getClock().setAbsoluteTimeInSeconds(10);
@@ -418,10 +341,9 @@ void ExternalOperationHandlerTest::mutation_not_rejected_when_safe_point_reached
std::make_shared<api::RemoveCommand>(
makeDocumentBucket(document::BucketId(0)), id, api::Timestamp(0)),
generated);
- CPPUNIT_ASSERT(generated.get() != nullptr);
- CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.replies.size());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), safe_time_not_reached_metric_count(
- getDistributor().getMetrics().removes));
+ ASSERT_NE(generated.get(), nullptr);
+ ASSERT_EQ(0, _sender.replies().size());
+ EXPECT_EQ(0, safe_time_not_reached_metric_count(getDistributor().getMetrics().removes));
}
void ExternalOperationHandlerTest::set_up_distributor_for_sequencing_test() {
@@ -429,22 +351,24 @@ void ExternalOperationHandlerTest::set_up_distributor_for_sequencing_test() {
setupDistributor(1, 2, "version:1 distributor:1 storage:1");
}
-Operation::SP ExternalOperationHandlerTest::start_operation_verify_not_rejected(
- std::shared_ptr<api::StorageCommand> cmd) {
+void ExternalOperationHandlerTest::start_operation_verify_not_rejected(
+ std::shared_ptr<api::StorageCommand> cmd,
+ Operation::SP& out_generated)
+{
Operation::SP generated;
- _sender.replies.clear();
+ _sender.replies().clear();
getExternalOperationHandler().handleMessage(cmd, generated);
- CPPUNIT_ASSERT(generated.get() != nullptr);
- CPPUNIT_ASSERT_EQUAL(size_t(0), _sender.replies.size());
- return generated;
+ ASSERT_NE(generated.get(), nullptr);
+ ASSERT_EQ(0, _sender.replies().size());
+ out_generated = std::move(generated);
}
void ExternalOperationHandlerTest::start_operation_verify_rejected(
std::shared_ptr<api::StorageCommand> cmd) {
Operation::SP generated;
- _sender.replies.clear();
+ _sender.replies().clear();
getExternalOperationHandler().handleMessage(cmd, generated);
- CPPUNIT_ASSERT(generated.get() == nullptr);
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.replies.size());
+ ASSERT_EQ(generated.get(), nullptr);
+ ASSERT_EQ(1, _sender.replies().size());
}
void ExternalOperationHandlerTest::assert_second_command_rejected_due_to_concurrent_mutation(
@@ -454,15 +378,15 @@ void ExternalOperationHandlerTest::assert_second_command_rejected_due_to_concurr
set_up_distributor_for_sequencing_test();
// Must hold ref to started operation, or sequencing handle will be released.
- Operation::SP generated1 = start_operation_verify_not_rejected(std::move(cmd1));
- start_operation_verify_rejected(std::move(cmd2));
+ Operation::SP generated1;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(std::move(cmd1), generated1));
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_rejected(std::move(cmd2)));
// TODO reconsider BUSY return code. Need something transient and non-noisy
- CPPUNIT_ASSERT_EQUAL(
- std::string(vespalib::make_string(
+ EXPECT_EQ(vespalib::make_string(
"ReturnCode(BUSY, A mutating operation for document "
- "'%s' is already in progress)", expected_id_in_message.c_str())),
- _sender.replies[0]->getResult().toString());
+ "'%s' is already in progress)", expected_id_in_message.c_str()),
+ _sender.reply(0)->getResult().toString());
}
void ExternalOperationHandlerTest::assert_second_command_not_rejected_due_to_concurrent_mutation(
@@ -470,89 +394,97 @@ void ExternalOperationHandlerTest::assert_second_command_not_rejected_due_to_con
std::shared_ptr<api::StorageCommand> cmd2) {
set_up_distributor_for_sequencing_test();
- Operation::SP generated1 = start_operation_verify_not_rejected(std::move(cmd1));
- start_operation_verify_not_rejected(std::move(cmd2));
+ Operation::SP generated1;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(std::move(cmd1), generated1));
+ Operation::SP generated2;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(std::move(cmd2), generated2));
}
-void ExternalOperationHandlerTest::reject_put_with_concurrent_mutation_to_same_id() {
- assert_second_command_rejected_due_to_concurrent_mutation(
+TEST_F(ExternalOperationHandlerTest, reject_put_with_concurrent_mutation_to_same_id) {
+ ASSERT_NO_FATAL_FAILURE(assert_second_command_rejected_due_to_concurrent_mutation(
makePutCommand("testdoctype1", _dummy_id),
- makePutCommand("testdoctype1", _dummy_id), _dummy_id);
- CPPUNIT_ASSERT_EQUAL(int64_t(1), concurrent_mutatations_metric_count(getDistributor().getMetrics().puts));
+ makePutCommand("testdoctype1", _dummy_id), _dummy_id));
+ EXPECT_EQ(1, concurrent_mutatations_metric_count(getDistributor().getMetrics().puts));
}
-void ExternalOperationHandlerTest::do_not_reject_put_operations_to_different_ids() {
- assert_second_command_not_rejected_due_to_concurrent_mutation(
+TEST_F(ExternalOperationHandlerTest, do_not_reject_put_operations_to_different_ids) {
+ ASSERT_NO_FATAL_FAILURE(assert_second_command_not_rejected_due_to_concurrent_mutation(
makePutCommand("testdoctype1", "id:foo:testdoctype1::baz"),
- makePutCommand("testdoctype1", "id:foo:testdoctype1::foo"));
- CPPUNIT_ASSERT_EQUAL(int64_t(0), concurrent_mutatations_metric_count(getDistributor().getMetrics().puts));
+ makePutCommand("testdoctype1", "id:foo:testdoctype1::foo")));
+ EXPECT_EQ(0, concurrent_mutatations_metric_count(getDistributor().getMetrics().puts));
}
-void ExternalOperationHandlerTest::reject_remove_with_concurrent_mutation_to_same_id() {
- assert_second_command_rejected_due_to_concurrent_mutation(
- makeRemoveCommand(_dummy_id), makeRemoveCommand(_dummy_id), _dummy_id);
- CPPUNIT_ASSERT_EQUAL(int64_t(1), concurrent_mutatations_metric_count(getDistributor().getMetrics().removes));
+TEST_F(ExternalOperationHandlerTest, reject_remove_with_concurrent_mutation_to_same_id) {
+ ASSERT_NO_FATAL_FAILURE(assert_second_command_rejected_due_to_concurrent_mutation(
+ makeRemoveCommand(_dummy_id), makeRemoveCommand(_dummy_id), _dummy_id));
+ EXPECT_EQ(1, concurrent_mutatations_metric_count(getDistributor().getMetrics().removes));
}
-void ExternalOperationHandlerTest::do_not_reject_remove_operations_to_different_ids() {
- assert_second_command_not_rejected_due_to_concurrent_mutation(
+TEST_F(ExternalOperationHandlerTest, do_not_reject_remove_operations_to_different_ids) {
+ ASSERT_NO_FATAL_FAILURE(assert_second_command_not_rejected_due_to_concurrent_mutation(
makeRemoveCommand("id:foo:testdoctype1::baz"),
- makeRemoveCommand("id:foo:testdoctype1::foo"));
- CPPUNIT_ASSERT_EQUAL(int64_t(0), concurrent_mutatations_metric_count(getDistributor().getMetrics().removes));
+ makeRemoveCommand("id:foo:testdoctype1::foo")));
+ EXPECT_EQ(0, concurrent_mutatations_metric_count(getDistributor().getMetrics().removes));
}
-void ExternalOperationHandlerTest::reject_update_with_concurrent_mutation_to_same_id() {
- assert_second_command_rejected_due_to_concurrent_mutation(
+TEST_F(ExternalOperationHandlerTest, reject_update_with_concurrent_mutation_to_same_id) {
+ ASSERT_NO_FATAL_FAILURE(assert_second_command_rejected_due_to_concurrent_mutation(
makeUpdateCommand("testdoctype1", _dummy_id),
- makeUpdateCommand("testdoctype1", _dummy_id), _dummy_id);
- CPPUNIT_ASSERT_EQUAL(int64_t(1), concurrent_mutatations_metric_count(getDistributor().getMetrics().updates));
+ makeUpdateCommand("testdoctype1", _dummy_id), _dummy_id));
+ EXPECT_EQ(1, concurrent_mutatations_metric_count(getDistributor().getMetrics().updates));
}
-void ExternalOperationHandlerTest::do_not_reject_update_operations_to_different_ids() {
- assert_second_command_not_rejected_due_to_concurrent_mutation(
+TEST_F(ExternalOperationHandlerTest, do_not_reject_update_operations_to_different_ids) {
+ ASSERT_NO_FATAL_FAILURE(assert_second_command_not_rejected_due_to_concurrent_mutation(
makeUpdateCommand("testdoctype1", "id:foo:testdoctype1::baz"),
- makeUpdateCommand("testdoctype1", "id:foo:testdoctype1::foo"));
- CPPUNIT_ASSERT_EQUAL(int64_t(0), concurrent_mutatations_metric_count(getDistributor().getMetrics().updates));
+ makeUpdateCommand("testdoctype1", "id:foo:testdoctype1::foo")));
+ EXPECT_EQ(0, concurrent_mutatations_metric_count(getDistributor().getMetrics().updates));
}
-void ExternalOperationHandlerTest::operation_destruction_allows_new_mutations_for_id() {
+TEST_F(ExternalOperationHandlerTest, operation_destruction_allows_new_mutations_for_id) {
set_up_distributor_for_sequencing_test();
- Operation::SP generated = start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id));
+ Operation::SP generated;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id), generated));
generated.reset(); // Implicitly release sequencing handle
- start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id));
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id), generated));
}
-void ExternalOperationHandlerTest::concurrent_get_and_mutation_do_not_conflict() {
+TEST_F(ExternalOperationHandlerTest, concurrent_get_and_mutation_do_not_conflict) {
set_up_distributor_for_sequencing_test();
- Operation::SP generated1 = start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id));
+ Operation::SP generated1;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id), generated1));
- start_operation_verify_not_rejected(makeGetCommand(_dummy_id));
+ Operation::SP generated2;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(makeGetCommand(_dummy_id), generated2));
}
-void ExternalOperationHandlerTest::sequencing_works_across_mutation_types() {
+TEST_F(ExternalOperationHandlerTest, sequencing_works_across_mutation_types) {
set_up_distributor_for_sequencing_test();
- Operation::SP generated = start_operation_verify_not_rejected(makePutCommand("testdoctype1", _dummy_id));
- start_operation_verify_rejected(makeRemoveCommand(_dummy_id));
- start_operation_verify_rejected(makeUpdateCommand("testdoctype1", _dummy_id));
+ Operation::SP generated;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(makePutCommand("testdoctype1", _dummy_id), generated));
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_rejected(makeRemoveCommand(_dummy_id)));
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_rejected(makeUpdateCommand("testdoctype1", _dummy_id)));
}
-void ExternalOperationHandlerTest::sequencing_can_be_explicitly_config_disabled() {
+TEST_F(ExternalOperationHandlerTest, sequencing_can_be_explicitly_config_disabled) {
set_up_distributor_for_sequencing_test();
// Should be able to modify config after links have been created, i.e. this is a live config.
getConfig().setSequenceMutatingOperations(false);
- Operation::SP generated = start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id));
+ Operation::SP generated1;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id), generated1));
// Sequencing is disabled, so concurrent op is not rejected.
- start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id));
+ Operation::SP generated2;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(makeRemoveCommand(_dummy_id), generated2));
}
-void ExternalOperationHandlerTest::gets_are_started_with_mutable_db_outside_transition_period() {
+TEST_F(ExternalOperationHandlerTest, gets_are_started_with_mutable_db_outside_transition_period) {
createLinks();
std::string current = "version:1 distributor:1 storage:3";
setupDistributor(1, 3, current);
@@ -560,10 +492,12 @@ void ExternalOperationHandlerTest::gets_are_started_with_mutable_db_outside_tran
document::BucketId b(16, 1234); // Only 1 distributor (us), so doesn't matter
- auto op = start_operation_verify_not_rejected(makeGetCommandForUser(b.withoutCountBits()));
+ Operation::SP op;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(
+ makeGetCommandForUser(b.withoutCountBits()), op));
auto& get_op = dynamic_cast<GetOperation&>(*op);
const auto* expected_space = &getBucketSpaceRepo().get(document::FixedBucketSpaces::default_space());
- CPPUNIT_ASSERT_EQUAL(expected_space, &get_op.bucketSpace());
+ EXPECT_EQ(expected_space, &get_op.bucketSpace());
}
document::BucketId ExternalOperationHandlerTest::set_up_pending_cluster_state_transition(bool read_only_enabled) {
@@ -579,22 +513,24 @@ document::BucketId ExternalOperationHandlerTest::set_up_pending_cluster_state_tr
return findOwned1stNotOwned2ndInStates(current, pending);
}
-void ExternalOperationHandlerTest::gets_are_started_with_read_only_db_during_transition_period() {
+TEST_F(ExternalOperationHandlerTest, gets_are_started_with_read_only_db_during_transition_period) {
auto non_owned_bucket = set_up_pending_cluster_state_transition(true);
- auto op = start_operation_verify_not_rejected(makeGetCommandForUser(non_owned_bucket.withoutCountBits()));
+ Operation::SP op;
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_not_rejected(
+ makeGetCommandForUser(non_owned_bucket.withoutCountBits()), op));
auto& get_op = dynamic_cast<GetOperation&>(*op);
const auto* expected_space = &getReadOnlyBucketSpaceRepo().get(document::FixedBucketSpaces::default_space());
- CPPUNIT_ASSERT_EQUAL(expected_space, &get_op.bucketSpace());
+ EXPECT_EQ(expected_space, &get_op.bucketSpace());
}
-void ExternalOperationHandlerTest::gets_are_busy_bounced_during_transition_period_if_stale_reads_disabled() {
+TEST_F(ExternalOperationHandlerTest, gets_are_busy_bounced_during_transition_period_if_stale_reads_disabled) {
auto non_owned_bucket = set_up_pending_cluster_state_transition(false);
- start_operation_verify_rejected(makeGetCommandForUser(non_owned_bucket.withoutCountBits()));
- CPPUNIT_ASSERT_EQUAL(
- std::string("ReturnCode(BUSY, Currently pending cluster state transition from version 123 to 321)"),
- _sender.replies[0]->getResult().toString());
+ ASSERT_NO_FATAL_FAILURE(start_operation_verify_rejected(
+ makeGetCommandForUser(non_owned_bucket.withoutCountBits())));
+ EXPECT_EQ("ReturnCode(BUSY, Currently pending cluster state transition from version 123 to 321)",
+ _sender.reply(0)->getResult().toString());
}
diff --git a/storage/src/tests/distributor/garbagecollectiontest.cpp b/storage/src/tests/distributor/garbagecollectiontest.cpp
index e2a6bb84065..122a1452632 100644
--- a/storage/src/tests/distributor/garbagecollectiontest.cpp
+++ b/storage/src/tests/distributor/garbagecollectiontest.cpp
@@ -1,42 +1,29 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
#include <vespa/storageapi/message/removelocation.h>
#include <vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h>
#include <vespa/storage/distributor/idealstatemanager.h>
#include <tests/distributor/distributortestutil.h>
#include <vespa/storage/distributor/distributor.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
-class GarbageCollectionOperationTest : public CppUnit::TestFixture, public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(GarbageCollectionOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST_SUITE_END();
-
-protected:
- void testSimple();
-
-public:
- void setUp() override {
+struct GarbageCollectionOperationTest : Test, DistributorTestUtil {
+ void SetUp() override {
createLinks();
};
- void tearDown() override {
+ void TearDown() override {
close();
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(GarbageCollectionOperationTest);
-
-void
-GarbageCollectionOperationTest::testSimple()
-{
+TEST_F(GarbageCollectionOperationTest, simple) {
enableDistributorClusterState("distributor:1 storage:2");
addNodesToBucketDB(document::BucketId(16, 1), "0=250/50/300,1=250/50/300");
getConfig().setGarbageCollection("music.date < 34", 3600);
@@ -48,34 +35,30 @@ GarbageCollectionOperationTest::testSimple()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL((size_t)2, _sender.commands.size());
+ ASSERT_EQ(2, _sender.commands().size());
getClock().setAbsoluteTimeInSeconds(34);
for (uint32_t i = 0; i < 2; ++i) {
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[i];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::REMOVELOCATION);
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(i);
+ ASSERT_EQ(msg->getType(), api::MessageType::REMOVELOCATION);
- api::RemoveLocationCommand* tmp = (api::RemoveLocationCommand*)msg.get();
- CPPUNIT_ASSERT_EQUAL(vespalib::string("music.date < 34"),
- tmp->getDocumentSelection());
+ auto& tmp = dynamic_cast<api::RemoveLocationCommand&>(*msg);
+ EXPECT_EQ("music.date < 34", tmp.getDocumentSelection());
- std::shared_ptr<api::StorageReply> reply(tmp->makeReply().release());
- api::RemoveLocationReply* sreply = (api::RemoveLocationReply*)reply.get();
- sreply->setBucketInfo(api::BucketInfo(666, 90, 500));
+ std::shared_ptr<api::StorageReply> reply(tmp.makeReply());
+ auto& sreply = dynamic_cast<api::RemoveLocationReply&>(*reply);
+ sreply.setBucketInfo(api::BucketInfo(666, 90, 500));
op.receive(_sender, reply);
}
BucketDatabase::Entry entry = getBucket(document::BucketId(16, 1));
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL(2, (int)entry->getNodeCount());
- CPPUNIT_ASSERT_EQUAL(34, (int)entry->getLastGarbageCollectionTime());
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(666, 90, 500),
- entry->getNodeRef(0).getBucketInfo());
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(666, 90, 500),
- entry->getNodeRef(1).getBucketInfo());
+ ASSERT_TRUE(entry.valid());
+ ASSERT_EQ(2, entry->getNodeCount());
+ EXPECT_EQ(34, entry->getLastGarbageCollectionTime());
+ EXPECT_EQ(api::BucketInfo(666, 90, 500), entry->getNodeRef(0).getBucketInfo());
+ EXPECT_EQ(api::BucketInfo(666, 90, 500), entry->getNodeRef(1).getBucketInfo());
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/getoperationtest.cpp b/storage/src/tests/distributor/getoperationtest.cpp
index 064348539bf..4b67cf1963d 100644
--- a/storage/src/tests/distributor/getoperationtest.cpp
+++ b/storage/src/tests/distributor/getoperationtest.cpp
@@ -6,121 +6,101 @@
#include <vespa/storage/distributor/externaloperationhandler.h>
#include <vespa/storage/distributor/distributor.h>
#include <vespa/storage/distributor/distributormetricsset.h>
+#include <vespa/storage/distributor/operations/external/getoperation.h>
#include <tests/distributor/distributortestutil.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/document/test/make_document_bucket.h>
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/config/helper/configgetter.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <iomanip>
-#include <vespa/storage/distributor/operations/external/getoperation.h>
using std::shared_ptr;
using config::ConfigGetter;
using document::DocumenttypesConfig;
using config::FileSpec;
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage::distributor {
-class GetOperationTest : public CppUnit::TestFixture, public DistributorTestUtil {
- CPPUNIT_TEST_SUITE(GetOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testNotFound);
- CPPUNIT_TEST(testResendOnStorageFailure);
- CPPUNIT_TEST(testResendOnStorageFailureAllFail);
- CPPUNIT_TEST(testSendToIdealCopyIfBucketInSync);
- CPPUNIT_TEST(testReturnNotFoundWhenBucketNotInDb);
- CPPUNIT_TEST(testAskAllNodesIfBucketIsInconsistent);
- CPPUNIT_TEST(testSendToAllInvalidNodesWhenInconsistent);
- CPPUNIT_TEST(testAskTrustedNodeIfBucketIsInconsistent);
- CPPUNIT_TEST(testInconsistentSplit); // Test that we ask all nodes if a bucket is inconsistent.
- CPPUNIT_TEST(testSendToAllInvalidCopies);
- CPPUNIT_TEST(testMultiInconsistentBucket);
- CPPUNIT_TEST(testMultiInconsistentBucketFail);
- CPPUNIT_TEST(testMultiInconsistentBucketNotFound);
- CPPUNIT_TEST(testMultiInconsistentBucketNotFoundDeleted);
- CPPUNIT_TEST(testMultipleCopiesWithFailureOnLocalNode);
- CPPUNIT_TEST(canGetDocumentsWhenAllReplicaNodesRetired);
- CPPUNIT_TEST_SUITE_END();
+struct GetOperationTest : Test, DistributorTestUtil {
std::shared_ptr<const document::DocumentTypeRepo> _repo;
-
-public:
document::DocumentId docId;
document::BucketId bucketId;
std::unique_ptr<Operation> op;
- void setUp() override {
+ GetOperationTest();
+ ~GetOperationTest();
+
+ void SetUp() override {
_repo.reset(
new document::DocumentTypeRepo(*ConfigGetter<DocumenttypesConfig>::
getConfig("config-doctypes",
- FileSpec(TEST_PATH("config-doctypes.cfg")))));
+ FileSpec("../config-doctypes.cfg"))));
createLinks();
docId = document::DocumentId(document::DocIdString("test", "uri"));
bucketId = getExternalOperationHandler().getBucketId(docId);
};
- void tearDown() override {
+ void TearDown() override {
close();
op.reset();
}
void sendGet() {
- std::shared_ptr<api::GetCommand> msg(
- new api::GetCommand(makeDocumentBucket(document::BucketId(0)), docId, "[all]"));
-
- op.reset(new GetOperation(getExternalOperationHandler(),
- getDistributorBucketSpace(),
- msg,
- getDistributor().getMetrics().
- gets[msg->getLoadType()]));
+ auto msg = std::make_shared<api::GetCommand>(makeDocumentBucket(document::BucketId(0)), docId, "[all]");
+ op = std::make_unique<GetOperation>(
+ getExternalOperationHandler(), getDistributorBucketSpace(),
+ msg, getDistributor().getMetrics(). gets[msg->getLoadType()]);
op->start(_sender, framework::MilliSecTime(0));
}
+ static constexpr uint32_t LastCommand = UINT32_MAX;
+
void sendReply(uint32_t idx,
api::ReturnCode::Result result,
std::string authorVal, uint32_t timestamp)
{
- if (idx == (uint32_t)-1) {
- idx = _sender.commands.size() - 1;
+ if (idx == LastCommand) {
+ idx = _sender.commands().size() - 1;
}
- std::shared_ptr<api::StorageCommand> msg2 = _sender.commands[idx];
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GET, msg2->getType());
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.command(idx);
+ ASSERT_EQ(api::MessageType::GET, msg2->getType());
- api::GetCommand* tmp = static_cast<api::GetCommand*>(msg2.get());
+ auto* tmp = static_cast<api::GetCommand*>(msg2.get());
document::Document::SP doc;
- if (authorVal.length()) {
+ if (!authorVal.empty()) {
const document::DocumentType* type(_repo->getDocumentType("text/html"));
- doc = document::Document::SP(
- new document::Document(*type, docId));
+ doc = std::make_unique<document::Document>(*type, docId);
doc->setValue(doc->getField("author"),
document::StringFieldValue(authorVal));
}
- api::GetReply* reply = new api::GetReply(*tmp, doc, timestamp);
+ auto reply = std::make_shared<api::GetReply>(*tmp, doc, timestamp);
reply->setResult(result);
- op->receive(_sender, std::shared_ptr<api::StorageReply>(reply));
+ op->receive(_sender, reply);
}
void replyWithFailure() {
- sendReply(-1, api::ReturnCode::IO_FAILURE, "", 0);
+ sendReply(LastCommand, api::ReturnCode::IO_FAILURE, "", 0);
}
void replyWithNotFound() {
- sendReply(-1, api::ReturnCode::OK, "", 0);
+ sendReply(LastCommand, api::ReturnCode::OK, "", 0);
}
void replyWithDocument() {
- sendReply(-1, api::ReturnCode::OK, "foo", 100);
+ sendReply(LastCommand, api::ReturnCode::OK, "foo", 100);
}
std::string getLastReplyAuthor() {
- api::StorageMessage& msg = *_sender.replies[_sender.replies.size() - 1];
+ api::StorageMessage& msg = *_sender.replies().back();
if (msg.getType() == api::MessageType::GET_REPLY) {
document::Document::SP doc(
@@ -137,147 +117,104 @@ public:
void setClusterState(const std::string& clusterState) {
enableDistributorClusterState(clusterState);
}
-
- void testSimple();
- void testReturnNotFoundWhenBucketNotInDb();
- void testNotFound();
- void testResendOnStorageFailure();
- void testResendOnStorageFailureAllFail();
- void testSendToIdealCopyIfBucketInSync();
- void testAskAllNodesIfBucketIsInconsistent();
- void testSendToAllInvalidNodesWhenInconsistent();
- void testAskTrustedNodeIfBucketIsInconsistent();
- void testInconsistentSplit();
- void testMultiInconsistentBucket();
- void testMultiInconsistentBucketFail();
- void testMultiInconsistentBucketNotFound();
- void testMultiInconsistentBucketNotFoundDeleted();
- void testSendToAllInvalidCopies();
- void testMultipleCopiesWithFailureOnLocalNode();
- void canGetDocumentsWhenAllReplicaNodesRetired();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(GetOperationTest);
+GetOperationTest::GetOperationTest() = default;
+GetOperationTest::~GetOperationTest() = default;
-void
-GetOperationTest::testSimple()
-{
+TEST_F(GetOperationTest, simple) {
setClusterState("distributor:1 storage:2");
addNodesToBucketDB(bucketId, "0=4,1=4");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0", _sender.getCommands(true));
- replyWithDocument();
+ ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 100) ReturnCode(NONE)"),
- _sender.getLastReply());
+ EXPECT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-GetOperationTest::testAskTrustedNodeIfBucketIsInconsistent()
-{
+TEST_F(GetOperationTest, ask_trusted_node_if_bucket_is_inconsistent) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "0=100/3/10,1=200/4/12/t");
sendGet();
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 1", _sender.getCommands(true));
- replyWithDocument();
+ ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 100) ReturnCode(NONE)"),
- _sender.getLastReply());
+ EXPECT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-GetOperationTest::testAskAllNodesIfBucketIsInconsistent()
-{
+TEST_F(GetOperationTest, ask_all_nodes_if_bucket_is_inconsistent) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "0=100/3/10,1=200/4/12");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0,Get => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
- sendReply(0, api::ReturnCode::OK, "newauthor", 2);
- sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 2) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)",
+ _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+ EXPECT_EQ("newauthor", getLastReplyAuthor());
}
-
-void
-GetOperationTest::testSendToAllInvalidCopies()
-{
+TEST_F(GetOperationTest, send_to_all_invalid_copies) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "2=0/0/1,3=0/0/1");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 2,Get => 3"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 2,Get => 3", _sender.getCommands(true));
- sendReply(0, api::ReturnCode::OK, "newauthor", 2);
- sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 2) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)",
+ _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+ EXPECT_EQ("newauthor", getLastReplyAuthor());
}
-void
-GetOperationTest::testSendToAllInvalidNodesWhenInconsistent()
-{
+TEST_F(GetOperationTest, send_to_all_invalid_nodes_when_inconsistent) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "0=100,1=200,2=0/0/1,3=0/0/1");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 2,Get => 3,Get => 0,Get => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 2,Get => 3,Get => 0,Get => 1",
+ _sender.getCommands(true));
- sendReply(0, api::ReturnCode::OK, "newauthor", 2);
- sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
- sendReply(2, api::ReturnCode::OK, "oldauthor", 1);
- sendReply(3, api::ReturnCode::OK, "oldauthor", 1);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
+ ASSERT_NO_FATAL_FAILURE(sendReply(2, api::ReturnCode::OK, "oldauthor", 1));
+ ASSERT_NO_FATAL_FAILURE(sendReply(3, api::ReturnCode::OK, "oldauthor", 1));
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 2) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)",
+ _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+ EXPECT_EQ("newauthor", getLastReplyAuthor());
}
-void
-GetOperationTest::testInconsistentSplit()
-{
+TEST_F(GetOperationTest, inconsistent_split) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(document::BucketId(16, 0x2a52), "0=100");
@@ -285,162 +222,126 @@ GetOperationTest::testInconsistentSplit()
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0,Get => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
- sendReply(0, api::ReturnCode::OK, "newauthor", 2);
- sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 2) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)",
+ _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+ EXPECT_EQ("newauthor", getLastReplyAuthor());
}
-
-void
-GetOperationTest::testMultiInconsistentBucketNotFound()
-{
+TEST_F(GetOperationTest, multi_inconsistent_bucket_not_found) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "0=100,2=100,1=200,3=200");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0,Get => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
- sendReply(0, api::ReturnCode::OK, "newauthor", 2);
- sendReply(1, api::ReturnCode::OK, "", 0);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "", 0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 2) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-GetOperationTest::testMultiInconsistentBucketNotFoundDeleted()
-{
+TEST_F(GetOperationTest, multi_inconsistent_bucket_not_found_deleted) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "0=100,2=100,1=200,3=200");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0,Get => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
- sendReply(0, api::ReturnCode::OK, "newauthor", 2);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
// This signifies that the latest change was that the document was deleted
// at timestamp 3.
- sendReply(1, api::ReturnCode::OK, "", 3);
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "", 3));
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 3) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 3) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-GetOperationTest::testMultiInconsistentBucket()
-{
+TEST_F(GetOperationTest, multi_inconsistent_bucket) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "0=100,2=100,1=200,3=200");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0,Get => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
- sendReply(0, api::ReturnCode::OK, "newauthor", 2);
- sendReply(1, api::ReturnCode::OK, "oldauthor", 1);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "oldauthor", 1));
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 2) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 2) ReturnCode(NONE)",
+ _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(std::string("newauthor"), getLastReplyAuthor());
+ EXPECT_EQ("newauthor", getLastReplyAuthor());
}
-void
-GetOperationTest::testMultiInconsistentBucketFail()
-{
+TEST_F(GetOperationTest, multi_inconsistent_bucket_fail) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "0=100,2=100,1=200,3=200");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0,Get => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
- sendReply(0, api::ReturnCode::OK, "newauthor", 1);
- sendReply(1, api::ReturnCode::DISK_FAILURE, "", 0);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 1));
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::DISK_FAILURE, "", 0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 3"),
- _sender.getLastCommand());
+ ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 3",
+ _sender.getLastCommand());
- replyWithDocument();
+ ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 100) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-
-void
-GetOperationTest::testReturnNotFoundWhenBucketNotInDb()
-{
+TEST_F(GetOperationTest, return_not_found_when_bucket_not_in_db) {
setClusterState("distributor:1 storage:1");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 0) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 0) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-GetOperationTest::testNotFound()
-{
+TEST_F(GetOperationTest, not_found) {
setClusterState("distributor:1 storage:1");
addNodesToBucketDB(bucketId, "0=100");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 0"),
- _sender.getLastCommand());
+ ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 0",
+ _sender.getLastCommand());
- replyWithNotFound();
+ ASSERT_NO_FATAL_FAILURE(replyWithNotFound());
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 0) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 0) ReturnCode(NONE)",
+ _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(1, (int)(getDistributor().
- getMetrics().gets[documentapi::LoadType::DEFAULT].
- failures.notfound.getValue()));
+ EXPECT_EQ(1, getDistributor().getMetrics().gets[documentapi::LoadType::DEFAULT].
+ failures.notfound.getValue());
}
-void
-GetOperationTest::testResendOnStorageFailure()
-{
+TEST_F(GetOperationTest, resend_on_storage_failure) {
setClusterState("distributor:1 storage:3");
// Add two nodes that are not trusted. GET should retry each one of them
@@ -449,27 +350,22 @@ GetOperationTest::testResendOnStorageFailure()
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1"),
- _sender.getLastCommand());
+ ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1",
+ _sender.getLastCommand());
- replyWithFailure();
+ ASSERT_NO_FATAL_FAILURE(replyWithFailure());
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 2"),
- _sender.getLastCommand());
+ ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 2",
+ _sender.getLastCommand());
- replyWithDocument();
+ ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 100) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-GetOperationTest::testResendOnStorageFailureAllFail()
-{
+TEST_F(GetOperationTest, resend_on_storage_failure_all_fail) {
setClusterState("distributor:1 storage:3");
// Add two nodes that are not trusted. GET should retry each one of them
@@ -478,27 +374,22 @@ GetOperationTest::testResendOnStorageFailureAllFail()
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1"),
- _sender.getLastCommand());
+ ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1",
+ _sender.getLastCommand());
- replyWithFailure();
+ ASSERT_NO_FATAL_FAILURE(replyWithFailure());
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 2"),
- _sender.getLastCommand());
+ ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 2",
+ _sender.getLastCommand());
- replyWithFailure();
+ ASSERT_NO_FATAL_FAILURE(replyWithFailure());
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 0) ReturnCode(IO_FAILURE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 0) ReturnCode(IO_FAILURE)",
+ _sender.getLastReply());
}
-void
-GetOperationTest::testSendToIdealCopyIfBucketInSync()
-{
+TEST_F(GetOperationTest, send_to_ideal_copy_if_bucket_in_sync) {
setClusterState("distributor:1 storage:4");
addNodesToBucketDB(bucketId, "1=100,2=100,3=100");
@@ -506,21 +397,17 @@ GetOperationTest::testSendToIdealCopyIfBucketInSync()
sendGet();
// Should always send to node 1 (follow bucket db order)
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1"),
- _sender.getLastCommand());
+ ASSERT_EQ("Get(BucketId(0x4000000000002a52), doc:test:uri) => 1",
+ _sender.getLastCommand());
- replyWithDocument();
+ ASSERT_NO_FATAL_FAILURE(replyWithDocument());
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 100) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-GetOperationTest::testMultipleCopiesWithFailureOnLocalNode()
-{
+TEST_F(GetOperationTest, multiple_copies_with_failure_on_local_node) {
setClusterState("distributor:1 storage:4");
// Node 0 is local copy to distributor 0 and will be preferred when
@@ -529,39 +416,30 @@ GetOperationTest::testMultipleCopiesWithFailureOnLocalNode()
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0", _sender.getCommands(true));
// Fail local node; no reply must be sent yet since we've got more nodes
// to try.
- sendReply(0, api::ReturnCode::TIMEOUT, "", 0);
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::TIMEOUT, "", 0));
// Retry with remaining copy on node 2.
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0,Get => 2"),
- _sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 2", _sender.getCommands(true));
- sendReply(1, api::ReturnCode::OK, "newestauthor", 3);
+ ASSERT_NO_FATAL_FAILURE(sendReply(1, api::ReturnCode::OK, "newestauthor", 3));
- CPPUNIT_ASSERT_EQUAL(
- std::string("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 3) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("GetReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 3) ReturnCode(NONE)",
+ _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(std::string("newestauthor"), getLastReplyAuthor());
+ EXPECT_EQ("newestauthor", getLastReplyAuthor());
}
-void
-GetOperationTest::canGetDocumentsWhenAllReplicaNodesRetired()
-{
+TEST_F(GetOperationTest, can_get_documents_when_all_replica_nodes_retired) {
setClusterState("distributor:1 storage:2 .0.s:r .1.s:r");
addNodesToBucketDB(bucketId, "0=4,1=4");
sendGet();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get => 0"),
- _sender.getCommands(true));
+ EXPECT_EQ("Get => 0", _sender.getCommands(true));
}
}
diff --git a/storage/src/tests/distributor/idealstatemanagertest.cpp b/storage/src/tests/distributor/idealstatemanagertest.cpp
index 7401e083900..fc26a8c9cce 100644
--- a/storage/src/tests/distributor/idealstatemanagertest.cpp
+++ b/storage/src/tests/distributor/idealstatemanagertest.cpp
@@ -1,5 +1,4 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <tests/common/dummystoragelink.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storage/distributor/bucketdbupdater.h>
@@ -12,81 +11,68 @@
#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/document/test/make_bucket_space.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
using document::test::makeBucketSpace;
using document::FixedBucketSpaces;
+using namespace ::testing;
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
-class IdealStateManagerTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
-public:
+struct IdealStateManagerTest : Test, DistributorTestUtil {
IdealStateManagerTest()
- : CppUnit::TestFixture(),
- DistributorTestUtil(),
- _bucketSpaces()
+ : Test(),
+ DistributorTestUtil(),
+ _bucketSpaces()
{}
- void setUp() override {
+ void SetUp() override {
createLinks();
_bucketSpaces = getBucketSpaces();
};
- void tearDown() override {
+ void TearDown() override {
close();
}
- void testSibling();
- void testClearActiveOnNodeDown();
- void testRecheckWhenActive();
- void testRecheckWhenPending();
- void testOpsGenerationBusy();
- void testStatusPage();
- void testDisabledStateChecker();
- void testBlockIdealStateOpsOnFullRequestBucketInfo();
- void testBlockCheckForAllOperationsToSpecificBucket();
-
void setSystemState(const lib::ClusterState& systemState) {
_distributor->enableClusterStateBundle(lib::ClusterStateBundle(systemState));
}
- CPPUNIT_TEST_SUITE(IdealStateManagerTest);
- CPPUNIT_TEST(testSibling);
- CPPUNIT_TEST(testClearActiveOnNodeDown);
- CPPUNIT_TEST(testRecheckWhenActive);
- CPPUNIT_TEST(testStatusPage);
- CPPUNIT_TEST(testDisabledStateChecker);
- CPPUNIT_TEST(testBlockIdealStateOpsOnFullRequestBucketInfo);
- CPPUNIT_TEST(testBlockCheckForAllOperationsToSpecificBucket);
- CPPUNIT_TEST_SUITE_END();
-private:
+ bool checkBlock(const IdealStateOperation& op,
+ const document::Bucket& bucket,
+ const PendingMessageTracker& tracker) const
+ {
+ return op.checkBlock(bucket, tracker);
+ }
+
+ bool checkBlockForAllNodes(const IdealStateOperation& op,
+ const document::Bucket& bucket,
+ const PendingMessageTracker& tracker) const
+ {
+ return op.checkBlockForAllNodes(bucket, tracker);
+ }
+
std::vector<document::BucketSpace> _bucketSpaces;
std::string makeBucketStatusString(const std::string &defaultSpaceBucketStatus);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(IdealStateManagerTest);
-
-void
-IdealStateManagerTest::testSibling()
-{
- CPPUNIT_ASSERT_EQUAL(document::BucketId(1,1),
- getIdealStateManager().getDistributorComponent()
- .getSibling(document::BucketId(1, 0)));
- CPPUNIT_ASSERT_EQUAL(document::BucketId(1,0),
- getIdealStateManager().getDistributorComponent()
- .getSibling(document::BucketId(1, 1)));
- CPPUNIT_ASSERT_EQUAL(document::BucketId(2,3),
- getIdealStateManager().getDistributorComponent()
- .getSibling(document::BucketId(2, 1)));
- CPPUNIT_ASSERT_EQUAL(document::BucketId(2,1),
- getIdealStateManager().getDistributorComponent()
- .getSibling(document::BucketId(2, 3)));
+TEST_F(IdealStateManagerTest, sibling) {
+ EXPECT_EQ(document::BucketId(1,1),
+ getIdealStateManager().getDistributorComponent()
+ .getSibling(document::BucketId(1, 0)));
+ EXPECT_EQ(document::BucketId(1,0),
+ getIdealStateManager().getDistributorComponent()
+ .getSibling(document::BucketId(1, 1)));
+ EXPECT_EQ(document::BucketId(2,3),
+ getIdealStateManager().getDistributorComponent()
+ .getSibling(document::BucketId(2, 1)));
+ EXPECT_EQ(document::BucketId(2,1),
+ getIdealStateManager().getDistributorComponent()
+ .getSibling(document::BucketId(2, 3)));
}
-void
-IdealStateManagerTest::testStatusPage() {
+TEST_F(IdealStateManagerTest, status_page) {
close();
getDirConfig().getConfig("stor-distributormanager").set("splitsize", "100");
getDirConfig().getConfig("stor-distributormanager").set("splitcount", "1000000");
@@ -101,15 +87,14 @@ IdealStateManagerTest::testStatusPage() {
std::ostringstream ost;
getIdealStateManager().getBucketStatus(ost);
- CPPUNIT_ASSERT_EQUAL(makeBucketStatusString("BucketId(0x4000000000000002) : [node(idx=0,crc=0xff,docs=10/10,bytes=10/10,trusted=true,active=true,ready=false)]<br>\n"
+ EXPECT_EQ(makeBucketStatusString("BucketId(0x4000000000000002) : [node(idx=0,crc=0xff,docs=10/10,bytes=10/10,trusted=true,active=true,ready=false)]<br>\n"
"<b>BucketId(0x4000000000000005):</b> <i> : split: [Splitting bucket because its maximum size (200 b, 100 docs, 100 meta, 200 b total) is "
"higher than the configured limit of (100, 1000000)]</i> [node(idx=0,crc=0xff,docs=100/100,bytes=200/200,trusted=true,"
"active=true,ready=false)]<br>\n"),
- ost.str());
+ ost.str());
}
-void
-IdealStateManagerTest::testDisabledStateChecker() {
+TEST_F(IdealStateManagerTest, disabled_state_checker) {
setupDistributor(1, 1, "distributor:1 storage:1");
getConfig().setSplitSize(100);
@@ -122,7 +107,7 @@ IdealStateManagerTest::testDisabledStateChecker() {
std::ostringstream ost;
getIdealStateManager().getBucketStatus(ost);
- CPPUNIT_ASSERT_EQUAL(makeBucketStatusString(
+ EXPECT_EQ(makeBucketStatusString(
"BucketId(0x4000000000000002) : [node(idx=0,crc=0xff,docs=10/10,bytes=10/10,trusted=true,active=true,ready=false)]<br>\n"
"<b>BucketId(0x4000000000000005):</b> <i> : split: [Splitting bucket because its maximum size (200 b, 100 docs, 100 meta, 200 b total) is "
"higher than the configured limit of (100, 1000000)]</i> [node(idx=0,crc=0xff,docs=100/100,bytes=200/200,trusted=true,"
@@ -130,14 +115,11 @@ IdealStateManagerTest::testDisabledStateChecker() {
ost.str());
tick();
- CPPUNIT_ASSERT_EQUAL(std::string(""),
- _distributor->getActiveIdealStateOperations());
+ EXPECT_EQ("", _distributor->getActiveIdealStateOperations());
}
-void
-IdealStateManagerTest::testClearActiveOnNodeDown()
-{
+TEST_F(IdealStateManagerTest, clear_active_on_node_down) {
setSystemState(lib::ClusterState("distributor:1 storage:3"));
for (int i = 1; i < 4; i++) {
insertBucketInfo(document::BucketId(16, i), 0, 0xff, 100, 200);
@@ -152,24 +134,19 @@ IdealStateManagerTest::testClearActiveOnNodeDown()
tick();
}
- CPPUNIT_ASSERT_EQUAL(
- std::string("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n"
- "setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000002)) (pri 100)\n"
- "setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000003)) (pri 100)\n"),
- _distributor->getActiveIdealStateOperations());
+ EXPECT_EQ("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n"
+ "setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000002)) (pri 100)\n"
+ "setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000003)) (pri 100)\n",
+ _distributor->getActiveIdealStateOperations());
setSystemState(lib::ClusterState("distributor:1 storage:3 .0.s:d"));
- CPPUNIT_ASSERT_EQUAL(std::string(""),
- _distributor->getActiveIdealStateOperations());
- CPPUNIT_ASSERT_EQUAL(uint32_t(0),
- _distributor->getPendingMessageTracker()
- .getNodeInfo().getPendingCount(0));
+ EXPECT_EQ("", _distributor->getActiveIdealStateOperations());
+ EXPECT_EQ(0, _distributor->getPendingMessageTracker()
+ .getNodeInfo().getPendingCount(0));
}
-void
-IdealStateManagerTest::testRecheckWhenActive()
-{
+TEST_F(IdealStateManagerTest, recheck_when_active) {
for (uint32_t j = 0; j < 3; j++) {
insertBucketInfo(document::BucketId(16, 1), j, 0xff - j, 100, 200);
}
@@ -178,26 +155,21 @@ IdealStateManagerTest::testRecheckWhenActive()
tick();
- CPPUNIT_ASSERT_EQUAL(
- std::string("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n"),
- _distributor->getActiveIdealStateOperations());
+ EXPECT_EQ("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n",
+ _distributor->getActiveIdealStateOperations());
tick();
- CPPUNIT_ASSERT_EQUAL(
- std::string("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n"),
- _distributor->getActiveIdealStateOperations());
+ EXPECT_EQ("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n",
+ _distributor->getActiveIdealStateOperations());
tick();
- CPPUNIT_ASSERT_EQUAL(
- std::string("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n"),
- _distributor->getActiveIdealStateOperations());
+ EXPECT_EQ("setbucketstate to [0] Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)) (pri 100)\n",
+ _distributor->getActiveIdealStateOperations());
}
-void
-IdealStateManagerTest::testBlockIdealStateOpsOnFullRequestBucketInfo()
-{
+TEST_F(IdealStateManagerTest, block_ideal_state_ops_on_full_request_bucket_info) {
setupDistributor(2, 10, "distributor:1 storage:2");
framework::defaultimplementation::FakeClock clock;
@@ -209,45 +181,39 @@ IdealStateManagerTest::testBlockIdealStateOpsOnFullRequestBucketInfo()
// RequestBucketInfoCommand does not have a specific bucketid since it's
// sent to the entire node. It will then use a null bucketid.
{
- std::shared_ptr<api::RequestBucketInfoCommand> msg(
- new api::RequestBucketInfoCommand(makeBucketSpace(), buckets));
- msg->setAddress(
- api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 4));
+ auto msg = std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), buckets);
+ msg->setAddress(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 4));
tracker.insert(msg);
}
{
RemoveBucketOperation op("storage",
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(3, 4)));
- CPPUNIT_ASSERT(op.isBlocked(tracker));
+ EXPECT_TRUE(op.isBlocked(tracker));
}
{
// Don't trigger on requests to other nodes.
RemoveBucketOperation op("storage",
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(3, 5)));
- CPPUNIT_ASSERT(!op.isBlocked(tracker));
+ EXPECT_FALSE(op.isBlocked(tracker));
}
// Don't block on null-bucket messages that aren't RequestBucketInfo.
{
- std::shared_ptr<api::CreateVisitorCommand> msg(
- new api::CreateVisitorCommand(makeBucketSpace(), "foo", "bar", "baz"));
- msg->setAddress(
- api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 7));
+ auto msg = std::make_shared<api::CreateVisitorCommand>(makeBucketSpace(), "foo", "bar", "baz");
+ msg->setAddress(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 7));
tracker.insert(msg);
}
{
RemoveBucketOperation op("storage",
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(7)));
- CPPUNIT_ASSERT(!op.isBlocked(tracker));
+ EXPECT_FALSE(op.isBlocked(tracker));
}
}
-void
-IdealStateManagerTest::testBlockCheckForAllOperationsToSpecificBucket()
-{
+TEST_F(IdealStateManagerTest, block_check_for_all_operations_to_specific_bucket) {
setupDistributor(2, 10, "distributor:1 storage:2");
framework::defaultimplementation::FakeClock clock;
PendingMessageTracker tracker(_node->getComponentRegister());
@@ -263,9 +229,9 @@ IdealStateManagerTest::testBlockCheckForAllOperationsToSpecificBucket()
RemoveBucketOperation op("storage",
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(7)));
// Not blocked for exact node match.
- CPPUNIT_ASSERT(!op.checkBlock(makeDocumentBucket(bid), tracker));
+ EXPECT_FALSE(checkBlock(op, makeDocumentBucket(bid), tracker));
// But blocked for bucket match!
- CPPUNIT_ASSERT(op.checkBlockForAllNodes(makeDocumentBucket(bid), tracker));
+ EXPECT_TRUE(checkBlockForAllNodes(op, makeDocumentBucket(bid), tracker));
}
}
@@ -282,6 +248,4 @@ IdealStateManagerTest::makeBucketStatusString(const std::string &defaultSpaceBuc
return ost.str();
}
-} // distributor
-} // storage
-
+} // storage::distributor
diff --git a/storage/src/tests/distributor/joinbuckettest.cpp b/storage/src/tests/distributor/joinbuckettest.cpp
index 42ba0c0c0b9..a918a29c609 100644
--- a/storage/src/tests/distributor/joinbuckettest.cpp
+++ b/storage/src/tests/distributor/joinbuckettest.cpp
@@ -1,47 +1,33 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <vespa/storage/distributor/operations/idealstate/joinoperation.h>
#include <vespa/storage/distributor/distributor.h>
#include <tests/distributor/distributortestutil.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-namespace distributor {
-
-class JoinOperationTest : public CppUnit::TestFixture, public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(JoinOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(sendSparseJoinsToNodesWithoutBothSourceBuckets);
- CPPUNIT_TEST_SUITE_END();
+namespace storage::distributor {
+struct JoinOperationTest : Test, DistributorTestUtil {
void checkSourceBucketsAndSendReply(
JoinOperation& op,
size_t msgIndex,
const std::vector<document::BucketId>& wantedIds);
-protected:
- void testSimple();
- void sendSparseJoinsToNodesWithoutBothSourceBuckets();
-
-public:
- void setUp() override {
+ void SetUp() override {
createLinks();
};
- void tearDown() override {
+ void TearDown() override {
close();
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(JoinOperationTest);
-
-void
-JoinOperationTest::testSimple()
-{
+TEST_F(JoinOperationTest, simple) {
getConfig().setJoinCount(100);
getConfig().setJoinSize(1000);
@@ -61,14 +47,13 @@ JoinOperationTest::testSimple()
checkSourceBucketsAndSendReply(op, 0, {{33, 1}, {33, 0x100000001}});
- CPPUNIT_ASSERT(!getBucket(document::BucketId(33, 0x100000001)).valid());
- CPPUNIT_ASSERT(!getBucket(document::BucketId(33, 1)).valid());
+ EXPECT_FALSE(getBucket(document::BucketId(33, 0x100000001)).valid());
+ EXPECT_FALSE(getBucket(document::BucketId(33, 1)).valid());
BucketDatabase::Entry entry = getBucket(document::BucketId(32, 0));
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(666, 90, 500),
- entry->getNodeRef(0).getBucketInfo());
+ ASSERT_TRUE(entry.valid());
+ EXPECT_EQ(0, entry->getNodeRef(0).getNode());
+ EXPECT_EQ(api::BucketInfo(666, 90, 500), entry->getNodeRef(0).getBucketInfo());
}
void
@@ -77,18 +62,16 @@ JoinOperationTest::checkSourceBucketsAndSendReply(
size_t msgIndex,
const std::vector<document::BucketId>& wantedIds)
{
- CPPUNIT_ASSERT(_sender.commands.size() > msgIndex);
+ ASSERT_GT(_sender.commands().size(), msgIndex);
- std::shared_ptr<api::StorageCommand> msg(_sender.commands[msgIndex]);
- CPPUNIT_ASSERT_EQUAL(api::MessageType::JOINBUCKETS, msg->getType());
+ std::shared_ptr<api::StorageCommand> msg(_sender.command(msgIndex));
+ ASSERT_EQ(api::MessageType::JOINBUCKETS, msg->getType());
- api::JoinBucketsCommand& joinCmd(
- dynamic_cast<api::JoinBucketsCommand&>(*msg));
- CPPUNIT_ASSERT_EQUAL(wantedIds, joinCmd.getSourceBuckets());
+ auto& joinCmd = dynamic_cast<api::JoinBucketsCommand&>(*msg);
+ EXPECT_THAT(joinCmd.getSourceBuckets(), ContainerEq(wantedIds));
std::shared_ptr<api::StorageReply> reply(joinCmd.makeReply());
- api::JoinBucketsReply& sreply(
- dynamic_cast<api::JoinBucketsReply&>(*reply));
+ auto& sreply = dynamic_cast<api::JoinBucketsReply&>(*reply);
sreply.setBucketInfo(api::BucketInfo(666, 90, 500));
op.receive(_sender, reply);
@@ -99,9 +82,7 @@ JoinOperationTest::checkSourceBucketsAndSendReply(
* bucket id used as both source buckets) for those nodes having only one of
* the buckets.
*/
-void
-JoinOperationTest::sendSparseJoinsToNodesWithoutBothSourceBuckets()
-{
+TEST_F(JoinOperationTest, send_sparse_joins_to_nodes_without_both_source_buckets) {
getConfig().setJoinCount(100);
getConfig().setJoinSize(1000);
@@ -119,10 +100,8 @@ JoinOperationTest::sendSparseJoinsToNodesWithoutBothSourceBuckets()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- checkSourceBucketsAndSendReply(op, 0, {{33, 1}, {33, 0x100000001}});
- checkSourceBucketsAndSendReply(op, 1, {{33, 1}, {33, 1}});
-}
-
+ ASSERT_NO_FATAL_FAILURE(checkSourceBucketsAndSendReply(op, 0, {{33, 1}, {33, 0x100000001}}));
+ ASSERT_NO_FATAL_FAILURE(checkSourceBucketsAndSendReply(op, 1, {{33, 1}, {33, 1}}));
}
}
diff --git a/storage/src/tests/distributor/maintenancemocks.h b/storage/src/tests/distributor/maintenancemocks.h
index 2be74ca1a8b..c88e477e90e 100644
--- a/storage/src/tests/distributor/maintenancemocks.h
+++ b/storage/src/tests/distributor/maintenancemocks.h
@@ -11,8 +11,7 @@
using document::test::makeBucketSpace;
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
class MockMaintenancePriorityGenerator
: public MaintenancePriorityGenerator
@@ -116,5 +115,3 @@ public:
};
}
-}
-
diff --git a/storage/src/tests/distributor/maintenanceschedulertest.cpp b/storage/src/tests/distributor/maintenanceschedulertest.cpp
index db0347617f0..53408bbf6b6 100644
--- a/storage/src/tests/distributor/maintenanceschedulertest.cpp
+++ b/storage/src/tests/distributor/maintenanceschedulertest.cpp
@@ -1,110 +1,78 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <string>
-#include <sstream>
-#include <memory>
+#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
#include <vespa/storage/distributor/maintenance/maintenancescheduler.h>
#include <vespa/storage/bucketdb/mapbucketdatabase.h>
#include <tests/distributor/maintenancemocks.h>
-#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <memory>
+#include <string>
+#include <sstream>
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-
-namespace distributor {
+namespace storage::distributor {
using document::BucketId;
-typedef MaintenancePriority Priority;
-typedef MaintenanceScheduler::WaitTimeMs WaitTimeMs;
-
-class MaintenanceSchedulerTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(MaintenanceSchedulerTest);
- CPPUNIT_TEST(testPriorityClearedAfterScheduled);
- CPPUNIT_TEST(testOperationIsScheduled);
- CPPUNIT_TEST(testNoOperationsToSchedule);
- CPPUNIT_TEST(testSuppressLowPrioritiesInEmergencyMode);
- CPPUNIT_TEST(testPriorityNotClearedIfOperationNotStarted);
- CPPUNIT_TEST_SUITE_END();
+using Priority = MaintenancePriority;
+using WaitTimeMs = MaintenanceScheduler::WaitTimeMs;
+struct MaintenanceSchedulerTest : Test {
std::unique_ptr<SimpleBucketPriorityDatabase> _priorityDb;
std::unique_ptr<MockMaintenanceOperationGenerator> _operationGenerator;
std::unique_ptr<MockOperationStarter> _operationStarter;
std::unique_ptr<MaintenanceScheduler> _scheduler;
- void addBucketToDb(int bucketNum);
-public:
- void testPriorityClearedAfterScheduled();
- void testOperationIsScheduled();
- void testNoOperationsToSchedule();
- void testSuppressLowPrioritiesInEmergencyMode();
- void testPriorityNotClearedIfOperationNotStarted();
-
- void setUp() override;
+ void SetUp() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(MaintenanceSchedulerTest);
-
void
-MaintenanceSchedulerTest::setUp()
+MaintenanceSchedulerTest::SetUp()
{
- _priorityDb.reset(new SimpleBucketPriorityDatabase());
- _operationGenerator.reset(new MockMaintenanceOperationGenerator());
- _operationStarter.reset(new MockOperationStarter());
- _scheduler.reset(new MaintenanceScheduler(*_operationGenerator,
- *_priorityDb,
- *_operationStarter));
+ _priorityDb = std::make_unique<SimpleBucketPriorityDatabase>();
+ _operationGenerator = std::make_unique<MockMaintenanceOperationGenerator>();
+ _operationStarter = std::make_unique<MockOperationStarter>();
+ _scheduler = std::make_unique<MaintenanceScheduler>(*_operationGenerator, *_priorityDb, *_operationStarter);
}
-void
-MaintenanceSchedulerTest::testPriorityClearedAfterScheduled()
-{
+TEST_F(MaintenanceSchedulerTest, priority_cleared_after_scheduled) {
_priorityDb->setPriority(PrioritizedBucket(makeDocumentBucket(BucketId(16, 1)), Priority::VERY_HIGH));
_scheduler->tick(MaintenanceScheduler::NORMAL_SCHEDULING_MODE);
- CPPUNIT_ASSERT_EQUAL(std::string(), _priorityDb->toString());
+ EXPECT_EQ("", _priorityDb->toString());
}
-void
-MaintenanceSchedulerTest::testOperationIsScheduled()
-{
+TEST_F(MaintenanceSchedulerTest, operation_is_scheduled) {
_priorityDb->setPriority(PrioritizedBucket(makeDocumentBucket(BucketId(16, 1)), Priority::MEDIUM));
_scheduler->tick(MaintenanceScheduler::NORMAL_SCHEDULING_MODE);
- CPPUNIT_ASSERT_EQUAL(std::string("Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri 100\n"),
- _operationStarter->toString());
+ EXPECT_EQ("Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri 100\n",
+ _operationStarter->toString());
}
-void
-MaintenanceSchedulerTest::testNoOperationsToSchedule()
-{
+TEST_F(MaintenanceSchedulerTest, no_operations_to_schedule) {
WaitTimeMs waitMs(_scheduler->tick(MaintenanceScheduler::NORMAL_SCHEDULING_MODE));
- CPPUNIT_ASSERT_EQUAL(WaitTimeMs(1), waitMs);
- CPPUNIT_ASSERT_EQUAL(std::string(), _operationStarter->toString());
+ EXPECT_EQ(WaitTimeMs(1), waitMs);
+ EXPECT_EQ("", _operationStarter->toString());
}
-void
-MaintenanceSchedulerTest::testSuppressLowPrioritiesInEmergencyMode()
-{
+TEST_F(MaintenanceSchedulerTest, suppress_low_priorities_in_emergency_mode) {
_priorityDb->setPriority(PrioritizedBucket(makeDocumentBucket(BucketId(16, 1)), Priority::HIGH));
_priorityDb->setPriority(PrioritizedBucket(makeDocumentBucket(BucketId(16, 2)), Priority::VERY_HIGH));
- CPPUNIT_ASSERT_EQUAL(WaitTimeMs(0), _scheduler->tick(MaintenanceScheduler::RECOVERY_SCHEDULING_MODE));
- CPPUNIT_ASSERT_EQUAL(WaitTimeMs(1), _scheduler->tick(MaintenanceScheduler::RECOVERY_SCHEDULING_MODE));
- CPPUNIT_ASSERT_EQUAL(std::string("Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000002)), pri 0\n"),
- _operationStarter->toString());
- CPPUNIT_ASSERT_EQUAL(std::string("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri HIGH)\n"),
- _priorityDb->toString());
+ EXPECT_EQ(WaitTimeMs(0), _scheduler->tick(MaintenanceScheduler::RECOVERY_SCHEDULING_MODE));
+ EXPECT_EQ(WaitTimeMs(1), _scheduler->tick(MaintenanceScheduler::RECOVERY_SCHEDULING_MODE));
+ EXPECT_EQ("Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000002)), pri 0\n",
+ _operationStarter->toString());
+ EXPECT_EQ("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri HIGH)\n",
+ _priorityDb->toString());
}
-void
-MaintenanceSchedulerTest::testPriorityNotClearedIfOperationNotStarted()
-{
+TEST_F(MaintenanceSchedulerTest, priority_not_cleared_if_operation_not_started) {
_priorityDb->setPriority(PrioritizedBucket(makeDocumentBucket(BucketId(16, 1)), Priority::HIGH));
_operationStarter->setShouldStartOperations(false);
WaitTimeMs waitMs(_scheduler->tick(MaintenanceScheduler::NORMAL_SCHEDULING_MODE));
- CPPUNIT_ASSERT_EQUAL(WaitTimeMs(1), waitMs);
- CPPUNIT_ASSERT_EQUAL(std::string("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri HIGH)\n"),
- _priorityDb->toString());
+ EXPECT_EQ(WaitTimeMs(1), waitMs);
+ EXPECT_EQ("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri HIGH)\n",
+ _priorityDb->toString());
}
}
-}
diff --git a/storage/src/tests/distributor/mergelimitertest.cpp b/storage/src/tests/distributor/mergelimitertest.cpp
index 0b40424594b..b06630ea592 100644
--- a/storage/src/tests/distributor/mergelimitertest.cpp
+++ b/storage/src/tests/distributor/mergelimitertest.cpp
@@ -1,94 +1,65 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/storage/distributor/operations/idealstate/mergelimiter.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace ::testing;
namespace storage::distributor {
-struct MergeLimiterTest : public CppUnit::TestFixture
-{
- void testKeepsAllBelowLimit();
- void testLessThanMaxUntrusted();
- void testMoreThanMaxUntrusted();
- void testAllUntrustedLessThanMaxVariants();
- void testAllUntrustedMoreThanMaxVariants();
- void testSourceOnlyLast();
- void limited_set_cannot_be_just_source_only();
- void non_source_only_replica_chosen_from_in_sync_group();
- void non_source_only_replicas_preferred_when_replicas_not_in_sync();
- void at_least_one_non_source_only_replica_chosen_when_all_trusted();
- void missing_replica_distinct_from_empty_replica();
-
- CPPUNIT_TEST_SUITE(MergeLimiterTest);
- CPPUNIT_TEST(testKeepsAllBelowLimit);
- CPPUNIT_TEST(testLessThanMaxUntrusted);
- CPPUNIT_TEST(testMoreThanMaxUntrusted);
- CPPUNIT_TEST(testAllUntrustedLessThanMaxVariants);
- CPPUNIT_TEST(testAllUntrustedMoreThanMaxVariants);
- CPPUNIT_TEST(testSourceOnlyLast);
- CPPUNIT_TEST(limited_set_cannot_be_just_source_only);
- CPPUNIT_TEST(non_source_only_replica_chosen_from_in_sync_group);
- CPPUNIT_TEST(non_source_only_replicas_preferred_when_replicas_not_in_sync);
- CPPUNIT_TEST(at_least_one_non_source_only_replica_chosen_when_all_trusted);
- CPPUNIT_TEST(missing_replica_distinct_from_empty_replica);
- CPPUNIT_TEST_SUITE_END();
-};
+namespace {
-CPPUNIT_TEST_SUITE_REGISTRATION(MergeLimiterTest);
+using BucketCopyPtr = std::unique_ptr<BucketCopy>;
+std::vector<BucketCopyPtr> _bucketDatabase;
-namespace {
- using BucketCopyPtr = std::unique_ptr<BucketCopy>;
- std::vector<BucketCopyPtr> _bucketDatabase;
-
- struct NodeFactory {
- std::vector<MergeMetaData> _nodes;
-
- NodeFactory& add(int index, int crc) {
- _bucketDatabase.push_back(BucketCopyPtr(
- new BucketCopy(0, index, api::BucketInfo(crc, 5, 10))));
- _nodes.push_back(MergeMetaData(index, *_bucketDatabase.back()));
- return *this;
- }
- NodeFactory& addTrusted(int index, int crc) {
- add(index, crc);
- _bucketDatabase.back()->setTrusted(true);
- return *this;
- }
- NodeFactory& addMissing(int index) {
- add(index, 0x1); // "Magic" checksum value implying invalid/recently created replica
- return *this;
- }
- NodeFactory& addEmpty(int index) {
- add(index, 0x0);
- return *this;
- }
- NodeFactory& setSourceOnly() {
- _nodes.back()._sourceOnly = true;
- return *this;
- }
-
- operator const MergeLimiter::NodeArray&() const { return _nodes; }
- };
-
- #define ASSERT_LIMIT(maxNodes, nodes, result) \
- { \
- MergeLimiter limiter(maxNodes); \
- auto nodesCopy = nodes; \
- limiter.limitMergeToMaxNodes(nodesCopy); \
- std::ostringstream actual; \
- for (uint32_t i = 0; i < nodesCopy.size(); ++i) { \
- if (i != 0) actual << ","; \
- actual << nodesCopy[i]._nodeIndex; \
- if (nodesCopy[i]._sourceOnly) actual << 's'; \
- } \
- CPPUNIT_ASSERT_EQUAL(std::string(result), actual.str()); \
+struct NodeFactory {
+ std::vector<MergeMetaData> _nodes;
+
+ NodeFactory& add(int index, int crc) {
+ _bucketDatabase.emplace_back(
+ std::make_unique<BucketCopy>(0, index, api::BucketInfo(crc, 5, 10)));
+ _nodes.emplace_back(MergeMetaData(index, *_bucketDatabase.back()));
+ return *this;
+ }
+ NodeFactory& addTrusted(int index, int crc) {
+ add(index, crc);
+ _bucketDatabase.back()->setTrusted(true);
+ return *this;
+ }
+ NodeFactory& addMissing(int index) {
+ add(index, 0x1); // "Magic" checksum value implying invalid/recently created replica
+ return *this;
}
+ NodeFactory& addEmpty(int index) {
+ add(index, 0x0);
+ return *this;
+ }
+ NodeFactory& setSourceOnly() {
+ _nodes.back()._sourceOnly = true;
+ return *this;
+ }
+
+ operator const MergeLimiter::NodeArray&() const { return _nodes; }
+};
+
+#define ASSERT_LIMIT(maxNodes, nodes, result) \
+{ \
+ MergeLimiter limiter(maxNodes); \
+ auto nodesCopy = nodes; \
+ limiter.limitMergeToMaxNodes(nodesCopy); \
+ std::ostringstream actual; \
+ for (uint32_t i = 0; i < nodesCopy.size(); ++i) { \
+ if (i != 0) actual << ","; \
+ actual << nodesCopy[i]._nodeIndex; \
+ if (nodesCopy[i]._sourceOnly) actual << 's'; \
+ } \
+ ASSERT_EQ(result, actual.str()); \
+}
+
}
// If there is <= max nodes, then none should be removed.
-void
-MergeLimiterTest::testKeepsAllBelowLimit()
-{
+TEST(MergeLimiterTest, keeps_all_below_limit) {
MergeLimiter::NodeArray nodes(NodeFactory()
.addTrusted(3, 0x4)
.addTrusted(5, 0x4)
@@ -100,9 +71,7 @@ MergeLimiterTest::testKeepsAllBelowLimit()
}
// If less than max nodes is untrusted, merge all untrusted copies with a
// trusted one. (Optionally with extra trusted copies if there is space)
-void
-MergeLimiterTest::testLessThanMaxUntrusted()
-{
+TEST(MergeLimiterTest, less_than_max_untrusted) {
MergeLimiter::NodeArray nodes(NodeFactory()
.addTrusted(3, 0x4)
.addTrusted(5, 0x4)
@@ -113,9 +82,7 @@ MergeLimiterTest::testLessThanMaxUntrusted()
}
// With more than max untrusted, just merge one trusted with as many untrusted
// that fits.
-void
-MergeLimiterTest::testMoreThanMaxUntrusted()
-{
+TEST(MergeLimiterTest, more_than_max_untrusted) {
MergeLimiter::NodeArray nodes(NodeFactory()
.addTrusted(3, 0x4)
.addTrusted(5, 0x4)
@@ -129,9 +96,7 @@ MergeLimiterTest::testMoreThanMaxUntrusted()
// With nothing trusted. If there is <= max different variants (checksums),
// merge one of each variant. After this merge, all these nodes can be set
// trusted. (Except for any source only ones)
-void
-MergeLimiterTest::testAllUntrustedLessThanMaxVariants()
-{
+TEST(MergeLimiterTest, all_untrusted_less_than_max_variants) {
MergeLimiter::NodeArray nodes(NodeFactory()
.add(3, 0x4)
.add(5, 0x4)
@@ -144,9 +109,7 @@ MergeLimiterTest::testAllUntrustedLessThanMaxVariants()
}
// With nothing trusted and more than max variants, we just have to merge one
// of each variant until we end up with less than max variants.
-void
-MergeLimiterTest::testAllUntrustedMoreThanMaxVariants()
-{
+TEST(MergeLimiterTest, all_untrusted_more_than_max_variants) {
MergeLimiter::NodeArray nodes(NodeFactory()
.add(3, 0x4)
.add(5, 0x5)
@@ -160,9 +123,7 @@ MergeLimiterTest::testAllUntrustedMoreThanMaxVariants()
// With more than max untrusted, just merge one trusted with as many untrusted
// that fits.
-void
-MergeLimiterTest::testSourceOnlyLast()
-{
+TEST(MergeLimiterTest, source_only_last) {
MergeLimiter::NodeArray nodes(NodeFactory()
.addTrusted(3, 0x4)
.addTrusted(5, 0x4).setSourceOnly()
@@ -174,7 +135,7 @@ MergeLimiterTest::testSourceOnlyLast()
ASSERT_LIMIT(4, nodes, "9,3,5s,2s");
}
-void MergeLimiterTest::limited_set_cannot_be_just_source_only() {
+TEST(MergeLimiterTest, limited_set_cannot_be_just_source_only) {
MergeLimiter::NodeArray nodes(NodeFactory()
.addTrusted(9, 0x6)
.addTrusted(2, 0x6)
@@ -184,7 +145,7 @@ void MergeLimiterTest::limited_set_cannot_be_just_source_only() {
ASSERT_LIMIT(3, nodes, "2,13s,1s");
}
-void MergeLimiterTest::non_source_only_replica_chosen_from_in_sync_group() {
+TEST(MergeLimiterTest, non_source_only_replica_chosen_from_in_sync_group) {
// nodes 9, 2, 13 are all in sync. Merge limiter will currently by default
// pop the _last_ node of an in-sync replica "group" when outputting a limited
// set. Unless we special-case source-only replicas here, we'd end up with an
@@ -198,7 +159,7 @@ void MergeLimiterTest::non_source_only_replica_chosen_from_in_sync_group() {
ASSERT_LIMIT(3, nodes, "2,13s,1s");
}
-void MergeLimiterTest::non_source_only_replicas_preferred_when_replicas_not_in_sync() {
+TEST(MergeLimiterTest, non_source_only_replicas_preferred_when_replicas_not_in_sync) {
MergeLimiter::NodeArray nodes(NodeFactory()
.add(9, 0x4)
.add(2, 0x5)
@@ -208,7 +169,7 @@ void MergeLimiterTest::non_source_only_replicas_preferred_when_replicas_not_in_s
ASSERT_LIMIT(3, nodes, "9,2,13s");
}
-void MergeLimiterTest::at_least_one_non_source_only_replica_chosen_when_all_trusted() {
+TEST(MergeLimiterTest, at_least_one_non_source_only_replica_chosen_when_all_trusted) {
MergeLimiter::NodeArray nodes(NodeFactory()
.addTrusted(9, 0x6)
.addTrusted(2, 0x6)
@@ -218,7 +179,7 @@ void MergeLimiterTest::at_least_one_non_source_only_replica_chosen_when_all_trus
ASSERT_LIMIT(3, nodes, "2,13s,1s");
}
-void MergeLimiterTest::missing_replica_distinct_from_empty_replica() {
+TEST(MergeLimiterTest, missing_replica_distinct_from_empty_replica) {
MergeLimiter::NodeArray nodes(NodeFactory()
.addEmpty(3)
.addEmpty(5)
diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp
index 672c1d06124..75faddbe667 100644
--- a/storage/src/tests/distributor/mergeoperationtest.cpp
+++ b/storage/src/tests/distributor/mergeoperationtest.cpp
@@ -1,70 +1,35 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <boost/lexical_cast.hpp>
-#include <cppunit/extensions/HelperMacros.h>
-#include <iomanip>
+#include <vespa/document/test/make_document_bucket.h>
#include <tests/common/dummystoragelink.h>
#include <vespa/storage/distributor/idealstatemanager.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storage/distributor/operations/idealstate/mergeoperation.h>
#include <vespa/storage/distributor/bucketdbupdater.h>
-#include <tests/distributor/distributortestutil.h>
-#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/distributor.h>
+#include <tests/distributor/distributortestutil.h>
#include <vespa/vespalib/text/stringtokenizer.h>
+#include <vespa/vespalib/gtest/gtest.h>
-using std::shared_ptr;
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-namespace distributor {
-
-class MergeOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(MergeOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testFailIfSourceOnlyCopiesChanged);
- CPPUNIT_TEST(testGenerateNodeList);
- CPPUNIT_TEST(doNotRemoveCopiesWithPendingMessages);
- CPPUNIT_TEST(allow_deleting_active_source_only_replica);
- CPPUNIT_TEST(testMarkRedundantTrustedCopiesAsSourceOnly);
- CPPUNIT_TEST(onlyMarkRedundantRetiredReplicasAsSourceOnly);
- CPPUNIT_TEST(mark_post_merge_redundant_replicas_source_only);
- CPPUNIT_TEST(merge_operation_is_blocked_by_any_busy_target_node);
- CPPUNIT_TEST(missing_replica_is_included_in_limited_node_list);
- CPPUNIT_TEST_SUITE_END();
+namespace storage::distributor {
+struct MergeOperationTest : Test, DistributorTestUtil {
std::unique_ptr<PendingMessageTracker> _pendingTracker;
-protected:
- void testSimple();
- void testFailIfSourceOnlyCopiesChanged();
- void testGenerateNodeList();
- void doNotRemoveCopiesWithPendingMessages();
- void allow_deleting_active_source_only_replica();
- void testMarkRedundantTrustedCopiesAsSourceOnly();
- void onlyMarkRedundantRetiredReplicasAsSourceOnly();
- void mark_post_merge_redundant_replicas_source_only();
- void merge_operation_is_blocked_by_any_busy_target_node();
- void missing_replica_is_included_in_limited_node_list();
-
-public:
- void setUp() override {
+ void SetUp() override {
createLinks();
- _pendingTracker.reset(new PendingMessageTracker(getComponentRegister()));
+ _pendingTracker = std::make_unique<PendingMessageTracker>(getComponentRegister());
_sender.setPendingMessageTracker(*_pendingTracker);
}
- void tearDown() override {
+ void TearDown() override {
close();
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(MergeOperationTest);
-
-void
-MergeOperationTest::testSimple()
-{
+TEST_F(MergeOperationTest, simple) {
getClock().setAbsoluteTimeInSeconds(10);
addNodesToBucketDB(document::BucketId(16, 1),
@@ -79,25 +44,20 @@ MergeOperationTest::testSimple()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
- "cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
- "reasons to start: ) => 0"),
- _sender.getLastCommand(true));
+ ASSERT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
+ "cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
+ "reasons to start: ) => 0",
+ _sender.getLastCommand(true));
sendReply(op);
- CPPUNIT_ASSERT_EQUAL(
- std::string("DeleteBucketCommand(BucketId(0x4000000000000001)) "
- "Reasons to start: => 1"),
- _sender.getLastCommand(true));
+ ASSERT_EQ("DeleteBucketCommand(BucketId(0x4000000000000001)) "
+ "Reasons to start: => 1",
+ _sender.getLastCommand(true));
}
-void
-MergeOperationTest::testFailIfSourceOnlyCopiesChanged()
-{
+TEST_F(MergeOperationTest, fail_if_source_only_copies_changed) {
getClock().setAbsoluteTimeInSeconds(10);
addNodesToBucketDB(document::BucketId(16, 1),
@@ -116,11 +76,10 @@ MergeOperationTest::testFailIfSourceOnlyCopiesChanged()
"cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
"reasons to start: ) => 0");
- CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+ ASSERT_EQ(merge, _sender.getLastCommand(true));
{
- const api::MergeBucketCommand& cmd(
- dynamic_cast<api::MergeBucketCommand&>(*_sender.commands[0]));
- CPPUNIT_ASSERT_EQUAL(uint16_t(0), cmd.getSourceIndex());
+ auto& cmd = dynamic_cast<api::MergeBucketCommand&>(*_sender.command(0));
+ EXPECT_EQ(0, cmd.getSourceIndex());
}
// Source-only copy changed during merge
@@ -130,8 +89,8 @@ MergeOperationTest::testFailIfSourceOnlyCopiesChanged()
"2=10/1/1/t");
sendReply(op);
// Should not be a remove here!
- CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
- CPPUNIT_ASSERT(!op.ok());
+ ASSERT_EQ(merge, _sender.getLastCommand(true));
+ EXPECT_FALSE(op.ok());
}
namespace {
@@ -176,130 +135,96 @@ std::string getNodeList(std::string state, uint32_t redundancy, std::string exis
}
}
-void
-MergeOperationTest::testGenerateNodeList()
-{
+TEST_F(MergeOperationTest, generate_node_list) {
// If this fails, the distribution has changed and the rest of the test will
// likely fail
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,2,1,4"),
- getNodeList("storage:10", 10, "0,1,2,3,4,5,6,7,8,9"));
+ ASSERT_EQ("3,5,7,6,8,0,9,2,1,4",
+ getNodeList("storage:10", 10, "0,1,2,3,4,5,6,7,8,9"));
// Nodes that are initializing should be treated as up
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7s,6s"),
- getNodeList("storage:10 .3.s:i .5.s:i", 2, "7,6,3,5")); // Ideal: 3,5
+ EXPECT_EQ("3,5,7s,6s",
+ getNodeList("storage:10 .3.s:i .5.s:i", 2, "7,6,3,5")); // Ideal: 3,5
// Order is given by ideal state algorithm, not order of storagenodes in bucket db
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7"),
- getNodeList("storage:10", 3, "3,7,5"));
+ EXPECT_EQ("3,5,7",
+ getNodeList("storage:10", 3, "3,7,5"));
// Node not in ideal state will be used if not enough nodes in ideal state
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,7,6"),
- getNodeList("storage:10", 3, "3,7,6"));
+ EXPECT_EQ("3,7,6",
+ getNodeList("storage:10", 3, "3,7,6"));
// Nodes not in ideal state will be included as source only after redundancy
// is reached
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,8s"),
- getNodeList("storage:10", 3, "3,5,7,8"));
+ EXPECT_EQ("3,5,7,8s",
+ getNodeList("storage:10", 3, "3,5,7,8"));
// Need at least redundancy copies that are not source only
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,8,9s"),
- getNodeList("storage:10", 3, "3,5,8,9"));
+ EXPECT_EQ("3,5,8,9s",
+ getNodeList("storage:10", 3, "3,5,8,9"));
// Order is given by storagenodes in bucket db
// when no nodes are in ideal state
- CPPUNIT_ASSERT_EQUAL(
- std::string("4,1,2"),
- getNodeList("storage:10", 3, "4,1,2"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,0s,1s,2s,4s,5s,6s,7s,8s,9s"),
- getNodeList("storage:10", 1, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,0s,1s,2s,4s,6s,7s,8s,9s"),
- getNodeList("storage:10", 2, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,0s,1s,2s,4s,6s,8s,9s"),
- getNodeList("storage:10", 3, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,0s,1s,2s,4s,8s,9s"),
- getNodeList("storage:10", 4, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0s,1s,2s,4s,9s"),
- getNodeList("storage:10", 5, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,1s,2s,4s,9s"),
- getNodeList("storage:10", 6, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,1s,2s,4s"),
- getNodeList("storage:10", 7, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,2,1s,4s"),
- getNodeList("storage:10", 8, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,2,1,4s"),
- getNodeList("storage:10", 9, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,2,1,4"),
- getNodeList("storage:10", 10, "0,1,2,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,9s,8s,7s,6s,5s,4s,2s,1s,0s"),
- getNodeList("storage:10", 1, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,9s,8s,7s,6s,4s,2s,1s,0s"),
- getNodeList("storage:10", 2, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,9s,8s,6s,4s,2s,1s,0s"),
- getNodeList("storage:10", 3, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,9s,8s,4s,2s,1s,0s"),
- getNodeList("storage:10", 4, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,9s,4s,2s,1s,0s"),
- getNodeList("storage:10", 5, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9s,4s,2s,1s"),
- getNodeList("storage:10", 6, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,4s,2s,1s"),
- getNodeList("storage:10", 7, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,2,4s,1s"),
- getNodeList("storage:10", 8, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,2,1,4s"),
- getNodeList("storage:10", 9, "9,8,7,6,5,4,3,2,1,0"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,2,1,4"),
- getNodeList("storage:10", 10, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("4,1,2",
+ getNodeList("storage:10", 3, "4,1,2"));
+
+ EXPECT_EQ("3,0s,1s,2s,4s,5s,6s,7s,8s,9s",
+ getNodeList("storage:10", 1, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,0s,1s,2s,4s,6s,7s,8s,9s",
+ getNodeList("storage:10", 2, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,0s,1s,2s,4s,6s,8s,9s",
+ getNodeList("storage:10", 3, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,0s,1s,2s,4s,8s,9s",
+ getNodeList("storage:10", 4, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,8,0s,1s,2s,4s,9s",
+ getNodeList("storage:10", 5, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,8,0,1s,2s,4s,9s",
+ getNodeList("storage:10", 6, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,8,0,9,1s,2s,4s",
+ getNodeList("storage:10", 7, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,8,0,9,2,1s,4s",
+ getNodeList("storage:10", 8, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,8,0,9,2,1,4s",
+ getNodeList("storage:10", 9, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,8,0,9,2,1,4",
+ getNodeList("storage:10", 10, "0,1,2,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,9s,8s,7s,6s,5s,4s,2s,1s,0s",
+ getNodeList("storage:10", 1, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,9s,8s,7s,6s,4s,2s,1s,0s",
+ getNodeList("storage:10", 2, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,7,9s,8s,6s,4s,2s,1s,0s",
+ getNodeList("storage:10", 3, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,7,6,9s,8s,4s,2s,1s,0s",
+ getNodeList("storage:10", 4, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,7,6,8,9s,4s,2s,1s,0s",
+ getNodeList("storage:10", 5, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,7,6,8,0,9s,4s,2s,1s",
+ getNodeList("storage:10", 6, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,7,6,8,0,9,4s,2s,1s",
+ getNodeList("storage:10", 7, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,7,6,8,0,9,2,4s,1s",
+ getNodeList("storage:10", 8, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,7,6,8,0,9,2,1,4s",
+ getNodeList("storage:10", 9, "9,8,7,6,5,4,3,2,1,0"));
+ EXPECT_EQ("3,5,7,6,8,0,9,2,1,4",
+ getNodeList("storage:10", 10, "9,8,7,6,5,4,3,2,1,0"));
// Trusted copies can be source-only if they are in the non-ideal node set.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,1s,2s,4s"),
- getNodeList("storage:10", 7, "0,1t,2t,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,8,0,9,1s,2s,4s",
+ getNodeList("storage:10", 7, "0,1t,2t,3,4,5,6,7,8,9"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6,8,0,9,1s,2s,4s"),
- getNodeList("storage:10", 7, "0,1,2t,3,4,5,6,7,8,9"));
+ EXPECT_EQ("3,5,7,6,8,0,9,1s,2s,4s",
+ getNodeList("storage:10", 7, "0,1,2t,3,4,5,6,7,8,9"));
// Retired nodes are not in ideal state
// Ideal: 5,7
- CPPUNIT_ASSERT_EQUAL(
- std::string("0,2,3s"),
- getNodeList("storage:10 .3.s:r", 2, "0,2,3"));
+ EXPECT_EQ("0,2,3s",
+ getNodeList("storage:10 .3.s:r", 2, "0,2,3"));
// Ideal: 5,7,6
- CPPUNIT_ASSERT_EQUAL(
- std::string("0,2,3"),
- getNodeList("storage:10 .3.s:r", 3, "0,2,3"));
+ EXPECT_EQ("0,2,3",
+ getNodeList("storage:10 .3.s:r", 3, "0,2,3"));
}
-void
-MergeOperationTest::doNotRemoveCopiesWithPendingMessages() {
+TEST_F(MergeOperationTest, do_not_remove_copies_with_pending_messages) {
document::BucketId bucket(16, 1);
getClock().setAbsoluteTimeInSeconds(10);
@@ -318,20 +243,20 @@ MergeOperationTest::doNotRemoveCopiesWithPendingMessages() {
"cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
"reasons to start: ) => 0");
- CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+ ASSERT_EQ(merge, _sender.getLastCommand(true));
// Suddenly a wild operation appears to the source only copy!
// Removes are blocked by all and any operation types, so can just choose
// at will.
- api::StorageMessage::SP msg(
- new api::SetBucketStateCommand(makeDocumentBucket(bucket), api::SetBucketStateCommand::ACTIVE));
+ auto msg = std::make_shared<api::SetBucketStateCommand>(
+ makeDocumentBucket(bucket), api::SetBucketStateCommand::ACTIVE);
msg->setAddress(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
_pendingTracker->insert(msg);
sendReply(op);
// Should not be a remove here!
- CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
- CPPUNIT_ASSERT(!op.ok());
+ ASSERT_EQ(merge, _sender.getLastCommand(true));
+ EXPECT_FALSE(op.ok());
}
/*
@@ -359,9 +284,7 @@ MergeOperationTest::doNotRemoveCopiesWithPendingMessages() {
* should be an uncommon edge case and it's arguably better than to never
* activate the ideal replicas at all.
*/
-void
-MergeOperationTest::allow_deleting_active_source_only_replica()
-{
+TEST_F(MergeOperationTest, allow_deleting_active_source_only_replica) {
getClock().setAbsoluteTimeInSeconds(10);
addNodesToBucketDB(document::BucketId(16, 1),
@@ -379,111 +302,92 @@ MergeOperationTest::allow_deleting_active_source_only_replica()
"MergeBucketCommand(BucketId(0x4000000000000001), to time "
"10000000, cluster state version: 0, nodes: [0, 2, 1 "
"(source only)], chain: [], reasons to start: ) => 0");
- CPPUNIT_ASSERT_EQUAL(merge, _sender.getLastCommand(true));
+ ASSERT_EQ(merge, _sender.getLastCommand(true));
sendReply(op);
- CPPUNIT_ASSERT_EQUAL(
- std::string("DeleteBucketCommand(BucketId(0x4000000000000001)) "
- "Reasons to start: => 1"),
- _sender.getLastCommand(true));
+ ASSERT_EQ("DeleteBucketCommand(BucketId(0x4000000000000001)) "
+ "Reasons to start: => 1",
+ _sender.getLastCommand(true));
}
-void
-MergeOperationTest::testMarkRedundantTrustedCopiesAsSourceOnly()
-{
+TEST_F(MergeOperationTest, MarkRedundantTrustedCopiesAsSourceOnly) {
// This test uses the same distribution as testGenerateNodeList(), i.e.
// an ideal state sequence of [3, 5, 7, 6, 8, 0, 9, 2, 1, 4]
// 3 redundancy, 5 trusted -> 2 trusted source only.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6s,8s"),
- getNodeList("storage:10", 3, "3t,5t,7t,6t,8t"));
+ EXPECT_EQ("3,5,7,6s,8s",
+ getNodeList("storage:10", 3, "3t,5t,7t,6t,8t"));
// 3 redundancy, 4 trusted -> 1 trusted source only.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6s,8s"),
- getNodeList("storage:10", 3, "3t,5t,7t,6t,8"));
+ EXPECT_EQ("3,5,7,6s,8s",
+ getNodeList("storage:10", 3, "3t,5t,7t,6t,8"));
// 3 redundancy, 3 trusted -> 0 trusted source only, 2 non-trusted sources.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6s,8s"),
- getNodeList("storage:10", 3, "3t,5t,7t,6,8"));
+ EXPECT_EQ("3,5,7,6s,8s",
+ getNodeList("storage:10", 3, "3t,5t,7t,6,8"));
// Trusted-ness should not be taken into account when marking nodes as source-only.
// 2 out of 3 ideal replicas trusted.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6s,8s"),
- getNodeList("storage:10", 3, "3t,5t,7,6t,8t"));
+ EXPECT_EQ("3,5,7,6s,8s",
+ getNodeList("storage:10", 3, "3t,5t,7,6t,8t"));
// 1 out of 3 ideal replicas trusted.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6s,8s"),
- getNodeList("storage:10", 3, "3t,5,7,6t,8t"));
+ EXPECT_EQ("3,5,7,6s,8s",
+ getNodeList("storage:10", 3, "3t,5,7,6t,8t"));
// 0 out of 3 ideal replicas trusted.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6s,8s"),
- getNodeList("storage:10", 3, "3,5,7,6t,8t"));
+ EXPECT_EQ("3,5,7,6s,8s",
+ getNodeList("storage:10", 3, "3,5,7,6t,8t"));
// #redundancy of trusted, but none are ideal. Non-ideal trusted may be
// marked as source only.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6s,8s,0s,9s"),
- getNodeList("storage:10", 3, "3,5,7,6,8t,0t,9t"));
+ EXPECT_EQ("3,5,7,6s,8s,0s,9s",
+ getNodeList("storage:10", 3, "3,5,7,6,8t,0t,9t"));
// Allow for removing excess trusted, non-ideal copies.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6s,8s,0s,9s"),
- getNodeList("storage:10", 3, "3,5,7,6t,8t,0t,9t"));
+ EXPECT_EQ("3,5,7,6s,8s,0s,9s",
+ getNodeList("storage:10", 3, "3,5,7,6t,8t,0t,9t"));
}
-void
-MergeOperationTest::onlyMarkRedundantRetiredReplicasAsSourceOnly()
-{
+TEST_F(MergeOperationTest, only_mark_redundant_retired_replicas_as_source_only) {
// No nodes in ideal state and all nodes are retired. With redundancy of 2
// we can only mark the last replica in the DB as source-only. Retired
// nodes are meant as source-only due to being migrated away from, but
// source-only nodes will have their replica removed after a successful
// merge, which we cannot allow to happen here.
- CPPUNIT_ASSERT_EQUAL(
- std::string("1,0,2s"),
- getNodeList("storage:3 .0.s:r .1.s:r .2.s:r", 2, "1,0,2"));
+ EXPECT_EQ("1,0,2s",
+ getNodeList("storage:3 .0.s:r .1.s:r .2.s:r", 2, "1,0,2"));
}
-void MergeOperationTest::mark_post_merge_redundant_replicas_source_only() {
+TEST_F(MergeOperationTest, mark_post_merge_redundant_replicas_source_only) {
// Ideal state sequence is [3, 5, 7, 6, 8, 0, 9, 2, 1, 4]
// Retired node 7 is not part of the #redundancy ideal state and should be moved
// to node 6. Once the merge is done we'll end up with too many replicas unless
// we allow marking the to-be-moved replica as source only.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,6,7s"),
- getNodeList("storage:10 .7.s:r", 3, "3t,5t,7t,6"));
+ EXPECT_EQ("3,5,6,7s",
+ getNodeList("storage:10 .7.s:r", 3, "3t,5t,7t,6"));
// Should be allowed to mark as source only even if retired replica is the
// only trusted replica at the time the merge starts.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,6,7s"),
- getNodeList("storage:10 .7.s:r", 3, "3,5,7t,6"));
+ EXPECT_EQ("3,5,6,7s",
+ getNodeList("storage:10 .7.s:r", 3, "3,5,7t,6"));
// This extends to multiple retired nodes.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,6,8,5s,7s"),
- getNodeList("storage:10 .5.s:r .7.s:r", 3, "3t,5t,7t,6,8"));
+ EXPECT_EQ("3,6,8,5s,7s",
+ getNodeList("storage:10 .5.s:r .7.s:r", 3, "3t,5t,7t,6,8"));
// If number of post-merge ideal nodes is lower than desired redundancy, don't
// mark any as source only.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6"),
- getNodeList("storage:10", 5, "3,5,7,6"));
+ EXPECT_EQ("3,5,7,6",
+ getNodeList("storage:10", 5, "3,5,7,6"));
// Same applies to when post-merge ideal nodes is _equal_ to desired redundancy.
- CPPUNIT_ASSERT_EQUAL(
- std::string("3,5,7,6"),
- getNodeList("storage:10", 4, "3,5,7,6"));
+ EXPECT_EQ("3,5,7,6",
+ getNodeList("storage:10", 4, "3,5,7,6"));
}
-void MergeOperationTest::merge_operation_is_blocked_by_any_busy_target_node() {
+TEST_F(MergeOperationTest, merge_operation_is_blocked_by_any_busy_target_node) {
getClock().setAbsoluteTimeInSeconds(10);
addNodesToBucketDB(document::BucketId(16, 1), "0=10/1/1/t,1=20/1/1,2=10/1/1/t");
enableDistributorClusterState("distributor:1 storage:3");
@@ -492,21 +396,21 @@ void MergeOperationTest::merge_operation_is_blocked_by_any_busy_target_node() {
// Should not block on nodes _not_ included in operation node set
_pendingTracker->getNodeInfo().setBusy(3, std::chrono::seconds(10));
- CPPUNIT_ASSERT(!op.isBlocked(*_pendingTracker));
+ EXPECT_FALSE(op.isBlocked(*_pendingTracker));
// Node 1 is included in operation node set and should cause a block
_pendingTracker->getNodeInfo().setBusy(0, std::chrono::seconds(10));
- CPPUNIT_ASSERT(op.isBlocked(*_pendingTracker));
+ EXPECT_TRUE(op.isBlocked(*_pendingTracker));
getClock().addSecondsToTime(11);
- CPPUNIT_ASSERT(!op.isBlocked(*_pendingTracker)); // No longer busy
+ EXPECT_FALSE(op.isBlocked(*_pendingTracker)); // No longer busy
// Should block on other operation nodes than the first listed as well
_pendingTracker->getNodeInfo().setBusy(1, std::chrono::seconds(10));
- CPPUNIT_ASSERT(op.isBlocked(*_pendingTracker));
+ EXPECT_TRUE(op.isBlocked(*_pendingTracker));
}
-void MergeOperationTest::missing_replica_is_included_in_limited_node_list() {
+TEST_F(MergeOperationTest, missing_replica_is_included_in_limited_node_list) {
setupDistributor(Redundancy(4), NodeCount(4), "distributor:1 storage:4");
getClock().setAbsoluteTimeInSeconds(10);
addNodesToBucketDB(document::BucketId(16, 1), "1=0/0/0/t,2=0/0/0/t,3=0/0/0/t");
@@ -516,12 +420,10 @@ void MergeOperationTest::missing_replica_is_included_in_limited_node_list() {
op.start(_sender, framework::MilliSecTime(0));
// Must include missing node 0 and not just 2 existing replicas
- CPPUNIT_ASSERT_EQUAL(
- std::string("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
- "cluster state version: 0, nodes: [0, 1], chain: [], "
- "reasons to start: ) => 0"),
- _sender.getLastCommand(true));
+ EXPECT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
+ "cluster state version: 0, nodes: [0, 1], chain: [], "
+ "reasons to start: ) => 0",
+ _sender.getLastCommand(true));
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/messagesenderstub.h b/storage/src/tests/distributor/messagesenderstub.h
deleted file mode 100644
index 1b526813ef7..00000000000
--- a/storage/src/tests/distributor/messagesenderstub.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-
-#include <vespa/storage/distributor/distributormessagesender.h>
-#include <cassert>
-#include <vector>
-#include <string>
-
-namespace storage {
-
-struct MessageSenderStub : distributor::DistributorMessageSender
-{
- std::vector<std::shared_ptr<api::StorageCommand> > commands;
- std::vector<std::shared_ptr<api::StorageReply> > replies;
-
- MessageSenderStub();
- ~MessageSenderStub();
-
- void clear() {
- commands.clear();
- replies.clear();
- }
-
- void sendCommand(const std::shared_ptr<api::StorageCommand>& cmd) override {
- commands.push_back(cmd);
- }
-
- void sendReply(const std::shared_ptr<api::StorageReply>& reply) override {
- replies.push_back(reply);
- }
-
- std::string getLastCommand(bool verbose = true) const;
-
- std::string getCommands(bool includeAddress = false,
- bool verbose = false,
- uint32_t fromIndex = 0) const;
-
- std::string getLastReply(bool verbose = true) const;
-
- std::string getReplies(bool includeAddress = false,
- bool verbose = false) const;
-
- std::string dumpMessage(const api::StorageMessage& msg,
- bool includeAddress,
- bool verbose) const;
-
- int getDistributorIndex() const override {
- return 0;
- }
-
- const std::string& getClusterName() const override {
- return _clusterName;
- }
-
- const distributor::PendingMessageTracker& getPendingMessageTracker() const override {
- assert(_pendingMessageTracker);
- return *_pendingMessageTracker;
- }
-
- void setPendingMessageTracker(distributor::PendingMessageTracker& tracker) {
- _pendingMessageTracker = &tracker;
- }
-private:
- std::string _clusterName;
- distributor::PendingMessageTracker* _pendingMessageTracker;
-};
-
-}
diff --git a/storage/src/tests/distributor/nodeinfotest.cpp b/storage/src/tests/distributor/nodeinfotest.cpp
index 0363f25831a..d729c686fa3 100644
--- a/storage/src/tests/distributor/nodeinfotest.cpp
+++ b/storage/src/tests/distributor/nodeinfotest.cpp
@@ -1,46 +1,17 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <iomanip>
-#include <iostream>
-#include <memory>
-#include <vespa/storageapi/message/persistence.h>
-#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
-#include <vespa/storage/distributor/bucketdbupdater.h>
-#include <vespa/storageapi/message/bucket.h>
-#include <vespa/storageapi/message/state.h>
-#include <vespa/document/fieldvalue/document.h>
-#include <vespa/vdslib/state/random.h>
-#include <vespa/storageapi/message/bucket.h>
-#include <vespa/storage/distributor/pendingclusterstate.h>
-#include <vespa/vespalib/text/stringtokenizer.h>
#include <vespa/storage/distributor/nodeinfo.h>
+#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
+#include <vespa/vespalib/gtest/gtest.h>
-#include <iostream>
-#include <fstream>
-#include <string>
-
-namespace storage {
-namespace distributor {
-
-class NodeInfoTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(NodeInfoTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST_SUITE_END();
-public:
- void testSimple();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(NodeInfoTest);
+namespace storage::distributor {
-void
-NodeInfoTest::testSimple()
-{
+TEST(NodeInfoTest, simple) {
framework::defaultimplementation::FakeClock clock;
NodeInfo info(clock);
- CPPUNIT_ASSERT_EQUAL(0, (int)info.getPendingCount(3));
- CPPUNIT_ASSERT_EQUAL(0, (int)info.getPendingCount(9));
+ EXPECT_EQ(0, info.getPendingCount(3));
+ EXPECT_EQ(0, info.getPendingCount(9));
info.incPending(3);
info.incPending(3);
@@ -52,10 +23,10 @@ NodeInfoTest::testSimple()
info.incPending(4);
info.decPending(3);
- CPPUNIT_ASSERT_EQUAL(2, (int)info.getPendingCount(3));
- CPPUNIT_ASSERT_EQUAL(1, (int)info.getPendingCount(4));
- CPPUNIT_ASSERT_EQUAL(1, (int)info.getPendingCount(7));
- CPPUNIT_ASSERT_EQUAL(0, (int)info.getPendingCount(5));
+ EXPECT_EQ(2, info.getPendingCount(3));
+ EXPECT_EQ(1, info.getPendingCount(4));
+ EXPECT_EQ(1, info.getPendingCount(7));
+ EXPECT_EQ(0, info.getPendingCount(5));
info.setBusy(5, std::chrono::seconds(60));
clock.addSecondsToTime(10);
@@ -63,19 +34,17 @@ NodeInfoTest::testSimple()
clock.addSecondsToTime(20);
info.setBusy(42, std::chrono::seconds(60));
- CPPUNIT_ASSERT_EQUAL(true, info.isBusy(5));
- CPPUNIT_ASSERT_EQUAL(true, info.isBusy(1));
- CPPUNIT_ASSERT_EQUAL(true, info.isBusy(42));
- CPPUNIT_ASSERT_EQUAL(false, info.isBusy(7));
+ EXPECT_TRUE(info.isBusy(5));
+ EXPECT_TRUE(info.isBusy(1));
+ EXPECT_TRUE(info.isBusy(42));
+ EXPECT_FALSE(info.isBusy(7));
clock.addSecondsToTime(42);
- CPPUNIT_ASSERT_EQUAL(false, info.isBusy(5));
- CPPUNIT_ASSERT_EQUAL(false, info.isBusy(1));
- CPPUNIT_ASSERT_EQUAL(true, info.isBusy(42));
- CPPUNIT_ASSERT_EQUAL(false, info.isBusy(7));
-
-}
+ EXPECT_FALSE(info.isBusy(5));
+ EXPECT_FALSE(info.isBusy(1));
+ EXPECT_TRUE(info.isBusy(42));
+ EXPECT_FALSE(info.isBusy(7));
}
diff --git a/storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp b/storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp
index ae0d0bc4478..58593799fc4 100644
--- a/storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp
+++ b/storage/src/tests/distributor/nodemaintenancestatstrackertest.cpp
@@ -1,100 +1,77 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/document/test/make_bucket_space.h>
-#include <vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace storage::distributor {
using document::test::makeBucketSpace;
using document::BucketSpace;
+using namespace ::testing;
-class NodeMaintenanceStatsTrackerTest : public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE(NodeMaintenanceStatsTrackerTest);
- CPPUNIT_TEST(emptyStatsInstancesAreEqual);
- CPPUNIT_TEST(statsFieldsAffectEqualityComparison);
- CPPUNIT_TEST(requestingNonExistingNodeGivesEmptyStats);
- CPPUNIT_TEST(statsAreTrackedPerNode);
- CPPUNIT_TEST(statsAreTrackedPerBucketSpace);
- CPPUNIT_TEST_SUITE_END();
-
- void emptyStatsInstancesAreEqual();
- void statsFieldsAffectEqualityComparison();
- void requestingNonExistingNodeGivesEmptyStats();
- void statsAreTrackedPerNode();
- void statsAreTrackedPerBucketSpace();
+struct NodeMaintenanceStatsTrackerTest : Test {
void assertEmptyBucketStats(BucketSpace bucketSpace, const NodeMaintenanceStatsTracker& tracker);
void assertBucketStats(uint64_t expMovingOut, uint64_t expSyncing, uint64_t expCopyingIn, uint64_t expCopyingOut, uint64_t expTotal,
BucketSpace bucketSpace, const NodeMaintenanceStatsTracker& tracker);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(NodeMaintenanceStatsTrackerTest);
-
-void
-NodeMaintenanceStatsTrackerTest::emptyStatsInstancesAreEqual()
-{
+TEST_F(NodeMaintenanceStatsTrackerTest, empty_stats_instances_are_equal) {
NodeMaintenanceStats a;
NodeMaintenanceStats b;
- CPPUNIT_ASSERT_EQUAL(a, b);
+ EXPECT_EQ(a, b);
}
-void
-NodeMaintenanceStatsTrackerTest::statsFieldsAffectEqualityComparison()
-{
+TEST_F(NodeMaintenanceStatsTrackerTest, stats_fields_affect_equality_comparison) {
NodeMaintenanceStats a;
NodeMaintenanceStats b;
a.movingOut = 1;
- CPPUNIT_ASSERT(!(a == b));
+ EXPECT_NE(a, b);
b.movingOut = 1;
- CPPUNIT_ASSERT(a == b);
+ EXPECT_EQ(a, b);
a.syncing = 1;
- CPPUNIT_ASSERT(!(a == b));
+ EXPECT_NE(a, b);
b.syncing = 1;
- CPPUNIT_ASSERT(a == b);
+ EXPECT_EQ(a, b);
a.copyingIn = 1;
- CPPUNIT_ASSERT(!(a == b));
+ EXPECT_NE(a, b);
b.copyingIn = 1;
- CPPUNIT_ASSERT(a == b);
+ EXPECT_EQ(a, b);
a.copyingOut = 1;
- CPPUNIT_ASSERT(!(a == b));
+ EXPECT_NE(a, b);
b.copyingOut = 1;
- CPPUNIT_ASSERT(a == b);
+ EXPECT_EQ(a, b);
}
-void
-NodeMaintenanceStatsTrackerTest::requestingNonExistingNodeGivesEmptyStats()
-{
+TEST_F(NodeMaintenanceStatsTrackerTest, requesting_non_existing_node_gives_empty_stats) {
NodeMaintenanceStatsTracker tracker;
NodeMaintenanceStats wanted;
- CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(0, makeBucketSpace()));
+ EXPECT_EQ(wanted, tracker.forNode(0, makeBucketSpace()));
}
-void
-NodeMaintenanceStatsTrackerTest::statsAreTrackedPerNode()
-{
+TEST_F(NodeMaintenanceStatsTrackerTest, stats_are_tracked_per_node){
NodeMaintenanceStatsTracker tracker;
NodeMaintenanceStats wanted;
BucketSpace space(1);
tracker.incMovingOut(0, space);
wanted.movingOut = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(0, space));
+ EXPECT_EQ(wanted, tracker.forNode(0, space));
wanted.movingOut = 0;
- CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(1, space));
+ EXPECT_EQ(wanted, tracker.forNode(1, space));
tracker.incMovingOut(0, space);
wanted.movingOut = 2;
- CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(0, space));
+ EXPECT_EQ(wanted, tracker.forNode(0, space));
tracker.incMovingOut(1, space);
wanted.movingOut = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(1, space));
+ EXPECT_EQ(wanted, tracker.forNode(1, space));
tracker.incSyncing(1, space);
tracker.incCopyingIn(1, space);
@@ -102,12 +79,10 @@ NodeMaintenanceStatsTrackerTest::statsAreTrackedPerNode()
wanted.syncing = 1;
wanted.copyingIn = 1;
wanted.copyingOut = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, tracker.forNode(1, space));
+ EXPECT_EQ(wanted, tracker.forNode(1, space));
}
-void
-NodeMaintenanceStatsTrackerTest::statsAreTrackedPerBucketSpace()
-{
+TEST_F(NodeMaintenanceStatsTrackerTest, statsAreTrackedPerBucketSpace) {
NodeMaintenanceStatsTracker tracker;
BucketSpace fooSpace(3);
BucketSpace barSpace(5);
@@ -143,7 +118,7 @@ NodeMaintenanceStatsTrackerTest::assertEmptyBucketStats(BucketSpace bucketSpace,
const NodeMaintenanceStatsTracker& tracker)
{
NodeMaintenanceStats expStats;
- CPPUNIT_ASSERT_EQUAL(expStats, tracker.forNode(0, bucketSpace));
+ EXPECT_EQ(expStats, tracker.forNode(0, bucketSpace));
}
void
@@ -156,7 +131,7 @@ NodeMaintenanceStatsTrackerTest::assertBucketStats(uint64_t expMovingOut,
const NodeMaintenanceStatsTracker& tracker)
{
NodeMaintenanceStats expStats(expMovingOut, expSyncing, expCopyingIn, expCopyingOut, expTotal);
- CPPUNIT_ASSERT_EQUAL(expStats, tracker.forNode(0, bucketSpace));
+ EXPECT_EQ(expStats, tracker.forNode(0, bucketSpace));
}
}
diff --git a/storage/src/tests/distributor/operation_sequencer_test.cpp b/storage/src/tests/distributor/operation_sequencer_test.cpp
index af9112aaeec..b3674ee2126 100644
--- a/storage/src/tests/distributor/operation_sequencer_test.cpp
+++ b/storage/src/tests/distributor/operation_sequencer_test.cpp
@@ -1,62 +1,46 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <vespa/storage/distributor/operation_sequencer.h>
#include <vespa/document/base/documentid.h>
+#include <vespa/storage/distributor/operation_sequencer.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace storage::distributor {
using document::DocumentId;
-class OperationSequencerTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(OperationSequencerTest);
- CPPUNIT_TEST(can_get_sequencing_handle_for_id_without_existing_handle);
- CPPUNIT_TEST(can_get_sequencing_handle_for_different_ids);
- CPPUNIT_TEST(cannot_get_sequencing_handle_for_id_with_existing_handle);
- CPPUNIT_TEST(releasing_handle_allows_for_getting_new_handles_for_id);
- CPPUNIT_TEST_SUITE_END();
-
- void can_get_sequencing_handle_for_id_without_existing_handle();
- void can_get_sequencing_handle_for_different_ids();
- void cannot_get_sequencing_handle_for_id_with_existing_handle();
- void releasing_handle_allows_for_getting_new_handles_for_id();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(OperationSequencerTest);
-
-void OperationSequencerTest::can_get_sequencing_handle_for_id_without_existing_handle() {
+TEST(OperationSequencerTest, can_get_sequencing_handle_for_id_without_existing_handle) {
OperationSequencer sequencer;
auto handle = sequencer.try_acquire(DocumentId("id:foo:test::abcd"));
- CPPUNIT_ASSERT(handle.valid());
+ EXPECT_TRUE(handle.valid());
}
-void OperationSequencerTest::cannot_get_sequencing_handle_for_id_with_existing_handle() {
+TEST(OperationSequencerTest, cannot_get_sequencing_handle_for_id_with_existing_handle) {
OperationSequencer sequencer;
auto first_handle = sequencer.try_acquire(DocumentId("id:foo:test::abcd"));
auto second_handle = sequencer.try_acquire(DocumentId("id:foo:test::abcd"));
- CPPUNIT_ASSERT(! second_handle.valid());
+ EXPECT_FALSE(second_handle.valid());
}
-void OperationSequencerTest::can_get_sequencing_handle_for_different_ids() {
+TEST(OperationSequencerTest, can_get_sequencing_handle_for_different_ids) {
OperationSequencer sequencer;
auto first_handle = sequencer.try_acquire(DocumentId("id:foo:test::abcd"));
auto second_handle = sequencer.try_acquire(DocumentId("id:foo:test::efgh"));
- CPPUNIT_ASSERT(first_handle.valid());
- CPPUNIT_ASSERT(second_handle.valid());
+ EXPECT_TRUE(first_handle.valid());
+ EXPECT_TRUE(second_handle.valid());
}
-void OperationSequencerTest::releasing_handle_allows_for_getting_new_handles_for_id() {
+TEST(OperationSequencerTest, releasing_handle_allows_for_getting_new_handles_for_id) {
OperationSequencer sequencer;
auto first_handle = sequencer.try_acquire(DocumentId("id:foo:test::abcd"));
// Explicit release
first_handle.release();
{
auto second_handle = sequencer.try_acquire(DocumentId("id:foo:test::abcd"));
- CPPUNIT_ASSERT(second_handle.valid());
+ EXPECT_TRUE(second_handle.valid());
// Implicit release by scope exit
}
auto third_handle = sequencer.try_acquire(DocumentId("id:foo:test::abcd"));
- CPPUNIT_ASSERT(third_handle.valid());
+ EXPECT_TRUE(third_handle.valid());
}
} // storage::distributor
diff --git a/storage/src/tests/distributor/operationtargetresolvertest.cpp b/storage/src/tests/distributor/operationtargetresolvertest.cpp
index 17dbf007c63..da0206cf0a4 100644
--- a/storage/src/tests/distributor/operationtargetresolvertest.cpp
+++ b/storage/src/tests/distributor/operationtargetresolvertest.cpp
@@ -9,139 +9,104 @@
#include <vespa/storageapi/message/persistence.h>
#include <tests/distributor/distributortestutil.h>
#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/storage/distributor/distributor_bucket_space_repo.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/operationtargetresolverimpl.h>
#include <vespa/storage/distributor/externaloperationhandler.h>
#include <vespa/config/helper/configgetter.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
using document::BucketId;
using document::test::makeBucketSpace;
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-namespace distributor {
-
-struct OperationTargetResolverTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
+namespace storage::distributor {
+struct OperationTargetResolverTest : Test, DistributorTestUtil {
std::shared_ptr<const document::DocumentTypeRepo> _repo;
const document::DocumentType* _html_type;
std::unique_ptr<Operation> op;
- void testSimple();
- void testMultipleNodes();
- void testChooseIdealStateWhenManyCopies();
- void testChooseHighestSplitBucket();
- void testChooseHighestSplitBucketPerNode();
- void testChooseHighestSplitBucketWithTrusted();
- void testInconsistentBucketsAreNotExplicitlyCreated();
- void testNoTrustedOrIdealStateCopyAvailable();
- void testCreateMissingCopies();
- void testNoExistingCopies();
- void testCountMaintenanceNodesAsDown();
- void testResolvingDoesNotMutateDatabase();
- void testTrustedOverIdealState();
-
BucketInstanceList getInstances(const BucketId& bid,
bool stripToRedundancy);
- void setUp() override {
+ void SetUp() override {
_repo.reset(new document::DocumentTypeRepo(
*config::ConfigGetter<document::DocumenttypesConfig>::getConfig(
"config-doctypes",
- config::FileSpec(TEST_PATH("config-doctypes.cfg")))));
+ config::FileSpec("../config-doctypes.cfg"))));
_html_type = _repo->getDocumentType("text/html");
createLinks();
};
- void tearDown() override {
+ void TearDown() override {
close();
}
-
- CPPUNIT_TEST_SUITE(OperationTargetResolverTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testMultipleNodes);
- CPPUNIT_TEST(testChooseIdealStateWhenManyCopies);
- CPPUNIT_TEST(testChooseHighestSplitBucket);
- CPPUNIT_TEST(testChooseHighestSplitBucketPerNode);
- CPPUNIT_TEST(testChooseHighestSplitBucketWithTrusted);
- CPPUNIT_TEST(testNoTrustedOrIdealStateCopyAvailable);
- CPPUNIT_TEST(testInconsistentBucketsAreNotExplicitlyCreated);
- CPPUNIT_TEST(testCreateMissingCopies);
- CPPUNIT_TEST(testNoExistingCopies);
- CPPUNIT_TEST(testCountMaintenanceNodesAsDown);
- CPPUNIT_TEST(testResolvingDoesNotMutateDatabase);
- CPPUNIT_TEST(testTrustedOverIdealState);
- CPPUNIT_TEST_SUITE_END();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(OperationTargetResolverTest);
namespace {
- // Create assertion that makes it easy to write tests, and report correct
- // line for problem at command line
-#define ASSERT_THAT(id) \
- { \
- struct MyAsserter : public Asserter { \
- void assertEqualMsg(std::string t1, OperationTargetList t2, \
- OperationTargetList t3) override { \
- CPPUNIT_ASSERT_EQUAL_MSG(t1, t2, t3); \
- } \
- }; \
- _asserters.push_back(new MyAsserter); \
- } \
- TestTargets::createTest(id, *this, *_asserters.back())
-
- struct Asserter {
- virtual ~Asserter() {}
- virtual void assertEqualMsg(std::string t1,
- OperationTargetList t2,
- OperationTargetList t3) = 0;
- };
- std::vector<Asserter*> _asserters;
- struct TestTargets {
- const BucketId& _id;
- OperationTargetList _expected;
- OperationTargetResolverTest& _test;
- Asserter& _asserter;
-
- TestTargets(const BucketId& id,
- OperationTargetResolverTest& test,
- Asserter& asserter)
- : _id(id), _test(test), _asserter(asserter) {}
-
- ~TestTargets() {
- BucketInstanceList result(_test.getInstances(_id, true));
- BucketInstanceList all(_test.getInstances(_id, false));
- _asserter.assertEqualMsg(
- all.toString(), _expected, result.createTargets(makeBucketSpace()));
- delete _asserters.back();
- _asserters.pop_back();
- }
-
- TestTargets& sendsTo(const BucketId& id, uint16_t node) {
- _expected.push_back(OperationTarget(
- makeDocumentBucket(id), lib::Node(lib::NodeType::STORAGE, node), false));
- return *this;
- }
- TestTargets& createsAt(const BucketId& id, uint16_t node) {
- _expected.push_back(OperationTarget(
- makeDocumentBucket(id), lib::Node(lib::NodeType::STORAGE, node), true));
- return *this;
- }
-
- static TestTargets createTest(const BucketId& id,
- OperationTargetResolverTest& test,
- Asserter& asserter)
- {
- return TestTargets(id, test, asserter);
- }
- };
+// Create assertion that makes it easy to write tests, and report correct
+// line for problem at command line
+#define MY_ASSERT_THAT(id) \
+{ \
+ struct MyAsserter : public Asserter { \
+ void assertEqualMsg(std::string t1, OperationTargetList t2, \
+ OperationTargetList t3) override { \
+ ASSERT_EQ(t2, t3) << t1; \
+ } \
+ }; \
+ _asserters.push_back(new MyAsserter); \
+} \
+TestTargets::createTest(id, *this, *_asserters.back())
+
+struct Asserter {
+ virtual ~Asserter() {}
+ virtual void assertEqualMsg(std::string t1,
+ OperationTargetList t2,
+ OperationTargetList t3) = 0;
+};
+std::vector<Asserter*> _asserters;
+struct TestTargets {
+ const BucketId& _id;
+ OperationTargetList _expected;
+ OperationTargetResolverTest& _test;
+ Asserter& _asserter;
+
+ TestTargets(const BucketId& id,
+ OperationTargetResolverTest& test,
+ Asserter& asserter)
+ : _id(id), _test(test), _asserter(asserter) {}
+
+ ~TestTargets() {
+ BucketInstanceList result(_test.getInstances(_id, true));
+ BucketInstanceList all(_test.getInstances(_id, false));
+ _asserter.assertEqualMsg(
+ all.toString(), _expected, result.createTargets(makeBucketSpace()));
+ delete _asserters.back();
+ _asserters.pop_back();
+ }
+
+ TestTargets& sendsTo(const BucketId& id, uint16_t node) {
+ _expected.push_back(OperationTarget(
+ makeDocumentBucket(id), lib::Node(lib::NodeType::STORAGE, node), false));
+ return *this;
+ }
+ TestTargets& createsAt(const BucketId& id, uint16_t node) {
+ _expected.push_back(OperationTarget(
+ makeDocumentBucket(id), lib::Node(lib::NodeType::STORAGE, node), true));
+ return *this;
+ }
+ static TestTargets createTest(const BucketId& id,
+ OperationTargetResolverTest& test,
+ Asserter& asserter)
+ {
+ return TestTargets(id, test, asserter);
+ }
+};
} // anonymous
@@ -168,19 +133,15 @@ OperationTargetResolverTest::getInstances(const BucketId& id,
/*
* Test basic case with no inconsistencies
*/
-void
-OperationTargetResolverTest::testSimple()
-{
+TEST_F(OperationTargetResolverTest, simple) {
setupDistributor(2, 2, "storage:2 distributor:1");
addNodesToBucketDB(BucketId(16, 0), "0=0,1=0");
-
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 1)
- .sendsTo(BucketId(16, 0), 0);
+
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 1)
+ .sendsTo(BucketId(16, 0), 0);
}
-void
-OperationTargetResolverTest::testMultipleNodes()
-{
+TEST_F(OperationTargetResolverTest, multiple_nodes) {
setupDistributor(1, 2, "storage:2 distributor:1");
auto &bucketSpaceRepo(getExternalOperationHandler().getBucketSpaceRepo());
@@ -194,66 +155,54 @@ OperationTargetResolverTest::testMultipleNodes()
lib::IdealNodeList idealNodes(
idealNodeCalc.getIdealStorageNodes(BucketId(16, i)));
uint16_t expectedNode = idealNodes[0].getIndex();
- ASSERT_THAT(BucketId(32, i)).sendsTo(BucketId(16, i), expectedNode);
+ MY_ASSERT_THAT(BucketId(32, i)).sendsTo(BucketId(16, i), expectedNode);
}
}
-void
-OperationTargetResolverTest::testChooseIdealStateWhenManyCopies()
-{
+TEST_F(OperationTargetResolverTest, choose_ideal_state_when_many_copies) {
setupDistributor(2, 4, "storage:4 distributor:1");
addNodesToBucketDB(BucketId(16, 0), "0=0,1=0,2=0,3=0"); // ideal nodes: 1, 3
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 1)
- .sendsTo(BucketId(16, 0), 3);
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 1)
+ .sendsTo(BucketId(16, 0), 3);
}
-void
-OperationTargetResolverTest::testTrustedOverIdealState()
-{
+TEST_F(OperationTargetResolverTest, trusted_over_ideal_state) {
setupDistributor(2, 4, "storage:4 distributor:1");
addNodesToBucketDB(BucketId(16, 0), "0=0/0/0/t,1=0,2=0/0/0/t,3=0");
- // ideal nodes: 1, 3
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 0)
- .sendsTo(BucketId(16, 0), 2);
+ // ideal nodes: 1, 3
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(16, 0), 0)
+ .sendsTo(BucketId(16, 0), 2);
}
-void
-OperationTargetResolverTest::testChooseHighestSplitBucket()
-{
+TEST_F(OperationTargetResolverTest, choose_highest_split_bucket) {
setupDistributor(2, 2, "storage:2 distributor:1");
// 0, 1 are both in ideal state for both buckets.
addNodesToBucketDB(BucketId(16, 0), "0=0,1=0");
addNodesToBucketDB(BucketId(17, 0), "0=0,1=0");
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 1)
- .sendsTo(BucketId(17, 0), 0);
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 1)
+ .sendsTo(BucketId(17, 0), 0);
}
-void
-OperationTargetResolverTest::testChooseHighestSplitBucketPerNode()
-{
+TEST_F(OperationTargetResolverTest, choose_highest_split_bucket_per_node) {
setupDistributor(2, 2, "storage:2 distributor:1");
addNodesToBucketDB(BucketId(16, 0), "1=0");
addNodesToBucketDB(BucketId(17, 0), "0=0");
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 0)
- .sendsTo(BucketId(16, 0), 1);
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 0)
+ .sendsTo(BucketId(16, 0), 1);
}
-void
-OperationTargetResolverTest::testChooseHighestSplitBucketWithTrusted()
-{
+TEST_F(OperationTargetResolverTest, choose_highest_split_bucket_with_trusted) {
setupDistributor(2, 2, "storage:2 distributor:1");
// Unfinished split scenario: split done on 0, not on 1.
// Copy on 1 is only remaining for (16, 0), so always trusted.
addNodesToBucketDB(BucketId(16, 0), "1=1/2/3/t");
addNodesToBucketDB(BucketId(17, 0), "0=2/3/4/t");
addNodesToBucketDB(BucketId(17, 1ULL << 16), "0=3/4/5/t");
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 0)
- .sendsTo(BucketId(16, 0), 1);
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(17, 0), 0)
+ .sendsTo(BucketId(16, 0), 1);
}
-void
-OperationTargetResolverTest::testInconsistentBucketsAreNotExplicitlyCreated()
-{
+TEST_F(OperationTargetResolverTest, inconsistent_buckets_are_not_explicitly_created) {
setupDistributor(2, 2, "bits:8 storage:2 distributor:1");
addNodesToBucketDB(BucketId(15, 0), "1=9/9/9/t");
addNodesToBucketDB(BucketId(16, 1 << 15), "0=9/9/9/t");
@@ -263,62 +212,50 @@ OperationTargetResolverTest::testInconsistentBucketsAreNotExplicitlyCreated()
// the inconsistent (15, 0) bucket since it already exists and will be
// split out very soon anyway. This is predominantly to avoid making things
// even worse than they are and to avoid the edge case in bug 7296087.
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(15, 0), 1)
- .createsAt(BucketId(16, 0), 0);
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(15, 0), 1)
+ .createsAt(BucketId(16, 0), 0);
}
-void
-OperationTargetResolverTest::testNoTrustedOrIdealStateCopyAvailable()
-{
+TEST_F(OperationTargetResolverTest, no_trusted_or_ideal_state_copy_available) {
setupDistributor(2, 4, "storage:4 distributor:1");
addNodesToBucketDB(BucketId(16, 0), "0=0,2=0");
addNodesToBucketDB(BucketId(18, 0), "0=0"); // ideal nodes: 1, 3
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(18, 0), 0)
- .sendsTo(BucketId(16, 0), 2);
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(18, 0), 0)
+ .sendsTo(BucketId(16, 0), 2);
}
-void
-OperationTargetResolverTest::testCreateMissingCopies()
-{
+TEST_F(OperationTargetResolverTest, create_missing_copies) {
setupDistributor(4, 10, "storage:10 distributor:1");
addNodesToBucketDB(BucketId(16, 0), "6=0");
addNodesToBucketDB(BucketId(18, 0), "4=0"); // ideal nodes: 6, 8, 7, 1
- ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(18, 0), 4)
- .sendsTo(BucketId(16, 0), 6)
- .createsAt(BucketId(18, 0), 8)
- .createsAt(BucketId(18, 0), 7);
+ MY_ASSERT_THAT(BucketId(32, 0)).sendsTo(BucketId(18, 0), 4)
+ .sendsTo(BucketId(16, 0), 6)
+ .createsAt(BucketId(18, 0), 8)
+ .createsAt(BucketId(18, 0), 7);
}
-void
-OperationTargetResolverTest::testNoExistingCopies()
-{
+TEST_F(OperationTargetResolverTest, no_existing_copies) {
setupDistributor(2, 5, "storage:5 distributor:1");
- ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 1)
- .createsAt(BucketId(16, 0), 3);
+ MY_ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 1)
+ .createsAt(BucketId(16, 0), 3);
}
-void
-OperationTargetResolverTest::testCountMaintenanceNodesAsDown()
-{
+TEST_F(OperationTargetResolverTest, count_maintenance_nodes_as_down) {
setupDistributor(2, 5, "storage:5 .1.s:m distributor:1");
- ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 3)
- .createsAt(BucketId(16, 0), 2);
+ MY_ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 3)
+ .createsAt(BucketId(16, 0), 2);
}
-void
-OperationTargetResolverTest::testResolvingDoesNotMutateDatabase()
-{
+TEST_F(OperationTargetResolverTest, resolving_does_not_mutate_database) {
setupDistributor(2, 5, "storage:5 distributor:1");
- ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 1)
- .createsAt(BucketId(16, 0), 3);
+ MY_ASSERT_THAT(BucketId(32, 0)).createsAt(BucketId(16, 0), 1)
+ .createsAt(BucketId(16, 0), 3);
- CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"),
- dumpBucket(BucketId(0x4000000000000000)));
+ EXPECT_EQ("NONEXISTING", dumpBucket(BucketId(0x4000000000000000)));
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/ownership_transfer_safe_time_point_calculator_test.cpp b/storage/src/tests/distributor/ownership_transfer_safe_time_point_calculator_test.cpp
index 6a3cd1a5537..c47cb862c73 100644
--- a/storage/src/tests/distributor/ownership_transfer_safe_time_point_calculator_test.cpp
+++ b/storage/src/tests/distributor/ownership_transfer_safe_time_point_calculator_test.cpp
@@ -1,7 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/storage/distributor/ownership_transfer_safe_time_point_calculator.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/gtest/gtest.h>
template <typename Clock, typename Duration>
std::ostream& operator<<(std::ostream& os,
@@ -12,21 +12,7 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-namespace storage {
-namespace distributor {
-
-struct OwnershipTransferSafeTimePointCalculatorTest : CppUnit::TestFixture {
- void generated_safe_time_point_rounds_up_to_nearest_second();
- void zero_clock_skew_returns_epoch();
-
- CPPUNIT_TEST_SUITE(OwnershipTransferSafeTimePointCalculatorTest);
- CPPUNIT_TEST(generated_safe_time_point_rounds_up_to_nearest_second);
- CPPUNIT_TEST(zero_clock_skew_returns_epoch);
- CPPUNIT_TEST_SUITE_END();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(OwnershipTransferSafeTimePointCalculatorTest);
-
+namespace storage::distributor {
using CalcType = OwnershipTransferSafeTimePointCalculator;
using Clock = CalcType::Clock;
@@ -34,23 +20,16 @@ using TimePoint = CalcType::TimePoint;
using namespace std::literals::chrono_literals;
-void OwnershipTransferSafeTimePointCalculatorTest::generated_safe_time_point_rounds_up_to_nearest_second() {
- CPPUNIT_ASSERT_EQUAL(TimePoint(6s),
- CalcType(1s).safeTimePoint(TimePoint(4001ms)));
- CPPUNIT_ASSERT_EQUAL(TimePoint(6s),
- CalcType(1s).safeTimePoint(TimePoint(4999ms)));
- CPPUNIT_ASSERT_EQUAL(TimePoint(6s),
- CalcType(1s).safeTimePoint(TimePoint(4000ms)));
- CPPUNIT_ASSERT_EQUAL(TimePoint(7s),
- CalcType(2s).safeTimePoint(TimePoint(4001ms)));
- CPPUNIT_ASSERT_EQUAL(TimePoint(7s),
- CalcType(2s).safeTimePoint(TimePoint(4999ms)));
+TEST(OwnershipTransferSafeTimePointCalculatorTest, generated_safe_time_point_rounds_up_to_nearest_second) {
+ EXPECT_EQ(TimePoint(6s), CalcType(1s).safeTimePoint(TimePoint(4001ms)));
+ EXPECT_EQ(TimePoint(6s), CalcType(1s).safeTimePoint(TimePoint(4999ms)));
+ EXPECT_EQ(TimePoint(6s), CalcType(1s).safeTimePoint(TimePoint(4000ms)));
+ EXPECT_EQ(TimePoint(7s), CalcType(2s).safeTimePoint(TimePoint(4001ms)));
+ EXPECT_EQ(TimePoint(7s), CalcType(2s).safeTimePoint(TimePoint(4999ms)));
}
-void OwnershipTransferSafeTimePointCalculatorTest::zero_clock_skew_returns_epoch() {
- CPPUNIT_ASSERT_EQUAL(TimePoint(0s),
- CalcType(0s).safeTimePoint(TimePoint(4001ms)));
+TEST(OwnershipTransferSafeTimePointCalculatorTest, zero_clock_skew_returns_epoch) {
+ EXPECT_EQ(TimePoint(0s), CalcType(0s).safeTimePoint(TimePoint(4001ms)));
}
}
-}
diff --git a/storage/src/tests/distributor/pendingmessagetrackertest.cpp b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
index 1ded89dc6d3..f79b809de65 100644
--- a/storage/src/tests/distributor/pendingmessagetrackertest.cpp
+++ b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
@@ -8,38 +8,17 @@
#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
#include <tests/common/dummystoragelink.h>
#include <vespa/document/test/make_document_bucket.h>
-#include <vespa/vdslib/state/random.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage::distributor {
using namespace std::chrono_literals;
-class PendingMessageTrackerTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(PendingMessageTrackerTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testMultipleMessages);
- CPPUNIT_TEST(testStartPage);
- CPPUNIT_TEST(testGetPendingMessageTypes);
- CPPUNIT_TEST(testHasPendingMessage);
- CPPUNIT_TEST(testGetAllMessagesForSingleBucket);
- CPPUNIT_TEST(busy_reply_marks_node_as_busy);
- CPPUNIT_TEST(busy_node_duration_can_be_adjusted);
- CPPUNIT_TEST_SUITE_END();
-
-public:
- void testSimple();
- void testMultipleMessages();
- void testStartPage();
- void testGetPendingMessageTypes();
- void testHasPendingMessage();
- void testGetAllMessagesForSingleBucket();
- void busy_reply_marks_node_as_busy();
- void busy_node_duration_can_be_adjusted();
-
-private:
+struct PendingMessageTrackerTest : Test {
void insertMessages(PendingMessageTracker& tracker);
};
@@ -98,30 +77,6 @@ public:
_tracker->reply(*putReply);
}
- std::shared_ptr<api::RemoveCommand> sendRemove(
- const RequestBuilder& builder)
- {
- assignMockedTime(builder.atTime());
- auto remove = createRemoveToNode(builder.toNode());
- _tracker->insert(remove);
- return remove;
- }
-
- void sendRemoveReply(api::RemoveCommand& removeCmd,
- const RequestBuilder& builder)
- {
- assignMockedTime(builder.atTime());
- auto removeReply = removeCmd.makeReply();
- _tracker->reply(*removeReply);
- }
-
- void sendPutAndReplyWithLatency(uint16_t node,
- std::chrono::milliseconds latency)
- {
- auto put = sendPut(RequestBuilder().atTime(1000ms).toNode(node));
- sendPutReply(*put, RequestBuilder().atTime(1000ms + latency));
- }
-
PendingMessageTracker& tracker() { return *_tracker; }
auto& clock() { return _clock; }
@@ -145,10 +100,10 @@ private:
std::shared_ptr<api::PutCommand> createPutToNode(uint16_t node) const {
document::BucketId bucket(16, 1234);
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket),
- createDummyDocumentForBucket(bucket),
- api::Timestamp(123456)));
+ auto cmd = std::make_shared<api::PutCommand>(
+ makeDocumentBucket(bucket),
+ createDummyDocumentForBucket(bucket),
+ api::Timestamp(123456));
cmd->setAddress(makeStorageAddress(node));
return cmd;
}
@@ -157,11 +112,10 @@ private:
uint16_t node) const
{
document::BucketId bucket(16, 1234);
- std::shared_ptr<api::RemoveCommand> cmd(
- new api::RemoveCommand(makeDocumentBucket(bucket),
- document::DocumentId(
- createDummyIdString(bucket)),
- api::Timestamp(123456)));
+ auto cmd = std::make_shared<api::RemoveCommand>(
+ makeDocumentBucket(bucket),
+ document::DocumentId(createDummyIdString(bucket)),
+ api::Timestamp(123456));
cmd->setAddress(makeStorageAddress(node));
return cmd;
}
@@ -183,15 +137,11 @@ Fixture::Fixture()
// flip out and die on an explicit nullptr check.
_tracker = std::make_unique<PendingMessageTracker>(_compReg);
}
-Fixture::~Fixture() {}
+Fixture::~Fixture() = default;
-}
-
-CPPUNIT_TEST_SUITE_REGISTRATION(PendingMessageTrackerTest);
+} // anonymous namespace
-void
-PendingMessageTrackerTest::testSimple()
-{
+TEST_F(PendingMessageTrackerTest, simple) {
StorageComponentRegisterImpl compReg;
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
@@ -208,14 +158,12 @@ PendingMessageTrackerTest::testSimple()
std::ostringstream ost;
tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=bucket"));
- CPPUNIT_ASSERT_CONTAIN(
- std::string(
- "<b>Bucket(BucketSpace(0x0000000000000001), BucketId(0x40000000000004d2))</b>\n"
- "<ul>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> "
- "Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "</ul>\n"),
- ost.str());
+ EXPECT_THAT(ost.str(), HasSubstr(
+ "<b>Bucket(BucketSpace(0x0000000000000001), BucketId(0x40000000000004d2))</b>\n"
+ "<ul>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> "
+ "Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "</ul>\n"));
}
api::RemoveReply reply(*remove);
@@ -225,7 +173,7 @@ PendingMessageTrackerTest::testSimple()
std::ostringstream ost;
tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=bucket"));
- CPPUNIT_ASSERT_MSG(ost.str(), ost.str().find("doc:") == std::string::npos);
+ EXPECT_THAT(ost.str(), Not(HasSubstr("doc:")));
}
}
@@ -251,9 +199,7 @@ PendingMessageTrackerTest::insertMessages(PendingMessageTracker& tracker)
}
}
-void
-PendingMessageTrackerTest::testStartPage()
-{
+TEST_F(PendingMessageTrackerTest, start_page) {
StorageComponentRegisterImpl compReg;
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
@@ -263,21 +209,16 @@ PendingMessageTrackerTest::testStartPage()
std::ostringstream ost;
tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages"));
- CPPUNIT_ASSERT_CONTAIN(
- std::string(
- "<h1>Pending messages to storage nodes</h1>\n"
- "View:\n"
- "<ul>\n"
- "<li><a href=\"?order=bucket\">Group by bucket</a></li>"
- "<li><a href=\"?order=node\">Group by node</a></li>"),
- ost.str());
-
+ EXPECT_THAT(ost.str(), HasSubstr(
+ "<h1>Pending messages to storage nodes</h1>\n"
+ "View:\n"
+ "<ul>\n"
+ "<li><a href=\"?order=bucket\">Group by bucket</a></li>"
+ "<li><a href=\"?order=node\">Group by node</a></li>"));
}
}
-void
-PendingMessageTrackerTest::testMultipleMessages()
-{
+TEST_F(PendingMessageTrackerTest, multiple_messages) {
StorageComponentRegisterImpl compReg;
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
@@ -290,45 +231,41 @@ PendingMessageTrackerTest::testMultipleMessages()
std::ostringstream ost;
tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=bucket"));
- CPPUNIT_ASSERT_CONTAIN(
- std::string(
- "<b>Bucket(BucketSpace(0x0000000000000001), BucketId(0x40000000000004d2))</b>\n"
- "<ul>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "</ul>\n"
- "<b>Bucket(BucketSpace(0x0000000000000001), BucketId(0x40000000000011d7))</b>\n"
- "<ul>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
- "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
- "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
- "</ul>\n"
- ),
- ost.str());
+ EXPECT_THAT(ost.str(), HasSubstr(
+ "<b>Bucket(BucketSpace(0x0000000000000001), BucketId(0x40000000000004d2))</b>\n"
+ "<ul>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "</ul>\n"
+ "<b>Bucket(BucketSpace(0x0000000000000001), BucketId(0x40000000000011d7))</b>\n"
+ "<ul>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
+ "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
+ "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
+ "</ul>\n"));
}
{
std::ostringstream ost;
tracker.reportStatus(ost, framework::HttpUrlPath("/pendingmessages?order=node"));
- CPPUNIT_ASSERT_CONTAIN(std::string(
- "<b>Node 0 (pending count: 4)</b>\n"
- "<ul>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
- "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
- "</ul>\n"
- "<b>Node 1 (pending count: 4)</b>\n"
- "<ul>\n"
- "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
- "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
- "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
- "</ul>\n"
- ), ost.str());
+ EXPECT_THAT(ost.str(), HasSubstr(
+ "<b>Node 0 (pending count: 4)</b>\n"
+ "<ul>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
+ "<li><i>Node 0</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
+ "</ul>\n"
+ "<b>Node 1 (pending count: 4)</b>\n"
+ "<ul>\n"
+ "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000004d2), priority=127)</li>\n"
+ "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
+ "<li><i>Node 1</i>: <b>1970-01-01 00:00:01</b> Remove(BucketId(0x40000000000011d7), priority=127)</li>\n"
+ "</ul>\n"));
}
}
@@ -376,9 +313,7 @@ public:
}
-void
-PendingMessageTrackerTest::testGetPendingMessageTypes()
-{
+TEST_F(PendingMessageTrackerTest, get_pending_message_types) {
StorageComponentRegisterImpl compReg;
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
@@ -394,25 +329,23 @@ PendingMessageTrackerTest::testGetPendingMessageTypes()
{
TestChecker checker;
tracker.checkPendingMessages(0, makeDocumentBucket(bid), checker);
- CPPUNIT_ASSERT_EQUAL(127, (int)checker.pri);
+ EXPECT_EQ(127, static_cast<int>(checker.pri));
}
{
TestChecker checker;
tracker.checkPendingMessages(0, makeDocumentBucket(document::BucketId(16, 1235)), checker);
- CPPUNIT_ASSERT_EQUAL(255, (int)checker.pri);
+ EXPECT_EQ(255, static_cast<int>(checker.pri));
}
{
TestChecker checker;
tracker.checkPendingMessages(1, makeDocumentBucket(bid), checker);
- CPPUNIT_ASSERT_EQUAL(255, (int)checker.pri);
+ EXPECT_EQ(255, static_cast<int>(checker.pri));
}
}
-void
-PendingMessageTrackerTest::testHasPendingMessage()
-{
+TEST_F(PendingMessageTrackerTest, has_pending_message) {
StorageComponentRegisterImpl compReg;
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
@@ -420,7 +353,7 @@ PendingMessageTrackerTest::testHasPendingMessage()
PendingMessageTracker tracker(compReg);
document::BucketId bid(16, 1234);
- CPPUNIT_ASSERT(!tracker.hasPendingMessage(1, makeDocumentBucket(bid), api::MessageType::REMOVE_ID));
+ EXPECT_FALSE(tracker.hasPendingMessage(1, makeDocumentBucket(bid), api::MessageType::REMOVE_ID));
{
auto remove = std::make_shared<api::RemoveCommand>(makeDocumentBucket(bid),
@@ -429,11 +362,11 @@ PendingMessageTrackerTest::testHasPendingMessage()
tracker.insert(remove);
}
- CPPUNIT_ASSERT(tracker.hasPendingMessage(1, makeDocumentBucket(bid), api::MessageType::REMOVE_ID));
- CPPUNIT_ASSERT(!tracker.hasPendingMessage(0, makeDocumentBucket(bid), api::MessageType::REMOVE_ID));
- CPPUNIT_ASSERT(!tracker.hasPendingMessage(2, makeDocumentBucket(bid), api::MessageType::REMOVE_ID));
- CPPUNIT_ASSERT(!tracker.hasPendingMessage(1, makeDocumentBucket(document::BucketId(16, 1233)), api::MessageType::REMOVE_ID));
- CPPUNIT_ASSERT(!tracker.hasPendingMessage(1, makeDocumentBucket(bid), api::MessageType::DELETEBUCKET_ID));
+ EXPECT_TRUE(tracker.hasPendingMessage(1, makeDocumentBucket(bid), api::MessageType::REMOVE_ID));
+ EXPECT_FALSE(tracker.hasPendingMessage(0, makeDocumentBucket(bid), api::MessageType::REMOVE_ID));
+ EXPECT_FALSE(tracker.hasPendingMessage(2, makeDocumentBucket(bid), api::MessageType::REMOVE_ID));
+ EXPECT_FALSE(tracker.hasPendingMessage(1, makeDocumentBucket(document::BucketId(16, 1233)), api::MessageType::REMOVE_ID));
+ EXPECT_FALSE(tracker.hasPendingMessage(1, makeDocumentBucket(bid), api::MessageType::DELETEBUCKET_ID));
}
namespace {
@@ -455,9 +388,7 @@ public:
} // anon ns
-void
-PendingMessageTrackerTest::testGetAllMessagesForSingleBucket()
-{
+TEST_F(PendingMessageTrackerTest, get_all_messages_for_single_bucket) {
StorageComponentRegisterImpl compReg;
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
@@ -469,16 +400,16 @@ PendingMessageTrackerTest::testGetAllMessagesForSingleBucket()
{
OperationEnumerator enumerator;
tracker.checkPendingMessages(makeDocumentBucket(document::BucketId(16, 1234)), enumerator);
- CPPUNIT_ASSERT_EQUAL(std::string("Remove -> 0\n"
- "Remove -> 0\n"
- "Remove -> 1\n"
- "Remove -> 1\n"),
- enumerator.str());
+ EXPECT_EQ("Remove -> 0\n"
+ "Remove -> 0\n"
+ "Remove -> 1\n"
+ "Remove -> 1\n",
+ enumerator.str());
}
{
OperationEnumerator enumerator;
tracker.checkPendingMessages(makeDocumentBucket(document::BucketId(16, 9876)), enumerator);
- CPPUNIT_ASSERT_EQUAL(std::string(""), enumerator.str());
+ EXPECT_EQ("", enumerator.str());
}
}
@@ -486,23 +417,23 @@ PendingMessageTrackerTest::testGetAllMessagesForSingleBucket()
// but have the same actual semantics as busy merges (i.e. "queue is full", not "node
// is too busy to accept new requests in general").
-void PendingMessageTrackerTest::busy_reply_marks_node_as_busy() {
+TEST_F(PendingMessageTrackerTest, busy_reply_marks_node_as_busy) {
Fixture f;
auto cmd = f.sendPut(RequestBuilder().toNode(0));
- CPPUNIT_ASSERT(!f.tracker().getNodeInfo().isBusy(0));
+ EXPECT_FALSE(f.tracker().getNodeInfo().isBusy(0));
f.sendPutReply(*cmd, RequestBuilder(), api::ReturnCode(api::ReturnCode::BUSY));
- CPPUNIT_ASSERT(f.tracker().getNodeInfo().isBusy(0));
- CPPUNIT_ASSERT(!f.tracker().getNodeInfo().isBusy(1));
+ EXPECT_TRUE(f.tracker().getNodeInfo().isBusy(0));
+ EXPECT_FALSE(f.tracker().getNodeInfo().isBusy(1));
}
-void PendingMessageTrackerTest::busy_node_duration_can_be_adjusted() {
+TEST_F(PendingMessageTrackerTest, busy_node_duration_can_be_adjusted) {
Fixture f;
auto cmd = f.sendPut(RequestBuilder().toNode(0));
f.tracker().setNodeBusyDuration(std::chrono::seconds(10));
f.sendPutReply(*cmd, RequestBuilder(), api::ReturnCode(api::ReturnCode::BUSY));
- CPPUNIT_ASSERT(f.tracker().getNodeInfo().isBusy(0));
+ EXPECT_TRUE(f.tracker().getNodeInfo().isBusy(0));
f.clock().addSecondsToTime(11);
- CPPUNIT_ASSERT(!f.tracker().getNodeInfo().isBusy(0));
+ EXPECT_FALSE(f.tracker().getNodeInfo().isBusy(0));
}
}
diff --git a/storage/src/tests/distributor/persistence_metrics_set_test.cpp b/storage/src/tests/distributor/persistence_metrics_set_test.cpp
index 22e187bf4c0..3210a59a567 100644
--- a/storage/src/tests/distributor/persistence_metrics_set_test.cpp
+++ b/storage/src/tests/distributor/persistence_metrics_set_test.cpp
@@ -2,71 +2,52 @@
#include <vespa/storage/distributor/distributormetricsset.h>
#include <vespa/storageapi/messageapi/returncode.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/vespalib/gtest/gtest.h>
-namespace storage {
-namespace distributor {
+using namespace ::testing;
-struct PersistenceMetricsSetTest : CppUnit::TestFixture {
- void successful_return_codes_are_counted_as_ok();
- void wrong_distribution_failure_is_counted();
- void timeout_failure_is_counted();
- // Note for these tests: busy, connection failures et al are sets of
- // failure codes and not just a single code. We only test certain members
- // of these sets here. See api::ReturnCode implementation for an exhaustive
- // list.
- void busy_failure_is_counted();
- void connection_failure_is_counted();
- void inconsistent_bucket_is_counted();
- void non_special_cased_failure_codes_are_catchall_counted();
-
- CPPUNIT_TEST_SUITE(PersistenceMetricsSetTest);
- CPPUNIT_TEST(successful_return_codes_are_counted_as_ok);
- CPPUNIT_TEST(wrong_distribution_failure_is_counted);
- CPPUNIT_TEST(timeout_failure_is_counted);
- CPPUNIT_TEST(busy_failure_is_counted);
- CPPUNIT_TEST(connection_failure_is_counted);
- CPPUNIT_TEST(inconsistent_bucket_is_counted);
- CPPUNIT_TEST(non_special_cased_failure_codes_are_catchall_counted);
- CPPUNIT_TEST_SUITE_END();
+namespace storage::distributor {
+struct PersistenceMetricsSetTest : Test {
void assert_failure_is_counted(PersistenceOperationMetricSet& metrics,
api::ReturnCode::Result failure_code,
const metrics::LongCountMetric& checked)
{
metrics.updateFromResult(api::ReturnCode(failure_code));
- CPPUNIT_ASSERT_EQUAL(int64_t(1), checked.getLongValue("count"));
- CPPUNIT_ASSERT_EQUAL(int64_t(0), metrics.ok.getLongValue("count"));
+ EXPECT_EQ(1, checked.getLongValue("count"));
+ EXPECT_EQ(0, metrics.ok.getLongValue("count"));
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(PersistenceMetricsSetTest);
-
-void PersistenceMetricsSetTest::successful_return_codes_are_counted_as_ok() {
+TEST_F(PersistenceMetricsSetTest, successful_return_codes_are_counted_as_ok) {
PersistenceOperationMetricSet metrics("foo");
metrics.updateFromResult(api::ReturnCode());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), metrics.ok.getLongValue("count"));
+ EXPECT_EQ(1, metrics.ok.getLongValue("count"));
}
-void PersistenceMetricsSetTest::wrong_distribution_failure_is_counted() {
+TEST_F(PersistenceMetricsSetTest, wrong_distribution_failure_is_counted) {
PersistenceOperationMetricSet metrics("foo");
assert_failure_is_counted(metrics, api::ReturnCode::WRONG_DISTRIBUTION,
metrics.failures.wrongdistributor);
}
-void PersistenceMetricsSetTest::timeout_failure_is_counted() {
+TEST_F(PersistenceMetricsSetTest, timeout_failure_is_counted) {
PersistenceOperationMetricSet metrics("foo");
assert_failure_is_counted(metrics, api::ReturnCode::TIMEOUT,
metrics.failures.timeout);
}
-void PersistenceMetricsSetTest::busy_failure_is_counted() {
+// Note for these tests: busy, connection failures et al are sets of
+// failure codes and not just a single code. We only test certain members
+// of these sets here. See api::ReturnCode implementation for an exhaustive
+// list.
+TEST_F(PersistenceMetricsSetTest, busy_failure_is_counted) {
PersistenceOperationMetricSet metrics("foo");
assert_failure_is_counted(metrics, api::ReturnCode::BUSY,
metrics.failures.busy);
}
-void PersistenceMetricsSetTest::connection_failure_is_counted() {
+TEST_F(PersistenceMetricsSetTest, connection_failure_is_counted) {
PersistenceOperationMetricSet metrics("foo");
// This is dirty enum value coercion, but this is how "parent protocol"
// error codes are handled already.
@@ -76,17 +57,16 @@ void PersistenceMetricsSetTest::connection_failure_is_counted() {
metrics.failures.notconnected);
}
-void PersistenceMetricsSetTest::inconsistent_bucket_is_counted() {
+TEST_F(PersistenceMetricsSetTest, inconsistent_bucket_is_counted) {
PersistenceOperationMetricSet metrics("foo");
assert_failure_is_counted(metrics, api::ReturnCode::BUCKET_NOT_FOUND,
metrics.failures.inconsistent_bucket);
}
-void PersistenceMetricsSetTest::non_special_cased_failure_codes_are_catchall_counted() {
+TEST_F(PersistenceMetricsSetTest, non_special_cased_failure_codes_are_catchall_counted) {
PersistenceOperationMetricSet metrics("foo");
assert_failure_is_counted(metrics, api::ReturnCode::REJECTED,
metrics.failures.storagefailure);
}
}
-}
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index 881ccb560b4..d56ca69d52f 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -9,10 +9,8 @@
#include <tests/distributor/distributortestutil.h>
#include <tests/common/dummystoragelink.h>
#include <vespa/document/test/make_document_bucket.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/text/stringtokenizer.h>
-#include <iomanip>
+#include <vespa/vespalib/gtest/gtest.h>
using std::shared_ptr;
using config::ConfigGetter;
@@ -57,15 +55,15 @@ public:
= api::ReturnCode::OK,
api::BucketInfo info = api::BucketInfo(1,2,3,4,5))
{
- ASSERT_FALSE(_sender.commands.empty());
+ ASSERT_FALSE(_sender.commands().empty());
if (idx == -1) {
- idx = _sender.commands.size() - 1;
- } else if (static_cast<size_t>(idx) >= _sender.commands.size()) {
+ idx = _sender.commands().size() - 1;
+ } else if (static_cast<size_t>(idx) >= _sender.commands().size()) {
throw std::logic_error("Specified message index is greater "
"than number of received messages");
}
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[idx];
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(idx);
api::StorageReply::SP reply(msg->makeReply().release());
dynamic_cast<api::BucketInfoReply*>(reply.get())->setBucketInfo(info);
reply->setResult(result);
@@ -99,16 +97,12 @@ PutOperationTest::~PutOperationTest() = default;
document::BucketId
PutOperationTest::createAndSendSampleDocument(uint32_t timeout) {
- Document::SP
- doc(new Document(doc_type(), DocumentId(DocIdString("test", "test"))));
+ auto doc = std::make_shared<Document>(doc_type(), DocumentId(DocIdString("test", "test")));
document::BucketId id = getExternalOperationHandler().getBucketId(doc->getId());
addIdealNodes(id);
- std::shared_ptr<api::PutCommand> msg(
- new api::PutCommand(makeDocumentBucket(document::BucketId(0)),
- doc,
- 0));
+ auto msg = std::make_shared<api::PutCommand>(makeDocumentBucket(document::BucketId(0)), doc, 0);
msg->setTimestamp(100);
msg->setPriority(128);
msg->setTimeout(timeout);
@@ -118,10 +112,10 @@ PutOperationTest::createAndSendSampleDocument(uint32_t timeout) {
namespace {
-typedef int Redundancy;
-typedef int NodeCount;
-typedef uint32_t ReturnAfter;
-typedef bool RequirePrimaryWritten;
+using Redundancy = int;
+using NodeCount = int;
+using ReturnAfter = uint32_t;
+using RequirePrimaryWritten = bool;
}
@@ -306,7 +300,7 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_required_not_done)
sendReply(4);
sendReply(5);
- ASSERT_EQ(0, _sender.replies.size());
+ ASSERT_EQ(0, _sender.replies().size());
}
TEST_F(PutOperationTest, do_not_revert_on_failure_after_early_return) {
@@ -395,9 +389,9 @@ TEST_F(PutOperationTest, do_not_send_CreateBucket_if_already_pending) {
// Manually shove sent messages into pending message tracker, since
// this isn't done automatically.
- for (size_t i = 0; i < _sender.commands.size(); ++i) {
+ for (size_t i = 0; i < _sender.commands().size(); ++i) {
getExternalOperationHandler().getDistributor().getPendingMessageTracker()
- .insert(_sender.commands[i]);
+ .insert(_sender.command(i));
}
sendPut(createPut(doc));
@@ -420,17 +414,14 @@ TEST_F(PutOperationTest, no_storage_nodes) {
TEST_F(PutOperationTest, update_correct_bucket_on_remapped_put) {
setupDistributor(2, 2, "storage:2 distributor:1");
- Document::SP doc(new Document(doc_type(), DocumentId(
- UserDocIdString("userdoc:test:13:uri"))));
-
+ auto doc = std::make_shared<Document>(doc_type(), DocumentId(UserDocIdString("userdoc:test:13:uri")));
addNodesToBucketDB(document::BucketId(16,13), "0=0,1=0");
-
sendPut(createPut(doc));
ASSERT_EQ("Put => 0,Put => 1", _sender.getCommands(true));
{
- std::shared_ptr<api::StorageCommand> msg2 = _sender.commands[0];
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.command(0);
std::shared_ptr<api::StorageReply> reply(msg2->makeReply().release());
PutReply* sreply = (PutReply*)reply.get();
sreply->remapBucketId(document::BucketId(17, 13));
@@ -610,7 +601,7 @@ void PutOperationTest::do_test_creation_with_bucket_activation_disabled(bool dis
sendPut(createPut(doc));
ASSERT_EQ("Create bucket => 0,Put => 0", _sender.getCommands(true));
- auto cmd = _sender.commands[0];
+ auto cmd = _sender.command(0);
auto createCmd = std::dynamic_pointer_cast<api::CreateBucketCommand>(cmd);
ASSERT_TRUE(createCmd.get() != nullptr);
// There's only 1 content node, so if activation were not disabled, it
diff --git a/storage/src/tests/distributor/removebucketoperationtest.cpp b/storage/src/tests/distributor/removebucketoperationtest.cpp
index f11c29bc55c..e2bf867ad11 100644
--- a/storage/src/tests/distributor/removebucketoperationtest.cpp
+++ b/storage/src/tests/distributor/removebucketoperationtest.cpp
@@ -1,5 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
+
#include <tests/common/dummystoragelink.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/bucket.h>
@@ -8,41 +8,24 @@
#include <vespa/storage/distributor/distributor.h>
#include <tests/distributor/distributortestutil.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
+
+namespace storage::distributor {
-namespace storage {
-namespace distributor {
-
-class RemoveBucketOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(RemoveBucketOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testBucketInfoMismatchFailure);
- CPPUNIT_TEST(testFailWithInvalidBucketInfo);
- CPPUNIT_TEST_SUITE_END();
-
-protected:
- void testSimple();
- void testBucketInfoMismatchFailure();
- void testFailWithInvalidBucketInfo();
-
-public:
- void setUp() override {
+struct RemoveBucketOperationTest : Test, DistributorTestUtil {
+ void SetUp() override {
createLinks();
};
- void tearDown() override {
+ void TearDown() override {
close();
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(RemoveBucketOperationTest);
-
-void
-RemoveBucketOperationTest::testSimple()
-{
+TEST_F(RemoveBucketOperationTest, simple) {
addNodesToBucketDB(document::BucketId(16, 1),
"0=10/100/1/t,"
"1=10/100/1/t,"
@@ -57,18 +40,16 @@ RemoveBucketOperationTest::testSimple()
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Delete bucket => 1,"
- "Delete bucket => 2"),
- _sender.getCommands(true));
+ ASSERT_EQ("Delete bucket => 1,"
+ "Delete bucket => 2",
+ _sender.getCommands(true));
sendReply(op, 0);
sendReply(op, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0xa,docs=100/100,bytes=1/1,trusted=true,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1)));
+ ASSERT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0xa,docs=100/100,bytes=1/1,trusted=true,active=false,ready=false)",
+ dumpBucket(document::BucketId(16, 1)));
}
/**
@@ -76,14 +57,11 @@ RemoveBucketOperationTest::testSimple()
* back actual bucket info reinserts that bucket info into the distributor
* bucket database.
*/
-void
-RemoveBucketOperationTest::testBucketInfoMismatchFailure()
-{
+TEST_F(RemoveBucketOperationTest, bucket_info_mismatch_failure) {
addNodesToBucketDB(document::BucketId(16, 1), "1=0/0/0/t");
- getComponentRegisterImpl().setDistribution(std::shared_ptr<lib::Distribution>(
- new lib::Distribution(
- lib::Distribution::getDefaultDistributionConfig(1, 10))));
+ getComponentRegisterImpl().setDistribution(
+ std::make_shared<lib::Distribution>(lib::Distribution::getDefaultDistributionConfig(1, 10)));
enableDistributorClusterState("distributor:1 storage:2");
@@ -93,11 +71,10 @@ RemoveBucketOperationTest::testBucketInfoMismatchFailure()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Delete bucket => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Delete bucket => 1", _sender.getCommands(true));
+ ASSERT_EQ(1, _sender.commands().size());
- CPPUNIT_ASSERT_EQUAL((size_t) 1, _sender.commands.size());
- std::shared_ptr<api::StorageCommand> msg2 = _sender.commands[0];
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.command(0);
std::shared_ptr<api::StorageReply> reply(msg2->makeReply().release());
dynamic_cast<api::DeleteBucketReply&>(*reply).setBucketInfo(
api::BucketInfo(10, 100, 1));
@@ -105,11 +82,9 @@ RemoveBucketOperationTest::testBucketInfoMismatchFailure()
op.receive(_sender, reply);
// RemoveBucketOperation should reinsert bucketinfo into database
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "BucketId(0x4000000000000001) : "
- "node(idx=1,crc=0xa,docs=100/100,bytes=1/1,trusted=true,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1)));
+ ASSERT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=1,crc=0xa,docs=100/100,bytes=1/1,trusted=true,active=false,ready=false)",
+ dumpBucket(document::BucketId(16, 1)));
}
/**
@@ -117,14 +92,11 @@ RemoveBucketOperationTest::testBucketInfoMismatchFailure()
* not include valid BucketInfo in its reply does not reinsert the bucket
* into the distributor.
*/
-void
-RemoveBucketOperationTest::testFailWithInvalidBucketInfo()
-{
+TEST_F(RemoveBucketOperationTest, fail_with_invalid_bucket_info) {
addNodesToBucketDB(document::BucketId(16, 1), "1=0/0/0/t");
- getComponentRegisterImpl().setDistribution(std::shared_ptr<lib::Distribution>(
- new lib::Distribution(
- lib::Distribution::getDefaultDistributionConfig(1, 10))));
+ getComponentRegisterImpl().setDistribution(
+ std::make_shared<lib::Distribution>(lib::Distribution::getDefaultDistributionConfig(1, 10)));
enableDistributorClusterState("distributor:1 storage:2");
@@ -134,18 +106,15 @@ RemoveBucketOperationTest::testFailWithInvalidBucketInfo()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Delete bucket => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Delete bucket => 1", _sender.getCommands(true));
+ ASSERT_EQ(1, _sender.commands().size());
- CPPUNIT_ASSERT_EQUAL((size_t) 1, _sender.commands.size());
- std::shared_ptr<api::StorageCommand> msg2 = _sender.commands[0];
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.command(0);
std::shared_ptr<api::StorageReply> reply(msg2->makeReply().release());
reply->setResult(api::ReturnCode::ABORTED);
op.receive(_sender, reply);
- CPPUNIT_ASSERT_EQUAL(std::string("NONEXISTING"),
- dumpBucket(document::BucketId(16, 1)));
+ EXPECT_EQ("NONEXISTING", dumpBucket(document::BucketId(16, 1)));
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/removelocationtest.cpp b/storage/src/tests/distributor/removelocationtest.cpp
index 5a6013c6fc4..74daba3d098 100644
--- a/storage/src/tests/distributor/removelocationtest.cpp
+++ b/storage/src/tests/distributor/removelocationtest.cpp
@@ -1,59 +1,43 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
-#include <iomanip>
-#include <tests/common/dummystoragelink.h>
#include <vespa/storageapi/message/removelocation.h>
#include <vespa/storage/distributor/operations/external/removelocationoperation.h>
#include <tests/distributor/distributortestutil.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/distributor.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
-class RemoveLocationOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(RemoveLocationOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST_SUITE_END();
-
-protected:
- void testSimple();
-
-public:
+struct RemoveLocationOperationTest : Test, DistributorTestUtil {
std::unique_ptr<RemoveLocationOperation> op;
- void setUp() override {
+ void SetUp() override {
createLinks();
};
- void tearDown() override {
+ void TearDown() override {
close();
}
void sendRemoveLocation(const std::string& selection) {
- std::shared_ptr<api::RemoveLocationCommand> msg(
- new api::RemoveLocationCommand(selection, makeDocumentBucket(document::BucketId(0))));
+ auto msg = std::make_shared<api::RemoveLocationCommand>(selection, makeDocumentBucket(document::BucketId(0)));
- op.reset(new RemoveLocationOperation(getExternalOperationHandler(),
- getDistributorBucketSpace(),
- msg,
- getDistributor().getMetrics().
- removelocations[msg->getLoadType()]));
+ op = std::make_unique<RemoveLocationOperation>(
+ getExternalOperationHandler(),
+ getDistributorBucketSpace(),
+ msg,
+ getDistributor().getMetrics().
+ removelocations[msg->getLoadType()]);
op->start(_sender, framework::MilliSecTime(0));
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(RemoveLocationOperationTest);
-
-void
-RemoveLocationOperationTest::testSimple()
-{
+TEST_F(RemoveLocationOperationTest, simple) {
enableDistributorClusterState("distributor:1 storage:3");
addNodesToBucketDB(document::BucketId(34, 0x000001234), "0=1,1=1");
@@ -63,25 +47,22 @@ RemoveLocationOperationTest::testSimple()
sendRemoveLocation("id.user=4660");
- CPPUNIT_ASSERT_EQUAL(
- std::string("Remove selection(id.user=4660): BucketInfoCommand() => 0,"
- "Remove selection(id.user=4660): BucketInfoCommand() => 1,"
- "Remove selection(id.user=4660): BucketInfoCommand() => 0,"
- "Remove selection(id.user=4660): BucketInfoCommand() => 2,"
- "Remove selection(id.user=4660): BucketInfoCommand() => 0,"
- "Remove selection(id.user=4660): BucketInfoCommand() => 2,"
- "Remove selection(id.user=4660): BucketInfoCommand() => 1,"
- "Remove selection(id.user=4660): BucketInfoCommand() => 2"),
- _sender.getCommands(true, true));
+ ASSERT_EQ("Remove selection(id.user=4660): BucketInfoCommand() => 0,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 1,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 0,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 2,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 0,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 2,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 1,"
+ "Remove selection(id.user=4660): BucketInfoCommand() => 2",
+ _sender.getCommands(true, true));
for (uint32_t i = 0; i < 8; ++i) {
sendReply(*op, i);
}
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketInfoReply(BucketInfo(invalid)) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("BucketInfoReply(BucketInfo(invalid)) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
index 24c40f50894..bae2395bfa7 100644
--- a/storage/src/tests/distributor/removeoperationtest.cpp
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
#include <iomanip>
#include <tests/common/dummystoragelink.h>
#include <vespa/storage/distributor/distributor.h>
@@ -8,40 +7,19 @@
#include <tests/distributor/distributortestutil.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/operations/external/removeoperation.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-namespace distributor {
-
-class RemoveOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(RemoveOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testNotFound);
- CPPUNIT_TEST(testStorageFailure);
- CPPUNIT_TEST(testNotInDB);
- CPPUNIT_TEST(testMultipleCopies);
- CPPUNIT_TEST(canSendRemoveWhenAllReplicaNodesRetired);
- CPPUNIT_TEST_SUITE_END();
-
-protected:
- void testSimple();
- void testNotFound();
- void testStorageFailure();
- void testNoReply();
- void testNotInDB();
- void testMultipleCopies();
- void testRevert();
- void canSendRemoveWhenAllReplicaNodesRetired();
-
-public:
+namespace storage::distributor {
+
+struct RemoveOperationTest : Test, DistributorTestUtil {
document::DocumentId docId;
document::BucketId bucketId;
std::unique_ptr<RemoveOperation> op;
- void setUp() override {
+ void SetUp() override {
createLinks();
docId = document::DocumentId(document::DocIdString("test", "uri"));
@@ -49,19 +27,19 @@ public:
enableDistributorClusterState("distributor:1 storage:4");
};
- void tearDown() override {
+ void TearDown() override {
close();
}
void sendRemove(document::DocumentId dId) {
- std::shared_ptr<api::RemoveCommand> msg(
- new api::RemoveCommand(makeDocumentBucket(document::BucketId(0)), dId, 100));
+ auto msg = std::make_shared<api::RemoveCommand>(makeDocumentBucket(document::BucketId(0)), dId, 100);
- op.reset(new RemoveOperation(getExternalOperationHandler(),
- getDistributorBucketSpace(),
- msg,
- getDistributor().getMetrics().
- removes[msg->getLoadType()]));
+ op = std::make_unique<RemoveOperation>(
+ getExternalOperationHandler(),
+ getDistributorBucketSpace(),
+ msg,
+ getDistributor().getMetrics().
+ removes[msg->getLoadType()]);
op->start(_sender, framework::MilliSecTime(0));
}
@@ -71,13 +49,13 @@ public:
uint64_t oldTimestamp)
{
if (index == (uint32_t)-1) {
- index = _sender.commands.size() - 1;
+ index = _sender.commands().size() - 1;
}
- std::shared_ptr<api::StorageMessage> msg2 = _sender.commands[index];
- api::RemoveCommand* removec = dynamic_cast<api::RemoveCommand*>(msg2.get());
+ std::shared_ptr<api::StorageMessage> msg2 = _sender.command(index);
+ auto* removec = dynamic_cast<api::RemoveCommand*>(msg2.get());
std::unique_ptr<api::StorageReply> reply(removec->makeReply());
- api::RemoveReply* removeR = static_cast<api::RemoveReply*>(reply.get());
+ auto* removeR = static_cast<api::RemoveReply*>(reply.get());
removeR->setOldTimestamp(oldTimestamp);
callback.onReceive(_sender,
std::shared_ptr<api::StorageReply>(reply.release()));
@@ -88,116 +66,92 @@ public:
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(RemoveOperationTest);
-
-void
-RemoveOperationTest::testSimple()
-{
+TEST_F(RemoveOperationTest, simple) {
addNodesToBucketDB(bucketId, "1=0");
sendRemove();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
- "timestamp 100) => 1"),
+ ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 1",
_sender.getLastCommand());
replyToMessage(*op, -1, 34);
- CPPUNIT_ASSERT_EQUAL(
- std::string("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 100, removed doc from 34) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100, removed doc from 34) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-RemoveOperationTest::testNotFound()
-{
+TEST_F(RemoveOperationTest, not_found) {
addNodesToBucketDB(bucketId, "1=0");
sendRemove();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
- "timestamp 100) => 1"),
- _sender.getLastCommand());
+ ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 1",
+ _sender.getLastCommand());
replyToMessage(*op, -1, 0);
- CPPUNIT_ASSERT_EQUAL(
- std::string("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 100, not found) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100, not found) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-RemoveOperationTest::testStorageFailure()
-{
+TEST_F(RemoveOperationTest, storage_failure) {
addNodesToBucketDB(bucketId, "1=0");
sendRemove();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
- "timestamp 100) => 1"),
- _sender.getLastCommand());
+ ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 1",
+ _sender.getLastCommand());
sendReply(*op, -1, api::ReturnCode::INTERNAL_FAILURE);
- CPPUNIT_ASSERT_EQUAL(
- std::string("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
- "timestamp 100, not found) ReturnCode(INTERNAL_FAILURE)"),
- _sender.getLastReply());
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), doc:test:uri, "
+ "timestamp 100, not found) ReturnCode(INTERNAL_FAILURE)",
+ _sender.getLastReply());
}
-void
-RemoveOperationTest::testNotInDB()
-{
+TEST_F(RemoveOperationTest, not_in_db) {
sendRemove();
- CPPUNIT_ASSERT_EQUAL(std::string("RemoveReply(BucketId(0x0000000000000000), "
- "doc:test:uri, timestamp 100, not found) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), "
+ "doc:test:uri, timestamp 100, not found) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-RemoveOperationTest::testMultipleCopies()
-{
+TEST_F(RemoveOperationTest, multiple_copies) {
addNodesToBucketDB(bucketId, "1=0, 2=0, 3=0");
sendRemove();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
- "timestamp 100) => 1,"
- "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
- "timestamp 100) => 2,"
- "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
- "timestamp 100) => 3"),
- _sender.getCommands(true, true));
+ ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 1,"
+ "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 2,"
+ "Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 3",
+ _sender.getCommands(true, true));
replyToMessage(*op, 0, 34);
replyToMessage(*op, 1, 34);
replyToMessage(*op, 2, 75);
- CPPUNIT_ASSERT_EQUAL(
- std::string("RemoveReply(BucketId(0x0000000000000000), "
- "doc:test:uri, timestamp 100, removed doc from 75) ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), "
+ "doc:test:uri, timestamp 100, removed doc from 75) ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-RemoveOperationTest::canSendRemoveWhenAllReplicaNodesRetired()
-{
+TEST_F(RemoveOperationTest, can_send_remove_when_all_replica_nodes_retired) {
enableDistributorClusterState("distributor:1 storage:1 .0.s:r");
addNodesToBucketDB(bucketId, "0=123");
sendRemove();
- CPPUNIT_ASSERT_EQUAL(
- std::string("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
- "timestamp 100) => 0"),
- _sender.getLastCommand());
+ ASSERT_EQ("Remove(BucketId(0x4000000000002a52), doc:test:uri, "
+ "timestamp 100) => 0",
+ _sender.getLastCommand());
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp b/storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp
index f12917b0936..21ef2fa18bd 100644
--- a/storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp
+++ b/storage/src/tests/distributor/simplebucketprioritydatabasetest.cpp
@@ -1,69 +1,38 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <string>
-#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <string>
using document::test::makeDocumentBucket;
-namespace storage {
-
-namespace distributor {
+namespace storage::distributor {
using document::BucketId;
-typedef MaintenancePriority Priority;
-
-class SimpleBucketPriorityDatabaseTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(SimpleBucketPriorityDatabaseTest);
- CPPUNIT_TEST(testIteratorRangeIsEqualOnEmptyDatabase);
- CPPUNIT_TEST(testCanGetPrioritizedBucket);
- CPPUNIT_TEST(testIterateOverMultiplePriorities);
- CPPUNIT_TEST(testMultipleSetPriorityForOneBucket);
- CPPUNIT_TEST(testIterateOverMultipleBucketsWithMultiplePriorities);
- CPPUNIT_TEST(testNoMaintenanceNeededClearsBucketFromDatabase);
- CPPUNIT_TEST_SUITE_END();
-
- typedef SimpleBucketPriorityDatabase::const_iterator const_iterator;
-
-public:
- void testIteratorRangeIsEqualOnEmptyDatabase();
- void testCanGetPrioritizedBucket();
- void testIterateOverMultiplePriorities();
- void testMultipleSetPriorityForOneBucket();
- void testIterateOverMultipleBucketsWithMultiplePriorities();
- void testNoMaintenanceNeededClearsBucketFromDatabase();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(SimpleBucketPriorityDatabaseTest);
-
-void
-SimpleBucketPriorityDatabaseTest::testIteratorRangeIsEqualOnEmptyDatabase()
-{
+using Priority = MaintenancePriority;
+
+TEST(SimpleBucketPriorityDatabaseTest, iterator_range_is_equal_on_empty_database) {
SimpleBucketPriorityDatabase queue;
- const_iterator begin(queue.begin());
- const_iterator end(queue.end());
+ auto begin = queue.begin();
+ auto end = queue.end();
- CPPUNIT_ASSERT(begin == end);
- CPPUNIT_ASSERT(begin == begin);
- CPPUNIT_ASSERT(end == end);
+ EXPECT_TRUE(begin == end);
+ EXPECT_TRUE(begin == begin);
+ EXPECT_TRUE(end == end);
}
-void
-SimpleBucketPriorityDatabaseTest::testCanGetPrioritizedBucket()
-{
+TEST(SimpleBucketPriorityDatabaseTest, can_get_prioritized_bucket) {
SimpleBucketPriorityDatabase queue;
PrioritizedBucket lowPriBucket(makeDocumentBucket(BucketId(16, 1234)), Priority::VERY_LOW);
queue.setPriority(lowPriBucket);
PrioritizedBucket highest(*queue.begin());
- CPPUNIT_ASSERT_EQUAL(lowPriBucket, highest);
+ EXPECT_EQ(lowPriBucket, highest);
}
-void
-SimpleBucketPriorityDatabaseTest::testIterateOverMultiplePriorities()
-{
+TEST(SimpleBucketPriorityDatabaseTest, iterate_over_multiple_priorities) {
SimpleBucketPriorityDatabase queue;
PrioritizedBucket lowPriBucket(makeDocumentBucket(BucketId(16, 1234)), Priority::LOW);
@@ -71,18 +40,16 @@ SimpleBucketPriorityDatabaseTest::testIterateOverMultiplePriorities()
queue.setPriority(lowPriBucket);
queue.setPriority(highPriBucket);
- const_iterator iter(queue.begin());
- CPPUNIT_ASSERT_EQUAL(highPriBucket, *iter);
+ auto iter = queue.begin();
+ ASSERT_EQ(highPriBucket, *iter);
++iter;
- CPPUNIT_ASSERT(iter != queue.end());
- CPPUNIT_ASSERT_EQUAL(lowPriBucket, *iter);
+ ASSERT_TRUE(iter != queue.end());
+ ASSERT_EQ(lowPriBucket, *iter);
++iter;
- CPPUNIT_ASSERT(iter == queue.end());
+ ASSERT_TRUE(iter == queue.end());
}
-void
-SimpleBucketPriorityDatabaseTest::testMultipleSetPriorityForOneBucket()
-{
+TEST(SimpleBucketPriorityDatabaseTest, multiple_set_priority_for_one_bucket) {
SimpleBucketPriorityDatabase queue;
PrioritizedBucket lowPriBucket(makeDocumentBucket(BucketId(16, 1234)), Priority::LOW);
@@ -91,15 +58,13 @@ SimpleBucketPriorityDatabaseTest::testMultipleSetPriorityForOneBucket()
queue.setPriority(lowPriBucket);
queue.setPriority(highPriBucket);
- const_iterator iter(queue.begin());
- CPPUNIT_ASSERT_EQUAL(highPriBucket, *iter);
+ auto iter = queue.begin();
+ ASSERT_EQ(highPriBucket, *iter);
++iter;
- CPPUNIT_ASSERT(iter == queue.end());
+ ASSERT_TRUE(iter == queue.end());
}
-void
-SimpleBucketPriorityDatabaseTest::testNoMaintenanceNeededClearsBucketFromDatabase()
-{
+TEST(SimpleBucketPriorityDatabaseTest, no_maintenance_needed_clears_bucket_from_database) {
SimpleBucketPriorityDatabase queue;
PrioritizedBucket highPriBucket(makeDocumentBucket(BucketId(16, 1234)), Priority::HIGH);
@@ -108,13 +73,11 @@ SimpleBucketPriorityDatabaseTest::testNoMaintenanceNeededClearsBucketFromDatabas
queue.setPriority(highPriBucket);
queue.setPriority(noPriBucket);
- const_iterator iter(queue.begin());
- CPPUNIT_ASSERT(iter == queue.end());
+ auto iter = queue.begin();
+ ASSERT_TRUE(iter == queue.end());
}
-void
-SimpleBucketPriorityDatabaseTest::testIterateOverMultipleBucketsWithMultiplePriorities()
-{
+TEST(SimpleBucketPriorityDatabaseTest, iterate_over_multiple_buckets_with_multiple_priorities) {
SimpleBucketPriorityDatabase queue;
PrioritizedBucket lowPriBucket1(makeDocumentBucket(BucketId(16, 1)), Priority::LOW);
@@ -129,17 +92,15 @@ SimpleBucketPriorityDatabaseTest::testIterateOverMultipleBucketsWithMultiplePrio
queue.setPriority(highPriBucket2);
queue.setPriority(lowPriBucket1);
- const_iterator iter(queue.begin());
+ auto iter = queue.begin();
PrioritizedBucket lastBucket(makeDocumentBucket(BucketId()), Priority::PRIORITY_LIMIT);
for (int i = 0; i < 5; ++i) {
- CPPUNIT_ASSERT(iter != queue.end());
- CPPUNIT_ASSERT(!iter->moreImportantThan(lastBucket));
+ ASSERT_TRUE(iter != queue.end());
+ ASSERT_FALSE(iter->moreImportantThan(lastBucket));
lastBucket = *iter;
++iter;
}
- CPPUNIT_ASSERT(iter == queue.end());
+ ASSERT_TRUE(iter == queue.end());
}
}
-}
-
diff --git a/storage/src/tests/distributor/simplemaintenancescannertest.cpp b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
index ac4a5bbfb91..b21a10c319e 100644
--- a/storage/src/tests/distributor/simplemaintenancescannertest.cpp
+++ b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
@@ -6,29 +6,19 @@
#include <vespa/storage/distributor/distributor_bucket_space_repo.h>
#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
#include <vespa/storage/distributor/maintenance/simplemaintenancescanner.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/vespalib/text/stringtokenizer.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace storage::distributor {
using document::BucketId;
using document::test::makeBucketSpace;
-typedef MaintenancePriority Priority;
-
-class SimpleMaintenanceScannerTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(SimpleMaintenanceScannerTest);
- CPPUNIT_TEST(testPrioritizeSingleBucket);
- CPPUNIT_TEST(testPrioritizeSingleBucketAltBucketSpace);
- CPPUNIT_TEST(testPrioritizeMultipleBuckets);
- CPPUNIT_TEST(testPendingMaintenanceOperationStatistics);
- CPPUNIT_TEST(perNodeMaintenanceStatsAreTracked);
- CPPUNIT_TEST(testReset);
- CPPUNIT_TEST_SUITE_END();
+using Priority = MaintenancePriority;
+using namespace ::testing;
+struct SimpleMaintenanceScannerTest : Test {
using PendingStats = SimpleMaintenanceScanner::PendingMaintenanceStats;
- std::string dumpPriorityDbToString(const BucketPriorityDatabase&) const;
-
std::unique_ptr<MockMaintenancePriorityGenerator> _priorityGenerator;
std::unique_ptr<DistributorBucketSpaceRepo> _bucketSpaceRepo;
std::unique_ptr<SimpleBucketPriorityDatabase> _priorityDb;
@@ -36,38 +26,26 @@ class SimpleMaintenanceScannerTest : public CppUnit::TestFixture {
void addBucketToDb(document::BucketSpace bucketSpace, int bucketNum);
void addBucketToDb(int bucketNum);
-
bool scanEntireDatabase(int expected);
+ std::string stringifyGlobalPendingStats(const PendingStats& stats) const;
- std::string stringifyGlobalPendingStats(const PendingStats&) const;
-
-public:
- void testPrioritizeSingleBucket();
- void testPrioritizeSingleBucketAltBucketSpace();
- void testPrioritizeMultipleBuckets();
- void testPendingMaintenanceOperationStatistics();
- void perNodeMaintenanceStatsAreTracked();
- void testReset();
-
- void setUp() override;
+ void SetUp() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(SimpleMaintenanceScannerTest);
-
void
-SimpleMaintenanceScannerTest::setUp()
+SimpleMaintenanceScannerTest::SetUp()
{
- _priorityGenerator.reset(new MockMaintenancePriorityGenerator());
+ _priorityGenerator = std::make_unique<MockMaintenancePriorityGenerator>();
_bucketSpaceRepo = std::make_unique<DistributorBucketSpaceRepo>();
- _priorityDb.reset(new SimpleBucketPriorityDatabase());
- _scanner.reset(new SimpleMaintenanceScanner(*_priorityDb, *_priorityGenerator, *_bucketSpaceRepo));
+ _priorityDb = std::make_unique<SimpleBucketPriorityDatabase>();
+ _scanner = std::make_unique<SimpleMaintenanceScanner>(*_priorityDb, *_priorityGenerator, *_bucketSpaceRepo);
}
void
SimpleMaintenanceScannerTest::addBucketToDb(document::BucketSpace bucketSpace, int bucketNum)
{
BucketDatabase::Entry entry(BucketId(16, bucketNum), BucketInfo());
- auto &bucketDb(_bucketSpaceRepo->get(bucketSpace).getBucketDatabase());
+ auto& bucketDb(_bucketSpaceRepo->get(bucketSpace).getBucketDatabase());
bucketDb.update(entry);
}
@@ -86,24 +64,20 @@ SimpleMaintenanceScannerTest::stringifyGlobalPendingStats(
return ss.str();
}
-void
-SimpleMaintenanceScannerTest::testPrioritizeSingleBucket()
-{
+TEST_F(SimpleMaintenanceScannerTest, prioritize_single_bucket) {
addBucketToDb(1);
std::string expected("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri VERY_HIGH)\n");
auto scanResult = _scanner->scanNext();
- CPPUNIT_ASSERT(!scanResult.isDone());
- CPPUNIT_ASSERT_EQUAL(makeBucketSpace().getId(), scanResult.getBucketSpace().getId());
- CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+ ASSERT_FALSE(scanResult.isDone());
+ EXPECT_EQ(makeBucketSpace().getId(), scanResult.getBucketSpace().getId());
+ EXPECT_EQ(expected, _priorityDb->toString());
- CPPUNIT_ASSERT(_scanner->scanNext().isDone());
- CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+ ASSERT_TRUE(_scanner->scanNext().isDone());
+ EXPECT_EQ(expected, _priorityDb->toString());
}
-void
-SimpleMaintenanceScannerTest::testPrioritizeSingleBucketAltBucketSpace()
-{
+TEST_F(SimpleMaintenanceScannerTest, prioritize_single_bucket_alt_bucket_space) {
document::BucketSpace bucketSpace(4);
_bucketSpaceRepo->add(bucketSpace, std::make_unique<DistributorBucketSpace>());
_scanner->reset();
@@ -111,31 +85,31 @@ SimpleMaintenanceScannerTest::testPrioritizeSingleBucketAltBucketSpace()
std::string expected("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000004), BucketId(0x4000000000000001)), pri VERY_HIGH)\n");
auto scanResult = _scanner->scanNext();
- CPPUNIT_ASSERT(!scanResult.isDone());
- CPPUNIT_ASSERT_EQUAL(bucketSpace.getId(), scanResult.getBucketSpace().getId());
- CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+ ASSERT_FALSE(scanResult.isDone());
+ EXPECT_EQ(bucketSpace.getId(), scanResult.getBucketSpace().getId());
+ EXPECT_EQ(expected, _priorityDb->toString());
- CPPUNIT_ASSERT(_scanner->scanNext().isDone());
- CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+ ASSERT_TRUE(_scanner->scanNext().isDone());
+ EXPECT_EQ(expected, _priorityDb->toString());
}
namespace {
- std::string sortLines(const std::string& source) {
- vespalib::StringTokenizer st(source,"\n","");
- std::vector<std::string> lines;
- std::copy(st.begin(), st.end(), std::back_inserter(lines));
- std::sort(lines.begin(), lines.end());
- std::ostringstream ost;
- for (auto& line : lines) {
- ost << line << "\n";
- }
- return ost.str();
+
+std::string sortLines(const std::string& source) {
+ vespalib::StringTokenizer st(source,"\n","");
+ std::vector<std::string> lines;
+ std::copy(st.begin(), st.end(), std::back_inserter(lines));
+ std::sort(lines.begin(), lines.end());
+ std::ostringstream ost;
+ for (auto& line : lines) {
+ ost << line << "\n";
}
+ return ost.str();
}
-void
-SimpleMaintenanceScannerTest::testPrioritizeMultipleBuckets()
-{
+}
+
+TEST_F(SimpleMaintenanceScannerTest, prioritize_multiple_buckets) {
addBucketToDb(1);
addBucketToDb(2);
addBucketToDb(3);
@@ -143,9 +117,9 @@ SimpleMaintenanceScannerTest::testPrioritizeMultipleBuckets()
"PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000002)), pri VERY_HIGH)\n"
"PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000003)), pri VERY_HIGH)\n");
- CPPUNIT_ASSERT(scanEntireDatabase(3));
- CPPUNIT_ASSERT_EQUAL(sortLines(expected),
- sortLines(_priorityDb->toString()));
+ ASSERT_TRUE(scanEntireDatabase(3));
+ EXPECT_EQ(sortLines(expected),
+ sortLines(_priorityDb->toString()));
}
bool
@@ -159,33 +133,29 @@ SimpleMaintenanceScannerTest::scanEntireDatabase(int expected)
return _scanner->scanNext().isDone();
}
-void
-SimpleMaintenanceScannerTest::testReset()
-{
+TEST_F(SimpleMaintenanceScannerTest, reset) {
addBucketToDb(1);
addBucketToDb(3);
- CPPUNIT_ASSERT(scanEntireDatabase(2));
+ ASSERT_TRUE(scanEntireDatabase(2));
std::string expected("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri VERY_HIGH)\n"
"PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000003)), pri VERY_HIGH)\n");
- CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+ EXPECT_EQ(expected, _priorityDb->toString());
addBucketToDb(2);
- CPPUNIT_ASSERT(scanEntireDatabase(0));
- CPPUNIT_ASSERT_EQUAL(expected, _priorityDb->toString());
+ ASSERT_TRUE(scanEntireDatabase(0));
+ EXPECT_EQ(expected, _priorityDb->toString());
_scanner->reset();
- CPPUNIT_ASSERT(scanEntireDatabase(3));
+ ASSERT_TRUE(scanEntireDatabase(3));
expected = "PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri VERY_HIGH)\n"
"PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000002)), pri VERY_HIGH)\n"
"PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000003)), pri VERY_HIGH)\n";
- CPPUNIT_ASSERT_EQUAL(sortLines(expected), sortLines(_priorityDb->toString()));
+ EXPECT_EQ(sortLines(expected), sortLines(_priorityDb->toString()));
}
-void
-SimpleMaintenanceScannerTest::testPendingMaintenanceOperationStatistics()
-{
+TEST_F(SimpleMaintenanceScannerTest, pending_maintenance_operation_statistics) {
addBucketToDb(1);
addBucketToDb(3);
@@ -194,10 +164,10 @@ SimpleMaintenanceScannerTest::testPendingMaintenanceOperationStatistics()
"set bucket state: 0, garbage collection: 0");
{
auto stats(_scanner->getPendingMaintenanceStats());
- CPPUNIT_ASSERT_EQUAL(expectedEmpty, stringifyGlobalPendingStats(stats));
+ EXPECT_EQ(expectedEmpty, stringifyGlobalPendingStats(stats));
}
- CPPUNIT_ASSERT(scanEntireDatabase(2));
+ ASSERT_TRUE(scanEntireDatabase(2));
// All mock operations generated have the merge type.
{
@@ -205,39 +175,37 @@ SimpleMaintenanceScannerTest::testPendingMaintenanceOperationStatistics()
std::string expected("delete bucket: 0, merge bucket: 2, "
"split bucket: 0, join bucket: 0, "
"set bucket state: 0, garbage collection: 0");
- CPPUNIT_ASSERT_EQUAL(expected, stringifyGlobalPendingStats(stats));
+ EXPECT_EQ(expected, stringifyGlobalPendingStats(stats));
}
_scanner->reset();
{
auto stats(_scanner->getPendingMaintenanceStats());
- CPPUNIT_ASSERT_EQUAL(expectedEmpty, stringifyGlobalPendingStats(stats));
+ EXPECT_EQ(expectedEmpty, stringifyGlobalPendingStats(stats));
}
}
-void
-SimpleMaintenanceScannerTest::perNodeMaintenanceStatsAreTracked()
-{
+TEST_F(SimpleMaintenanceScannerTest, per_node_maintenance_stats_are_tracked) {
addBucketToDb(1);
addBucketToDb(3);
{
auto stats(_scanner->getPendingMaintenanceStats());
NodeMaintenanceStats emptyStats;
- CPPUNIT_ASSERT_EQUAL(emptyStats, stats.perNodeStats.forNode(0, makeBucketSpace()));
+ EXPECT_EQ(emptyStats, stats.perNodeStats.forNode(0, makeBucketSpace()));
}
- CPPUNIT_ASSERT(scanEntireDatabase(2));
+ ASSERT_TRUE(scanEntireDatabase(2));
// Mock is currently hardwired to increment movingOut for node 1 and
// copyingIn for node 2 per bucket iterated (we've got 2).
auto stats(_scanner->getPendingMaintenanceStats());
{
NodeMaintenanceStats wantedNode1Stats;
wantedNode1Stats.movingOut = 2;
- CPPUNIT_ASSERT_EQUAL(wantedNode1Stats, stats.perNodeStats.forNode(1, makeBucketSpace()));
+ EXPECT_EQ(wantedNode1Stats, stats.perNodeStats.forNode(1, makeBucketSpace()));
}
{
NodeMaintenanceStats wantedNode2Stats;
wantedNode2Stats.copyingIn = 2;
- CPPUNIT_ASSERT_EQUAL(wantedNode2Stats, stats.perNodeStats.forNode(2, makeBucketSpace()));
+ EXPECT_EQ(wantedNode2Stats, stats.perNodeStats.forNode(2, makeBucketSpace()));
}
}
diff --git a/storage/src/tests/distributor/splitbuckettest.cpp b/storage/src/tests/distributor/splitbuckettest.cpp
index aead043e120..d88b02b332e 100644
--- a/storage/src/tests/distributor/splitbuckettest.cpp
+++ b/storage/src/tests/distributor/splitbuckettest.cpp
@@ -1,6 +1,4 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
-#include <iomanip>
#include <tests/common/dummystoragelink.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <vespa/storage/distributor/operations/idealstate/splitoperation.h>
@@ -10,53 +8,34 @@
#include <tests/distributor/distributortestutil.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/distributor.h>
+#include <vespa/vespalib/gtest/gtest.h>
-using std::shared_ptr;
-using namespace document;
using document::test::makeDocumentBucket;
+using namespace document;
+using namespace ::testing;
-namespace storage {
-
-namespace distributor {
-
-class SplitOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(SplitOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testMultiNodeFailure);
- CPPUNIT_TEST(testCopyTrustedStatusNotCarriedOverAfterSplit);
- CPPUNIT_TEST(testOperationBlockedByPendingJoin);
- CPPUNIT_TEST_SUITE_END();
+namespace storage::distributor {
+struct SplitOperationTest : Test, DistributorTestUtil {
uint32_t splitByteSize;
uint32_t tooLargeBucketSize;
uint32_t splitCount;
uint32_t maxSplitBits;
-protected:
- void testSimple();
- void testMultiNodeFailure();
- void testCopyTrustedStatusNotCarriedOverAfterSplit();
- void testOperationBlockedByPendingJoin();
-
-public:
SplitOperationTest();
- void setUp() override {
+ void SetUp() override {
createLinks();
getConfig().setSplitCount(splitCount);
getConfig().setSplitSize(splitByteSize);
}
- void tearDown() override {
+ void TearDown() override {
close();
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(SplitOperationTest);
-
SplitOperationTest::SplitOperationTest()
: splitByteSize(10*1024*1024),
tooLargeBucketSize(splitByteSize * 1.1),
@@ -65,9 +44,7 @@ SplitOperationTest::SplitOperationTest()
{
}
-void
-SplitOperationTest::testSimple()
-{
+TEST_F(SplitOperationTest, simple) {
enableDistributorClusterState("distributor:1 storage:1");
insertBucketInfo(document::BucketId(16, 1), 0, 0xabc, 1000,
@@ -84,60 +61,51 @@ SplitOperationTest::testSimple()
op.start(_sender, framework::MilliSecTime(0));
{
- CPPUNIT_ASSERT_EQUAL(size_t(1), _sender.commands.size());
+ ASSERT_EQ(1, _sender.commands().size());
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::SPLITBUCKET);
- CPPUNIT_ASSERT_EQUAL(
- api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0)
- .toString(),
- msg->getAddress()->toString());
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
+ ASSERT_EQ(msg->getType(), api::MessageType::SPLITBUCKET);
+ EXPECT_EQ(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
- api::SplitBucketReply* sreply(
- static_cast<api::SplitBucketReply*>(reply.get()));
+ auto* sreply = static_cast<api::SplitBucketReply*>(reply.get());
- sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
- document::BucketId(17, 1),
- api::BucketInfo(100, 600, 5000000)));
+ sreply->getSplitInfo().emplace_back(document::BucketId(17, 1),
+ api::BucketInfo(100, 600, 5000000));
- sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
- document::BucketId(17, 0x10001),
- api::BucketInfo(110, 400, 6000000)));
+ sreply->getSplitInfo().emplace_back(document::BucketId(17, 0x10001),
+ api::BucketInfo(110, 400, 6000000));
op.receive(_sender, reply);
}
- CPPUNIT_ASSERT(!getBucket(document::BucketId(16, 1)).valid());
+ ASSERT_FALSE(getBucket(document::BucketId(16, 1)).valid());
{
BucketDatabase::Entry entry = getBucket(document::BucketId(17, 1));
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
- CPPUNIT_ASSERT_EQUAL((uint32_t)100, entry->getNodeRef(0).getChecksum());
- CPPUNIT_ASSERT_EQUAL((uint32_t)5000000,
- entry->getNodeRef(0).getTotalDocumentSize());
- CPPUNIT_ASSERT_EQUAL((uint32_t)600,
- entry->getNodeRef(0).getDocumentCount());
+ ASSERT_TRUE(entry.valid());
+ ASSERT_EQ(1, entry->getNodeCount());
+ EXPECT_EQ(0, entry->getNodeRef(0).getNode());
+ EXPECT_EQ(100, entry->getNodeRef(0).getChecksum());
+ EXPECT_EQ(5000000, entry->getNodeRef(0).getTotalDocumentSize());
+ EXPECT_EQ(600, entry->getNodeRef(0).getDocumentCount());
}
{
BucketDatabase::Entry entry(getBucket(document::BucketId(17, 0x10001)));
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
- CPPUNIT_ASSERT_EQUAL((uint32_t)110, entry->getNodeRef(0).getChecksum());
- CPPUNIT_ASSERT_EQUAL((uint32_t)6000000,
- entry->getNodeRef(0).getTotalDocumentSize());
- CPPUNIT_ASSERT_EQUAL((uint32_t)400,
- entry->getNodeRef(0).getDocumentCount());
+ ASSERT_TRUE(entry.valid());
+ ASSERT_EQ(1, entry->getNodeCount());
+ EXPECT_EQ(0, entry->getNodeRef(0).getNode());
+ EXPECT_EQ(110, entry->getNodeRef(0).getChecksum());
+ EXPECT_EQ(6000000, entry->getNodeRef(0).getTotalDocumentSize());
+ EXPECT_EQ(400, entry->getNodeRef(0).getDocumentCount());
}
}
-void
-SplitOperationTest::testMultiNodeFailure()
-{
+TEST_F(SplitOperationTest, multi_node_failure) {
{
BucketDatabase::Entry entry(document::BucketId(16, 1));
@@ -151,7 +119,6 @@ SplitOperationTest::testMultiNodeFailure()
enableDistributorClusterState("distributor:1 storage:2");
-
SplitOperation op("storage",
BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)),
toVector<uint16_t>(0,1)),
@@ -163,28 +130,22 @@ SplitOperationTest::testMultiNodeFailure()
op.start(_sender, framework::MilliSecTime(0));
{
- CPPUNIT_ASSERT_EQUAL((size_t)2, _sender.commands.size());
+ ASSERT_EQ(2, _sender.commands().size());
{
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[0];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::SPLITBUCKET);
- CPPUNIT_ASSERT_EQUAL(
- api::StorageMessageAddress("storage",
- lib::NodeType::STORAGE, 0).toString(),
- msg->getAddress()->toString());
-
- api::SplitBucketReply* sreply(
- static_cast<api::SplitBucketReply*>(
- msg->makeReply().release()));
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
+ ASSERT_EQ(msg->getType(), api::MessageType::SPLITBUCKET);
+ EXPECT_EQ(api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 0).toString(),
+ msg->getAddress()->toString());
+
+ auto* sreply = static_cast<api::SplitBucketReply*>(msg->makeReply().release());
sreply->setResult(api::ReturnCode::OK);
- sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
- document::BucketId(17, 1),
- api::BucketInfo(100, 600, 5000000)));
+ sreply->getSplitInfo().emplace_back(document::BucketId(17, 1),
+ api::BucketInfo(100, 600, 5000000));
- sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
- document::BucketId(17, 0x10001),
- api::BucketInfo(110, 400, 6000000)));
+ sreply->getSplitInfo().emplace_back(document::BucketId(17, 0x10001),
+ api::BucketInfo(110, 400, 6000000));
op.receive(_sender, std::shared_ptr<api::StorageReply>(sreply));
}
@@ -195,49 +156,41 @@ SplitOperationTest::testMultiNodeFailure()
{
BucketDatabase::Entry entry = getBucket(document::BucketId(16, 1));
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL((uint32_t)1, entry->getNodeCount());
+ ASSERT_TRUE(entry.valid());
+ ASSERT_EQ(1, entry->getNodeCount());
- CPPUNIT_ASSERT_EQUAL((uint16_t)1, entry->getNodeRef(0).getNode());
- CPPUNIT_ASSERT_EQUAL((uint32_t)250, entry->getNodeRef(0).getChecksum());
- CPPUNIT_ASSERT_EQUAL(tooLargeBucketSize,
- entry->getNodeRef(0).getTotalDocumentSize());
- CPPUNIT_ASSERT_EQUAL((uint32_t)1000,
- entry->getNodeRef(0).getDocumentCount());
+ EXPECT_EQ(1, entry->getNodeRef(0).getNode());
+ EXPECT_EQ(250, entry->getNodeRef(0).getChecksum());
+ EXPECT_EQ(tooLargeBucketSize, entry->getNodeRef(0).getTotalDocumentSize());
+ EXPECT_EQ(1000, entry->getNodeRef(0).getDocumentCount());
}
{
BucketDatabase::Entry entry = getBucket(document::BucketId(17, 1));
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL((uint32_t)1, entry->getNodeCount());
+ ASSERT_TRUE(entry.valid());
+ ASSERT_EQ(1, entry->getNodeCount());
- CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
- CPPUNIT_ASSERT_EQUAL((uint32_t)100, entry->getNodeRef(0).getChecksum());
- CPPUNIT_ASSERT_EQUAL((uint32_t)5000000,
- entry->getNodeRef(0).getTotalDocumentSize());
- CPPUNIT_ASSERT_EQUAL((uint32_t)600,
- entry->getNodeRef(0).getDocumentCount());
+ EXPECT_EQ(0, entry->getNodeRef(0).getNode());
+ EXPECT_EQ(100, entry->getNodeRef(0).getChecksum());
+ EXPECT_EQ(5000000, entry->getNodeRef(0).getTotalDocumentSize());
+ EXPECT_EQ(600, entry->getNodeRef(0).getDocumentCount());
}
{
BucketDatabase::Entry entry(getBucket(document::BucketId(17, 0x10001)));
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL((uint32_t)1, entry->getNodeCount());
+ ASSERT_TRUE(entry.valid());
+ ASSERT_EQ(1, entry->getNodeCount());
- CPPUNIT_ASSERT_EQUAL((uint16_t)0, entry->getNodeRef(0).getNode());
- CPPUNIT_ASSERT_EQUAL((uint32_t)110, entry->getNodeRef(0).getChecksum());
- CPPUNIT_ASSERT_EQUAL((uint32_t)6000000,
- entry->getNodeRef(0).getTotalDocumentSize());
- CPPUNIT_ASSERT_EQUAL((uint32_t)400,
- entry->getNodeRef(0).getDocumentCount());
+ EXPECT_EQ(0, entry->getNodeRef(0).getNode());
+ EXPECT_EQ(110, entry->getNodeRef(0).getChecksum());
+ EXPECT_EQ(6000000, entry->getNodeRef(0).getTotalDocumentSize());
+ EXPECT_EQ(400, entry->getNodeRef(0).getDocumentCount());
}
}
-void
-SplitOperationTest::testCopyTrustedStatusNotCarriedOverAfterSplit()
-{
+TEST_F(SplitOperationTest, copy_trusted_status_not_carried_over_after_split) {
enableDistributorClusterState("distributor:1 storage:2");
document::BucketId sourceBucket(16, 1);
@@ -260,48 +213,43 @@ SplitOperationTest::testCopyTrustedStatusNotCarriedOverAfterSplit()
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(size_t(3), _sender.commands.size());
+ ASSERT_EQ(3, _sender.commands().size());
std::vector<document::BucketId> childBuckets;
- childBuckets.push_back(document::BucketId(17, 1));
- childBuckets.push_back(document::BucketId(17, 0x10001));
+ childBuckets.emplace_back(17, 1);
+ childBuckets.emplace_back(17, 0x10001);
// Note: only 2 out of 3 requests replied to!
for (int i = 0; i < 2; ++i) {
- std::shared_ptr<api::StorageCommand> msg = _sender.commands[i];
- CPPUNIT_ASSERT(msg->getType() == api::MessageType::SPLITBUCKET);
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(i);
+ ASSERT_EQ(msg->getType(), api::MessageType::SPLITBUCKET);
std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
- api::SplitBucketReply* sreply(
- static_cast<api::SplitBucketReply*>(reply.get()));
+ auto* sreply = static_cast<api::SplitBucketReply*>(reply.get());
// Make sure copies differ so they cannot become implicitly trusted.
- sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
- childBuckets[0],
- api::BucketInfo(100 + i, 600, 5000000)));
- sreply->getSplitInfo().push_back(api::SplitBucketReply::Entry(
- childBuckets[1],
- api::BucketInfo(110 + i, 400, 6000000)));
+ sreply->getSplitInfo().emplace_back(childBuckets[0],
+ api::BucketInfo(100 + i, 600, 5000000));
+ sreply->getSplitInfo().emplace_back(childBuckets[1],
+ api::BucketInfo(110 + i, 400, 6000000));
op.receive(_sender, reply);
}
- CPPUNIT_ASSERT(getBucket(sourceBucket).valid()); // Still alive
+ ASSERT_TRUE(getBucket(sourceBucket).valid()); // Still alive
for (uint32_t i = 0; i < 2; ++i) {
BucketDatabase::Entry entry(getBucket(childBuckets[i]));
- CPPUNIT_ASSERT(entry.valid());
- CPPUNIT_ASSERT_EQUAL(size_t(2), entry->getNodes().size());
+ ASSERT_TRUE(entry.valid());
+ ASSERT_EQ(2, entry->getNodes().size());
for (uint16_t j = 0; j < 2; ++j) {
- CPPUNIT_ASSERT(!entry->getNodeRef(i).trusted());
+ EXPECT_FALSE(entry->getNodeRef(i).trusted());
}
}
}
-void
-SplitOperationTest::testOperationBlockedByPendingJoin()
-{
+TEST_F(SplitOperationTest, operation_blocked_by_pending_join) {
StorageComponentRegisterImpl compReg;
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
@@ -321,7 +269,7 @@ SplitOperationTest::testOperationBlockedByPendingJoin()
tracker.insert(joinCmd);
- insertBucketInfo(joinTarget, 0, 0xabc, 1000, 1234, 250);
+ insertBucketInfo(joinTarget, 0, 0xabc, 1000, 1234, true);
SplitOperation op("storage",
BucketAndNodes(makeDocumentBucket(joinTarget), toVector<uint16_t>(0)),
@@ -329,19 +277,18 @@ SplitOperationTest::testOperationBlockedByPendingJoin()
splitCount,
splitByteSize);
- CPPUNIT_ASSERT(op.isBlocked(tracker));
+ EXPECT_TRUE(op.isBlocked(tracker));
// Now, pretend there's a join for another node in the same bucket. This
// will happen when a join is partially completed.
tracker.clearMessagesForNode(0);
- CPPUNIT_ASSERT(!op.isBlocked(tracker));
+ EXPECT_FALSE(op.isBlocked(tracker));
joinCmd->setAddress(
api::StorageMessageAddress("storage", lib::NodeType::STORAGE, 1));
tracker.insert(joinCmd);
- CPPUNIT_ASSERT(op.isBlocked(tracker));
+ EXPECT_TRUE(op.isBlocked(tracker));
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp
index 53de8ded38c..7282d2e7d2a 100644
--- a/storage/src/tests/distributor/statecheckerstest.cpp
+++ b/storage/src/tests/distributor/statecheckerstest.cpp
@@ -11,23 +11,23 @@
#include <vespa/storage/distributor/statecheckers.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/stat.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
-using namespace std::literals::string_literals;
using document::test::makeBucketSpace;
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage::distributor {
-struct StateCheckersTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- StateCheckersTest() {}
+struct StateCheckersTest : Test, DistributorTestUtil {
+ StateCheckersTest() = default;
- void setUp() override {
+ void SetUp() override {
createLinks();
}
- void tearDown() override {
+ void TearDown() override {
close();
}
@@ -44,50 +44,6 @@ struct StateCheckersTest : public CppUnit::TestFixture,
bool shouldCheck() const { return _msgType != UINT32_MAX; }
};
- void testSplit();
- void testInconsistentSplit();
- void splitCanBeScheduledWhenReplicasOnRetiredNodes();
- void testSynchronizeAndMove();
- void testDoNotMergeInconsistentlySplitBuckets();
- void doNotMoveReplicasWithinRetiredNodes();
- void testDeleteExtraCopies();
- void testDoNotDeleteActiveExtraCopies();
- void testConsistentCopiesOnRetiredNodesMayBeDeleted();
- void redundantCopyDeletedEvenWhenAllNodesRetired();
- void testJoin();
- void testDoNotJoinBelowClusterStateBitCount();
- void testAllowInconsistentJoinInDifferingSiblingIdealState();
- void testDoNotAllowInconsistentJoinWhenNotInIdealState();
- void testDoNotAllowInconsistentJoinWhenConfigDisabled();
- void testNoJoinWhenInvalidCopyExists();
- void testNoJoinOnDifferentNodes();
- void testNoJoinWhenCopyCountAboveRedundancyLevelsForLeftSibling();
- void testNoJoinWhenCopyCountAboveRedundancyLevelsForRightSibling();
- void testNoJoinWhenCopyCountAboveRedundancyLevelsForBothSiblings();
- void joinCanBeScheduledWhenReplicasOnRetiredNodes();
- void testBucketState();
- void testDoNotActivateNonReadyCopiesWhenIdealNodeInMaintenance();
- void testDoNotChangeActiveStateForInconsistentlySplitBuckets();
- void testNoActiveChangeForNonIdealCopiesWhenOtherwiseIdentical();
- void testBucketStatePerGroup();
- void allowActivationOfRetiredNodes();
- void inhibitBucketActivationIfDisabledInConfig();
- void inhibitBucketDeactivationIfDisabledInConfig();
- void retiredNodesOutOfSyncAreMerged();
- void testGarbageCollection();
- void gc_ops_are_prioritized_with_low_priority_category();
- void gcInhibitedWhenIdealNodeInMaintenance();
- void testNoRemoveWhenIdealNodeInMaintenance();
- void testStepwiseJoinForSmallBucketsWithoutSiblings();
- void testNoStepwiseJoinWhenDisabledThroughConfig();
- void testNoStepwiseJoinWhenSingleSiblingTooLarge();
- void testStepwiseJoinMaySkipMultipleBitsWhenConsistent();
- void testStepwiseJoinDoesNotSkipBeyondLevelWithSibling();
- void contextPopulatesIdealStateContainers();
- void statsUpdatedWhenMergingDueToMove();
- void statsUpdatedWhenMergingDueToMissingCopy();
- void statsUpdatedWhenMergingDueToOutOfSyncCopies();
-
void enableClusterState(const lib::ClusterState& systemState) {
_distributor->enableClusterStateBundle(lib::ClusterStateBundle(systemState));
}
@@ -95,16 +51,16 @@ struct StateCheckersTest : public CppUnit::TestFixture,
void insertJoinableBuckets();
void assertCurrentIdealState(const document::BucketId& bucket,
- const std::vector<uint16_t> expected)
+ const std::vector<uint16_t>& expected)
{
- auto &distributorBucketSpace(getIdealStateManager().getBucketSpaceRepo().get(makeBucketSpace()));
+ auto& distributorBucketSpace(getIdealStateManager().getBucketSpaceRepo().get(makeBucketSpace()));
std::vector<uint16_t> idealNodes(
distributorBucketSpace
.getDistribution().getIdealStorageNodes(
distributorBucketSpace.getClusterState(),
bucket,
"ui"));
- CPPUNIT_ASSERT_EQUAL(expected, idealNodes);
+ ASSERT_EQ(expected, idealNodes);
}
void enableInconsistentJoinInConfig(bool enabled);
@@ -259,7 +215,7 @@ struct StateCheckersTest : public CppUnit::TestFixture,
checker, c, false, *params._blockerMessage,
params._includeMessagePriority,
params._includeSchedulingPriority);
- CPPUNIT_ASSERT_EQUAL(params._expect, result);
+ ASSERT_EQ(params._expect, result);
}
std::string testSynchronizeAndMove(
@@ -281,59 +237,10 @@ struct StateCheckersTest : public CppUnit::TestFixture,
bool includePriority = false);
std::string testBucketStatePerGroup(const std::string& bucketInfo,
bool includePriority = false);
-
- CPPUNIT_TEST_SUITE(StateCheckersTest);
- CPPUNIT_TEST(testSplit);
- CPPUNIT_TEST(testInconsistentSplit);
- CPPUNIT_TEST(splitCanBeScheduledWhenReplicasOnRetiredNodes);
- CPPUNIT_TEST(testSynchronizeAndMove);
- CPPUNIT_TEST(testDoNotMergeInconsistentlySplitBuckets);
- CPPUNIT_TEST(doNotMoveReplicasWithinRetiredNodes);
- CPPUNIT_TEST(retiredNodesOutOfSyncAreMerged);
- CPPUNIT_TEST(testDoNotChangeActiveStateForInconsistentlySplitBuckets);
- CPPUNIT_TEST(testDeleteExtraCopies);
- CPPUNIT_TEST(testDoNotDeleteActiveExtraCopies);
- CPPUNIT_TEST(testConsistentCopiesOnRetiredNodesMayBeDeleted);
- CPPUNIT_TEST(redundantCopyDeletedEvenWhenAllNodesRetired);
- CPPUNIT_TEST(testJoin);
- CPPUNIT_TEST(testDoNotJoinBelowClusterStateBitCount);
- CPPUNIT_TEST(testAllowInconsistentJoinInDifferingSiblingIdealState);
- CPPUNIT_TEST(testDoNotAllowInconsistentJoinWhenNotInIdealState);
- CPPUNIT_TEST(testDoNotAllowInconsistentJoinWhenConfigDisabled);
- CPPUNIT_TEST(testNoJoinWhenInvalidCopyExists);
- CPPUNIT_TEST(testNoJoinOnDifferentNodes);
- CPPUNIT_TEST(testNoJoinWhenCopyCountAboveRedundancyLevelsForLeftSibling);
- CPPUNIT_TEST(testNoJoinWhenCopyCountAboveRedundancyLevelsForRightSibling);
- CPPUNIT_TEST(testNoJoinWhenCopyCountAboveRedundancyLevelsForBothSiblings);
- CPPUNIT_TEST(joinCanBeScheduledWhenReplicasOnRetiredNodes);
- CPPUNIT_TEST(testBucketState);
- CPPUNIT_TEST(testDoNotActivateNonReadyCopiesWhenIdealNodeInMaintenance);
- CPPUNIT_TEST(testNoActiveChangeForNonIdealCopiesWhenOtherwiseIdentical);
- CPPUNIT_TEST(testBucketStatePerGroup);
- CPPUNIT_TEST(allowActivationOfRetiredNodes);
- CPPUNIT_TEST(inhibitBucketActivationIfDisabledInConfig);
- CPPUNIT_TEST(inhibitBucketDeactivationIfDisabledInConfig);
- CPPUNIT_TEST(testGarbageCollection);
- CPPUNIT_TEST(gc_ops_are_prioritized_with_low_priority_category);
- CPPUNIT_TEST(gcInhibitedWhenIdealNodeInMaintenance);
- CPPUNIT_TEST(testNoRemoveWhenIdealNodeInMaintenance);
- CPPUNIT_TEST(testStepwiseJoinForSmallBucketsWithoutSiblings);
- CPPUNIT_TEST(testNoStepwiseJoinWhenDisabledThroughConfig);
- CPPUNIT_TEST(testNoStepwiseJoinWhenSingleSiblingTooLarge);
- CPPUNIT_TEST(testStepwiseJoinMaySkipMultipleBitsWhenConsistent);
- CPPUNIT_TEST(testStepwiseJoinDoesNotSkipBeyondLevelWithSibling);
- CPPUNIT_TEST(contextPopulatesIdealStateContainers);
- CPPUNIT_TEST(statsUpdatedWhenMergingDueToMove);
- CPPUNIT_TEST(statsUpdatedWhenMergingDueToMissingCopy);
- CPPUNIT_TEST(statsUpdatedWhenMergingDueToOutOfSyncCopies);
- CPPUNIT_TEST_SUITE_END();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(StateCheckersTest);
-
-
-StateCheckersTest::CheckerParams::CheckerParams() {}
-StateCheckersTest::CheckerParams::~CheckerParams() {}
+StateCheckersTest::CheckerParams::CheckerParams() = default;
+StateCheckersTest::CheckerParams::~CheckerParams() = default;
const StateCheckersTest::PendingMessage
@@ -359,95 +266,76 @@ std::string StateCheckersTest::testSplit(uint32_t splitCount,
return testStateChecker(checker, c, false, blocker, includePriority);
}
+TEST_F(StateCheckersTest, split) {
+ setupDistributor(3, 10, "distributor:1 storage:2");
+ EXPECT_EQ("[Splitting bucket because its maximum size (2000 b, 10 docs, 10 meta, 2000 b total) "
+ "is higher than the configured limit of (1000, 4294967295)]",
+ testSplit((uint32_t)-1, 1000, 16, "0=100/10/2000"));
-void
-StateCheckersTest::testSplit()
-{
- setupDistributor(3, 10, "distributor:1 storage:2");
+ EXPECT_EQ("[Splitting bucket because its maximum size (1000 b, "
+ "200 docs, 200 meta, 1000 b total) "
+ "is higher than the configured limit of (10000, 100)] "
+ "(pri 175)",
+ testSplit(100, 10000, 16, "0=100/200/1000", PendingMessage(), true));
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testSplit(1000, 1000, 16, "0=100/200/200"));
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testSplit(1000, 1000, 16, "0=100/200/200/2000/2000"));
+
+ EXPECT_EQ("[Splitting bucket because the current system size requires "
+ "a higher minimum split bit]",
+ testSplit((uint32_t)-1, (uint32_t)-1, 24, "0=100/200/1000"));
+
+ EXPECT_EQ("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
+ "is higher than the configured limit of (10000, 100)]",
+ testSplit(100, 10000, 16, "0=100/10/10,1=100/1000/1000"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because its maximum size (2000 b, 10 docs, 10 meta, 2000 b total) "
- "is higher than the configured limit of (1000, 4294967295)]"),
- testSplit((uint32_t)-1, 1000, 16, "0=100/10/2000"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because its maximum size (1000 b, "
- "200 docs, 200 meta, 1000 b total) "
- "is higher than the configured limit of (10000, 100)] "
- "(pri 175)"),
- testSplit(100, 10000, 16, "0=100/200/1000", PendingMessage(), true));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testSplit(1000, 1000, 16, "0=100/200/200"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testSplit(1000, 1000, 16, "0=100/200/200/2000/2000"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because the current system size requires "
- "a higher minimum split bit]"),
- testSplit((uint32_t)-1, (uint32_t)-1, 24, "0=100/200/1000"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
- "is higher than the configured limit of (10000, 100)]"),
- testSplit(100, 10000, 16, "0=100/10/10,1=100/1000/1000"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
- "is higher than the configured limit of (10000, 100)]"),
- testSplit(100, 10000, 16, "0=1/0/0,1=100/1000/1000"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
- "is higher than the configured limit of (10000, 100)]"),
- testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testSplit(1000, 1000, 16, "0=100/1/200000"));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("BLOCKED"),
- testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
- PendingMessage(api::MessageType::SPLITBUCKET_ID, 0)));
-
- // Split on too high meta
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because its maximum size (1000 b, 100 docs, 2100 meta, 15000000 b total) "
- "is higher than the configured limit of (10000000, 1000)]"),
- testSplit(1000, 10000000, 16, "0=14/100/1000/2100/15000000"));
- // Split on too high file size
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because its maximum size (1000 b, 100 docs, 1500 meta, 21000000 b total) "
- "is higher than the configured limit of (10000000, 1000)]"),
- testSplit(1000, 10000000, 16, "0=14/100/1000/1500/21000000"));
+ EXPECT_EQ("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
+ "is higher than the configured limit of (10000, 100)]",
+ testSplit(100, 10000, 16, "0=1/0/0,1=100/1000/1000"));
+
+ EXPECT_EQ("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
+ "is higher than the configured limit of (10000, 100)]",
+ testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000"));
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testSplit(1000, 1000, 16, "0=100/1/200000"));
+
+ EXPECT_EQ("BLOCKED",
+ testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
+ PendingMessage(api::MessageType::SPLITBUCKET_ID, 0)));
+
+ // Split on too high meta
+ EXPECT_EQ("[Splitting bucket because its maximum size (1000 b, 100 docs, 2100 meta, 15000000 b total) "
+ "is higher than the configured limit of (10000000, 1000)]",
+ testSplit(1000, 10000000, 16, "0=14/100/1000/2100/15000000"));
+ // Split on too high file size
+ EXPECT_EQ("[Splitting bucket because its maximum size (1000 b, 100 docs, 1500 meta, 21000000 b total) "
+ "is higher than the configured limit of (10000000, 1000)]",
+ testSplit(1000, 10000000, 16, "0=14/100/1000/1500/21000000"));
// Don't block higher priority splits than what's already pending.
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
- "is higher than the configured limit of (10000, 100)]"),
- testSplit(100, 10000, 16, "0=100/10/10,1=100/1000/1000",
- PendingMessage(api::MessageType::SPLITBUCKET_ID, 255)));
+ EXPECT_EQ("[Splitting bucket because its maximum size (1000 b, 1000 docs, 1000 meta, 1000 b total) "
+ "is higher than the configured limit of (10000, 100)]",
+ testSplit(100, 10000, 16, "0=100/10/10,1=100/1000/1000",
+ PendingMessage(api::MessageType::SPLITBUCKET_ID, 255)));
// But must block equal priority splits that are already pending, or
// we'll end up spamming the nodes with splits!
// NOTE: assuming split priority of 175.
- CPPUNIT_ASSERT_EQUAL(
- std::string("BLOCKED"),
- testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
- PendingMessage(api::MessageType::SPLITBUCKET_ID, 175)));
+ EXPECT_EQ("BLOCKED",
+ testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
+ PendingMessage(api::MessageType::SPLITBUCKET_ID, 175)));
// Don't split if we're already joining, since there's a window of time
// where the bucket will appear to be inconsistently split when the join
// is not finished on all the nodes.
- CPPUNIT_ASSERT_EQUAL(
- std::string("BLOCKED"),
- testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
- PendingMessage(api::MessageType::JOINBUCKETS_ID, 175)));
+ EXPECT_EQ("BLOCKED",
+ testSplit(100, 10000, 16, "0=0/0/1,1=100/1000/1000",
+ PendingMessage(api::MessageType::JOINBUCKETS_ID, 175)));
}
std::string
@@ -461,53 +349,43 @@ StateCheckersTest::testInconsistentSplit(const document::BucketId& bid,
PendingMessage(), includePriority);
}
-void
-StateCheckersTest::testInconsistentSplit()
-{
+TEST_F(StateCheckersTest, inconsistent_split) {
setupDistributor(3, 10, "distributor:1 storage:2");
insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testInconsistentSplit(document::BucketId(16, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testInconsistentSplit(document::BucketId(16, 1)));
insertBucketInfo(document::BucketId(17, 1), 1, 0x1, 1, 1);
insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001): [Bucket is inconsistently "
- "split (list includes 0x4000000000000001, 0x4400000000000001) "
- "Splitting it to improve the problem (max used bits 17)]"),
- testInconsistentSplit(document::BucketId(16, 1)));
+ EXPECT_EQ("BucketId(0x4000000000000001): [Bucket is inconsistently "
+ "split (list includes 0x4000000000000001, 0x4400000000000001) "
+ "Splitting it to improve the problem (max used bits 17)]",
+ testInconsistentSplit(document::BucketId(16, 1)));
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testInconsistentSplit(document::BucketId(17, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testInconsistentSplit(document::BucketId(17, 1)));
insertBucketInfo(document::BucketId(17, 1), 0, 0x0, 0, 0);
insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000001): [Bucket is inconsistently "
- "split (list includes 0x4000000000000001, 0x4400000000000001) "
- "Splitting it to improve the problem (max used bits "
- "17)] (pri 110)"),
- testInconsistentSplit(document::BucketId(16, 1), true));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testInconsistentSplit(document::BucketId(17, 1)));
+ EXPECT_EQ("BucketId(0x4000000000000001): [Bucket is inconsistently "
+ "split (list includes 0x4000000000000001, 0x4400000000000001) "
+ "Splitting it to improve the problem (max used bits "
+ "17)] (pri 110)",
+ testInconsistentSplit(document::BucketId(16, 1), true));
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testInconsistentSplit(document::BucketId(17, 1)));
}
-void
-StateCheckersTest::splitCanBeScheduledWhenReplicasOnRetiredNodes()
-{
+TEST_F(StateCheckersTest, split_can_be_scheduled_when_replicas_on_retired_nodes) {
setupDistributor(Redundancy(2), NodeCount(2),
"distributor:1 storage:2, .0.s:r .1.s:r");
- CPPUNIT_ASSERT_EQUAL(
- "[Splitting bucket because its maximum size (2000 b, 10 docs, "
- "10 meta, 2000 b total) is higher than the configured limit of "
- "(1000, 4294967295)]"s,
- testSplit(UINT32_MAX, 1000, 16, "0=100/10/2000"));
+ EXPECT_EQ("[Splitting bucket because its maximum size (2000 b, 10 docs, "
+ "10 meta, 2000 b total) is higher than the configured limit of "
+ "(1000, 4294967295)]",
+ testSplit(UINT32_MAX, 1000, 16, "0=100/10/2000"));
}
std::string
@@ -535,55 +413,50 @@ StateCheckersTest::insertJoinableBuckets()
insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
}
-void
-StateCheckersTest::testJoin()
-{
+TEST_F(StateCheckersTest, join) {
setupDistributor(3, 10, "distributor:1 storage:2");
insertJoinableBuckets();
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x8000000000000001): "
- "[Joining buckets BucketId(0x8400000000000001) and "
- "BucketId(0x8400000100000001) because their size "
- "(2 bytes, 2 docs) is less than the configured limit "
- "of (100, 10)"),
- testJoin(10, 100, 16, document::BucketId(33, 1)));
+ EXPECT_EQ("BucketId(0x8000000000000001): "
+ "[Joining buckets BucketId(0x8400000000000001) and "
+ "BucketId(0x8400000100000001) because their size "
+ "(2 bytes, 2 docs) is less than the configured limit "
+ "of (100, 10)",
+ testJoin(10, 100, 16, document::BucketId(33, 1)));
insertJoinableBuckets();
// Join size is 0, so only look at document count
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x8000000000000001): "
- "[Joining buckets BucketId(0x8400000000000001) and "
- "BucketId(0x8400000100000001) because their size "
- "(2 bytes, 2 docs) is less than the configured limit "
- "of (0, 3) (pri 155)"),
- testJoin(3, 0, 16, document::BucketId(33, 1), PendingMessage(), true));
+ EXPECT_EQ("BucketId(0x8000000000000001): "
+ "[Joining buckets BucketId(0x8400000000000001) and "
+ "BucketId(0x8400000100000001) because their size "
+ "(2 bytes, 2 docs) is less than the configured limit "
+ "of (0, 3) (pri 155)",
+ testJoin(3, 0, 16, document::BucketId(33, 1), PendingMessage(), true));
insertJoinableBuckets();
// Should not generate joins for both pairs, just the primary
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, document::BucketId(33, 0x100000001)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, document::BucketId(33, 0x100000001)));
insertJoinableBuckets();
// Should not generate join if min split bits is higher
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 33, document::BucketId(33, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 33, document::BucketId(33, 1)));
insertJoinableBuckets();
// Meta data too big, no join
insertBucketInfo(document::BucketId(33, 1), 1,
api::BucketInfo(0x1, 1, 1, 1000, 1000));
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, document::BucketId(33, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, document::BucketId(33, 1)));
insertJoinableBuckets();
// Bucket recently created
insertBucketInfo(document::BucketId(33, 1), 1,
api::BucketInfo(0x1, 0, 0, 0, 0));
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, document::BucketId(33, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, document::BucketId(33, 1)));
}
@@ -594,19 +467,16 @@ StateCheckersTest::testJoin()
* the safest is to never violate this and to effectively make distribution
* bit increases a one-way street.
*/
-void
-StateCheckersTest::testDoNotJoinBelowClusterStateBitCount()
-{
+TEST_F(StateCheckersTest, do_not_join_below_cluster_state_bit_count) {
setupDistributor(2, 2, "bits:16 distributor:1 storage:2");
// Insert sibling buckets at 16 bits that are small enough to be joined
// unless there is special logic for dealing with distribution bits.
insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
insertBucketInfo(document::BucketId(16, (1 << 15) | 1), 1, 0x1, 1, 1);
using ConfiguredMinSplitBits = uint32_t;
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testJoin(100, 100, ConfiguredMinSplitBits(8),
- document::BucketId(16, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(100, 100, ConfiguredMinSplitBits(8),
+ document::BucketId(16, 1)));
}
void
@@ -617,9 +487,7 @@ StateCheckersTest::enableInconsistentJoinInConfig(bool enabled)
getConfig().configure(config);
}
-void
-StateCheckersTest::testAllowInconsistentJoinInDifferingSiblingIdealState()
-{
+TEST_F(StateCheckersTest, allow_inconsistent_join_in_differing_sibling_ideal_state) {
// Normally, bucket siblings have an ideal state on the same node in order
// to enable joining these back together. However, the ideal disks assigned
// may differ and it's sufficient for a sibling bucket's ideal disk to be
@@ -645,18 +513,15 @@ StateCheckersTest::testAllowInconsistentJoinInDifferingSiblingIdealState()
enableInconsistentJoinInConfig(true);
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x8000000000000001): "
- "[Joining buckets BucketId(0x8400000000000001) and "
- "BucketId(0x8400000100000001) because their size "
- "(6 bytes, 4 docs) is less than the configured limit "
- "of (100, 10)"),
- testJoin(10, 100, 16, sibling1));
+ EXPECT_EQ("BucketId(0x8000000000000001): "
+ "[Joining buckets BucketId(0x8400000000000001) and "
+ "BucketId(0x8400000100000001) because their size "
+ "(6 bytes, 4 docs) is less than the configured limit "
+ "of (100, 10)",
+ testJoin(10, 100, 16, sibling1));
}
-void
-StateCheckersTest::testDoNotAllowInconsistentJoinWhenNotInIdealState()
-{
+TEST_F(StateCheckersTest, do_not_allow_inconsistent_join_when_not_in_ideal_state) {
setupDistributor(2, 4, "distributor:1 storage:4 .0.d:20 .0.d.14.s:d .2.d:20 .3.d:20");
document::BucketId sibling1(33, 0x000000001);
document::BucketId sibling2(33, 0x100000001);
@@ -671,13 +536,11 @@ StateCheckersTest::testDoNotAllowInconsistentJoinWhenNotInIdealState()
enableInconsistentJoinInConfig(true);
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, sibling1));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, sibling1));
}
-void
-StateCheckersTest::testDoNotAllowInconsistentJoinWhenConfigDisabled()
-{
+TEST_F(StateCheckersTest, do_not_allow_inconsistent_join_when_config_disabled) {
setupDistributor(2, 3, "distributor:1 storage:3 .0.d:20 .0.d.14.s:d .2.d:20");
document::BucketId sibling1(33, 0x000000001); // ideal disk 14 on node 0
document::BucketId sibling2(33, 0x100000001); // ideal disk 1 on node 0
@@ -694,74 +557,60 @@ StateCheckersTest::testDoNotAllowInconsistentJoinWhenConfigDisabled()
enableInconsistentJoinInConfig(false);
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, sibling1));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, sibling1));
}
-void
-StateCheckersTest::testNoJoinWhenInvalidCopyExists()
-{
+TEST_F(StateCheckersTest, no_join_when_invalid_copy_exists) {
setupDistributor(3, 10, "distributor:1 storage:3");
insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
// No join when there exists an invalid copy
insertBucketInfo(document::BucketId(33, 1), 1, api::BucketInfo());
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, document::BucketId(33, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, document::BucketId(33, 1)));
}
-void
-StateCheckersTest::testNoJoinOnDifferentNodes()
-{
+TEST_F(StateCheckersTest, no_join_on_different_nodes) {
setupDistributor(3, 10, "distributor:1 storage:2");
insertBucketInfo(document::BucketId(33, 0x000000001), 0, 0x1, 1, 1);
insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, document::BucketId(33, 0x1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, document::BucketId(33, 0x1)));
}
-void
-StateCheckersTest::testNoJoinWhenCopyCountAboveRedundancyLevelsForLeftSibling()
-{
+TEST_F(StateCheckersTest, no_join_when_copy_count_above_redundancy_levels_for_left_sibling) {
setupDistributor(3, 10, "distributor:1 storage:2");
setRedundancy(1);
insertBucketInfo(document::BucketId(33, 0x000000001), 0, 0x1, 1, 1);
insertBucketInfo(document::BucketId(33, 0x000000001), 1, 0x1, 1, 1);
insertBucketInfo(document::BucketId(33, 0x100000001), 0, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, document::BucketId(33, 0x1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, document::BucketId(33, 0x1)));
}
-void
-StateCheckersTest::testNoJoinWhenCopyCountAboveRedundancyLevelsForRightSibling()
-{
+TEST_F(StateCheckersTest, no_join_when_copy_count_above_redundancy_levels_for_right_sibling) {
setupDistributor(3, 10, "distributor:1 storage:2");
setRedundancy(1);
insertBucketInfo(document::BucketId(33, 0x000000001), 1, 0x1, 1, 1);
insertBucketInfo(document::BucketId(33, 0x100000001), 0, 0x1, 1, 1);
insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, document::BucketId(33, 0x1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, document::BucketId(33, 0x1)));
}
-void
-StateCheckersTest::testNoJoinWhenCopyCountAboveRedundancyLevelsForBothSiblings()
-{
+TEST_F(StateCheckersTest, no_join_when_copy_count_above_redundancy_levels_for_both_siblings) {
setupDistributor(3, 10, "distributor:1 storage:2");
setRedundancy(1);
insertBucketInfo(document::BucketId(33, 0x000000001), 0, 0x1, 1, 1);
insertBucketInfo(document::BucketId(33, 0x000000001), 1, 0x1, 1, 1);
insertBucketInfo(document::BucketId(33, 0x100000001), 0, 0x1, 1, 1);
insertBucketInfo(document::BucketId(33, 0x100000001), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 16, document::BucketId(33, 0x1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 16, document::BucketId(33, 0x1)));
}
std::string
@@ -784,9 +633,7 @@ StateCheckersTest::testSynchronizeAndMove(const std::string& bucketInfo,
return testStateChecker(checker, c, false, blocker, includePriority);
}
-void
-StateCheckersTest::testSynchronizeAndMove()
-{
+TEST_F(StateCheckersTest, synchronize_and_move) {
// Plus if it was more obvious which nodes were in ideal state for various
// cluster states. (One possibility to override ideal state function for
// test)
@@ -903,30 +750,24 @@ StateCheckersTest::testSynchronizeAndMove()
.clusterState("distributor:1 storage:4"));
}
-void
-StateCheckersTest::testDoNotMergeInconsistentlySplitBuckets()
-{
+TEST_F(StateCheckersTest, do_not_merge_inconsistently_split_buckets) {
// No merge generated if buckets are inconsistently split.
// This matches the case where a bucket has been split into 2 on one
// node and is not yet split on another; we should never try to merge
// either two of the split leaf buckets back onto the first node!
// Running state checker on a leaf:
addNodesToBucketDB(document::BucketId(16, 0), "0=2");
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testSynchronizeAndMove("1=1", // 17 bits
- "distributor:1 storage:4"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testSynchronizeAndMove("1=1", // 17 bits
+ "distributor:1 storage:4"));
// Running state checker on an inner node bucket:
addNodesToBucketDB(document::BucketId(18, 0), "0=2");
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testSynchronizeAndMove("0=1", // 17 bits
- "distributor:1 storage:4"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testSynchronizeAndMove("0=1", // 17 bits
+ "distributor:1 storage:4"));
}
-void
-StateCheckersTest::doNotMoveReplicasWithinRetiredNodes()
-{
+TEST_F(StateCheckersTest, do_not_move_replicas_within_retired_nodes) {
// Nodes 1 and 3 would be in ideal state if the nodes were not retired.
// Here, all nodes are retired and we should thus not do any sort of
// moving.
@@ -938,9 +779,7 @@ StateCheckersTest::doNotMoveReplicasWithinRetiredNodes()
".0.s:r .1.s:r .2.s:r .3.s:r"));
}
-void
-StateCheckersTest::retiredNodesOutOfSyncAreMerged()
-{
+TEST_F(StateCheckersTest, retired_nodes_out_of_sync_are_merged) {
// Normally, we'd do a merge that'd move the bucket to new nodes, leaving
// the out of sync retired nodes as source-only replicas. But here we
// don't have that choice and thus try to do the most useful thing we can
@@ -980,139 +819,106 @@ StateCheckersTest::testDeleteExtraCopies(
}
-void
-StateCheckersTest::testDeleteExtraCopies()
-{
+TEST_F(StateCheckersTest, delete_extra_copies) {
setupDistributor(2, 100, "distributor:1 storage:4");
{
- auto &distributorBucketSpace(getIdealStateManager().getBucketSpaceRepo().get(makeBucketSpace()));
+ auto& distributorBucketSpace(getIdealStateManager().getBucketSpaceRepo().get(makeBucketSpace()));
std::vector<uint16_t> idealNodes(
distributorBucketSpace
.getDistribution().getIdealStorageNodes(
distributorBucketSpace.getClusterState(),
document::BucketId(17, 0),
"ui"));
- std::vector<uint16_t> wanted;
- wanted.push_back(1);
- wanted.push_back(3);
- CPPUNIT_ASSERT_EQUAL(wanted, idealNodes);
+ std::vector<uint16_t> wanted = {1, 3};
+ ASSERT_EQ(wanted, idealNodes);
}
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Remove empty buckets",
- std::string("[Removing all copies since bucket is empty:node(idx=0,crc=0x0,"
- "docs=0/0,bytes=0/0,trusted=false,active=false,ready=false)]"
- " (pri 100)"),
- testDeleteExtraCopies("0=0", 2, PendingMessage(), "", true));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Remove extra trusted copy",
- std::string("[Removing redundant in-sync copy from node 2]"),
- testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Redundant copies in sync can be removed without trusted being a "
- "factor of consideration. Ideal state copy not removed.",
- std::string("[Removing redundant in-sync copy from node 2]"),
- testDeleteExtraCopies("3=3/3/3,1=3/3/3/t,2=3/3/3/t"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Need redundancy number of copies",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("0=3,1=3"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Do not remove extra copies without enough trusted copies",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("0=0/0/1,1=3,2=3"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Do not remove buckets that have meta entries",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("0=0/0/1,1=0/0/1"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Do not remove any recently created copies",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("0=1/0/0/t,1=1/0/0/t,2=1/0/0/t"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Do not remove untrusted copy that is out of sync",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("0=2/3/4,1=1/2/3/t,2=1/2/3/t"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Do not remove out of sync copies, even if we have more than #"
- "redundancy trusted copies",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("0=2/3/4,1=1/2/3/t,2=1/2/3/t,3=1/2/3/t"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Don't remove unless we have enough trusted "
- "copies to satisfy redundancy",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("0=2/3/4,1=1/2/3,2=2/3/4,3=1/2/3"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Only remove empty copies unless all other copies are in sync",
- std::string("[Removing empty copy from node 4]"),
- testDeleteExtraCopies("0=2/3/4,1=1/2/3,2=2/3/4,3=1/2/3,4=0/0/0"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Remove redundant empty copy",
- std::string("[Removing empty copy from node 0]"),
- testDeleteExtraCopies("1=2/3,3=1/2/3,0=0/0/0"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Remove empty bucket with multiple copies",
- std::string(
- "[Removing all copies since bucket is empty:"
- "node(idx=0,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false,ready=false), "
- "node(idx=1,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false,ready=false), "
- "node(idx=2,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false,ready=false)]"),
- testDeleteExtraCopies("0=0/0/0,1=0/0/0,2=0/0/0"));
-
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Pending persistence operation blocks delete",
- std::string("BLOCKED"),
- testDeleteExtraCopies("0=0/0/0,1=1/2/3/t,2=1/2/3/t",
- 2,
- PendingMessage(api::MessageType::PUT_ID, 255)));
+ EXPECT_EQ("[Removing all copies since bucket is empty:node(idx=0,crc=0x0,"
+ "docs=0/0,bytes=0/0,trusted=false,active=false,ready=false)]"
+ " (pri 100)",
+ testDeleteExtraCopies("0=0", 2, PendingMessage(), "", true)) << "Remove empty buckets";
+
+ EXPECT_EQ("[Removing redundant in-sync copy from node 2]",
+ testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t")) << "Remove extra trusted copy";
+
+ EXPECT_EQ("[Removing redundant in-sync copy from node 2]",
+ testDeleteExtraCopies("3=3/3/3,1=3/3/3/t,2=3/3/3/t"))
+ << "Redundant copies in sync can be removed without trusted being a "
+ "factor of consideration. Ideal state copy not removed.";
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("0=3,1=3")) << "Need redundancy number of copies";
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("0=0/0/1,1=3,2=3"))
+ << "Do not remove extra copies without enough trusted copies";
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("0=0/0/1,1=0/0/1"))
+ << "Do not remove buckets that have meta entries";
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("0=1/0/0/t,1=1/0/0/t,2=1/0/0/t"))
+ << "Do not remove any recently created copies";
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("0=2/3/4,1=1/2/3/t,2=1/2/3/t"))
+ << "Do not remove untrusted copy that is out of sync";
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("0=2/3/4,1=1/2/3/t,2=1/2/3/t,3=1/2/3/t"))
+ << "Do not remove out of sync copies, even if we have more than #"
+ "redundancy trusted copies";
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("0=2/3/4,1=1/2/3,2=2/3/4,3=1/2/3"))
+ << "Don't remove unless we have enough trusted "
+ "copies to satisfy redundancy";
+
+ EXPECT_EQ("[Removing empty copy from node 4]",
+ testDeleteExtraCopies("0=2/3/4,1=1/2/3,2=2/3/4,3=1/2/3,4=0/0/0"))
+ << "Only remove empty copies unless all other copies are in sync";
+
+ EXPECT_EQ("[Removing empty copy from node 0]",
+ testDeleteExtraCopies("1=2/3,3=1/2/3,0=0/0/0")) << "Remove redundant empty copy";
+
+ EXPECT_EQ("[Removing all copies since bucket is empty:"
+ "node(idx=0,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false,ready=false), "
+ "node(idx=1,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false,ready=false), "
+ "node(idx=2,crc=0x0,docs=0/0,bytes=0/0,trusted=false,active=false,ready=false)]",
+ testDeleteExtraCopies("0=0/0/0,1=0/0/0,2=0/0/0")) << "Remove empty bucket with multiple copies";
+
+ EXPECT_EQ("BLOCKED",
+ testDeleteExtraCopies("0=0/0/0,1=1/2/3/t,2=1/2/3/t",
+ 2,
+ PendingMessage(api::MessageType::PUT_ID, 255)))
+ << "Pending persistence operation blocks delete";
}
-void
-StateCheckersTest::testDoNotDeleteActiveExtraCopies()
-{
+TEST_F(StateCheckersTest, do_not_delete_active_extra_copies) {
setupDistributor(2, 100, "distributor:1 storage:4");
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Do not delete redundant copy if it is marked active",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t/a"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t/a"))
+ << "Do not delete redundant copy if it is marked active";
}
-void
-StateCheckersTest::testConsistentCopiesOnRetiredNodesMayBeDeleted()
-{
+TEST_F(StateCheckersTest, consistent_copies_on_retired_nodes_may_be_deleted) {
setupDistributor(2, 100, "distributor:1 storage:4 .1.s:r");
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Remove in-sync copy on node that is retired",
- std::string("[Removing redundant in-sync copy from node 1]"),
- testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t"));
+ EXPECT_EQ("[Removing redundant in-sync copy from node 1]",
+ testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t"))
+ << "Remove in-sync copy on node that is retired";
}
-void
-StateCheckersTest::redundantCopyDeletedEvenWhenAllNodesRetired()
-{
+TEST_F(StateCheckersTest, redundant_copy_deleted_even_when_all_nodes_retired) {
setupDistributor(2, 100, "distributor:1 storage:4 "
".0.s:r .1.s:r .2.s:r .3.s:r");
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Remove in-sync copy on node that is retired",
- "[Removing redundant in-sync copy from node 2]"s,
- testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t"));
+ EXPECT_EQ("[Removing redundant in-sync copy from node 2]",
+ testDeleteExtraCopies("3=3/3/3/t,1=3/3/3/t,2=3/3/3/t"))
+ << "Remove in-sync copy on node that is retired";
}
std::string StateCheckersTest::testBucketState(
@@ -1130,9 +936,7 @@ std::string StateCheckersTest::testBucketState(
includePriority);
}
-void
-StateCheckersTest::testBucketState()
-{
+TEST_F(StateCheckersTest, bucket_state) {
setupDistributor(2, 100, "distributor:1 storage:4");
{
@@ -1144,105 +948,87 @@ StateCheckersTest::testBucketState()
getConfig().setMaintenancePriorities(mp);
}
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState(""));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState(""));
// Node 1 is in ideal state
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 1 as active:"
- " copy is ideal state priority 0] (pri 90)"),
- testBucketState("1=2/3/4", 2, true));
+ EXPECT_EQ("[Setting node 1 as active:"
+ " copy is ideal state priority 0] (pri 90)",
+ testBucketState("1=2/3/4", 2, true));
// Node 3 is in ideal state
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 3 as active:"
- " copy is ideal state priority 1]"),
- testBucketState("3=2/3/4"));
+ EXPECT_EQ("[Setting node 3 as active:"
+ " copy is ideal state priority 1]",
+ testBucketState("3=2/3/4"));
// No trusted nodes, but node 1 is first in ideal state.
// Also check bad case where more than 1 node is set as active just
// to ensure we can get out of that situation if it should ever happen.
// Nothing done with node 3 since is't not active and shouldn't be.
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 1 as active:"
- " copy is ideal state priority 0]"
- "[Setting node 0 as inactive]"
- "[Setting node 2 as inactive] (pri 120)"),
- testBucketState("0=3/4/5/u/a,1=3,2=4/5/6/u/a,3=3", 2, true));
+ EXPECT_EQ("[Setting node 1 as active:"
+ " copy is ideal state priority 0]"
+ "[Setting node 0 as inactive]"
+ "[Setting node 2 as inactive] (pri 120)",
+ testBucketState("0=3/4/5/u/a,1=3,2=4/5/6/u/a,3=3", 2, true));
// Test setting active when only node available is not contained
// within the resolved ideal state.
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 0 as active: first available copy]"),
- testBucketState("0=2/3/4"));
+ EXPECT_EQ("[Setting node 0 as active: first available copy]",
+ testBucketState("0=2/3/4"));
// A trusted ideal state copy should be set active rather than a non-trusted
// ideal state copy
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 3 as active:"
- " copy is trusted and ideal state priority 1]"
- "[Setting node 1 as inactive]"),
- testBucketState("1=2/3/4/u/a,3=5/6/7/t"));
+ EXPECT_EQ("[Setting node 3 as active:"
+ " copy is trusted and ideal state priority 1]"
+ "[Setting node 1 as inactive]",
+ testBucketState("1=2/3/4/u/a,3=5/6/7/t"));
// None of the ideal state copies are trusted but a non-ideal copy is.
// The trusted copy should be active.
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 2 as active: copy is trusted]"),
- testBucketState("1=2/3/4,3=5/6/7/,2=8/9/10/t"));
+ EXPECT_EQ("[Setting node 2 as active: copy is trusted]",
+ testBucketState("1=2/3/4,3=5/6/7/,2=8/9/10/t"));
// Make sure bucket db ordering does not matter
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 2 as active: copy is trusted]"),
- testBucketState("2=8/9/10/t,1=2/3/4,3=5/6/7"));
+ EXPECT_EQ("[Setting node 2 as active: copy is trusted]",
+ testBucketState("2=8/9/10/t,1=2/3/4,3=5/6/7"));
// If copy is already active, we shouldn't generate operations
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("1=2/3/4/t/a"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("1=2/3/4,3=5/6/7/t/a"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("2=8/9/10/t/a,1=2/3/4,3=5/6/7"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("1=2/3/4/t/a"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("1=2/3/4,3=5/6/7/t/a"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("2=8/9/10/t/a,1=2/3/4,3=5/6/7"));
// If multiple buckets are active, deactive all but one
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 2 as inactive]"
- "[Setting node 3 as inactive]"),
- testBucketState("1=1/2/3/t/a,2=1/2/3/t/a,3=1/2/3/t/a"));
+ EXPECT_EQ("[Setting node 2 as inactive]"
+ "[Setting node 3 as inactive]",
+ testBucketState("1=1/2/3/t/a,2=1/2/3/t/a,3=1/2/3/t/a"));
// Invalid buckets should not be included
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("1=0/0/1,3=0/0/1"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("1=0/0/1,3=0/0/1"));
// Ready preferred over trusted & ideal state
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("2=8/9/10/t/i/u,1=2/3/4/u/a/r,3=5/6/7"));
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 2 as active: copy is ready]"
- "[Setting node 1 as inactive]"),
- testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/a/u,3=5/6/7/u/i/u"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("2=8/9/10/t/i/u,1=2/3/4/u/a/r,3=5/6/7"));
+ EXPECT_EQ("[Setting node 2 as active: copy is ready]"
+ "[Setting node 1 as inactive]",
+ testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/a/u,3=5/6/7/u/i/u"));
// Prefer in ideal state if multiple copies ready
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 3 as active: copy is ready]"
- "[Setting node 1 as inactive]"),
- testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/a/u,3=5/6/7/u/i/r"));
+ EXPECT_EQ("[Setting node 3 as active: copy is ready]"
+ "[Setting node 1 as inactive]",
+ testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/a/u,3=5/6/7/u/i/r"));
// Prefer ideal state if all ready but no trusted
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 1 as active: copy is ready]"),
- testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/i/r,3=5/6/7/u/i/r"));
+ EXPECT_EQ("[Setting node 1 as active: copy is ready]",
+ testBucketState("2=8/9/10/u/i/r,1=2/3/4/u/i/r,3=5/6/7/u/i/r"));
// Prefer trusted over ideal state
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 2 as active: copy is ready and trusted]"
- "[Setting node 1 as inactive]"),
- testBucketState("2=8/9/10/t/i/r,1=2/3/4/u/a/r,3=5/6/7"));
+ EXPECT_EQ("[Setting node 2 as active: copy is ready and trusted]"
+ "[Setting node 1 as inactive]",
+ testBucketState("2=8/9/10/t/i/r,1=2/3/4/u/a/r,3=5/6/7"));
}
/**
@@ -1251,38 +1037,30 @@ StateCheckersTest::testBucketState()
* into maintenance violates that assumption. See bug 6833209 for context and
* details.
*/
-void
-StateCheckersTest::testDoNotActivateNonReadyCopiesWhenIdealNodeInMaintenance()
-{
+TEST_F(StateCheckersTest, do_not_activate_non_ready_copies_when_ideal_node_in_maintenance) {
setupDistributor(2, 100, "distributor:1 storage:4 .1.s:m");
// Ideal node 1 is in maintenance and no ready copy available.
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("2=8/9/10/t/i/u,3=5/6/7"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("2=8/9/10/t/i/u,3=5/6/7"));
// But we should activate another copy iff there's another ready copy.
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 2 as active: copy is ready]"),
- testBucketState("2=8/9/10/u/i/r,3=5/6/7/u/i/u"));
+ EXPECT_EQ("[Setting node 2 as active: copy is ready]",
+ testBucketState("2=8/9/10/u/i/r,3=5/6/7/u/i/u"));
}
/**
* We really do not want to activate buckets when they are inconsistent.
* See bug 6395693 for a set of reasons why.
*/
-void
-StateCheckersTest::testDoNotChangeActiveStateForInconsistentlySplitBuckets()
-{
+TEST_F(StateCheckersTest, do_not_change_active_state_for_inconsistently_split_buckets) {
setupDistributor(2, 100, "distributor:1 storage:4");
// Running state checker on a leaf:
addNodesToBucketDB(document::BucketId(16, 0), "0=2");
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("1=1")); // 17 bits
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("1=1")); // 17 bits
// Running state checker on an inner node bucket:
addNodesToBucketDB(document::BucketId(18, 0), "0=2");
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testSynchronizeAndMove("0=1")); // 17 bits
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testSynchronizeAndMove("0=1")); // 17 bits
}
/**
@@ -1299,21 +1077,17 @@ StateCheckersTest::testDoNotChangeActiveStateForInconsistentlySplitBuckets()
*
* See bug 7278932.
*/
-void
-StateCheckersTest::testNoActiveChangeForNonIdealCopiesWhenOtherwiseIdentical()
-{
+TEST_F(StateCheckersTest, no_active_change_for_non_ideal_copies_when_otherwise_identical) {
setupDistributor(2, 100, "distributor:1 storage:50");
// 1 is more ideal than 3 in this state, but since they're both not part
// of the #redundancy ideal set, activation should not change hands.
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("1=2/3/4/t/i/r,3=2/3/4/t/a/r"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("1=2/3/4/t/i/r,3=2/3/4/t/a/r"));
// Same applies if the copies aren't ready, since if a copy has been marked
// as active it will already have started background indexing. No need in
// undoing that if we don't have any better candidates going anyway.
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("1=2/3/4/t,3=2/3/4/t/a"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("1=2/3/4/t,3=2/3/4/t/a"));
}
std::string StateCheckersTest::testBucketStatePerGroup(
@@ -1329,9 +1103,7 @@ std::string StateCheckersTest::testBucketStatePerGroup(
includePriority);
}
-void
-StateCheckersTest::testBucketStatePerGroup()
-{
+TEST_F(StateCheckersTest, bucket_state_per_group) {
setupDistributor(6, 20, "distributor:1 storage:12 .2.s:d .4.s:d .7.s:d");
vespa::config::content::StorDistributionConfigBuilder config;
config.activePerLeafGroup = true;
@@ -1358,7 +1130,7 @@ StateCheckersTest::testBucketStatePerGroup()
config.group[3].nodes[0].index = 9;
config.group[3].nodes[1].index = 10;
config.group[3].nodes[2].index = 11;
- lib::Distribution::SP distr(new lib::Distribution(config));
+ auto distr = std::make_shared<lib::Distribution>(config);
triggerDistributionChange(std::move(distr));
{
@@ -1369,85 +1141,72 @@ StateCheckersTest::testBucketStatePerGroup()
}
// Node 1 and 8 is is ideal state
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 1 as active: "
- "copy is trusted and ideal state priority 4]"
- "[Setting node 6 as active: "
- "copy is trusted and ideal state priority 0] (pri 90)"),
- testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
- "5=2/3/4/t, 6=2/3/4/t, 8=2/3/4/t", true));
+ EXPECT_EQ("[Setting node 1 as active: "
+ "copy is trusted and ideal state priority 4]"
+ "[Setting node 6 as active: "
+ "copy is trusted and ideal state priority 0] (pri 90)",
+ testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
+ "5=2/3/4/t, 6=2/3/4/t, 8=2/3/4/t", true));
// Data differ between groups
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 1 as active: "
- "copy is trusted and ideal state priority 4]"
- "[Setting node 6 as active: "
- "copy is ideal state priority 0] (pri 90)"),
- testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
- "5=5/6/7, 6=5/6/7, 8=5/6/7", true));
+ EXPECT_EQ("[Setting node 1 as active: "
+ "copy is trusted and ideal state priority 4]"
+ "[Setting node 6 as active: "
+ "copy is ideal state priority 0] (pri 90)",
+ testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
+ "5=5/6/7, 6=5/6/7, 8=5/6/7", true));
// Disable too
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 0 as inactive]"
- "[Setting node 3 as inactive]"
- "[Setting node 5 as inactive]"
- "[Setting node 8 as inactive] (pri 90)"),
- testBucketStatePerGroup("0=2/3/4/t/a, 1=2/3/4/t/a, 3=2/3/4/t/a, "
- "5=2/3/4/t/a, 6=2/3/4/t/a, 8=2/3/4/t/a",
- true));
+ EXPECT_EQ("[Setting node 0 as inactive]"
+ "[Setting node 3 as inactive]"
+ "[Setting node 5 as inactive]"
+ "[Setting node 8 as inactive] (pri 90)",
+ testBucketStatePerGroup("0=2/3/4/t/a, 1=2/3/4/t/a, 3=2/3/4/t/a, "
+ "5=2/3/4/t/a, 6=2/3/4/t/a, 8=2/3/4/t/a",
+ true));
// Node 1 and 8 is is ideal state
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Setting node 1 as active: "
- "copy is trusted and ideal state priority 4]"
- "[Setting node 6 as active: "
- "copy is trusted and ideal state priority 0]"
- "[Setting node 9 as active: "
- "copy is trusted and ideal state priority 2] (pri 90)"),
- testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
- "5=2/3/4/t, 6=2/3/4/t, 8=2/3/4/t, "
- "9=2/3/4/t, 10=2/3/4/t, 11=2/3/4/t",
- true));
+ EXPECT_EQ("[Setting node 1 as active: "
+ "copy is trusted and ideal state priority 4]"
+ "[Setting node 6 as active: "
+ "copy is trusted and ideal state priority 0]"
+ "[Setting node 9 as active: "
+ "copy is trusted and ideal state priority 2] (pri 90)",
+ testBucketStatePerGroup("0=2/3/4/t, 1=2/3/4/t, 3=2/3/4/t, "
+ "5=2/3/4/t, 6=2/3/4/t, 8=2/3/4/t, "
+ "9=2/3/4/t, 10=2/3/4/t, 11=2/3/4/t",
+ true));
}
-void
-StateCheckersTest::allowActivationOfRetiredNodes()
-{
+TEST_F(StateCheckersTest, allow_activation_of_retired_nodes) {
// All nodes in retired state implies that the ideal state is empty. But
// we still want to be able to shuffle bucket activations around in order
// to preserve coverage.
setupDistributor(2, 2, "distributor:1 storage:2 .0.s:r .1.s:r");
- CPPUNIT_ASSERT_EQUAL(
- "[Setting node 1 as active: copy is trusted]"
- "[Setting node 0 as inactive]"s,
- testBucketState("0=2/3/4/u/a,1=5/6/7/t"));
+ EXPECT_EQ("[Setting node 1 as active: copy is trusted]"
+ "[Setting node 0 as inactive]",
+ testBucketState("0=2/3/4/u/a,1=5/6/7/t"));
}
-void
-StateCheckersTest::inhibitBucketActivationIfDisabledInConfig()
-{
+TEST_F(StateCheckersTest, inhibit_bucket_activation_if_disabled_in_config) {
setupDistributor(2, 4, "distributor:1 storage:4");
disableBucketActivationInConfig(true);
// Node 1 is in ideal state and only replica and should be activated in
// an indexed cluster context (but not here).
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("1=2/3/4", 2, true));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("1=2/3/4", 2, true));
}
-void
-StateCheckersTest::inhibitBucketDeactivationIfDisabledInConfig()
-{
+TEST_F(StateCheckersTest, inhibit_bucket_deactivation_if_disabled_in_config) {
setupDistributor(2, 4, "distributor:1 storage:4");
disableBucketActivationInConfig(true);
// Multiple replicas which would have been deactivated. This test is mostly
// for the sake of completion; a scenario where buckets are active while
// having no indexed documents configured should not happen.
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testBucketState("1=1/2/3/t/a,2=1/2/3/t/a,3=1/2/3/t/a"));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testBucketState("1=1/2/3/t/a,2=1/2/3/t/a,3=1/2/3/t/a"));
}
std::string StateCheckersTest::testGarbageCollection(
@@ -1473,60 +1232,48 @@ std::string StateCheckersTest::testGarbageCollection(
includePriority, includeSchedulingPri);
}
-void
-StateCheckersTest::testGarbageCollection()
-{
+TEST_F(StateCheckersTest, garbage_collection) {
// BucketId(17, 0) has id (and thus 'hash') 0x4400000000000000. With a
// check interval modulo of 3600, this implies a start point of 848.
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testGarbageCollection(900, 3600 + 847, 3600));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testGarbageCollection(900, 3600 + 847, 3600));
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Needs garbage collection: Last check at 900, current time 4448, "
- "configured interval 3600]"),
- testGarbageCollection(900, 3600 + 848, 3600));
+ EXPECT_EQ("[Needs garbage collection: Last check at 900, current time 4448, "
+ "configured interval 3600]",
+ testGarbageCollection(900, 3600 + 848, 3600));
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 3600]"),
- testGarbageCollection(3, 4000, 3600));
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
+ "configured interval 3600]",
+ testGarbageCollection(3, 4000, 3600));
// GC start point 3648.
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testGarbageCollection(3, 3647, 8000));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testGarbageCollection(3, 3647, 8000));
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 3600]"),
- testGarbageCollection(3, 4000, 3600));
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
+ "configured interval 3600]",
+ testGarbageCollection(3, 4000, 3600));
// GC explicitly disabled.
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testGarbageCollection(3, 4000, 0));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testGarbageCollection(3, 3, 1));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 300] (pri 200)"),
- testGarbageCollection(3, 4000, 300, 1, true));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("NO OPERATIONS GENERATED"),
- testGarbageCollection(3850, 4000, 300, 1));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testGarbageCollection(3, 4000, 0));
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testGarbageCollection(3, 3, 1));
+
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
+ "configured interval 300] (pri 200)",
+ testGarbageCollection(3, 4000, 300, 1, true));
+
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testGarbageCollection(3850, 4000, 300, 1));
}
-void StateCheckersTest::gc_ops_are_prioritized_with_low_priority_category() {
- CPPUNIT_ASSERT_EQUAL(
- std::string("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 300] (scheduling pri VERY_LOW)"),
- testGarbageCollection(3, 4000, 300, 1, false, true));
+TEST_F(StateCheckersTest, gc_ops_are_prioritized_with_low_priority_category) {
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
+ "configured interval 300] (scheduling pri VERY_LOW)",
+ testGarbageCollection(3, 4000, 300, 1, false, true));
}
/**
@@ -1535,9 +1282,7 @@ void StateCheckersTest::gc_ops_are_prioritized_with_low_priority_category() {
* the replicas when the node out of maintenance. Consequently we should not
* trigger GC for buckets when this is the case.
*/
-void
-StateCheckersTest::gcInhibitedWhenIdealNodeInMaintenance()
-{
+TEST_F(StateCheckersTest, gc_inhibited_when_ideal_node_in_maintenance) {
// Redundancy is 3, so with only 3 nodes, node 1 is guaranteed to be part of
// the ideal state of any bucket in the system.
setupDistributor(3, 3, "distributor:1 storage:3 .1.s:m");
@@ -1560,7 +1305,7 @@ StateCheckersTest::gcInhibitedWhenIdealNodeInMaintenance()
// overshot the GC check cycle.
auto result = testStateChecker(checker, c, false, PendingMessage(), false);
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"), result);
+ EXPECT_EQ("NO OPERATIONS GENERATED", result);
}
/*
@@ -1569,17 +1314,14 @@ StateCheckersTest::gcInhibitedWhenIdealNodeInMaintenance()
* (it's bad mojo to potentially delete something that would've been merged
* had it not been for a node being in maintenance).
*/
-void
-StateCheckersTest::testNoRemoveWhenIdealNodeInMaintenance()
-{
- CPPUNIT_ASSERT_EQUAL_MSG(
- "Do not remove when ideal node is in maintenance mode",
- std::string("NO OPERATIONS GENERATED"),
- testDeleteExtraCopies("0=10/100/1/true,"
- "1=10/100/1/true,"
- "2=10/100/1/true",
- 2, PendingMessage(),
- "distributor:1 storage:3 .1.s:m"));
+TEST_F(StateCheckersTest, no_remove_when_ideal_node_in_maintenance) {
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testDeleteExtraCopies("0=10/100/1/true,"
+ "1=10/100/1/true,"
+ "2=10/100/1/true",
+ 2, PendingMessage(),
+ "distributor:1 storage:3 .1.s:m"))
+ << "Do not remove when ideal node is in maintenance mode";
}
/*
@@ -1591,9 +1333,7 @@ StateCheckersTest::testNoRemoveWhenIdealNodeInMaintenance()
*
* See bug 6768991 for context.
*/
-void
-StateCheckersTest::testStepwiseJoinForSmallBucketsWithoutSiblings()
-{
+TEST_F(StateCheckersTest, stepwise_join_for_small_buckets_without_siblings) {
setupDistributor(3, 10, "distributor:1 storage:2 bits:1");
vespa::config::content::core::StorDistributormanagerConfigBuilder config;
config.enableJoinForSiblingLessBuckets = true;
@@ -1602,31 +1342,27 @@ StateCheckersTest::testStepwiseJoinForSmallBucketsWithoutSiblings()
// into bucket (2, 1).
insertBucketInfo(document::BucketId(3, 1), 1, 0x1, 1, 1);
insertBucketInfo(document::BucketId(3, 0x3), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x0800000000000001): "
- "[Joining buckets BucketId(0x0c00000000000001) and "
- "BucketId(0x0c00000000000001) because their size "
- "(1 bytes, 1 docs) is less than the configured limit "
- "of (100, 10)"),
- testJoin(10, 100, 2, document::BucketId(3, 1)));
+ EXPECT_EQ("BucketId(0x0800000000000001): "
+ "[Joining buckets BucketId(0x0c00000000000001) and "
+ "BucketId(0x0c00000000000001) because their size "
+ "(1 bytes, 1 docs) is less than the configured limit "
+ "of (100, 10)",
+ testJoin(10, 100, 2, document::BucketId(3, 1)));
// Other bucket should be joined as well. Together the two join targets
// will transform into a mighty sibling pair that can rule the galaxy
// (and also be joined together afterwards)!
insertBucketInfo(document::BucketId(3, 1), 1, 0x1, 1, 1);
insertBucketInfo(document::BucketId(3, 0x3), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x0800000000000003): "
- "[Joining buckets BucketId(0x0c00000000000003) and "
- "BucketId(0x0c00000000000003) because their size "
- "(1 bytes, 1 docs) is less than the configured limit "
- "of (100, 10)"),
- testJoin(10, 100, 2, document::BucketId(3, 0x3)));
+ EXPECT_EQ("BucketId(0x0800000000000003): "
+ "[Joining buckets BucketId(0x0c00000000000003) and "
+ "BucketId(0x0c00000000000003) because their size "
+ "(1 bytes, 1 docs) is less than the configured limit "
+ "of (100, 10)",
+ testJoin(10, 100, 2, document::BucketId(3, 0x3)));
}
-void
-StateCheckersTest::testNoStepwiseJoinWhenDisabledThroughConfig()
-{
+TEST_F(StateCheckersTest, no_stepwise_join_when_disabled_through_config) {
setupDistributor(3, 10, "distributor:1 storage:2 bits:1");
vespa::config::content::core::StorDistributormanagerConfigBuilder config;
config.enableJoinForSiblingLessBuckets = false;
@@ -1636,13 +1372,11 @@ StateCheckersTest::testNoStepwiseJoinWhenDisabledThroughConfig()
// into bucket 1 if it had been config-enabled.
insertBucketInfo(document::BucketId(3, 1), 1, 0x1, 1, 1);
insertBucketInfo(document::BucketId(3, 0x3), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 1, document::BucketId(3, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 1, document::BucketId(3, 1)));
}
-void
-StateCheckersTest::testNoStepwiseJoinWhenSingleSiblingTooLarge()
-{
+TEST_F(StateCheckersTest, no_stepwise_join_when_single_sibling_too_large) {
setupDistributor(3, 10, "distributor:1 storage:2 bits:1");
vespa::config::content::core::StorDistributormanagerConfigBuilder config;
config.enableJoinForSiblingLessBuckets = true;
@@ -1651,13 +1385,11 @@ StateCheckersTest::testNoStepwiseJoinWhenSingleSiblingTooLarge()
// Bucket is exactly at the boundary where it's too big.
insertBucketInfo(document::BucketId(3, 1), 1, 0x1, 10, 100);
insertBucketInfo(document::BucketId(3, 0x3), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(std::string("NO OPERATIONS GENERATED"),
- testJoin(10, 100, 1, document::BucketId(3, 1)));
+ EXPECT_EQ("NO OPERATIONS GENERATED",
+ testJoin(10, 100, 1, document::BucketId(3, 1)));
}
-void
-StateCheckersTest::testStepwiseJoinMaySkipMultipleBitsWhenConsistent()
-{
+TEST_F(StateCheckersTest, stepwise_join_may_skip_multiple_bits_when_consistent) {
setupDistributor(2, 10, "distributor:1 storage:2 bits:8");
vespa::config::content::core::StorDistributormanagerConfigBuilder config;
config.enableJoinForSiblingLessBuckets = true;
@@ -1666,18 +1398,15 @@ StateCheckersTest::testStepwiseJoinMaySkipMultipleBitsWhenConsistent()
insertBucketInfo(document::BucketId(16, 1), 1, 0x1, 1, 1);
// No buckets further up in the tree, can join up to the distribution bit
// limit at 8.
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x2000000000000001): "
- "[Joining buckets BucketId(0x4000000000000001) and "
- "BucketId(0x4000000000000001) because their size "
- "(1 bytes, 1 docs) is less than the configured limit "
- "of (100, 10)"),
- testJoin(10, 100, 8, document::BucketId(16, 1)));
+ EXPECT_EQ("BucketId(0x2000000000000001): "
+ "[Joining buckets BucketId(0x4000000000000001) and "
+ "BucketId(0x4000000000000001) because their size "
+ "(1 bytes, 1 docs) is less than the configured limit "
+ "of (100, 10)",
+ testJoin(10, 100, 8, document::BucketId(16, 1)));
}
-void
-StateCheckersTest::testStepwiseJoinDoesNotSkipBeyondLevelWithSibling()
-{
+TEST_F(StateCheckersTest, stepwise_join_does_not_skip_beyond_level_with_sibling) {
setupDistributor(2, 10, "distributor:1 storage:2 bits:8");
vespa::config::content::core::StorDistributormanagerConfigBuilder config;
config.enableJoinForSiblingLessBuckets = true;
@@ -1689,44 +1418,37 @@ StateCheckersTest::testStepwiseJoinDoesNotSkipBeyondLevelWithSibling()
// the (16, 0) bucket cannot be moved further up than level 11 as it has a
// sibling there (0x2c00000000000400 sibling of 0x2c00000000000000).
insertBucketInfo(document::BucketId(11, 1 << 10), 1, 0x1, 1, 1);
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x2c00000000000000): "
- "[Joining buckets BucketId(0x4000000000000000) and "
- "BucketId(0x4000000000000000) because their size "
- "(1 bytes, 1 docs) is less than the configured limit "
- "of (100, 10)"),
- testJoin(10, 100, 8, document::BucketId(16, 0)));
+ EXPECT_EQ("BucketId(0x2c00000000000000): "
+ "[Joining buckets BucketId(0x4000000000000000) and "
+ "BucketId(0x4000000000000000) because their size "
+ "(1 bytes, 1 docs) is less than the configured limit "
+ "of (100, 10)",
+ testJoin(10, 100, 8, document::BucketId(16, 0)));
}
-void
-StateCheckersTest::joinCanBeScheduledWhenReplicasOnRetiredNodes()
-{
+TEST_F(StateCheckersTest, join_can_be_scheduled_when_replicas_on_retired_nodes) {
setupDistributor(1, 1, "distributor:1 storage:1 .0.s.:r");
insertJoinableBuckets();
- CPPUNIT_ASSERT_EQUAL(
- "BucketId(0x8000000000000001): "
- "[Joining buckets BucketId(0x8400000000000001) and "
- "BucketId(0x8400000100000001) because their size "
- "(2 bytes, 2 docs) is less than the configured limit "
- "of (100, 10)"s,
- testJoin(10, 100, 16, document::BucketId(33, 1)));
+ EXPECT_EQ("BucketId(0x8000000000000001): "
+ "[Joining buckets BucketId(0x8400000000000001) and "
+ "BucketId(0x8400000100000001) because their size "
+ "(2 bytes, 2 docs) is less than the configured limit "
+ "of (100, 10)",
+ testJoin(10, 100, 16, document::BucketId(33, 1)));
}
-void
-StateCheckersTest::contextPopulatesIdealStateContainers()
-{
+TEST_F(StateCheckersTest, context_populates_ideal_state_containers) {
// 1 and 3 are ideal nodes for bucket {17, 0}
setupDistributor(2, 100, "distributor:1 storage:4");
NodeMaintenanceStatsTracker statsTracker;
StateChecker::Context c(getExternalOperationHandler(), getDistributorBucketSpace(), statsTracker, makeDocumentBucket({17, 0}));
- CPPUNIT_ASSERT_EQUAL((std::vector<uint16_t>{1, 3}), c.idealState);
- CPPUNIT_ASSERT_EQUAL(size_t(2), c.unorderedIdealState.size());
- CPPUNIT_ASSERT(c.unorderedIdealState.find(1)
- != c.unorderedIdealState.end());
- CPPUNIT_ASSERT(c.unorderedIdealState.find(3)
- != c.unorderedIdealState.end());
+ ASSERT_THAT(c.idealState, ElementsAre(1, 3));
+ // TODO replace with UnorderedElementsAre once we can build gmock without issues
+ std::vector<uint16_t> ideal_state(c.unorderedIdealState.begin(), c.unorderedIdealState.end());
+ std::sort(ideal_state.begin(), ideal_state.end());
+ ASSERT_THAT(ideal_state, ElementsAre(1, 3));
}
namespace {
@@ -1738,10 +1460,9 @@ class StateCheckerRunner
NodeMaintenanceStatsTracker _statsTracker;
std::string _result;
public:
- StateCheckerRunner(StateCheckersTest& fixture);
+ explicit StateCheckerRunner(StateCheckersTest& fixture);
~StateCheckerRunner();
-
StateCheckerRunner& addToDb(const document::BucketId& bid,
const std::string& bucketInfo)
{
@@ -1780,13 +1501,11 @@ StateCheckerRunner<Checker>::StateCheckerRunner(StateCheckersTest& fixture)
: _fixture(fixture)
{}
template <typename Checker>
-StateCheckerRunner<Checker>::~StateCheckerRunner() {}
+StateCheckerRunner<Checker>::~StateCheckerRunner() = default;
} // anon ns
-void
-StateCheckersTest::statsUpdatedWhenMergingDueToMove()
-{
+TEST_F(StateCheckersTest, stats_updated_when_merging_due_to_move) {
StateCheckerRunner<SynchronizeAndMoveStateChecker> runner(*this);
// Ideal state for bucket {17,0} in given cluster state is [1, 3]
runner.addToDb({17, 0}, "0=1,1=1,2=1")
@@ -1796,7 +1515,7 @@ StateCheckersTest::statsUpdatedWhenMergingDueToMove()
{
NodeMaintenanceStats wanted;
wanted.copyingOut = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(1, makeBucketSpace()));
+ EXPECT_EQ(wanted, runner.stats().forNode(1, makeBucketSpace()));
}
// Moving 1 bucket from nodes {0, 2} into 3.
// Note that we do not at this point in time distinguish _which_ of these
@@ -1804,19 +1523,17 @@ StateCheckersTest::statsUpdatedWhenMergingDueToMove()
{
NodeMaintenanceStats wanted;
wanted.copyingIn = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(3, makeBucketSpace()));
+ EXPECT_EQ(wanted, runner.stats().forNode(3, makeBucketSpace()));
}
{
NodeMaintenanceStats wanted;
wanted.movingOut = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(0, makeBucketSpace()));
- CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(2, makeBucketSpace()));
+ EXPECT_EQ(wanted, runner.stats().forNode(0, makeBucketSpace()));
+ EXPECT_EQ(wanted, runner.stats().forNode(2, makeBucketSpace()));
}
}
-void
-StateCheckersTest::statsUpdatedWhenMergingDueToMissingCopy()
-{
+TEST_F(StateCheckersTest, stats_updated_when_merging_due_to_missing_copy) {
StateCheckerRunner<SynchronizeAndMoveStateChecker> runner(*this);
// Ideal state for bucket {17,0} in given cluster state is [1, 3]
runner.addToDb({17, 0}, "1=1")
@@ -1826,18 +1543,16 @@ StateCheckersTest::statsUpdatedWhenMergingDueToMissingCopy()
{
NodeMaintenanceStats wanted;
wanted.copyingIn = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(3, makeBucketSpace()));
+ EXPECT_EQ(wanted, runner.stats().forNode(3, makeBucketSpace()));
}
{
NodeMaintenanceStats wanted;
wanted.copyingOut = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(1, makeBucketSpace()));
+ EXPECT_EQ(wanted, runner.stats().forNode(1, makeBucketSpace()));
}
}
-void
-StateCheckersTest::statsUpdatedWhenMergingDueToOutOfSyncCopies()
-{
+TEST_F(StateCheckersTest, stats_updated_when_merging_due_to_out_of_sync_copies) {
StateCheckerRunner<SynchronizeAndMoveStateChecker> runner(*this);
runner.addToDb({17, 0}, "1=1,3=2")
.clusterState("distributor:1 storage:4")
@@ -1845,8 +1560,8 @@ StateCheckersTest::statsUpdatedWhenMergingDueToOutOfSyncCopies()
{
NodeMaintenanceStats wanted;
wanted.syncing = 1;
- CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(1, makeBucketSpace()));
- CPPUNIT_ASSERT_EQUAL(wanted, runner.stats().forNode(3, makeBucketSpace()));
+ EXPECT_EQ(wanted, runner.stats().forNode(1, makeBucketSpace()));
+ EXPECT_EQ(wanted, runner.stats().forNode(3, makeBucketSpace()));
}
}
diff --git a/storage/src/tests/distributor/statoperationtest.cpp b/storage/src/tests/distributor/statoperationtest.cpp
index 19337a0d52a..53ea0ec5efa 100644
--- a/storage/src/tests/distributor/statoperationtest.cpp
+++ b/storage/src/tests/distributor/statoperationtest.cpp
@@ -2,82 +2,63 @@
#include <tests/common/dummystoragelink.h>
#include <vespa/storageapi/message/stat.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <tests/distributor/distributortestutil.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/operations/external/statbucketoperation.h>
#include <vespa/storage/distributor/operations/external/statbucketlistoperation.h>
#include <vespa/storage/distributor/distributor.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
-struct StatOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- void setUp() override {
+struct StatOperationTest : Test, DistributorTestUtil {
+ void SetUp() override {
createLinks();
};
- void tearDown() override {
+ void TearDown() override {
close();
}
-
- void testBucketInfo();
- void testBucketList();
-
- CPPUNIT_TEST_SUITE(StatOperationTest);
- CPPUNIT_TEST(testBucketInfo);
- CPPUNIT_TEST(testBucketList);
- CPPUNIT_TEST_SUITE_END();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(StatOperationTest);
-
-void
-StatOperationTest::testBucketInfo()
-{
+TEST_F(StatOperationTest, bucket_info) {
enableDistributorClusterState("distributor:1 storage:2");
- addNodesToBucketDB(document::BucketId(16, 5),
- "0=4/2/100,1=4/2/100");
+ addNodesToBucketDB(document::BucketId(16, 5), "0=4/2/100,1=4/2/100");
StatBucketOperation op(
getExternalOperationHandler(),
getDistributorBucketSpace(),
- std::shared_ptr<api::StatBucketCommand>(
- new api::StatBucketCommand(makeDocumentBucket(document::BucketId(16, 5)), "")));
+ std::make_shared<api::StatBucketCommand>(
+ makeDocumentBucket(document::BucketId(16, 5)), ""));
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Statbucket => 0,Statbucket => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Statbucket => 0,Statbucket => 1", _sender.getCommands(true));
{
- api::StatBucketCommand* tmp(
- static_cast<api::StatBucketCommand*>(_sender.commands[0].get()));
- api::StatBucketReply* reply = new api::StatBucketReply(*tmp, "foo");
- op.receive(_sender, std::shared_ptr<api::StorageReply>(reply));
+ auto* tmp = static_cast<api::StatBucketCommand*>(_sender.command(0).get());
+ auto reply = std::make_shared<api::StatBucketReply>(*tmp, "foo");
+ op.receive(_sender, reply);
}
{
- api::StatBucketCommand* tmp(
- static_cast<api::StatBucketCommand*>(_sender.commands[1].get()));
- api::StatBucketReply* reply = new api::StatBucketReply(*tmp, "bar");
- op.receive(_sender, std::shared_ptr<api::StorageReply>(reply));
+ auto* tmp = static_cast<api::StatBucketCommand*>(_sender.command(1).get());
+ auto reply = std::make_shared<api::StatBucketReply>(*tmp, "bar");
+ op.receive(_sender, reply);
}
- api::StatBucketReply* replyback(
- static_cast<api::StatBucketReply*>(_sender.replies.back().get()));
- CPPUNIT_ASSERT_CONTAIN("foo", replyback->getResults());
- CPPUNIT_ASSERT_CONTAIN("bar", replyback->getResults());
+ auto* replyback = static_cast<api::StatBucketReply*>(_sender.replies().back().get());
+ EXPECT_THAT(replyback->getResults(), HasSubstr("foo"));
+ EXPECT_THAT(replyback->getResults(), HasSubstr("bar"));
}
-void
-StatOperationTest::testBucketList() {
+TEST_F(StatOperationTest, bucket_list) {
setupDistributor(2, 2, "distributor:1 storage:2");
getConfig().setSplitCount(10);
@@ -88,8 +69,7 @@ StatOperationTest::testBucketList() {
0xff, 100, 200, true, (i == 1));
}
- std::shared_ptr<api::GetBucketListCommand> msg(
- new api::GetBucketListCommand(makeDocumentBucket(document::BucketId(16, 5))));
+ auto msg = std::make_shared<api::GetBucketListCommand>(makeDocumentBucket(document::BucketId(16, 5)));
StatBucketListOperation op(
getDistributorBucketSpace().getBucketDatabase(),
@@ -98,23 +78,18 @@ StatOperationTest::testBucketList() {
msg);
op.start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(1, (int)_sender.replies.size());
-
- api::GetBucketListReply* repl(
- dynamic_cast<api::GetBucketListReply*>(_sender.replies[0].get()));
-
- CPPUNIT_ASSERT_EQUAL(1, (int)repl->getBuckets().size());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 5),
- repl->getBuckets()[0]._bucket);
- CPPUNIT_ASSERT_EQUAL(
- vespalib::string(
- "[distributor:0] split: "
- "[Splitting bucket because its maximum size (200 b, 100 docs, 100 meta, 200 b total) "
- "is higher than the configured limit of (100, 10)] "
- "[node(idx=0,crc=0xff,docs=100/100,bytes=200/200,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0xff,docs=100/100,bytes=200/200,trusted=true,active=true,ready=false)]"),
- repl->getBuckets()[0]._bucketInformation);
+ ASSERT_EQ(1, _sender.replies().size());
+
+ auto& repl = dynamic_cast<api::GetBucketListReply&>(*_sender.reply(0));
+
+ ASSERT_EQ(1, repl.getBuckets().size());
+ EXPECT_EQ(repl.getBuckets()[0]._bucket, document::BucketId(16, 5));
+ EXPECT_EQ("[distributor:0] split: "
+ "[Splitting bucket because its maximum size (200 b, 100 docs, 100 meta, 200 b total) "
+ "is higher than the configured limit of (100, 10)] "
+ "[node(idx=0,crc=0xff,docs=100/100,bytes=200/200,trusted=true,active=false,ready=false), "
+ "node(idx=1,crc=0xff,docs=100/100,bytes=200/200,trusted=true,active=true,ready=false)]",
+ repl.getBuckets()[0]._bucketInformation);
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/statusreporterdelegatetest.cpp b/storage/src/tests/distributor/statusreporterdelegatetest.cpp
index 99d576cad56..9e66f1920e2 100644
--- a/storage/src/tests/distributor/statusreporterdelegatetest.cpp
+++ b/storage/src/tests/distributor/statusreporterdelegatetest.cpp
@@ -1,30 +1,16 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <tests/common/testhelper.h>
#include <tests/distributor/distributortestutil.h>
-
#include <vespa/storage/distributor/statusreporterdelegate.h>
+#include <vespa/vespalib/gtest/gtest.h>
-namespace storage {
-namespace distributor {
-
-class StatusReporterDelegateTest : public CppUnit::TestFixture
-{
- CPPUNIT_TEST_SUITE(StatusReporterDelegateTest);
- CPPUNIT_TEST(testDelegateInvokesDelegatorOnStatusRequest);
- CPPUNIT_TEST_SUITE_END();
-
- void testDelegateInvokesDelegatorOnStatusRequest();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(StatusReporterDelegateTest);
+namespace storage::distributor {
namespace {
-// We really ought to get GoogleMock as part of our testing suite...
-class MockDelegator : public StatusDelegator
-{
+// TODO replace with gmock impl
+class MockDelegator : public StatusDelegator {
mutable std::ostringstream _calls;
bool handleStatusRequest(const DelegatedStatusRequest& request) const override {
_calls << "Request(" << request.path << ")";
@@ -58,30 +44,22 @@ public:
}
-void
-StatusReporterDelegateTest::testDelegateInvokesDelegatorOnStatusRequest()
-{
+TEST(StatusReporterDelegateTest, delegate_invokes_delegator_on_status_request) {
vdstestlib::DirConfig config(getStandardConfig(false));
TestDistributorApp app(config.getConfigId());
MockDelegator mockDelegator;
MockStatusReporter reporter;
- StatusReporterDelegate delegate(app.getComponentRegister(),
- mockDelegator,
- reporter);
+ StatusReporterDelegate delegate(app.getComponentRegister(), mockDelegator, reporter);
framework::HttpUrlPath path("dummy");
- CPPUNIT_ASSERT_EQUAL(vespalib::string("foo/bar"),
- delegate.getReportContentType(path));
+ EXPECT_EQ("foo/bar", delegate.getReportContentType(path));
std::ostringstream ss;
- CPPUNIT_ASSERT(delegate.reportStatus(ss, path));
+ ASSERT_TRUE(delegate.reportStatus(ss, path));
- CPPUNIT_ASSERT_EQUAL(std::string("Request(dummy)"),
- mockDelegator.getCalls());
- CPPUNIT_ASSERT_EQUAL(std::string("reportStatus with dummy"),
- ss.str());
+ EXPECT_EQ("Request(dummy)", mockDelegator.getCalls());
+ EXPECT_EQ("reportStatus with dummy", ss.str());
}
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/throttlingoperationstartertest.cpp b/storage/src/tests/distributor/throttlingoperationstartertest.cpp
index c3290a8c0f6..2dc4561068b 100644
--- a/storage/src/tests/distributor/throttlingoperationstartertest.cpp
+++ b/storage/src/tests/distributor/throttlingoperationstartertest.cpp
@@ -1,27 +1,17 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
+
#include <vespa/storage/distributor/throttlingoperationstarter.h>
#include <tests/distributor/maintenancemocks.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
-using document::test::makeDocumentBucket;
-
-namespace storage {
-
-namespace distributor {
+namespace storage::distributor {
using document::BucketId;
+using document::test::makeDocumentBucket;
+using namespace ::testing;
-class ThrottlingOperationStarterTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(ThrottlingOperationStarterTest);
- CPPUNIT_TEST(testOperationNotThrottledWhenSlotAvailable);
- CPPUNIT_TEST(testOperationStartingIsForwardedToImplementation);
- CPPUNIT_TEST(testOperationThrottledWhenNoAvailableSlots);
- CPPUNIT_TEST(testThrottlingWithMaxPendingRange);
- CPPUNIT_TEST(testStartingOperationsFillsUpPendingWindow);
- CPPUNIT_TEST(testFinishingOperationsAllowsMoreToStart);
- CPPUNIT_TEST_SUITE_END();
-
+struct ThrottlingOperationStarterTest : Test {
std::shared_ptr<Operation> createMockOperation() {
return std::shared_ptr<Operation>(new MockOperation(makeDocumentBucket(BucketId(16, 1))));
}
@@ -29,113 +19,90 @@ class ThrottlingOperationStarterTest : public CppUnit::TestFixture {
std::unique_ptr<MockOperationStarter> _starterImpl;
std::unique_ptr<ThrottlingOperationStarter> _operationStarter;
-public:
- void testOperationNotThrottledWhenSlotAvailable();
- void testOperationStartingIsForwardedToImplementation();
- void testOperationThrottledWhenNoAvailableSlots();
- void testThrottlingWithMaxPendingRange();
- void testStartingOperationsFillsUpPendingWindow();
- void testFinishingOperationsAllowsMoreToStart();
-
- void setUp() override;
- void tearDown() override;
+ void SetUp() override;
+ void TearDown() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(ThrottlingOperationStarterTest);
-
void
-ThrottlingOperationStarterTest::setUp()
+ThrottlingOperationStarterTest::SetUp()
{
- _starterImpl.reset(new MockOperationStarter());
- _operationStarter.reset(new ThrottlingOperationStarter(*_starterImpl));
+ _starterImpl = std::make_unique<MockOperationStarter>();
+ _operationStarter = std::make_unique<ThrottlingOperationStarter>(*_starterImpl);
}
void
-ThrottlingOperationStarterTest::tearDown()
+ThrottlingOperationStarterTest::TearDown()
{
// Must clear before _operationStarter goes out of scope, or operation
// destructors will try to call method on destroyed object.
_starterImpl->getOperations().clear();
}
-void
-ThrottlingOperationStarterTest::testOperationNotThrottledWhenSlotAvailable()
-{
- CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(0)));
+TEST_F(ThrottlingOperationStarterTest, operation_not_throttled_when_slot_available) {
+ EXPECT_TRUE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
}
-void
-ThrottlingOperationStarterTest::testOperationStartingIsForwardedToImplementation()
-{
- CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(0)));
- CPPUNIT_ASSERT_EQUAL(std::string("Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri 0\n"),
- _starterImpl->toString());
+TEST_F(ThrottlingOperationStarterTest, operation_starting_is_forwarded_to_implementation) {
+ ASSERT_TRUE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
+ EXPECT_EQ("Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri 0\n",
+ _starterImpl->toString());
}
-void
-ThrottlingOperationStarterTest::testOperationThrottledWhenNoAvailableSlots()
-{
+TEST_F(ThrottlingOperationStarterTest, operation_throttled_when_no_available_slots) {
_operationStarter->setMaxPendingRange(0, 0);
- CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(0)));
+ EXPECT_FALSE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
}
-void
-ThrottlingOperationStarterTest::testThrottlingWithMaxPendingRange()
-{
+TEST_F(ThrottlingOperationStarterTest, throttling_with_max_pending_range) {
_operationStarter->setMaxPendingRange(0, 1);
- CPPUNIT_ASSERT(!_operationStarter->canStart(0, OperationStarter::Priority(255)));
- CPPUNIT_ASSERT(_operationStarter->canStart(0, OperationStarter::Priority(0)));
+ EXPECT_FALSE(_operationStarter->canStart(0, OperationStarter::Priority(255)));
+ EXPECT_TRUE(_operationStarter->canStart(0, OperationStarter::Priority(0)));
_operationStarter->setMaxPendingRange(1, 1);
- CPPUNIT_ASSERT(_operationStarter->canStart(0, OperationStarter::Priority(255)));
- CPPUNIT_ASSERT(_operationStarter->canStart(0, OperationStarter::Priority(0)));
+ EXPECT_TRUE(_operationStarter->canStart(0, OperationStarter::Priority(255)));
+ EXPECT_TRUE(_operationStarter->canStart(0, OperationStarter::Priority(0)));
_operationStarter->setMaxPendingRange(1, 3);
- CPPUNIT_ASSERT(!_operationStarter->canStart(1, OperationStarter::Priority(255)));
- CPPUNIT_ASSERT(_operationStarter->canStart(1, OperationStarter::Priority(100)));
- CPPUNIT_ASSERT(_operationStarter->canStart(1, OperationStarter::Priority(0)));
- CPPUNIT_ASSERT(_operationStarter->canStart(2, OperationStarter::Priority(0)));
- CPPUNIT_ASSERT(!_operationStarter->canStart(3, OperationStarter::Priority(0)));
- CPPUNIT_ASSERT(!_operationStarter->canStart(4, OperationStarter::Priority(0)));
+ EXPECT_FALSE(_operationStarter->canStart(1, OperationStarter::Priority(255)));
+ EXPECT_TRUE(_operationStarter->canStart(1, OperationStarter::Priority(100)));
+ EXPECT_TRUE(_operationStarter->canStart(1, OperationStarter::Priority(0)));
+ EXPECT_TRUE(_operationStarter->canStart(2, OperationStarter::Priority(0)));
+ EXPECT_FALSE(_operationStarter->canStart(3, OperationStarter::Priority(0)));
+ EXPECT_FALSE(_operationStarter->canStart(4, OperationStarter::Priority(0)));
}
-void
-ThrottlingOperationStarterTest::testStartingOperationsFillsUpPendingWindow()
-{
+TEST_F(ThrottlingOperationStarterTest, starting_operations_fills_up_pending_window) {
_operationStarter->setMaxPendingRange(1, 3);
- CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(255)));
- CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(255)));
- CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(100)));
- CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(100)));
- CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(0)));
- CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(0)));
+ EXPECT_TRUE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ EXPECT_FALSE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ EXPECT_TRUE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(100)));
+ EXPECT_FALSE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(100)));
+ EXPECT_TRUE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
+ EXPECT_FALSE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(0)));
}
-void
-ThrottlingOperationStarterTest::testFinishingOperationsAllowsMoreToStart()
-{
+TEST_F(ThrottlingOperationStarterTest, finishing_operations_allows_more_to_start) {
_operationStarter->setMaxPendingRange(1, 1);
- CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(255)));
- CPPUNIT_ASSERT(!_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(255)));
- CPPUNIT_ASSERT(!_starterImpl->getOperations().empty());
+ EXPECT_TRUE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ EXPECT_FALSE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ EXPECT_FALSE(_starterImpl->getOperations().empty());
_starterImpl->getOperations().pop_back();
- CPPUNIT_ASSERT(_operationStarter->start(createMockOperation(),
- OperationStarter::Priority(255)));
- CPPUNIT_ASSERT(!_starterImpl->getOperations().empty());
+ EXPECT_TRUE(_operationStarter->start(createMockOperation(),
+ OperationStarter::Priority(255)));
+ EXPECT_FALSE(_starterImpl->getOperations().empty());
}
}
-}
diff --git a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
index a8771ddc28a..edb5261fbfa 100644
--- a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
+++ b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/config/helper/configgetter.h>
-#include <cppunit/extensions/HelperMacros.h>
#include <vespa/document/config/config-documenttypes.h>
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/base/testdocrepo.h>
@@ -14,13 +13,12 @@
#include <tests/distributor/distributortestutil.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/distributor.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
-using document::test::makeDocumentBucket;
-
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
-using std::shared_ptr;
+using document::test::makeDocumentBucket;
using config::ConfigGetter;
using document::DocumenttypesConfig;
using namespace document;
@@ -28,87 +26,22 @@ using namespace storage;
using namespace storage::distributor;
using namespace storage::api;
using namespace storage::lib;
+using namespace ::testing;
-using namespace std::literals::string_literals;
-
-class TwoPhaseUpdateOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(TwoPhaseUpdateOperationTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testNonExisting);
- CPPUNIT_TEST(testUpdateFailed);
- CPPUNIT_TEST(testFastPathInconsistentTimestamps);
- CPPUNIT_TEST(testFastPathInconsistentTimestampsNotFound);
- CPPUNIT_TEST(testFastPathInconsistentTimestampsUpdateError);
- CPPUNIT_TEST(testFastPathInconsistentTimestampsGetError);
- CPPUNIT_TEST(testFastPathInconsistentTimestampsPutError);
- CPPUNIT_TEST(testFastPathInconsistentTimestampsPutNotStarted);
- CPPUNIT_TEST(testFastPathInconsistentTimestampsInconsistentSplit);
- CPPUNIT_TEST(testFastPathPropagatesMessageSettingsToUpdate);
- CPPUNIT_TEST(testNofM);
- CPPUNIT_TEST(testSafePathUpdatesNewestReceivedDocument);
- CPPUNIT_TEST(testCreateIfNonExistentCreatesDocumentIfAllEmptyGets);
- CPPUNIT_TEST(testUpdateFailsIfSafePathHasFailedPut);
- CPPUNIT_TEST(testUpdateFailsIfSafePathGetsFail);
- CPPUNIT_TEST(testUpdateFailsIfApplyThrowsException);
- CPPUNIT_TEST(testNonExistingWithAutoCreate);
- CPPUNIT_TEST(testSafePathFailsUpdateWhenMismatchingTimestampConstraint);
- CPPUNIT_TEST(testSafePathUpdatePropagatesMessageSettingsToGetsAndPuts);
- CPPUNIT_TEST(testSafePathPropagatesMbusTracesFromReplies);
- CPPUNIT_TEST(testUpdateFailsIfOwnershipChangesBetweenGetAndPut);
- CPPUNIT_TEST(testSafePathConditionMismatchFailsWithTasError);
- CPPUNIT_TEST(testSafePathConditionMatchSendsPutsWithUpdatedDoc);
- CPPUNIT_TEST(testSafePathConditionParseFailureFailsWithIllegalParamsError);
- CPPUNIT_TEST(testSafePathConditonUnknownDocTypeFailsWithIllegalParamsError);
- CPPUNIT_TEST(safe_path_condition_with_missing_doc_and_no_auto_create_fails_with_tas_error);
- CPPUNIT_TEST(safe_path_condition_with_missing_doc_and_auto_create_sends_puts);
- CPPUNIT_TEST(testFastPathCloseEdgeSendsCorrectReply);
- CPPUNIT_TEST(testSafePathCloseEdgeSendsCorrectReply);
- CPPUNIT_TEST_SUITE_END();
-
+struct TwoPhaseUpdateOperationTest : Test, DistributorTestUtil {
document::TestDocRepo _testRepo;
std::shared_ptr<const DocumentTypeRepo> _repo;
const DocumentType* _doc_type;
-protected:
- void testSimple();
- void testNonExisting();
- void testUpdateFailed();
- void testFastPathInconsistentTimestamps();
- void testFastPathInconsistentTimestampsNotFound();
- void testFastPathInconsistentTimestampsUpdateError();
- void testFastPathInconsistentTimestampsGetError();
- void testFastPathInconsistentTimestampsPutError();
- void testFastPathInconsistentTimestampsPutNotStarted();
- void testFastPathInconsistentTimestampsInconsistentSplit();
- void testFastPathPropagatesMessageSettingsToUpdate();
- void testNofM();
- void testSafePathUpdatesNewestReceivedDocument();
- void testCreateIfNonExistentCreatesDocumentIfAllEmptyGets();
- void testUpdateFailsIfSafePathHasFailedPut();
- void testUpdateFailsIfSafePathGetsFail();
- void testUpdateFailsIfApplyThrowsException();
- void testNonExistingWithAutoCreate();
- void testSafePathFailsUpdateWhenMismatchingTimestampConstraint();
- void testSafePathUpdatePropagatesMessageSettingsToGetsAndPuts();
- void testSafePathPropagatesMbusTracesFromReplies();
- void testUpdateFailsIfOwnershipChangesBetweenGetAndPut();
- void testSafePathConditionMismatchFailsWithTasError();
- void testSafePathConditionMatchSendsPutsWithUpdatedDoc();
- void testSafePathConditionParseFailureFailsWithIllegalParamsError();
- void testSafePathConditonUnknownDocTypeFailsWithIllegalParamsError();
- void safe_path_condition_with_missing_doc_and_no_auto_create_fails_with_tas_error();
- void safe_path_condition_with_missing_doc_and_auto_create_sends_puts();
- void testFastPathCloseEdgeSendsCorrectReply();
- void testSafePathCloseEdgeSendsCorrectReply();
+ TwoPhaseUpdateOperationTest();
+ ~TwoPhaseUpdateOperationTest();
void checkMessageSettingsPropagatedTo(
const api::StorageCommand::SP& msg) const;
- std::string getUpdatedValueFromLastPut(MessageSenderStub&);
-public:
- void setUp() override {
+ std::string getUpdatedValueFromLastPut(DistributorMessageSenderStub&);
+
+ void SetUp() override {
_repo = _testRepo.getTypeRepoSp();
_doc_type = _repo->getDocumentType("testdoctype1");
createLinks();
@@ -116,32 +49,32 @@ public:
getClock().setAbsoluteTimeInSeconds(200);
}
- void tearDown() override {
+ void TearDown() override {
close();
}
void replyToMessage(Operation& callback,
- MessageSenderStub& sender,
+ DistributorMessageSenderStub& sender,
uint32_t index,
uint64_t oldTimestamp,
api::ReturnCode::Result result = api::ReturnCode::OK);
void replyToPut(
Operation& callback,
- MessageSenderStub& sender,
+ DistributorMessageSenderStub& sender,
uint32_t index,
api::ReturnCode::Result result = api::ReturnCode::OK,
const std::string& traceMsg = "");
void replyToCreateBucket(
Operation& callback,
- MessageSenderStub& sender,
+ DistributorMessageSenderStub& sender,
uint32_t index,
api::ReturnCode::Result result = api::ReturnCode::OK);
void replyToGet(
Operation& callback,
- MessageSenderStub& sender,
+ DistributorMessageSenderStub& sender,
uint32_t index,
uint64_t oldTimestamp,
bool haveDocument = true,
@@ -191,21 +124,22 @@ public:
const UpdateOptions& options = UpdateOptions());
void assertAbortedUpdateReplyWithContextPresent(
- const MessageSenderStub& closeSender) const;
+ const DistributorMessageSenderStub& closeSender) const;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(TwoPhaseUpdateOperationTest);
+TwoPhaseUpdateOperationTest::TwoPhaseUpdateOperationTest() = default;
+TwoPhaseUpdateOperationTest::~TwoPhaseUpdateOperationTest() = default;
void
TwoPhaseUpdateOperationTest::replyToMessage(
Operation& callback,
- MessageSenderStub& sender,
+ DistributorMessageSenderStub& sender,
uint32_t index,
uint64_t oldTimestamp,
api::ReturnCode::Result result)
{
- std::shared_ptr<api::StorageMessage> msg2 = sender.commands.at(index);
+ std::shared_ptr<api::StorageMessage> msg2 = sender.command(index);
auto& updatec = dynamic_cast<UpdateCommand&>(*msg2);
std::unique_ptr<api::StorageReply> reply(updatec.makeReply());
static_cast<api::UpdateReply*>(reply.get())->setOldTimestamp(oldTimestamp);
@@ -218,12 +152,12 @@ TwoPhaseUpdateOperationTest::replyToMessage(
void
TwoPhaseUpdateOperationTest::replyToPut(
Operation& callback,
- MessageSenderStub& sender,
+ DistributorMessageSenderStub& sender,
uint32_t index,
api::ReturnCode::Result result,
const std::string& traceMsg)
{
- std::shared_ptr<api::StorageMessage> msg2 = sender.commands.at(index);
+ std::shared_ptr<api::StorageMessage> msg2 = sender.command(index);
auto& putc = dynamic_cast<PutCommand&>(*msg2);
std::unique_ptr<api::StorageReply> reply(putc.makeReply());
reply->setResult(api::ReturnCode(result, ""));
@@ -237,11 +171,11 @@ TwoPhaseUpdateOperationTest::replyToPut(
void
TwoPhaseUpdateOperationTest::replyToCreateBucket(
Operation& callback,
- MessageSenderStub& sender,
+ DistributorMessageSenderStub& sender,
uint32_t index,
api::ReturnCode::Result result)
{
- std::shared_ptr<api::StorageMessage> msg2 = sender.commands.at(index);
+ std::shared_ptr<api::StorageMessage> msg2 = sender.command(index);
auto& putc = dynamic_cast<CreateBucketCommand&>(*msg2);
std::unique_ptr<api::StorageReply> reply(putc.makeReply());
reply->setResult(api::ReturnCode(result, ""));
@@ -252,14 +186,14 @@ TwoPhaseUpdateOperationTest::replyToCreateBucket(
void
TwoPhaseUpdateOperationTest::replyToGet(
Operation& callback,
- MessageSenderStub& sender,
+ DistributorMessageSenderStub& sender,
uint32_t index,
uint64_t oldTimestamp,
bool haveDocument,
api::ReturnCode::Result result,
const std::string& traceMsg)
{
- auto& get = static_cast<const api::GetCommand&>(*sender.commands.at(index));
+ auto& get = static_cast<const api::GetCommand&>(*sender.command(index));
std::shared_ptr<api::StorageReply> reply;
if (haveDocument) {
@@ -340,294 +274,239 @@ TwoPhaseUpdateOperationTest::sendUpdate(const std::string& bucketState,
handler, getDistributorBucketSpace(), msg, getDistributor().getMetrics());
}
-
-void
-TwoPhaseUpdateOperationTest::testSimple()
-{
+TEST_F(TwoPhaseUpdateOperationTest, simple) {
setupDistributor(1, 1, "storage:1 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Update => 0"), sender.getCommands(true));
+ ASSERT_EQ("Update => 0", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 90) ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 90) ReturnCode(NONE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testNonExisting()
-{
+TEST_F(TwoPhaseUpdateOperationTest, non_existing) {
setupDistributor(1, 1, "storage:1 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate(""));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 0) ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) ReturnCode(NONE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testUpdateFailed()
-{
+TEST_F(TwoPhaseUpdateOperationTest, update_failed) {
setupDistributor(1, 1, "storage:1 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90, api::ReturnCode::INTERNAL_FAILURE);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 0) "
- "ReturnCode(INTERNAL_FAILURE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(INTERNAL_FAILURE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestamps()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
- sender.getLastCommand(true));
+ ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
+ sender.getLastCommand(true));
replyToGet(*cb, sender, 2, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1,Get => 1,Put => 1,Put => 0"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1,Get => 1,Put => 1,Put => 0",
+ sender.getCommands(true));
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 3);
replyToPut(*cb, sender, 4);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
- "(best node 1)) ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(NONE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsNotFound()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_not_found) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
- sender.getLastCommand(true));
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
+ sender.getLastCommand(true));
+ ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 2, 110, false);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
- "(best node 1)) ReturnCode(INTERNAL_FAILURE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(INTERNAL_FAILURE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsUpdateError()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_update_error) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToMessage(*cb, sender, 1, 110, api::ReturnCode::IO_FAILURE);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 90) "
- "ReturnCode(IO_FAILURE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 90) "
+ "ReturnCode(IO_FAILURE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsGetError()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_get_error) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
- sender.getLastCommand(true));
+ ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
+ sender.getLastCommand(true));
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 2, 110, false, api::ReturnCode::IO_FAILURE);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
- "(best node 1)) ReturnCode(IO_FAILURE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(IO_FAILURE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsPutError()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_put_error) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
- sender.getLastCommand(true));
+ ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
+ sender.getLastCommand(true));
replyToGet(*cb, sender, 2, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1,Get => 1,Put => 1,Put => 0"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1,Get => 1,Put => 1,Put => 0",
+ sender.getCommands(true));
replyToPut(*cb, sender, 3, api::ReturnCode::IO_FAILURE);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
- "(best node 1)) ReturnCode(IO_FAILURE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(IO_FAILURE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsPutNotStarted()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_put_not_started) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90);
replyToMessage(*cb, sender, 1, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 1"),
- sender.getLastCommand(true));
- checkMessageSettingsPropagatedTo(sender.commands.back());
+ ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 1",
+ sender.getLastCommand(true));
+ checkMessageSettingsPropagatedTo(sender.commands().back());
enableDistributorClusterState("storage:0 distributor:1");
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 2, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
- "(best node 1)) ReturnCode(NOT_CONNECTED, "
- "Can't store document: No storage nodes available)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110 Was inconsistent "
+ "(best node 1)) ReturnCode(NOT_CONNECTED, "
+ "Can't store document: No storage nodes available)",
+ sender.getLastReply(true));
}
-
-void
-TwoPhaseUpdateOperationTest::testFastPathInconsistentTimestampsInconsistentSplit()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_inconsistent_split) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=1/2/3",
UpdateOptions().makeInconsistentSplit(true)));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
std::string wanted("Get(BucketId(0x4000000000008b13), doc:test:test) => 0,"
"Get(BucketId(0x4400000000008b13), doc:test:test) => 0");
std::string text = sender.getCommands(true, true);
- CPPUNIT_ASSERT_EQUAL(wanted, text);
+ ASSERT_EQ(wanted, text);
replyToGet(*cb, sender, 0, 90);
replyToGet(*cb, sender, 1, 120);
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "Put(BucketId(0x4400000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 1,"
- "Put(BucketId(0x4400000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 0"),
- sender.getCommands(true, true, 2));
+ ASSERT_EQ("Put(BucketId(0x4400000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 1,"
+ "Put(BucketId(0x4400000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 0",
+ sender.getCommands(true, true, 2));
replyToPut(*cb, sender, 2);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 3);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 120) "
- "ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 120) "
+ "ReturnCode(NONE)",
+ sender.getLastReply(true));
}
void
@@ -635,355 +514,301 @@ TwoPhaseUpdateOperationTest::checkMessageSettingsPropagatedTo(
const api::StorageCommand::SP& msg) const
{
// Settings set in sendUpdate().
- CPPUNIT_ASSERT_EQUAL(uint32_t(6), msg->getTrace().getLevel());
- CPPUNIT_ASSERT_EQUAL(uint32_t(6789), msg->getTimeout());
- CPPUNIT_ASSERT_EQUAL(uint8_t(99), msg->getPriority());
+ EXPECT_EQ(6, msg->getTrace().getLevel());
+ EXPECT_EQ(6789, msg->getTimeout());
+ EXPECT_EQ(99, msg->getPriority());
}
-void
-TwoPhaseUpdateOperationTest::testFastPathPropagatesMessageSettingsToUpdate()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_propagates_message_settings_to_update) {
setupDistributor(1, 1, "storage:1 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Update => 0"), sender.getCommands(true));
+ ASSERT_EQ("Update => 0", sender.getCommands(true));
- StorageCommand::SP msg(sender.commands.back());
+ StorageCommand::SP msg(sender.commands().back());
checkMessageSettingsPropagatedTo(msg);
}
-void
-TwoPhaseUpdateOperationTest::testNofM()
-{
+TEST_F(TwoPhaseUpdateOperationTest, n_of_m) {
setupDistributor(2, 2, "storage:2 distributor:1", 1);
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Update => 0,Update => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToMessage(*cb, sender, 0, 90);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 90) ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 90) ReturnCode(NONE)",
+ sender.getLastReply(true));
replyToMessage(*cb, sender, 1, 123);
}
std::string
TwoPhaseUpdateOperationTest::getUpdatedValueFromLastPut(
- MessageSenderStub& sender)
+ DistributorMessageSenderStub& sender)
{
- Document::SP doc(dynamic_cast<api::PutCommand&>(*sender.commands.back())
+ Document::SP doc(dynamic_cast<api::PutCommand&>(*sender.commands().back())
.getDocument());
FieldValue::UP value(doc->getValue("headerval"));
return value->toString();
}
-void
-TwoPhaseUpdateOperationTest::testSafePathUpdatesNewestReceivedDocument()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_updates_newest_received_document) {
setupDistributor(3, 3, "storage:3 distributor:1");
// 0,1 in sync. 2 out of sync.
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("Get(BucketId(0x4000000000008b13), doc:test:test) => 0,"
- "Get(BucketId(0x4000000000008b13), doc:test:test) => 2"),
- sender.getCommands(true, true));
+ ASSERT_EQ("Get(BucketId(0x4000000000008b13), doc:test:test) => 0,"
+ "Get(BucketId(0x4000000000008b13), doc:test:test) => 2",
+ sender.getCommands(true, true));
replyToGet(*cb, sender, 0, 50);
replyToGet(*cb, sender, 1, 70);
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 1,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 0,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 2"),
- sender.getCommands(true, true, 2));
+ ASSERT_EQ("Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 1,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 0,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 2",
+ sender.getCommands(true, true, 2));
// Make sure Put contains an updated document (+10 arith. update on field
// whose value equals gotten timestamp). In this case we want 70 -> 80.
- CPPUNIT_ASSERT_EQUAL(std::string("80"), getUpdatedValueFromLastPut(sender));
+ ASSERT_EQ("80", getUpdatedValueFromLastPut(sender));
replyToPut(*cb, sender, 2);
replyToPut(*cb, sender, 3);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 70) "
- "ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 70) "
+ "ReturnCode(NONE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testCreateIfNonExistentCreatesDocumentIfAllEmptyGets()
-{
+TEST_F(TwoPhaseUpdateOperationTest, create_if_non_existent_creates_document_if_all_empty_gets) {
setupDistributor(3, 3, "storage:3 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4",
UpdateOptions().createIfNonExistent(true)));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 2"),
- sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 2", sender.getCommands(true));
replyToGet(*cb, sender, 0, 0, false);
replyToGet(*cb, sender, 1, 0, false);
// Since create-if-non-existent is set, distributor should create doc from
// scratch.
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 1,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 0,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 2"),
- sender.getCommands(true, true, 2));
-
- CPPUNIT_ASSERT_EQUAL(std::string("10"), getUpdatedValueFromLastPut(sender));
+ ASSERT_EQ("Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 1,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 0,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 2",
+ sender.getCommands(true, true, 2));
+
+ ASSERT_EQ("10", getUpdatedValueFromLastPut(sender));
replyToPut(*cb, sender, 2);
replyToPut(*cb, sender, 3);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 200000000) "
- "ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 200000000) "
+ "ReturnCode(NONE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testUpdateFailsIfSafePathHasFailedPut()
-{
+TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_safe_path_has_failed_put) {
setupDistributor(3, 3, "storage:3 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4",
UpdateOptions().createIfNonExistent(true)));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 2"),
- sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 2", sender.getCommands(true));
replyToGet(*cb, sender, 0, 0, false);
replyToGet(*cb, sender, 1, 0, false);
// Since create-if-non-existent is set, distributor should create doc from
// scratch.
- CPPUNIT_ASSERT_EQUAL(std::string("Put => 1,Put => 0,Put => 2"),
- sender.getCommands(true, false, 2));
+ ASSERT_EQ("Put => 1,Put => 0,Put => 2", sender.getCommands(true, false, 2));
replyToPut(*cb, sender, 2);
replyToPut(*cb, sender, 3);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4, api::ReturnCode::IO_FAILURE);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 200000000) "
- "ReturnCode(IO_FAILURE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 200000000) "
+ "ReturnCode(IO_FAILURE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testUpdateFailsIfSafePathGetsFail()
-{
+TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_safe_path_gets_fail) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4",
UpdateOptions().createIfNonExistent(true)));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", sender.getCommands(true));
replyToGet(*cb, sender, 0, 0, false, api::ReturnCode::IO_FAILURE);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 1, 0, false, api::ReturnCode::IO_FAILURE);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 0) "
- "ReturnCode(IO_FAILURE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(IO_FAILURE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testUpdateFailsIfApplyThrowsException()
-{
+TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_apply_throws_exception) {
setupDistributor(2, 2, "storage:2 distributor:1");
// Create update for wrong doctype which will fail the update.
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().withError()));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", sender.getCommands(true));
replyToGet(*cb, sender, 0, 50);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 1, 70);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 70) "
- "ReturnCode(INTERNAL_FAILURE, Can not apply a "
- "\"testdoctype2\" document update to a "
- "\"testdoctype1\" document.)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 70) "
+ "ReturnCode(INTERNAL_FAILURE, Can not apply a "
+ "\"testdoctype2\" document update to a "
+ "\"testdoctype1\" document.)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testNonExistingWithAutoCreate()
-{
+TEST_F(TwoPhaseUpdateOperationTest, non_existing_with_auto_create) {
setupDistributor(1, 1, "storage:1 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("", UpdateOptions().createIfNonExistent(true)));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "CreateBucketCommand(BucketId(0x4000000000008b13), active) "
- "Reasons to start: => 0,"
- "Put(BucketId(0x4000000000008b13), doc:test:test, "
- "timestamp 200000000, size 52) => 0"),
- sender.getCommands(true, true));
+ ASSERT_EQ("CreateBucketCommand(BucketId(0x4000000000008b13), active) "
+ "Reasons to start: => 0,"
+ "Put(BucketId(0x4000000000008b13), doc:test:test, "
+ "timestamp 200000000, size 52) => 0",
+ sender.getCommands(true, true));
- CPPUNIT_ASSERT_EQUAL(std::string("10"), getUpdatedValueFromLastPut(sender));
+ ASSERT_EQ("10", getUpdatedValueFromLastPut(sender));
replyToCreateBucket(*cb, sender, 0);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 1);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 200000000) "
- "ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 200000000) "
+ "ReturnCode(NONE)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testSafePathFailsUpdateWhenMismatchingTimestampConstraint()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_fails_update_when_mismatching_timestamp_constraint) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4",
UpdateOptions().timestampToUpdate(1234)));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", sender.getCommands(true));
replyToGet(*cb, sender, 0, 100);
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToGet(*cb, sender, 1, 110);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 0) "
- "ReturnCode(NONE, No document with requested "
- "timestamp found)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(NONE, No document with requested "
+ "timestamp found)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testSafePathUpdatePropagatesMessageSettingsToGetsAndPuts()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_update_propagates_message_settings_to_gets_and_puts) {
setupDistributor(3, 3, "storage:3 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 2"),
- sender.getCommands(true));
- checkMessageSettingsPropagatedTo(sender.commands.at(0));
- checkMessageSettingsPropagatedTo(sender.commands.at(1));
+ ASSERT_EQ("Get => 0,Get => 2", sender.getCommands(true));
+ checkMessageSettingsPropagatedTo(sender.command(0));
+ checkMessageSettingsPropagatedTo(sender.command(1));
replyToGet(*cb, sender, 0, 50);
replyToGet(*cb, sender, 1, 70);
- CPPUNIT_ASSERT_EQUAL(std::string("Put => 1,Put => 0,Put => 2"),
- sender.getCommands(true, false, 2));
- checkMessageSettingsPropagatedTo(sender.commands.at(2));
- checkMessageSettingsPropagatedTo(sender.commands.at(3));
- checkMessageSettingsPropagatedTo(sender.commands.at(4));
+ ASSERT_EQ("Put => 1,Put => 0,Put => 2", sender.getCommands(true, false, 2));
+ checkMessageSettingsPropagatedTo(sender.command(2));
+ checkMessageSettingsPropagatedTo(sender.command(3));
+ checkMessageSettingsPropagatedTo(sender.command(4));
replyToPut(*cb, sender, 2);
replyToPut(*cb, sender, 3);
replyToPut(*cb, sender, 4);
}
-void
-TwoPhaseUpdateOperationTest::testSafePathPropagatesMbusTracesFromReplies()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_propagates_mbus_traces_from_replies) {
setupDistributor(3, 3, "storage:3 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=1/2/3,2=2/3/4"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 2"),
- sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 2", sender.getCommands(true));
replyToGet(*cb, sender, 0, 50, true,
api::ReturnCode::OK, "hello earthlings");
replyToGet(*cb, sender, 1, 70);
- CPPUNIT_ASSERT_EQUAL(std::string("Put => 1,Put => 0,Put => 2"),
- sender.getCommands(true, false, 2));
+ ASSERT_EQ("Put => 1,Put => 0,Put => 2", sender.getCommands(true, false, 2));
replyToPut(*cb, sender, 2, api::ReturnCode::OK, "fooo");
replyToPut(*cb, sender, 3, api::ReturnCode::OK, "baaa");
- CPPUNIT_ASSERT(sender.replies.empty());
+ ASSERT_TRUE(sender.replies().empty());
replyToPut(*cb, sender, 4);
- CPPUNIT_ASSERT_EQUAL(std::string("Update Reply"),
- sender.getLastReply(false));
-
- std::string trace(sender.replies.back()->getTrace().toString());
- //std::cout << "\n\n" << trace << "\n\n";
- CPPUNIT_ASSERT(trace.find("hello earthlings") != std::string::npos);
- CPPUNIT_ASSERT(trace.find("fooo") != std::string::npos);
- CPPUNIT_ASSERT(trace.find("baaa") != std::string::npos);
+ ASSERT_EQ("Update Reply", sender.getLastReply(false));
+
+ std::string trace(sender.replies().back()->getTrace().toString());
+ ASSERT_THAT(trace, HasSubstr("hello earthlings"));
+ ASSERT_THAT(trace, HasSubstr("fooo"));
+ ASSERT_THAT(trace, HasSubstr("baaa"));
}
-void
-TwoPhaseUpdateOperationTest::testUpdateFailsIfOwnershipChangesBetweenGetAndPut()
-{
+TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_ownership_changes_between_get_and_put) {
setupDistributor(2, 2, "storage:2 distributor:1");
// Update towards inconsistent bucket invokes safe path.
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", sender.getCommands(true));
// Alter cluster state so that distributor is now down (technically the
// entire cluster is down in this state, but this should not matter). In
@@ -998,194 +823,170 @@ TwoPhaseUpdateOperationTest::testUpdateFailsIfOwnershipChangesBetweenGetAndPut()
// BUCKET_NOT_FOUND is a transient error code which should cause the client
// to re-send the operation, presumably to the correct distributor the next
// time.
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 70) "
- "ReturnCode(BUCKET_NOT_FOUND, Distributor lost "
- "ownership of bucket between executing the read "
- "and write phases of a two-phase update operation)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 70) "
+ "ReturnCode(BUCKET_NOT_FOUND, Distributor lost "
+ "ownership of bucket between executing the read "
+ "and write phases of a two-phase update operation)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testSafePathConditionMismatchFailsWithTasError()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_mismatch_fails_with_tas_error) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
"testdoctype1.headerval==120")));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
// Newest doc has headerval==110, not 120.
replyToGet(*cb, sender, 0, 100);
replyToGet(*cb, sender, 1, 110);
- CPPUNIT_ASSERT_EQUAL(
- "UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 0) "
- "ReturnCode(TEST_AND_SET_CONDITION_FAILED, "
- "Condition did not match document)"s,
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(TEST_AND_SET_CONDITION_FAILED, "
+ "Condition did not match document)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testSafePathConditionMatchSendsPutsWithUpdatedDoc()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_match_sends_puts_with_updated_doc) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
"testdoctype1.headerval==110")));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
replyToGet(*cb, sender, 0, 100);
replyToGet(*cb, sender, 1, 110);
- CPPUNIT_ASSERT_EQUAL("Put => 1,Put => 0"s,
- sender.getCommands(true, false, 2));
+ ASSERT_EQ("Put => 1,Put => 0", sender.getCommands(true, false, 2));
}
-void
-TwoPhaseUpdateOperationTest::testSafePathConditionParseFailureFailsWithIllegalParamsError()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_parse_failure_fails_with_illegal_params_error) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
"testdoctype1.san==fran...cisco")));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
replyToGet(*cb, sender, 0, 100);
replyToGet(*cb, sender, 1, 110);
// NOTE: condition is currently not attempted parsed until Gets have been
// replied to. This may change in the future.
// XXX reliance on parser/exception error message is very fragile.
- CPPUNIT_ASSERT_EQUAL(
- "UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 0) "
- "ReturnCode(ILLEGAL_PARAMETERS, "
- "Failed to parse test and set condition: "
- "syntax error, unexpected . at column 24 when "
- "parsing selection 'testdoctype1.san==fran...cisco')"s,
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(ILLEGAL_PARAMETERS, "
+ "Failed to parse test and set condition: "
+ "syntax error, unexpected . at column 24 when "
+ "parsing selection 'testdoctype1.san==fran...cisco')",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::testSafePathConditonUnknownDocTypeFailsWithIllegalParamsError()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_unknown_doc_type_fails_with_illegal_params_error) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
"langbein.headerval=1234")));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
replyToGet(*cb, sender, 0, 100);
replyToGet(*cb, sender, 1, 110);
// NOTE: condition is currently not attempted parsed until Gets have been
// replied to. This may change in the future.
- CPPUNIT_ASSERT_EQUAL(
- "UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 0) "
- "ReturnCode(ILLEGAL_PARAMETERS, "
- "Failed to parse test and set condition: "
- "Document type 'langbein' not found at column 1 "
- "when parsing selection 'langbein.headerval=1234')"s,
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(ILLEGAL_PARAMETERS, "
+ "Failed to parse test and set condition: "
+ "Document type 'langbein' not found at column 1 "
+ "when parsing selection 'langbein.headerval=1234')",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::safe_path_condition_with_missing_doc_and_no_auto_create_fails_with_tas_error()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_with_missing_doc_and_no_auto_create_fails_with_tas_error) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().condition(
"testdoctype1.headerval==120")));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
// Both Gets return nothing at all, nothing at all.
replyToGet(*cb, sender, 0, 100, false);
replyToGet(*cb, sender, 1, 110, false);
- CPPUNIT_ASSERT_EQUAL(
- "UpdateReply(doc:test:test, "
- "BucketId(0x0000000000000000), "
- "timestamp 0, timestamp of updated doc: 0) "
- "ReturnCode(TEST_AND_SET_CONDITION_FAILED, "
- "Document did not exist)"s,
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(TEST_AND_SET_CONDITION_FAILED, "
+ "Document did not exist)",
+ sender.getLastReply(true));
}
-void
-TwoPhaseUpdateOperationTest::safe_path_condition_with_missing_doc_and_auto_create_sends_puts()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_condition_with_missing_doc_and_auto_create_sends_puts) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions()
.condition("testdoctype1.headerval==120")
.createIfNonExistent(true)));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
replyToGet(*cb, sender, 0, 100, false);
replyToGet(*cb, sender, 1, 110, false);
- CPPUNIT_ASSERT_EQUAL("Put => 1,Put => 0"s, sender.getCommands(true, false, 2));
+ ASSERT_EQ("Put => 1,Put => 0", sender.getCommands(true, false, 2));
}
void
TwoPhaseUpdateOperationTest::assertAbortedUpdateReplyWithContextPresent(
- const MessageSenderStub& closeSender) const
+ const DistributorMessageSenderStub& closeSender) const
{
- CPPUNIT_ASSERT_EQUAL(size_t(1), closeSender.replies.size());
- StorageReply::SP reply(closeSender.replies.back());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::UPDATE_REPLY, reply->getType());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED,
- reply->getResult().getResult());
+ ASSERT_EQ(1, closeSender.replies().size());
+ StorageReply::SP reply(closeSender.replies().back());
+ ASSERT_EQ(api::MessageType::UPDATE_REPLY, reply->getType());
+ ASSERT_EQ(api::ReturnCode::ABORTED, reply->getResult().getResult());
auto context = reply->getTransportContext(); // Transfers ownership
- CPPUNIT_ASSERT(context.get());
+ ASSERT_TRUE(context.get());
}
-void
-TwoPhaseUpdateOperationTest::testFastPathCloseEdgeSendsCorrectReply()
-{
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_close_edge_sends_correct_reply) {
setupDistributor(1, 1, "storage:1 distributor:1");
// Only 1 replica; consistent with itself by definition.
std::shared_ptr<TwoPhaseUpdateOperation> cb(sendUpdate("0=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL("Update => 0"s, sender.getCommands(true));
+ ASSERT_EQ("Update => 0", sender.getCommands(true));
// Close the operation. This should generate a single reply that is
// bound to the original command. We can identify rogue replies by these
// not having a transport context, as these are unique_ptrs that are
// moved to the reply upon the first reply construction. Any subsequent or
// erroneous replies will not have this context attached to themselves.
- MessageSenderStub closeSender;
+ DistributorMessageSenderStub closeSender;
cb->onClose(closeSender);
assertAbortedUpdateReplyWithContextPresent(closeSender);
}
-void
-TwoPhaseUpdateOperationTest::testSafePathCloseEdgeSendsCorrectReply()
-{
+TEST_F(TwoPhaseUpdateOperationTest, safe_path_close_edge_sends_correct_reply) {
setupDistributor(2, 2, "storage:2 distributor:1");
std::shared_ptr<TwoPhaseUpdateOperation> cb(
sendUpdate("0=1/2/3,1=2/3/4")); // Inconsistent replicas.
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Get => 0,Get => 1"),
- sender.getCommands(true));
+ ASSERT_EQ("Get => 0,Get => 1", sender.getCommands(true));
// Closing the operation should now only return an ABORTED reply for
// the UpdateCommand, _not_ from the nested, pending Get operation (which
// will implicitly generate an ABORTED reply for the synthesized Get
// command passed to it).
- MessageSenderStub closeSender;
+ DistributorMessageSenderStub closeSender;
cb->onClose(closeSender);
assertAbortedUpdateReplyWithContextPresent(closeSender);
@@ -1198,5 +999,4 @@ TwoPhaseUpdateOperationTest::testSafePathCloseEdgeSendsCorrectReply()
// XXX: test case where update reply has been sent but callback still
// has pending messages (e.g. n-of-m case).
-} // distributor
-} // storage
+} // storage::distributor
diff --git a/storage/src/tests/distributor/updateoperationtest.cpp b/storage/src/tests/distributor/updateoperationtest.cpp
index 67cd4f5f233..7cf3ea0ad18 100644
--- a/storage/src/tests/distributor/updateoperationtest.cpp
+++ b/storage/src/tests/distributor/updateoperationtest.cpp
@@ -1,7 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
-#include <iomanip>
#include <tests/common/dummystoragelink.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/state.h>
@@ -11,56 +9,39 @@
#include <vespa/document/update/documentupdate.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/operations/external/updateoperation.h>
-#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/storage/distributor/distributor.h>
#include <vespa/config/helper/configgetter.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
-using std::shared_ptr;
using namespace document;
using namespace storage;
using namespace storage::distributor;
using namespace storage::api;
using namespace std;
using namespace storage::lib;
+using namespace ::testing;
using config::ConfigGetter;
using config::FileSpec;
using vespalib::string;
using document::test::makeDocumentBucket;
-class UpdateOperation_Test : public CppUnit::TestFixture,
- public DistributorTestUtil
-{
- CPPUNIT_TEST_SUITE(UpdateOperation_Test);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST(testNotFound);
- CPPUNIT_TEST(testMultiNode);
- CPPUNIT_TEST(testMultiNodeInconsistentTimestamp);
- CPPUNIT_TEST_SUITE_END();
-
+struct UpdateOperationTest : Test, DistributorTestUtil {
std::shared_ptr<const DocumentTypeRepo> _repo;
- const DocumentType *_html_type;
-
-protected:
- void testSimple();
- void testNotFound();
- void testMultiNode();
- void testMultiNodeInconsistentTimestamp();
+ const DocumentType* _html_type;
-public:
- void setUp() override {
+ void SetUp() override {
_repo.reset(
new DocumentTypeRepo(*ConfigGetter<DocumenttypesConfig>::
- getConfig("config-doctypes",
- FileSpec(TEST_PATH("config-doctypes.cfg")))));
+ getConfig("config-doctypes", FileSpec("../config-doctypes.cfg"))));
_html_type = _repo->getDocumentType("text/html");
createLinks();
}
- void tearDown() override {
+ void TearDown() override {
close();
}
- void replyToMessage(UpdateOperation& callback, MessageSenderStub& sender, uint32_t index,
+ void replyToMessage(UpdateOperation& callback, DistributorMessageSenderStub& sender, uint32_t index,
uint64_t oldTimestamp, const api::BucketInfo& info = api::BucketInfo(2,4,6));
std::shared_ptr<UpdateOperation>
@@ -69,133 +50,116 @@ public:
document::BucketId _bId;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(UpdateOperation_Test);
-
std::shared_ptr<UpdateOperation>
-UpdateOperation_Test::sendUpdate(const std::string& bucketState)
+UpdateOperationTest::sendUpdate(const std::string& bucketState)
{
- document::DocumentUpdate::SP update(
- new document::DocumentUpdate(*_repo, *_html_type,
- document::DocumentId(document::DocIdString("test", "test"))));
+ auto update = std::make_shared<document::DocumentUpdate>(
+ *_repo, *_html_type,
+ document::DocumentId(document::DocIdString("test", "test")));
_bId = getExternalOperationHandler().getBucketId(update->getId());
addNodesToBucketDB(_bId, bucketState);
- std::shared_ptr<api::UpdateCommand> msg(
- new api::UpdateCommand(makeDocumentBucket(document::BucketId(0)), update, 100));
+ auto msg = std::make_shared<api::UpdateCommand>(makeDocumentBucket(document::BucketId(0)), update, 100);
ExternalOperationHandler& handler = getExternalOperationHandler();
- return std::shared_ptr<UpdateOperation>(
- new UpdateOperation(handler, getDistributorBucketSpace(), msg,
- getDistributor().getMetrics().updates[msg->getLoadType()]));
+ return std::make_shared<UpdateOperation>(
+ handler, getDistributorBucketSpace(), msg,
+ getDistributor().getMetrics().updates[msg->getLoadType()]);
}
void
-UpdateOperation_Test::replyToMessage(UpdateOperation& callback, MessageSenderStub& sender, uint32_t index,
+UpdateOperationTest::replyToMessage(UpdateOperation& callback, DistributorMessageSenderStub& sender, uint32_t index,
uint64_t oldTimestamp, const api::BucketInfo& info)
{
- std::shared_ptr<api::StorageMessage> msg2 = sender.commands[index];
- UpdateCommand* updatec = dynamic_cast<UpdateCommand*>(msg2.get());
+ std::shared_ptr<api::StorageMessage> msg2 = sender.command(index);
+ auto* updatec = dynamic_cast<UpdateCommand*>(msg2.get());
std::unique_ptr<api::StorageReply> reply(updatec->makeReply());
- UpdateReply* updateR = static_cast<api::UpdateReply*>(reply.get());
+ auto* updateR = static_cast<api::UpdateReply*>(reply.get());
updateR->setOldTimestamp(oldTimestamp);
updateR->setBucketInfo(info);
callback.onReceive(sender, std::shared_ptr<StorageReply>(reply.release()));
}
-void
-UpdateOperation_Test::testSimple()
-{
+TEST_F(UpdateOperationTest, simple) {
setupDistributor(1, 1, "storage:1 distributor:1");
std::shared_ptr<UpdateOperation> cb(sendUpdate("0=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Update => 0"), sender.getCommands(true));
+ ASSERT_EQ("Update => 0", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 90);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 100, timestamp of updated doc: 90) ReturnCode(NONE)"),
- sender.getLastReply(true));
+ ASSERT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100, timestamp of updated doc: 90) ReturnCode(NONE)",
+ sender.getLastReply(true));
auto& metrics = getDistributor().getMetrics().updates[documentapi::LoadType::DEFAULT];
- CPPUNIT_ASSERT_EQUAL(UINT64_C(0), metrics.diverging_timestamp_updates.getValue());
+ EXPECT_EQ(0, metrics.diverging_timestamp_updates.getValue());
}
-void
-UpdateOperation_Test::testNotFound()
-{
+TEST_F(UpdateOperationTest, not_found) {
setupDistributor(1, 1, "storage:1 distributor:1");
std::shared_ptr<UpdateOperation> cb(sendUpdate("0=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Update => 0"), sender.getCommands(true));
+ ASSERT_EQ("Update => 0", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 0);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 100, timestamp of updated doc: 0) ReturnCode(NONE)"),
- sender.getLastReply(true));
+ EXPECT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100, timestamp of updated doc: 0) ReturnCode(NONE)",
+ sender.getLastReply(true));
}
-void
-UpdateOperation_Test::testMultiNode()
-{
+TEST_F(UpdateOperationTest, multi_node) {
setupDistributor(2, 2, "distributor:1 storage:2");
std::shared_ptr<UpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Update => 0,Update => 1"), sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 120);
replyToMessage(*cb, sender, 1, 120);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 100, timestamp of updated doc: 120) ReturnCode(NONE)"),
- sender.getLastReply(true));
+ ASSERT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100, timestamp of updated doc: 120) ReturnCode(NONE)",
+ sender.getLastReply(true));
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- _bId.toString() + " : "
- "node(idx=1,crc=0x2,docs=4/4,bytes=6/6,trusted=true,active=false,ready=false), "
- "node(idx=0,crc=0x2,docs=4/4,bytes=6/6,trusted=true,active=false,ready=false)"),
- dumpBucket(_bId));
+ ASSERT_EQ(_bId.toString() + " : "
+ "node(idx=1,crc=0x2,docs=4/4,bytes=6/6,trusted=true,active=false,ready=false), "
+ "node(idx=0,crc=0x2,docs=4/4,bytes=6/6,trusted=true,active=false,ready=false)",
+ dumpBucket(_bId));
auto& metrics = getDistributor().getMetrics().updates[documentapi::LoadType::DEFAULT];
- CPPUNIT_ASSERT_EQUAL(UINT64_C(0), metrics.diverging_timestamp_updates.getValue());
+ EXPECT_EQ(0, metrics.diverging_timestamp_updates.getValue());
}
-void
-UpdateOperation_Test::testMultiNodeInconsistentTimestamp()
-{
+TEST_F(UpdateOperationTest, multi_node_inconsistent_timestamp) {
setupDistributor(2, 2, "distributor:1 storage:2");
std::shared_ptr<UpdateOperation> cb(sendUpdate("0=1/2/3,1=1/2/3"));
- MessageSenderStub sender;
+ DistributorMessageSenderStub sender;
cb->start(sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Update => 0,Update => 1"), sender.getCommands(true));
+ ASSERT_EQ("Update => 0,Update => 1", sender.getCommands(true));
replyToMessage(*cb, sender, 0, 119);
replyToMessage(*cb, sender, 1, 120);
- CPPUNIT_ASSERT_EQUAL(
- std::string("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
- "timestamp 100, timestamp of updated doc: 120 Was inconsistent "
- "(best node 1)) ReturnCode(NONE)"),
- sender.getLastReply(true));
+ ASSERT_EQ("UpdateReply(doc:test:test, BucketId(0x0000000000000000), "
+ "timestamp 100, timestamp of updated doc: 120 Was inconsistent "
+ "(best node 1)) ReturnCode(NONE)",
+ sender.getLastReply(true));
auto& metrics = getDistributor().getMetrics().updates[documentapi::LoadType::DEFAULT];
- CPPUNIT_ASSERT_EQUAL(UINT64_C(1), metrics.diverging_timestamp_updates.getValue());
+ EXPECT_EQ(1, metrics.diverging_timestamp_updates.getValue());
}
diff --git a/storage/src/tests/distributor/visitoroperationtest.cpp b/storage/src/tests/distributor/visitoroperationtest.cpp
index af580480563..7819b0ed5dc 100644
--- a/storage/src/tests/distributor/visitoroperationtest.cpp
+++ b/storage/src/tests/distributor/visitoroperationtest.cpp
@@ -1,7 +1,4 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <iomanip>
-#include <iostream>
-#include <memory>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <vespa/storageapi/message/datagram.h>
@@ -13,127 +10,36 @@
#include <tests/distributor/distributortestutil.h>
#include <vespa/storage/distributor/distributor.h>
#include <tests/common/dummystoragelink.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/document/test/make_bucket_space.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
+#include <ostream>
using namespace document;
using namespace storage::api;
using namespace storage::lib;
-using namespace std::string_literals;
+using namespace ::testing;
using document::test::makeBucketSpace;
namespace storage::distributor {
-class VisitorOperationTest : public CppUnit::TestFixture,
- public DistributorTestUtil {
- CPPUNIT_TEST_SUITE(VisitorOperationTest);
- CPPUNIT_TEST(testParameterForwarding);
- CPPUNIT_TEST(testShutdown);
- CPPUNIT_TEST(testNoBucket);
- CPPUNIT_TEST(testOnlySuperBucketAndProgressAllowed);
- CPPUNIT_TEST(testRetiredStorageNode);
- CPPUNIT_TEST(testNoResendAfterTimeoutPassed);
- CPPUNIT_TEST(testDistributorNotReady);
- CPPUNIT_TEST(testInvalidOrderDocSelection);
- CPPUNIT_TEST(testNonExistingBucket);
- CPPUNIT_TEST(testUserSingleBucket);
- CPPUNIT_TEST(testUserInconsistentlySplitBucket);
- CPPUNIT_TEST(testBucketRemovedWhileVisitorPending);
- CPPUNIT_TEST(testEmptyBucketsVisitedWhenVisitingRemoves);
- CPPUNIT_TEST(testResendToOtherStorageNodeOnFailure);
- CPPUNIT_TEST(testTimeoutOnlyAfterReplyFromAllStorageNodes);
- CPPUNIT_TEST(testTimeoutDoesNotOverrideCriticalError);
- CPPUNIT_TEST(testWrongDistribution);
- CPPUNIT_TEST(testWrongDistributionInPendingState);
- CPPUNIT_TEST(testVisitorAbortedIfNodeIsMarkedAsDown);
- CPPUNIT_TEST(testBucketHighBitCount);
- CPPUNIT_TEST(testBucketLowBitCount);
- CPPUNIT_TEST(testParallelVisitorsToOneStorageNode);
- CPPUNIT_TEST(testParallelVisitorsResendOnlyFailing);
- CPPUNIT_TEST(testParallelVisitorsToOneStorageNodeOneSuperBucket);
- CPPUNIT_TEST(testVisitWhenOneBucketCopyIsInvalid);
- CPPUNIT_TEST(testVisitingWhenAllBucketsAreInvalid);
- CPPUNIT_TEST(testInconsistencyHandling);
- CPPUNIT_TEST(testVisitIdealNode);
- CPPUNIT_TEST(testNoResendingOnCriticalFailure);
- CPPUNIT_TEST(testFailureOnAllNodes);
- CPPUNIT_TEST(testVisitOrder);
- CPPUNIT_TEST(testVisitInChunks);
- CPPUNIT_TEST(testVisitOrderSplitPastOrderBits);
- CPPUNIT_TEST(testVisitOrderInconsistentlySplit);
- CPPUNIT_TEST(testUserVisitorOrder);
- CPPUNIT_TEST(testUserVisitorOrderSplitPastOrderBits);
- CPPUNIT_TEST(testNoClientReplyBeforeAllStorageRepliesReceived);
- CPPUNIT_TEST(testSkipFailedSubBucketsWhenVisitingInconsistent);
- CPPUNIT_TEST(testQueueTimeoutIsFactorOfTotalTimeout);
- CPPUNIT_TEST(metrics_are_updated_with_visitor_statistics_upon_replying);
- CPPUNIT_TEST(statistical_metrics_not_updated_on_wrong_distribution);
- CPPUNIT_TEST_SUITE_END();
-
-protected:
- void testParameterForwarding();
- void testShutdown();
- void testNoBucket();
- void testOnlySuperBucketAndProgressAllowed();
- void testRetiredStorageNode();
- void testNoResendAfterTimeoutPassed();
- void testDistributorNotReady();
- void testInvalidOrderDocSelection();
- void testNonExistingBucket();
- void testUserSingleBucket();
- void testUserInconsistentlySplitBucket();
- void testBucketRemovedWhileVisitorPending();
- void testEmptyBucketsVisitedWhenVisitingRemoves();
- void testResendToOtherStorageNodeOnFailure();
- void testTimeoutOnlyAfterReplyFromAllStorageNodes();
- void testTimeoutDoesNotOverrideCriticalError();
- void testAbortNonExisting();
- void testAbort();
- void testWrongDistribution();
- void testWrongDistributionInPendingState();
- void testVisitorAbortedIfNodeIsMarkedAsDown();
- void testBucketHighBitCount();
- void testBucketLowBitCount();
- void testParallelVisitorsToOneStorageNode();
- void testParallelVisitorsResendOnlyFailing();
- void testParallelVisitorsToOneStorageNodeOneSuperBucket();
- void testVisitWhenOneBucketCopyIsInvalid();
- void testVisitingWhenAllBucketsAreInvalid();
- void testInconsistencyHandling();
- void testVisitIdealNode();
- void testNoResendingOnCriticalFailure();
- void testFailureOnAllNodes();
- void testVisitOrder();
- void testVisitInChunks();
- void testVisitOrderSplitPastOrderBits();
- void testVisitOrderInconsistentlySplit();
- void testUserVisitorOrder();
- void testUserVisitorOrderSplitPastOrderBits();
- void testUserVisitorOrderInconsistentlySplit();
- void testNoClientReplyBeforeAllStorageRepliesReceived();
- void testSkipFailedSubBucketsWhenVisitingInconsistent();
- void testQueueTimeoutIsFactorOfTotalTimeout();
- void metrics_are_updated_with_visitor_statistics_upon_replying();
- void statistical_metrics_not_updated_on_wrong_distribution();
-public:
+struct VisitorOperationTest : Test, DistributorTestUtil {
VisitorOperationTest()
: defaultConfig(100, 100)
{}
- void setUp() override {
+ void SetUp() override {
createLinks();
nullId = document::BucketId(0, 0);
- doneId = document::BucketId(INT_MAX);
};
- void tearDown() override {
+ void TearDown() override {
close();
}
enum {MAX_PENDING = 2};
-private:
+
document::BucketId nullId;
- document::BucketId doneId;
VisitorOperation::Config defaultConfig;
api::CreateVisitorCommand::SP
@@ -149,8 +55,8 @@ private:
document::OrderingSpecification::ASCENDING,
const std::string& docSelection = "")
{
- api::CreateVisitorCommand::SP cmd(
- new api::CreateVisitorCommand(makeBucketSpace(), libraryName, instanceId, docSelection));
+ auto cmd = std::make_shared<api::CreateVisitorCommand>(
+ makeBucketSpace(), libraryName, instanceId, docSelection);
cmd->setControlDestination("controldestination");
cmd->setDataDestination("datadestination");
cmd->setFieldSet("[header]");
@@ -176,13 +82,13 @@ private:
std::string
serializeVisitorCommand(int idx = -1) {
if (idx == -1) {
- idx = _sender.commands.size() - 1;
+ idx = _sender.commands().size() - 1;
}
std::ostringstream ost;
- CreateVisitorCommand* cvc = dynamic_cast<CreateVisitorCommand*>(
- _sender.commands[idx].get());
+ auto* cvc = dynamic_cast<CreateVisitorCommand*>(_sender.command(idx).get());
+ assert(cvc != nullptr);
ost << *cvc << " Buckets: [ ";
for (uint32_t i = 0; i < cvc->getBuckets().size(); ++i) {
@@ -225,9 +131,8 @@ private:
}
const std::vector<BucketId>& getBucketsFromLastCommand() {
- const CreateVisitorCommand& cvc(
- dynamic_cast<const CreateVisitorCommand&>(
- *_sender.commands[_sender.commands.size() - 1]));
+ const auto& cvc = dynamic_cast<const CreateVisitorCommand&>(
+ *_sender.commands().back());
return cvc.getBuckets();
}
@@ -236,7 +141,7 @@ private:
document::BucketId lastId,
uint32_t maxBuckets);
- std::string doOrderedVisitor(document::BucketId startBucket);
+ void doOrderedVisitor(document::BucketId startBucket, std::string& out);
void doStandardVisitTest(const std::string& clusterState);
@@ -246,11 +151,7 @@ private:
void do_visitor_roundtrip_with_statistics(const api::ReturnCode& result);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(VisitorOperationTest);
-
-void
-VisitorOperationTest::testParameterForwarding()
-{
+TEST_F(VisitorOperationTest, parameter_forwarding) {
doStandardVisitTest("distributor:1 storage:1");
}
@@ -267,11 +168,8 @@ VisitorOperationTest::doStandardVisitTest(const std::string& clusterState)
vespalib::string instanceId("testParameterForwarding");
vespalib::string libraryName("dumpvisitor");
vespalib::string docSelection("");
- api::CreateVisitorCommand::SP msg(
- new api::CreateVisitorCommand(makeBucketSpace(),
- libraryName,
- instanceId,
- docSelection));
+ auto msg = std::make_shared<api::CreateVisitorCommand>(
+ makeBucketSpace(), libraryName, instanceId, docSelection);
vespalib::string controlDestination("controldestination");
msg->setControlDestination(controlDestination);
vespalib::string dataDestination("datadestination");
@@ -291,41 +189,37 @@ VisitorOperationTest::doStandardVisitTest(const std::string& clusterState)
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
// Receive create visitor command for storage and simulate reply
- api::StorageMessage::SP rep0 = _sender.commands[0];
- CreateVisitorCommand* cvc = dynamic_cast<CreateVisitorCommand*>(rep0.get());
- CPPUNIT_ASSERT(cvc);
- CPPUNIT_ASSERT_EQUAL(libraryName, cvc->getLibraryName());
- CPPUNIT_ASSERT_EQUAL(instanceId, cvc->getInstanceId().substr(0, instanceId.length()));
- CPPUNIT_ASSERT_EQUAL(docSelection, cvc->getDocumentSelection());
- CPPUNIT_ASSERT_EQUAL(controlDestination, cvc->getControlDestination());
- CPPUNIT_ASSERT_EQUAL(dataDestination, cvc->getDataDestination());
- CPPUNIT_ASSERT_EQUAL((unsigned int) VisitorOperationTest::MAX_PENDING, cvc->getMaximumPendingReplyCount());
- CPPUNIT_ASSERT_EQUAL((unsigned int) 8, cvc->getMaxBucketsPerVisitor());
- CPPUNIT_ASSERT_EQUAL((size_t) 1, cvc->getBuckets().size());
- CPPUNIT_ASSERT_EQUAL((api::Timestamp) 10, cvc->getFromTime());
- CPPUNIT_ASSERT(cvc->getToTime() > 0);
- CPPUNIT_ASSERT_EQUAL(vespalib::string("[header]"), cvc->getFieldSet());
- CPPUNIT_ASSERT_EQUAL((bool) 1, cvc->visitRemoves());
- CPPUNIT_ASSERT_EQUAL(uint32_t(1234), cvc->getTimeout());
- CPPUNIT_ASSERT_EQUAL(uint32_t(7), cvc->getTrace().getLevel());
+ api::StorageMessage::SP rep0 = _sender.command(0);
+ auto* cvc = dynamic_cast<CreateVisitorCommand*>(rep0.get());
+ ASSERT_TRUE(cvc != nullptr);
+ EXPECT_EQ(libraryName, cvc->getLibraryName());
+ EXPECT_EQ(instanceId, cvc->getInstanceId().substr(0, instanceId.length()));
+ EXPECT_EQ(docSelection, cvc->getDocumentSelection());
+ EXPECT_EQ(controlDestination, cvc->getControlDestination());
+ EXPECT_EQ(dataDestination, cvc->getDataDestination());
+ EXPECT_EQ(VisitorOperationTest::MAX_PENDING, cvc->getMaximumPendingReplyCount());
+ EXPECT_EQ(8, cvc->getMaxBucketsPerVisitor());
+ EXPECT_EQ(1, cvc->getBuckets().size());
+ EXPECT_EQ(api::Timestamp(10), cvc->getFromTime());
+ EXPECT_GT(cvc->getToTime(), 0);
+ EXPECT_EQ("[header]", cvc->getFieldSet());
+ EXPECT_TRUE(cvc->visitRemoves());
+ EXPECT_EQ(1234, cvc->getTimeout());
+ EXPECT_EQ(7, cvc->getTrace().getLevel());
sendReply(*op);
- CPPUNIT_ASSERT_EQUAL(std::string("CreateVisitorReply("
- "last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"),
- _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), defaultVisitorMetrics().
- ok.getLongValue("count"));
+ ASSERT_EQ("CreateVisitorReply("
+ "last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
+ EXPECT_EQ(1, defaultVisitorMetrics().ok.getLongValue("count"));
}
-void
-VisitorOperationTest::testShutdown()
-{
+TEST_F(VisitorOperationTest, shutdown) {
enableDistributorClusterState("distributor:1 storage:1");
// Create bucket in bucketdb
@@ -336,11 +230,8 @@ VisitorOperationTest::testShutdown()
vespalib::string instanceId("testShutdown");
vespalib::string libraryName("dumpvisitor");
vespalib::string docSelection("");
- api::CreateVisitorCommand::SP msg(
- new api::CreateVisitorCommand(makeBucketSpace(),
- libraryName,
- instanceId,
- docSelection));
+ auto msg = std::make_shared<api::CreateVisitorCommand>(
+ makeBucketSpace(), libraryName, instanceId, docSelection);
msg->addBucketToBeVisited(id);
msg->addBucketToBeVisited(nullId);
@@ -348,36 +239,29 @@ VisitorOperationTest::testShutdown()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
op->onClose(_sender); // This will fail the visitor
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(ABORTED, Process is shutting down)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ABORTED, Process is shutting down)",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testNoBucket()
-{
+TEST_F(VisitorOperationTest, no_bucket) {
enableDistributorClusterState("distributor:1 storage:1");
// Send create visitor
- api::CreateVisitorCommand::SP msg(new api::CreateVisitorCommand(
- makeBucketSpace(), "dumpvisitor", "instance", ""));
+ auto msg = std::make_shared<api::CreateVisitorCommand>(
+ makeBucketSpace(), "dumpvisitor", "instance", "");
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(ILLEGAL_PARAMETERS, No buckets in "
- "CreateVisitorCommand for visitor 'instance')"),
- runEmptyVisitor(msg));
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ILLEGAL_PARAMETERS, No buckets in "
+ "CreateVisitorCommand for visitor 'instance')",
+ runEmptyVisitor(msg));
}
-void
-VisitorOperationTest::testOnlySuperBucketAndProgressAllowed()
-{
+TEST_F(VisitorOperationTest, only_super_bucket_and_progress_allowed) {
enableDistributorClusterState("distributor:1 storage:1");
// Send create visitor
@@ -387,23 +271,18 @@ VisitorOperationTest::testOnlySuperBucketAndProgressAllowed()
msg->addBucketToBeVisited(nullId);
msg->addBucketToBeVisited(nullId);
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(ILLEGAL_PARAMETERS, CreateVisitorCommand "
- "does not contain 2 buckets for visitor "
- "'instance')"),
- runEmptyVisitor(msg));
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ILLEGAL_PARAMETERS, CreateVisitorCommand "
+ "does not contain 2 buckets for visitor "
+ "'instance')",
+ runEmptyVisitor(msg));
}
-void
-VisitorOperationTest::testRetiredStorageNode()
-{
+TEST_F(VisitorOperationTest, retired_storage_node) {
doStandardVisitTest("distributor:1 storage:1 .0.s:r");
}
-void
-VisitorOperationTest::testNoResendAfterTimeoutPassed()
-{
+TEST_F(VisitorOperationTest, no_resend_after_timeout_passed) {
document::BucketId id(uint64_t(0x400000000000007b));
enableDistributorClusterState("distributor:1 storage:2");
@@ -414,78 +293,57 @@ VisitorOperationTest::testNoResendAfterTimeoutPassed()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
getClock().addMilliSecondsToTime(22);
sendReply(*op, -1, api::ReturnCode::BUSY);
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(ABORTED, Timeout of 20 ms is running out)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ABORTED, Timeout of 20 ms is running out)",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testDistributorNotReady()
-{
+TEST_F(VisitorOperationTest, distributor_not_ready) {
enableDistributorClusterState("distributor:0 storage:0");
document::BucketId id(uint64_t(0x400000000000007b));
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
"ReturnCode(NODE_NOT_READY, No distributors available when "
- "processing visitor 'notready')"),
- runEmptyVisitor(createVisitorCommand("notready", id, nullId)));
+ "processing visitor 'notready')",
+ runEmptyVisitor(createVisitorCommand("notready", id, nullId)));
}
// Distributor only parses selection if in the order doc case (which is detected
// by first checking if string contains "order" which it must to refer to
// "id.order" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-void
-VisitorOperationTest::testInvalidOrderDocSelection()
-{
+TEST_F(VisitorOperationTest, invalid_order_doc_selection) {
enableDistributorClusterState("distributor:1 storage:1");
document::BucketId id(0x400000000000007b);
addNodesToBucketDB(id, "0=1/1/1/t");
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(ILLEGAL_PARAMETERS, Failed to parse document select "
- "string 'id.order(10,3)=1 and dummy': Document type 'dummy' not "
- "found at column 22 when parsing selection 'id.order(10,3)=1 and dummy')"),
- runEmptyVisitor(
- createVisitorCommand("invalidOrderDoc",
- id,
- nullId,
- 8,
- 500,
- false,
- false,
- "dumpvisitor",
- document::OrderingSpecification::ASCENDING,
- "id.order(10,3)=1 and dummy")));
+ auto res = runEmptyVisitor(
+ createVisitorCommand("invalidOrderDoc", id, nullId, 8, 500,
+ false, false, "dumpvisitor",
+ document::OrderingSpecification::ASCENDING,
+ "id.order(10,3)=1 and dummy"));
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ILLEGAL_PARAMETERS, Failed to parse document select "
+ "string 'id.order(10,3)=1 and dummy': Document type 'dummy' not "
+ "found at column 22 when parsing selection 'id.order(10,3)=1 and dummy')",
+ res);
+
}
-void
-VisitorOperationTest::testNonExistingBucket()
-{
+TEST_F(VisitorOperationTest, non_existing_bucket) {
document::BucketId id(uint64_t(0x400000000000007b));
enableDistributorClusterState("distributor:1 storage:1");
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"),
- runEmptyVisitor(
- createVisitorCommand("nonExistingBucket",
- id,
- nullId)));
+ auto res = runEmptyVisitor(
+ createVisitorCommand("nonExistingBucket", id, nullId));
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)", res);
}
-void
-VisitorOperationTest::testUserSingleBucket()
-{
+TEST_F(VisitorOperationTest, user_single_bucket) {
document::BucketId id(uint64_t(0x400000000000007b));
document::BucketId userid(uint64_t(0x800000000000007b));
enableDistributorClusterState("distributor:1 storage:1");
@@ -507,14 +365,11 @@ VisitorOperationTest::testUserSingleBucket()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL_MSG(_sender.getLastReply(),
- std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true)) << _sender.getLastReply();
sendReply(*op);
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
}
std::pair<std::string, std::string>
@@ -546,9 +401,7 @@ VisitorOperationTest::runVisitor(document::BucketId id,
return retVal;
}
-void
-VisitorOperationTest::testUserInconsistentlySplitBucket()
-{
+TEST_F(VisitorOperationTest, user_inconsistently_split_bucket) {
enableDistributorClusterState("distributor:1 storage:1");
// Not containing (19, 0x40001)
@@ -575,27 +428,23 @@ VisitorOperationTest::testUserInconsistentlySplitBucket()
std::pair<std::string, std::string> val(
runVisitor(id, nullId, 100));
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorCommand(dumpvisitor, true, 7 buckets) "
- "Buckets: [ BucketId(0x4400000000000001) "
- "BucketId(0x4800000000000001) "
- "BucketId(0x4c00000000040001) "
- "BucketId(0x5000000000040001) "
- "BucketId(0x5400000000040001) "
- "BucketId(0x5400000000140001) "
- "BucketId(0x50000000000c0001) ]"),
- val.first);
-
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"),
- val.second);
+ EXPECT_EQ("CreateVisitorCommand(dumpvisitor, true, 7 buckets) "
+ "Buckets: [ BucketId(0x4400000000000001) "
+ "BucketId(0x4800000000000001) "
+ "BucketId(0x4c00000000040001) "
+ "BucketId(0x5000000000040001) "
+ "BucketId(0x5400000000040001) "
+ "BucketId(0x5400000000140001) "
+ "BucketId(0x50000000000c0001) ]",
+ val.first);
+
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)",
+ val.second);
}
}
-void
-VisitorOperationTest::testBucketRemovedWhileVisitorPending()
-{
+TEST_F(VisitorOperationTest, bucket_removed_while_visitor_pending) {
enableDistributorClusterState("distributor:1 storage:1");
// Create bucket in bucketdb
@@ -608,24 +457,19 @@ VisitorOperationTest::testBucketRemovedWhileVisitorPending()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
removeFromBucketDB(id);
sendReply(*op, -1, api::ReturnCode::NOT_CONNECTED);
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(BUCKET_NOT_FOUND)"),
- _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), defaultVisitorMetrics().failures.
- inconsistent_bucket.getLongValue("count"));
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)",
+ _sender.getLastReply());
+ EXPECT_EQ(1, defaultVisitorMetrics().failures.inconsistent_bucket.getLongValue("count"));
}
-void
-VisitorOperationTest::testEmptyBucketsVisitedWhenVisitingRemoves()
-{
+TEST_F(VisitorOperationTest, empty_buckets_visited_when_visiting_removes) {
enableDistributorClusterState("distributor:1 storage:1");
document::BucketId id(uint64_t(0x400000000000007b));
addNodesToBucketDB(id, "0=0/0/0/1/2/t");
@@ -642,13 +486,10 @@ VisitorOperationTest::testEmptyBucketsVisitedWhenVisitingRemoves()
op->start(_sender, framework::MilliSecTime(0));
// Since visitRemoves is true, the empty bucket will be visited
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
}
-void
-VisitorOperationTest::testResendToOtherStorageNodeOnFailure()
-{
+TEST_F(VisitorOperationTest, resend_to_other_storage_node_on_failure) {
enableDistributorClusterState("distributor:1 storage:2");
document::BucketId id(uint64_t(0x400000000000007b));
@@ -659,23 +500,20 @@ VisitorOperationTest::testResendToOtherStorageNodeOnFailure()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
sendReply(*op, -1, api::ReturnCode::NOT_CONNECTED);
- CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies());
+ ASSERT_EQ("", _sender.getReplies());
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0,Visitor Create => 1",
+ _sender.getCommands(true));
}
// Since MessageBus handles timeouts for us implicitly, we make the assumption
// that we can safely wait for all replies to be received before sending a
// client reply and that this won't cause things to hang for indeterminate
// amounts of time.
-void
-VisitorOperationTest::testTimeoutOnlyAfterReplyFromAllStorageNodes()
-{
+TEST_F(VisitorOperationTest, timeout_only_after_reply_from_all_storage_nodes) {
enableDistributorClusterState("distributor:1 storage:2");
// Contained in (16, 0x1)
@@ -690,33 +528,30 @@ VisitorOperationTest::testTimeoutOnlyAfterReplyFromAllStorageNodes()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL("Visitor Create => 0,Visitor Create => 1"s,
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0,Visitor Create => 1",
+ _sender.getCommands(true));
getClock().addMilliSecondsToTime(501);
sendReply(*op, 0);
- CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies()); // No reply yet.
+ ASSERT_EQ("", _sender.getReplies()); // No reply yet.
sendReply(*op, 1, api::ReturnCode::BUSY);
- CPPUNIT_ASSERT_EQUAL(
- "CreateVisitorReply(last=BucketId(0x4400000000000001)) "
- "ReturnCode(ABORTED, Timeout of 500 ms is running out)"s,
- _sender.getLastReply());
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x4400000000000001)) "
+ "ReturnCode(ABORTED, Timeout of 500 ms is running out)",
+ _sender.getLastReply());
// XXX This is sub-optimal in the case that we time out but all storage
// visitors return OK, as we'll then be failing an operation that
// technically went fine. However, this is assumed to happen sufficiently
// rarely (requires timing to be so that mbus timouts don't happen for
// neither client -> distributor nor distributor -> storage for the
- // operation to possibly could have been considered successful) that we
+ // operation to possibly have been considered successful) that we
// don't bother to add complexity for handling it as a special case.
}
-void
-VisitorOperationTest::testTimeoutDoesNotOverrideCriticalError()
-{
+TEST_F(VisitorOperationTest, timeout_does_not_override_critical_error) {
enableDistributorClusterState("distributor:1 storage:2");
addNodesToBucketDB(document::BucketId(17, 0x00001), "0=1/1/1/t");
addNodesToBucketDB(document::BucketId(17, 0x10001), "1=1/1/1/t");
@@ -729,41 +564,33 @@ VisitorOperationTest::testTimeoutDoesNotOverrideCriticalError()
500)); // ms timeout
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL("Visitor Create => 0,Visitor Create => 1"s,
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0,Visitor Create => 1",
+ _sender.getCommands(true));
getClock().addMilliSecondsToTime(501);
// Technically has timed out at this point, but should still report the
// critical failure.
sendReply(*op, 0, api::ReturnCode::INTERNAL_FAILURE);
- CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies());
+ ASSERT_EQ("", _sender.getReplies());
sendReply(*op, 1, api::ReturnCode::BUSY);
- CPPUNIT_ASSERT_EQUAL(
- "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(INTERNAL_FAILURE, [from content node 0] )"s,
- _sender.getLastReply());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), defaultVisitorMetrics().failures.
- storagefailure.getLongValue("count"));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(INTERNAL_FAILURE, [from content node 0] )",
+ _sender.getLastReply());
+ EXPECT_EQ(1, defaultVisitorMetrics().failures.storagefailure.getLongValue("count"));
}
-void
-VisitorOperationTest::testWrongDistribution()
-{
+TEST_F(VisitorOperationTest, wrong_distribution) {
setupDistributor(1, 100, "distributor:100 storage:2");
document::BucketId id(uint64_t(0x400000000000127b));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(WRONG_DISTRIBUTION, distributor:100 storage:2)"),
- runEmptyVisitor(createVisitorCommand("wrongdist", id, nullId)));
- CPPUNIT_ASSERT_EQUAL(int64_t(1), defaultVisitorMetrics().failures.
- wrongdistributor.getLongValue("count"));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:100 storage:2)",
+ runEmptyVisitor(createVisitorCommand("wrongdist", id, nullId)));
+ EXPECT_EQ(1, defaultVisitorMetrics().failures.wrongdistributor.getLongValue("count"));
}
-void
-VisitorOperationTest::testWrongDistributionInPendingState()
-{
+TEST_F(VisitorOperationTest, wrong_distribution_in_pending_state) {
// Force bucket to belong to this distributor in currently enabled state.
setupDistributor(1, 100, "distributor:1 storage:2");
// Trigger pending cluster state. Note: increase in storage node count
@@ -773,10 +600,9 @@ VisitorOperationTest::testWrongDistributionInPendingState()
getBucketDBUpdater().onSetSystemState(stateCmd);
document::BucketId id(uint64_t(0x400000000000127b));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(WRONG_DISTRIBUTION, distributor:100 storage:3)"),
- runEmptyVisitor(createVisitorCommand("wrongdistpending", id, nullId)));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:100 storage:3)",
+ runEmptyVisitor(createVisitorCommand("wrongdistpending", id, nullId)));
}
// If the current node state changes, this alters the node's cluster state
@@ -784,30 +610,24 @@ VisitorOperationTest::testWrongDistributionInPendingState()
// we cannot answer with WRONG_DISTRIBUTION as the client expects to see a
// higher version number.
// See ticket 6353382 for details.
-void
-VisitorOperationTest::testVisitorAbortedIfNodeIsMarkedAsDown()
-{
+TEST_F(VisitorOperationTest, visitor_aborted_if_node_is_marked_as_down) {
setupDistributor(1, 10, "distributor:10 .0.s:s storage:10");
document::BucketId id(uint64_t(0x400000000000127b));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(ABORTED, Distributor is shutting down)"),
- runEmptyVisitor(createVisitorCommand("wrongdist", id, nullId)));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ABORTED, Distributor is shutting down)",
+ runEmptyVisitor(createVisitorCommand("wrongdist", id, nullId)));
}
-void
-VisitorOperationTest::testBucketHighBitCount()
-{
+TEST_F(VisitorOperationTest, bucket_high_bit_count) {
enableDistributorClusterState("distributor:1 storage:1 bits:16");
document::BucketId id(18, 0x0);
addNodesToBucketDB(id, "0=1/1/1/t");
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)"),
- runEmptyVisitor(createVisitorCommand("buckethigbit", id, nullId)));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)",
+ runEmptyVisitor(createVisitorCommand("buckethigbit", id, nullId)));
auto op = createOpWithDefaultConfig(
createVisitorCommand("buckethighbitcount",
@@ -823,22 +643,18 @@ VisitorOperationTest::testBucketHighBitCount()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ EXPECT_EQ("Visitor Create => 0", _sender.getCommands(true));
}
-void
-VisitorOperationTest::testBucketLowBitCount()
-{
+TEST_F(VisitorOperationTest, bucket_low_bit_count) {
enableDistributorClusterState("distributor:1 storage:1 bits:16");
document::BucketId id(1, 0x0);
addNodesToBucketDB(id, "0=1/1/1/t");
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)"),
- runEmptyVisitor(createVisitorCommand("bucketlowbit", id, nullId)));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)",
+ runEmptyVisitor(createVisitorCommand("bucketlowbit", id, nullId)));
auto op = createOpWithDefaultConfig(
createVisitorCommand("buckethighbitcount",
@@ -853,15 +669,12 @@ VisitorOperationTest::testBucketLowBitCount()
"true"));
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:1 storage:1)",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testParallelVisitorsToOneStorageNode()
-{
+TEST_F(VisitorOperationTest, parallel_visitors_to_one_storage_node) {
enableDistributorClusterState("distributor:1 storage:1");
// Create buckets in bucketdb
@@ -878,47 +691,42 @@ VisitorOperationTest::testParallelVisitorsToOneStorageNode()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 0,"
- "Visitor Create => 0,Visitor Create => 0"),
- _sender.getCommands(true));
-
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
- "BucketId(0x5400000000000001) BucketId(0x5400000000040001) "
- "BucketId(0x5400000000020001) BucketId(0x5400000000060001) "
- "BucketId(0x5400000000010001) BucketId(0x5400000000050001) "
- "BucketId(0x5400000000030001) BucketId(0x5400000000070001) ]"),
- serializeVisitorCommand(0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
- "BucketId(0x5400000000100001) BucketId(0x5400000000140001) "
- "BucketId(0x5400000000120001) BucketId(0x5400000000160001) "
- "BucketId(0x5400000000110001) BucketId(0x5400000000150001) "
- "BucketId(0x5400000000130001) BucketId(0x5400000000170001) ]"),
- serializeVisitorCommand(1));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
- "BucketId(0x5400000000080001) BucketId(0x54000000000c0001) "
- "BucketId(0x54000000000a0001) BucketId(0x54000000000e0001) "
- "BucketId(0x5400000000090001) BucketId(0x54000000000d0001) "
- "BucketId(0x54000000000b0001) BucketId(0x54000000000f0001) ]"),
- serializeVisitorCommand(2));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorCommand(dumpvisitor, , 7 buckets) Buckets: [ "
- "BucketId(0x5400000000180001) BucketId(0x54000000001c0001) "
- "BucketId(0x54000000001a0001) BucketId(0x54000000001e0001) "
- "BucketId(0x5400000000190001) BucketId(0x54000000001d0001) "
- "BucketId(0x54000000001b0001) ]"),
- serializeVisitorCommand(3));
+ ASSERT_EQ("Visitor Create => 0,Visitor Create => 0,"
+ "Visitor Create => 0,Visitor Create => 0",
+ _sender.getCommands(true));
+
+ ASSERT_EQ("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x5400000000000001) BucketId(0x5400000000040001) "
+ "BucketId(0x5400000000020001) BucketId(0x5400000000060001) "
+ "BucketId(0x5400000000010001) BucketId(0x5400000000050001) "
+ "BucketId(0x5400000000030001) BucketId(0x5400000000070001) ]",
+ serializeVisitorCommand(0));
+ ASSERT_EQ("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x5400000000100001) BucketId(0x5400000000140001) "
+ "BucketId(0x5400000000120001) BucketId(0x5400000000160001) "
+ "BucketId(0x5400000000110001) BucketId(0x5400000000150001) "
+ "BucketId(0x5400000000130001) BucketId(0x5400000000170001) ]",
+ serializeVisitorCommand(1));
+ ASSERT_EQ("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x5400000000080001) BucketId(0x54000000000c0001) "
+ "BucketId(0x54000000000a0001) BucketId(0x54000000000e0001) "
+ "BucketId(0x5400000000090001) BucketId(0x54000000000d0001) "
+ "BucketId(0x54000000000b0001) BucketId(0x54000000000f0001) ]",
+ serializeVisitorCommand(2));
+ ASSERT_EQ("CreateVisitorCommand(dumpvisitor, , 7 buckets) Buckets: [ "
+ "BucketId(0x5400000000180001) BucketId(0x54000000001c0001) "
+ "BucketId(0x54000000001a0001) BucketId(0x54000000001e0001) "
+ "BucketId(0x5400000000190001) BucketId(0x54000000001d0001) "
+ "BucketId(0x54000000001b0001) ]",
+ serializeVisitorCommand(3));
for (uint32_t i = 0; i < 4; ++i) {
sendReply(*op, i);
}
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x54000000000f0001)) "
- "ReturnCode(NONE)"),
- _sender.getLastReply());
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x54000000000f0001)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
_sender.clear();
@@ -930,20 +738,16 @@ VisitorOperationTest::testParallelVisitorsToOneStorageNode()
op2->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
sendReply(*op2);
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testParallelVisitorsResendOnlyFailing()
-{
+TEST_F(VisitorOperationTest, parallel_visitors_resend_only_failing) {
enableDistributorClusterState("distributor:1 storage:2");
// Create buckets in bucketdb
@@ -962,32 +766,29 @@ VisitorOperationTest::testParallelVisitorsResendOnlyFailing()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 0,"
- "Visitor Create => 0,Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0,Visitor Create => 0,"
+ "Visitor Create => 0,Visitor Create => 0",
+ _sender.getCommands(true));
for (uint32_t i = 0; i < 2; ++i) {
sendReply(*op, i, api::ReturnCode::NOT_CONNECTED);
}
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 0,"
- "Visitor Create => 0,Visitor Create => 0,"
- "Visitor Create => 1,Visitor Create => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0,Visitor Create => 0,"
+ "Visitor Create => 0,Visitor Create => 0,"
+ "Visitor Create => 1,Visitor Create => 1",
+ _sender.getCommands(true));
for (uint32_t i = 2; i < 6; ++i) {
sendReply(*op, i);
}
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x54000000000f0001)) "
- "ReturnCode(NONE)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x54000000000f0001)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testParallelVisitorsToOneStorageNodeOneSuperBucket()
-{
+TEST_F(VisitorOperationTest, parallel_visitors_to_one_storage_node_one_super_bucket) {
enableDistributorClusterState("distributor:1 storage:1");
// Create buckets in bucketdb
@@ -1004,74 +805,56 @@ VisitorOperationTest::testParallelVisitorsToOneStorageNodeOneSuperBucket()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
- "BucketId(0x8c000000e3362b6a) BucketId(0x8c000004e3362b6a) "
- "BucketId(0x8c000002e3362b6a) BucketId(0x8c000006e3362b6a) "
- "BucketId(0x8c000001e3362b6a) BucketId(0x8c000005e3362b6a) "
- "BucketId(0x8c000003e3362b6a) BucketId(0x8c000007e3362b6a) ]"),
- serializeVisitorCommand(0));
+ ASSERT_EQ("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x8c000000e3362b6a) BucketId(0x8c000004e3362b6a) "
+ "BucketId(0x8c000002e3362b6a) BucketId(0x8c000006e3362b6a) "
+ "BucketId(0x8c000001e3362b6a) BucketId(0x8c000005e3362b6a) "
+ "BucketId(0x8c000003e3362b6a) BucketId(0x8c000007e3362b6a) ]",
+ serializeVisitorCommand(0));
sendReply(*op);
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testVisitWhenOneBucketCopyIsInvalid()
-{
+TEST_F(VisitorOperationTest, visit_when_one_bucket_copy_is_invalid) {
enableDistributorClusterState("distributor:1 storage:2");
document::BucketId id(16, 0);
addNodesToBucketDB(id, "0=100,1=0/0/1");
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(BUCKET_NOT_FOUND)"),
- runEmptyVisitor(createVisitorCommand("incompletehandling",
- id,
- nullId)));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)",
+ runEmptyVisitor(createVisitorCommand("incompletehandling", id, nullId)));
}
-void
-VisitorOperationTest::testVisitingWhenAllBucketsAreInvalid()
-{
+TEST_F(VisitorOperationTest, visiting_when_all_buckets_are_invalid) {
enableDistributorClusterState("distributor:1 storage:2");
document::BucketId id(16, 0);
addNodesToBucketDB(id, "0=0/0/1,1=0/0/1");
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(BUCKET_NOT_FOUND)"),
- runEmptyVisitor(createVisitorCommand("allincompletehandling",
- id,
- nullId)));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)",
+ runEmptyVisitor(createVisitorCommand("allincompletehandling", id, nullId)));
}
-void
-VisitorOperationTest::testInconsistencyHandling()
-{
+TEST_F(VisitorOperationTest, inconsistency_handling) {
enableDistributorClusterState("distributor:1 storage:2");
document::BucketId id(16, 0);
addNodesToBucketDB(id, "0=1/1/1,1=2/2/2");
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(BUCKET_NOT_FOUND)"),
- runEmptyVisitor(createVisitorCommand("testinconsistencyhandling",
- id,
- nullId)));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)",
+ runEmptyVisitor(createVisitorCommand("testinconsistencyhandling", id, nullId)));
_sender.clear();
auto op = createOpWithConfig(
@@ -1080,20 +863,16 @@ VisitorOperationTest::testInconsistencyHandling()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 1", _sender.getCommands(true));
sendReply(*op);
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testVisitIdealNode()
-{
+TEST_F(VisitorOperationTest, visit_ideal_node) {
ClusterState state("distributor:1 storage:3");
_distributor->enableClusterStateBundle(lib::ClusterStateBundle(state));
@@ -1109,28 +888,23 @@ VisitorOperationTest::testVisitIdealNode()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
- "BucketId(0x5400000000000001) BucketId(0x5400000000100001) "
- "BucketId(0x5400000000080001) BucketId(0x5400000000180001) "
- "BucketId(0x5400000000040001) BucketId(0x5400000000140001) "
- "BucketId(0x54000000000c0001) BucketId(0x54000000001c0001) ]"),
- serializeVisitorCommand(0));
+ ASSERT_EQ("CreateVisitorCommand(dumpvisitor, , 8 buckets) Buckets: [ "
+ "BucketId(0x5400000000000001) BucketId(0x5400000000100001) "
+ "BucketId(0x5400000000080001) BucketId(0x5400000000180001) "
+ "BucketId(0x5400000000040001) BucketId(0x5400000000140001) "
+ "BucketId(0x54000000000c0001) BucketId(0x54000000001c0001) ]",
+ serializeVisitorCommand(0));
sendReply(*op);
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x54000000001c0001)) "
- "ReturnCode(NONE)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x54000000001c0001)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testNoResendingOnCriticalFailure()
-{
+TEST_F(VisitorOperationTest, no_resending_on_critical_failure) {
enableDistributorClusterState("distributor:1 storage:3");
// Create buckets in bucketdb
@@ -1145,20 +919,16 @@ VisitorOperationTest::testNoResendingOnCriticalFailure()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
sendReply(*op, -1, api::ReturnCode::ILLEGAL_PARAMETERS);
- CPPUNIT_ASSERT_EQUAL(
- "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(ILLEGAL_PARAMETERS, [from content node 0] )"s,
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(ILLEGAL_PARAMETERS, [from content node 0] )",
+ _sender.getLastReply());
}
-void
-VisitorOperationTest::testFailureOnAllNodes()
-{
+TEST_F(VisitorOperationTest, failure_on_all_nodes) {
enableDistributorClusterState("distributor:1 storage:3");
// Create buckets in bucketdb
@@ -1173,29 +943,23 @@ VisitorOperationTest::testFailureOnAllNodes()
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
sendReply(*op, -1, api::ReturnCode::NOT_CONNECTED);
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0,Visitor Create => 1"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0,Visitor Create => 1", _sender.getCommands(true));
sendReply(*op, -1, api::ReturnCode::NOT_CONNECTED);
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(BUCKET_NOT_FOUND)"),
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)",
+ _sender.getLastReply());
// TODO it'd be much more accurate to increase the "notconnected" metric
// here, but our metrics are currently based on the reply sent back to the
// client, not the ones sent from the content nodes to the distributor.
}
-
-void
-VisitorOperationTest::testVisitOrder()
-{
+TEST_F(VisitorOperationTest, visit_order) {
std::vector<document::BucketId> buckets;
document::BucketId id000(35, 0x0000004d2);
@@ -1211,43 +975,28 @@ VisitorOperationTest::testVisitOrder()
buckets.end(),
VisitorOrder(document::OrderingSpecification(
document::OrderingSpecification::ASCENDING, 0x0, 6, 2)));
-
- CPPUNIT_ASSERT_EQUAL(buckets[0], id000);
- CPPUNIT_ASSERT_EQUAL(buckets[1], id001);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id1);
+ EXPECT_THAT(buckets, ElementsAre(id000, id001, id01, id1));
std::sort(buckets.begin(),
buckets.end(),
VisitorOrder(document::OrderingSpecification(
document::OrderingSpecification::DESCENDING, 0xFF, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[1], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id001);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id000);
+ EXPECT_THAT(buckets, ElementsAre(id1, id01, id001, id000));
std::sort(buckets.begin(),
buckets.end(),
VisitorOrder(document::OrderingSpecification(
document::OrderingSpecification::ASCENDING, 0x14, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[1], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id000);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id001);
+ EXPECT_THAT(buckets, ElementsAre(id01, id1, id000, id001));
std::sort(buckets.begin(),
buckets.end(),
VisitorOrder(document::OrderingSpecification(
document::OrderingSpecification::DESCENDING, 0x14, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[1], id001);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id000);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id1);
+ EXPECT_THAT(buckets, ElementsAre(id01, id001, id000, id1));
}
-void
-VisitorOperationTest::testVisitInChunks()
-{
+TEST_F(VisitorOperationTest, visit_in_chunks) {
enableDistributorClusterState("distributor:1 storage:1");
for (int i = 0; i < 9; ++i) {
@@ -1257,48 +1006,40 @@ VisitorOperationTest::testVisitInChunks()
document::BucketId id(16, 0);
std::pair<std::string, std::string> val(runVisitor(id, nullId, 3));
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
- "Buckets: [ BucketId(0x7800000000000000) "
- "BucketId(0x7800000000080000) "
- "BucketId(0x7800000000040000) ]"),
- val.first);
-
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorReply(last=BucketId(0x7800000000040000)) "
- "ReturnCode(NONE)"),
- val.second);
+ EXPECT_EQ("CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
+ "Buckets: [ BucketId(0x7800000000000000) "
+ "BucketId(0x7800000000080000) "
+ "BucketId(0x7800000000040000) ]",
+ val.first);
+
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x7800000000040000)) "
+ "ReturnCode(NONE)",
+ val.second);
val = runVisitor(id, document::BucketId(0x7800000000040000), 3);
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
- "Buckets: [ BucketId(0x7800000000020000) "
- "BucketId(0x7800000000060000) "
- "BucketId(0x7800000000010000) ]"),
- val.first);
-
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorReply(last=BucketId(0x7800000000010000)) "
- "ReturnCode(NONE)"),
- val.second);
+ EXPECT_EQ("CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
+ "Buckets: [ BucketId(0x7800000000020000) "
+ "BucketId(0x7800000000060000) "
+ "BucketId(0x7800000000010000) ]",
+ val.first);
+
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x7800000000010000)) "
+ "ReturnCode(NONE)",
+ val.second);
val = runVisitor(id, document::BucketId(0x7800000000010000), 3);
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
- "Buckets: [ BucketId(0x7800000000050000) "
- "BucketId(0x7800000000030000) "
- "BucketId(0x7800000000070000) ]"),
- val.first);
-
- CPPUNIT_ASSERT_EQUAL(std::string(
- "CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"),
- val.second);
+ EXPECT_EQ("CreateVisitorCommand(dumpvisitor, true, 3 buckets) "
+ "Buckets: [ BucketId(0x7800000000050000) "
+ "BucketId(0x7800000000030000) "
+ "BucketId(0x7800000000070000) ]",
+ val.first);
+
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)",
+ val.second);
}
-void
-VisitorOperationTest::testVisitOrderSplitPastOrderBits()
-{
+TEST_F(VisitorOperationTest, visit_order_split_past_order_bits) {
std::vector<document::BucketId> buckets;
document::BucketId max(INT_MAX);
@@ -1316,46 +1057,24 @@ VisitorOperationTest::testVisitOrderSplitPastOrderBits()
document::BucketId null(0, 0);
buckets.push_back(null);
- std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x0, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], null);
- CPPUNIT_ASSERT_EQUAL(buckets[1], id0000);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id00000);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id00001);
- CPPUNIT_ASSERT_EQUAL(buckets[4], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[5], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[6], max);
-
- std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0xFF, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], null);
- CPPUNIT_ASSERT_EQUAL(buckets[1], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id0000);
- CPPUNIT_ASSERT_EQUAL(buckets[4], id00000);
- CPPUNIT_ASSERT_EQUAL(buckets[5], id00001);
- CPPUNIT_ASSERT_EQUAL(buckets[6], max);
-
- std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x14, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], null);
- CPPUNIT_ASSERT_EQUAL(buckets[1], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id0000);
- CPPUNIT_ASSERT_EQUAL(buckets[4], id00000);
- CPPUNIT_ASSERT_EQUAL(buckets[5], id00001);
- CPPUNIT_ASSERT_EQUAL(buckets[6], max);
-
- std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0x14, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], null);
- CPPUNIT_ASSERT_EQUAL(buckets[1], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id0000);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id00000);
- CPPUNIT_ASSERT_EQUAL(buckets[4], id00001);
- CPPUNIT_ASSERT_EQUAL(buckets[5], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(
+ document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x0, 6, 2)));
+ EXPECT_THAT(buckets, ElementsAre(null, id0000, id00000, id00001, id01, id1, max));
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(
+ document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0xFF, 6, 2)));
+ EXPECT_THAT(buckets, ElementsAre(null, id1, id01, id0000, id00000, id00001, max));
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(
+ document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x14, 6, 2)));
+ EXPECT_THAT(buckets, ElementsAre(null, id01, id1, id0000, id00000, id00001, max));
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(
+ document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0x14, 6, 2)));
+ EXPECT_THAT(buckets, ElementsAre(null, id01, id0000, id00000, id00001, id1, max));
}
-void
-VisitorOperationTest::testVisitOrderInconsistentlySplit()
-{
+TEST_F(VisitorOperationTest, visit_order_inconsistently_split) {
std::vector<document::BucketId> buckets;
document::BucketId max(INT_MAX);
@@ -1373,45 +1092,25 @@ VisitorOperationTest::testVisitOrderInconsistentlySplit()
document::BucketId null(0, 0);
buckets.push_back(null);
- std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x0, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], null);
- CPPUNIT_ASSERT_EQUAL(buckets[1], idsuper);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id000);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id001);
- CPPUNIT_ASSERT_EQUAL(buckets[4], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[5], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[6], max);
-
- std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0xFF, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], null);
- CPPUNIT_ASSERT_EQUAL(buckets[1], idsuper);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[4], id001);
- CPPUNIT_ASSERT_EQUAL(buckets[5], id000);
- CPPUNIT_ASSERT_EQUAL(buckets[6], max);
-
- std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x14, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], null);
- CPPUNIT_ASSERT_EQUAL(buckets[1], idsuper);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[4], id000);
- CPPUNIT_ASSERT_EQUAL(buckets[5], id001);
- CPPUNIT_ASSERT_EQUAL(buckets[6], max);
-
- std::sort(buckets.begin(), buckets.end(), VisitorOrder(document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0x14, 6, 2)));
- CPPUNIT_ASSERT_EQUAL(buckets[0], null);
- CPPUNIT_ASSERT_EQUAL(buckets[1], idsuper);
- CPPUNIT_ASSERT_EQUAL(buckets[2], id01);
- CPPUNIT_ASSERT_EQUAL(buckets[3], id001);
- CPPUNIT_ASSERT_EQUAL(buckets[4], id000);
- CPPUNIT_ASSERT_EQUAL(buckets[5], id1);
- CPPUNIT_ASSERT_EQUAL(buckets[6], max);
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(
+ document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x0, 6, 2)));
+ EXPECT_THAT(buckets, ElementsAre(null, idsuper, id000, id001, id01, id1, max));
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(
+ document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0xFF, 6, 2)));
+ EXPECT_THAT(buckets, ElementsAre(null, idsuper, id1, id01, id001, id000, max));
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(
+ document::OrderingSpecification(document::OrderingSpecification::ASCENDING, 0x14, 6, 2)));
+ EXPECT_THAT(buckets, ElementsAre(null, idsuper, id01, id1, id000, id001, max));
+
+ std::sort(buckets.begin(), buckets.end(), VisitorOrder(
+ document::OrderingSpecification(document::OrderingSpecification::DESCENDING, 0x14, 6, 2)));
+ EXPECT_THAT(buckets, ElementsAre(null, idsuper, id01, id001, id000, id1, max));
}
-std::string
-VisitorOperationTest::doOrderedVisitor(document::BucketId startBucket)
+void
+VisitorOperationTest::doOrderedVisitor(document::BucketId startBucket, std::string& out)
{
std::vector<document::BucketId> buckets;
@@ -1434,13 +1133,12 @@ VisitorOperationTest::doOrderedVisitor(document::BucketId startBucket)
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
- for (uint32_t i = 0; i < _sender.commands.size(); ++i) {
+ for (uint32_t i = 0; i < _sender.commands().size(); ++i) {
const api::CreateVisitorCommand cmd(
static_cast<const api::CreateVisitorCommand&>(
- *_sender.commands[i]));
+ *_sender.command(i)));
for (uint32_t j = 0; j < cmd.getBuckets().size(); ++j) {
buckets.push_back(cmd.getBuckets()[j]);
@@ -1449,10 +1147,9 @@ VisitorOperationTest::doOrderedVisitor(document::BucketId startBucket)
sendReply(*op);
- CPPUNIT_ASSERT_EQUAL(1, (int)_sender.replies.size());
+ ASSERT_EQ(1, _sender.replies().size());
- const api::CreateVisitorReply& reply(
- static_cast<const api::CreateVisitorReply&>(*_sender.replies[0]));
+ auto& reply = dynamic_cast<const api::CreateVisitorReply&>(*_sender.reply(0));
if (reply.getLastBucket() == document::BucketId(0x000000007fffffff)) {
break;
@@ -1464,12 +1161,10 @@ VisitorOperationTest::doOrderedVisitor(document::BucketId startBucket)
ost << buckets[i] << "\n";
}
- return ost.str();
+ out = ost.str();
}
-void
-VisitorOperationTest::testUserVisitorOrder()
-{
+TEST_F(VisitorOperationTest, user_visitor_order) {
enableDistributorClusterState("distributor:1 storage:1");
// Create buckets in bucketdb
@@ -1489,16 +1184,16 @@ VisitorOperationTest::testUserVisitorOrder()
document::BucketId id(16, 0x04d2);
- CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x88000002000004d2)\n"
- "BucketId(0x8c000004000004d2)\n"
- "BucketId(0x8c000000000004d2)\n"
- "BucketId(0x84000001000004d2)\n"),
- doOrderedVisitor(id));
+ std::string res;
+ ASSERT_NO_FATAL_FAILURE(doOrderedVisitor(id, res));
+ EXPECT_EQ("BucketId(0x88000002000004d2)\n"
+ "BucketId(0x8c000004000004d2)\n"
+ "BucketId(0x8c000000000004d2)\n"
+ "BucketId(0x84000001000004d2)\n",
+ res);
}
-void
-VisitorOperationTest::testUserVisitorOrderSplitPastOrderBits()
-{
+TEST_F(VisitorOperationTest, user_visitor_order_split_past_order_bits) {
enableDistributorClusterState("distributor:1 storage:1");
// Create buckets in bucketdb
@@ -1519,12 +1214,14 @@ VisitorOperationTest::testUserVisitorOrderSplitPastOrderBits()
document::BucketId id(16, 0x04d2);
- CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x88000002000004d2)\n"
- "BucketId(0x90000000000004d2)\n"
- "BucketId(0x94000000000004d2)\n"
- "BucketId(0x94000010000004d2)\n"
- "BucketId(0x84000001000004d2)\n"),
- doOrderedVisitor(id));
+ std::string res;
+ ASSERT_NO_FATAL_FAILURE(doOrderedVisitor(id, res));
+ EXPECT_EQ("BucketId(0x88000002000004d2)\n"
+ "BucketId(0x90000000000004d2)\n"
+ "BucketId(0x94000000000004d2)\n"
+ "BucketId(0x94000010000004d2)\n"
+ "BucketId(0x84000001000004d2)\n",
+ res);
}
std::unique_ptr<VisitorOperation>
@@ -1533,8 +1230,7 @@ VisitorOperationTest::startOperationWith2StorageNodeVisitors(bool inconsistent)
enableDistributorClusterState("distributor:1 storage:3");
addNodesToBucketDB(document::BucketId(17, 1), "0=1/1/1/t");
- addNodesToBucketDB(document::BucketId(17, 1 << 16 | 1),
- "1=1/1/1/t");
+ addNodesToBucketDB(document::BucketId(17, 1ULL << 16 | 1), "1=1/1/1/t");
document::BucketId id(16, 1);
auto op = createOpWithDefaultConfig(
@@ -1548,57 +1244,48 @@ VisitorOperationTest::startOperationWith2StorageNodeVisitors(bool inconsistent)
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL("Visitor Create => 0,Visitor Create => 1"s,
- _sender.getCommands(true));
+ assert(_sender.getCommands(true) == "Visitor Create => 0,Visitor Create => 1");
return op;
}
-void
-VisitorOperationTest::testNoClientReplyBeforeAllStorageRepliesReceived()
-{
+TEST_F(VisitorOperationTest, no_client_reply_before_all_storage_replies_received) {
auto op = startOperationWith2StorageNodeVisitors(false);
sendReply(*op, 0, api::ReturnCode::BUSY);
// We don't want to see a reply here until the other node has replied.
- CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies(true));
+ ASSERT_EQ("", _sender.getReplies(true));
// OK reply from 1, but have to retry from client anyhow since one of
// the sub buckets failed to be processed and we don't have inconsistent
// visiting set in the client visitor command.
sendReply(*op, 1);
- CPPUNIT_ASSERT_EQUAL(
- "CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(BUCKET_NOT_FOUND)"s,
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(BUCKET_NOT_FOUND)",
+ _sender.getLastReply());
// XXX we should consider wether we want BUSY to be returned instead.
// Non-critical error codes are currently converted to a generic "not found"
// code to let the client silently retry until the bucket has hopefully
// become consistent/available.
}
-void
-VisitorOperationTest::testSkipFailedSubBucketsWhenVisitingInconsistent()
-{
+TEST_F(VisitorOperationTest, skip_failed_sub_buckets_when_visiting_inconsistent) {
auto op = startOperationWith2StorageNodeVisitors(true);
sendReply(*op, 0, api::ReturnCode::BUSY);
- CPPUNIT_ASSERT_EQUAL(""s, _sender.getReplies(true));
+ ASSERT_EQ("", _sender.getReplies(true));
// Subset of buckets could not be visited, but visit inconsistent flag is
// set in the client visitor so we treat it as a success anyway. In this
// case we've expanded the entire superbucket sub-tree so return with magic
// number to signify this.
sendReply(*op, 1);
- CPPUNIT_ASSERT_EQUAL(
- "CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
- "ReturnCode(NONE)"s,
- _sender.getLastReply());
+ EXPECT_EQ("CreateVisitorReply(last=BucketId(0x000000007fffffff)) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply());
}
// By default, queue timeout should be half of remaining visitor time. This
// is a highly un-scientific heuristic, but seems rather more reasonable than
// having it hard-coded to 2000 ms as was the case earlier.
-void
-VisitorOperationTest::testQueueTimeoutIsFactorOfTotalTimeout()
-{
+TEST_F(VisitorOperationTest, queue_timeout_is_factor_of_total_timeout) {
document::BucketId id(uint64_t(0x400000000000007b));
enableDistributorClusterState("distributor:1 storage:2");
addNodesToBucketDB(id, "0=1/1/1/t,1=1/1/1/t");
@@ -1607,11 +1294,10 @@ VisitorOperationTest::testQueueTimeoutIsFactorOfTotalTimeout()
createVisitorCommand("foo", id, nullId, 8, 10000));
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
- auto& cmd(dynamic_cast<CreateVisitorCommand&>(*_sender.commands[0]));
- CPPUNIT_ASSERT_EQUAL(uint32_t(5000), cmd.getQueueTimeout());
+ auto& cmd = dynamic_cast<CreateVisitorCommand&>(*_sender.command(0));
+ EXPECT_EQ(5000, cmd.getQueueTimeout());
}
void
@@ -1626,49 +1312,43 @@ VisitorOperationTest::do_visitor_roundtrip_with_statistics(
createVisitorCommand("metricstats", id, nullId));
op->start(_sender, framework::MilliSecTime(0));
- CPPUNIT_ASSERT_EQUAL(std::string("Visitor Create => 0"),
- _sender.getCommands(true));
- auto& cmd(dynamic_cast<CreateVisitorCommand&>(*_sender.commands[0]));
+ ASSERT_EQ("Visitor Create => 0", _sender.getCommands(true));
+ auto& cmd = dynamic_cast<CreateVisitorCommand&>(*_sender.command(0));
auto reply = cmd.makeReply();
vdslib::VisitorStatistics stats;
stats.setBucketsVisited(50);
stats.setDocumentsVisited(100);
stats.setBytesVisited(2000);
- static_cast<CreateVisitorReply&>(*reply).setVisitorStatistics(stats);
+ dynamic_cast<CreateVisitorReply&>(*reply).setVisitorStatistics(stats);
reply->setResult(result);
op->receive(_sender, api::StorageReply::SP(std::move(reply)));
}
-void
-VisitorOperationTest::metrics_are_updated_with_visitor_statistics_upon_replying()
-{
- do_visitor_roundtrip_with_statistics(api::ReturnCode(api::ReturnCode::OK));
+TEST_F(VisitorOperationTest, metrics_are_updated_with_visitor_statistics_upon_replying) {
+ ASSERT_NO_FATAL_FAILURE(do_visitor_roundtrip_with_statistics(api::ReturnCode(api::ReturnCode::OK)));
- CPPUNIT_ASSERT_EQUAL(int64_t(50), defaultVisitorMetrics().buckets_per_visitor.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(100), defaultVisitorMetrics().docs_per_visitor.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(2000), defaultVisitorMetrics().bytes_per_visitor.getLast());
+ EXPECT_EQ(50, defaultVisitorMetrics().buckets_per_visitor.getLast());
+ EXPECT_EQ(100, defaultVisitorMetrics().docs_per_visitor.getLast());
+ EXPECT_EQ(2000, defaultVisitorMetrics().bytes_per_visitor.getLast());
}
-void
-VisitorOperationTest::statistical_metrics_not_updated_on_wrong_distribution()
-{
+TEST_F(VisitorOperationTest, statistical_metrics_not_updated_on_wrong_distribution) {
setupDistributor(1, 100, "distributor:100 storage:2");
document::BucketId id(uint64_t(0x400000000000127b));
- CPPUNIT_ASSERT_EQUAL(
- std::string("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
- "ReturnCode(WRONG_DISTRIBUTION, distributor:100 storage:2)"),
- runEmptyVisitor(createVisitorCommand("wrongdist", id, nullId)));
+ ASSERT_EQ("CreateVisitorReply(last=BucketId(0x0000000000000000)) "
+ "ReturnCode(WRONG_DISTRIBUTION, distributor:100 storage:2)",
+ runEmptyVisitor(createVisitorCommand("wrongdist", id, nullId)));
// Note that we're testing the number of _times_ the metric has been
// updated, not the value with which it's been updated (which would be zero
// even in the case we actually did update the statistical metrics).
- CPPUNIT_ASSERT_EQUAL(int64_t(0), defaultVisitorMetrics().buckets_per_visitor.getCount());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), defaultVisitorMetrics().docs_per_visitor.getCount());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), defaultVisitorMetrics().bytes_per_visitor.getCount());
+ EXPECT_EQ(0, defaultVisitorMetrics().buckets_per_visitor.getCount());
+ EXPECT_EQ(0, defaultVisitorMetrics().docs_per_visitor.getCount());
+ EXPECT_EQ(0, defaultVisitorMetrics().bytes_per_visitor.getCount());
// Fascinating that count is also a double...
- CPPUNIT_ASSERT_EQUAL(0.0, defaultVisitorMetrics().latency.getCount());
+ EXPECT_DOUBLE_EQ(0.0, defaultVisitorMetrics().latency.getCount());
}
}
diff --git a/storage/src/tests/gtest_runner.cpp b/storage/src/tests/gtest_runner.cpp
deleted file mode 100644
index 0eb65b00d49..00000000000
--- a/storage/src/tests/gtest_runner.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/gtest/gtest.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP("storage_gtest_runner");
-
-GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/storage/src/tests/persistence/CMakeLists.txt b/storage/src/tests/persistence/CMakeLists.txt
index 76361e1d419..1e25f38fe2a 100644
--- a/storage/src/tests/persistence/CMakeLists.txt
+++ b/storage/src/tests/persistence/CMakeLists.txt
@@ -15,7 +15,6 @@ vespa_add_executable(storage_persistence_gtest_runner_app TEST
gtest_runner.cpp
DEPENDS
storage
- storage_testdistributor
storage_testpersistence_common
gtest
)
diff --git a/storage/src/tests/persistence/bucketownershipnotifiertest.cpp b/storage/src/tests/persistence/bucketownershipnotifiertest.cpp
index f4820f3ff13..47b1c43ca63 100644
--- a/storage/src/tests/persistence/bucketownershipnotifiertest.cpp
+++ b/storage/src/tests/persistence/bucketownershipnotifiertest.cpp
@@ -2,7 +2,7 @@
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/persistence/bucketownershipnotifier.h>
-#include <tests/distributor/messagesenderstub.h>
+#include <tests/common/message_sender_stub.h>
#include <tests/common/teststorageapp.h>
#include <vespa/vespalib/gtest/gtest.h>
diff --git a/storage/src/tests/persistence/common/filestortestfixture.h b/storage/src/tests/persistence/common/filestortestfixture.h
index a8c32a409ec..85cec16a546 100644
--- a/storage/src/tests/persistence/common/filestortestfixture.h
+++ b/storage/src/tests/persistence/common/filestortestfixture.h
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <tests/common/testhelper.h>
#include <vespa/persistence/spi/persistenceprovider.h>
#include <vespa/storage/persistence/filestorage/filestormanager.h>
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index f6b8fc3b3f0..7db47572e22 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -1,6 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <tests/common/testhelper.h> // FIXME
+#include <tests/common/testhelper.h>
#include <tests/common/dummystoragelink.h>
#include <tests/common/teststorageapp.h>
#include <tests/persistence/filestorage/forwardingmessagesender.h>
diff --git a/storage/src/tests/persistence/mergehandlertest.cpp b/storage/src/tests/persistence/mergehandlertest.cpp
index 0c291a179ae..8dedf3f18df 100644
--- a/storage/src/tests/persistence/mergehandlertest.cpp
+++ b/storage/src/tests/persistence/mergehandlertest.cpp
@@ -4,7 +4,7 @@
#include <vespa/storage/persistence/mergehandler.h>
#include <tests/persistence/persistencetestutils.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
-#include <tests/distributor/messagesenderstub.h>
+#include <tests/common/message_sender_stub.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/vespalib/objects/nbostream.h>
#include <gmock/gmock.h>
diff --git a/storage/src/tests/storageserver/fnet_listener_test.cpp b/storage/src/tests/storageserver/fnet_listener_test.cpp
index b9f2ca74df3..f82af1f8e5c 100644
--- a/storage/src/tests/storageserver/fnet_listener_test.cpp
+++ b/storage/src/tests/storageserver/fnet_listener_test.cpp
@@ -8,7 +8,7 @@
#include <vespa/storageapi/message/state.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <vespa/vespalib/stllike/asciistream.h>
-#include <vespa/vdstestlib/cppunit/dirconfig.hpp>
+#include <vespa/vdstestlib/config/dirconfig.hpp>
#include <vespa/messagebus/testlib/slobrok.h>
#include <tests/common/testhelper.h>
#include <vespa/vespalib/gtest/gtest.h>
diff --git a/storage/src/tests/storageserver/service_layer_error_listener_test.cpp b/storage/src/tests/storageserver/service_layer_error_listener_test.cpp
index dc5324c00e3..ec38e09a673 100644
--- a/storage/src/tests/storageserver/service_layer_error_listener_test.cpp
+++ b/storage/src/tests/storageserver/service_layer_error_listener_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/storage/storageserver/service_layer_error_listener.h>
#include <vespa/storage/storageserver/mergethrottler.h>
#include <vespa/storageframework/defaultimplementation/component/componentregisterimpl.h>
-#include <vespa/vdstestlib/cppunit/dirconfig.h>
+#include <vespa/vdstestlib/config/dirconfig.h>
#include <tests/common/testhelper.h>
#include <tests/common/teststorageapp.h>
#include <vespa/vespalib/gtest/gtest.h>
diff --git a/storage/src/tests/testrunner.cpp b/storage/src/tests/testrunner.cpp
deleted file mode 100644
index 9f871997873..00000000000
--- a/storage/src/tests/testrunner.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vdstestlib/cppunit/cppunittestrunner.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP("storagecppunittests");
-
-int
-main(int argc, const char *argv[])
-{
- vdstestlib::CppUnitTestRunner testRunner;
- return testRunner.run(argc, argv);
-}
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.h b/storage/src/vespa/storage/config/distributorconfiguration.h
index 8c84fef47b5..a58767164a7 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.h
+++ b/storage/src/vespa/storage/config/distributorconfiguration.h
@@ -10,7 +10,7 @@
namespace storage {
namespace distributor {
- class Distributor_Test;
+struct DistributorTest;
}
class DistributorConfiguration {
@@ -242,6 +242,8 @@ public:
void setAllowStaleReadsDuringClusterStateTransitions(bool allow) noexcept {
_allowStaleReadsDuringClusterStateTransitions = allow;
}
+
+ bool containsTimeStatement(const std::string& documentSelection) const;
private:
DistributorConfiguration(const DistributorConfiguration& other);
@@ -285,9 +287,7 @@ private:
DistrConfig::MinimumReplicaCountingMode _minimumReplicaCountingMode;
- friend class distributor::Distributor_Test;
-
- bool containsTimeStatement(const std::string& documentSelection) const;
+ friend struct distributor::DistributorTest;
void configureMaintenancePriorities(
const vespa::config::content::core::StorDistributormanagerConfig&);
};
diff --git a/storage/src/vespa/storage/distributor/bucketdbupdater.cpp b/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
index d349164e8ed..124e8c607ee 100644
--- a/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
+++ b/storage/src/vespa/storage/distributor/bucketdbupdater.cpp
@@ -6,6 +6,7 @@
#include "distributor_bucket_space.h"
#include "distributormetricsset.h"
#include "simpleclusterinformation.h"
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/storage/common/bucketoperationlogger.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/removelocation.h>
@@ -142,7 +143,8 @@ BucketDBUpdater::removeSuperfluousBuckets(
if (!is_distribution_config_change
&& db_pruning_may_be_elided(oldClusterState, *new_cluster_state, up_states))
{
- LOG(debug, "Eliding DB pruning for state transition '%s' -> '%s'",
+ LOG(info, "[bucket space '%s']: eliding DB pruning for state transition '%s' -> '%s'",
+ document::FixedBucketSpaces::to_string(elem.first).data(),
oldClusterState.toString().c_str(), new_cluster_state->toString().c_str());
continue;
}
diff --git a/storage/src/vespa/storage/distributor/distributor.h b/storage/src/vespa/storage/distributor/distributor.h
index 96359bcec60..424ac0e7a78 100644
--- a/storage/src/vespa/storage/distributor/distributor.h
+++ b/storage/src/vespa/storage/distributor/distributor.h
@@ -169,14 +169,7 @@ public:
return *_readOnlyBucketSpaceRepo;
}
-private:
- friend class Distributor_Test;
- friend class BucketDBUpdaterTest;
- friend class DistributorTestUtil;
- friend class ExternalOperationHandler_Test;
- friend class Operation_Test;
- friend class MetricUpdateHook;
-
+ class Status;
class MetricUpdateHook : public framework::MetricUpdateHook
{
public:
@@ -193,6 +186,12 @@ private:
Distributor& _self;
};
+private:
+ friend struct DistributorTest;
+ friend class BucketDBUpdaterTest;
+ friend class DistributorTestUtil;
+ friend class MetricUpdateHook;
+
void setNodeStateUp();
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
bool isMaintenanceReply(const api::StorageReply& reply) const;
@@ -292,7 +291,6 @@ private:
framework::TickingThreadPool& _threadPool;
vespalib::Monitor _statusMonitor;
- class Status;
mutable std::vector<std::shared_ptr<Status>> _statusToDo;
mutable std::vector<std::shared_ptr<Status>> _fetchedStatusRequests;
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.cpp b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
index 5ae5d8dc3f8..eea6db8c782 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
@@ -32,7 +32,8 @@ IdealStateManager::IdealStateManager(
: HtmlStatusReporter("idealstateman", "Ideal state manager"),
_metrics(new IdealStateMetricSet),
_distributorComponent(owner, bucketSpaceRepo, readOnlyBucketSpaceRepo, compReg, "Ideal state manager"),
- _bucketSpaceRepo(bucketSpaceRepo)
+ _bucketSpaceRepo(bucketSpaceRepo),
+ _has_logged_phantom_replica_warning(false)
{
_distributorComponent.registerStatusPage(*this);
_distributorComponent.registerMetric(*_metrics);
@@ -52,9 +53,7 @@ IdealStateManager::IdealStateManager(
_stateCheckers.push_back(StateChecker::SP(new GarbageCollectionStateChecker()));
}
-IdealStateManager::~IdealStateManager()
-{
-}
+IdealStateManager::~IdealStateManager() = default;
void
IdealStateManager::print(std::ostream& out, bool verbose,
@@ -143,6 +142,26 @@ IdealStateManager::runStateCheckers(StateChecker::Context& c) const
return highestPri;
}
+void IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Context& c) const {
+ if (_has_logged_phantom_replica_warning) {
+ return;
+ }
+ for (const auto& n : c.entry->getRawNodes()) {
+ const uint16_t index = n.getNode();
+ const auto& state = c.systemState.getNodeState(lib::Node(lib::NodeType::STORAGE, index));
+ // Only nodes in Up, Initializing or Retired should ever be present in the DB.
+ if (!state.getState().oneOf("uir")) {
+ LOG(warning, "%s in bucket DB is on node %u, which is in unavailable state %s. "
+ "Current cluster state is '%s'",
+ c.entry.getBucketId().toString().c_str(),
+ index,
+ state.getState().toString().c_str(),
+ c.systemState.toString().c_str());
+ _has_logged_phantom_replica_warning = true;
+ }
+ }
+}
+
StateChecker::Result
IdealStateManager::generateHighestPriority(
const document::Bucket &bucket,
@@ -160,6 +179,7 @@ IdealStateManager::generateHighestPriority(
LOG(spam, "Checking bucket %s", e->toString().c_str());
c.entry = *e;
+ verify_only_live_nodes_in_context(c);
return runStateCheckers(c);
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.h b/storage/src/vespa/storage/distributor/idealstatemanager.h
index 8566c67a51b..10f18a35952 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.h
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.h
@@ -41,7 +41,7 @@ public:
DistributorComponentRegister& compReg,
bool manageActiveBucketCopies);
- ~IdealStateManager();
+ ~IdealStateManager() override;
void print(std::ostream& out, bool verbose,
const std::string& indent) const;
@@ -86,6 +86,7 @@ public:
const DistributorBucketSpaceRepo &getBucketSpaceRepo() const { return _bucketSpaceRepo; }
private:
+ void verify_only_live_nodes_in_context(const StateChecker::Context& c) const;
void fillParentAndChildBuckets(StateChecker::Context& c) const;
void fillSiblingBucket(StateChecker::Context& c) const;
StateChecker::Result generateHighestPriority(
@@ -95,13 +96,6 @@ private:
BucketDatabase::Entry* getEntryForPrimaryBucket(StateChecker::Context& c) const;
- friend class Operation_TestCase;
- friend class RemoveBucketOperation_Test;
- friend class MergeOperation_Test;
- friend class CreateBucketOperation_Test;
- friend class SplitOperation_Test;
- friend class JoinOperation_Test;
-
std::shared_ptr<IdealStateMetricSet> _metrics;
document::BucketId _lastPrioritizedBucket;
@@ -112,8 +106,7 @@ private:
DistributorComponent _distributorComponent;
DistributorBucketSpaceRepo &_bucketSpaceRepo;
-
- std::vector<IdealStateOperation::SP> generateOperationsForBucket(StateChecker::Context& c) const;
+ mutable bool _has_logged_phantom_replica_warning;
bool iAmUp() const;
@@ -133,7 +126,6 @@ private:
return true;
}
};
- friend class StatusBucketVisitor;
void getBucketStatus(document::BucketSpace bucketSpace, const BucketDatabase::ConstEntryRef& entry,
NodeMaintenanceStatsTracker& statsTracker, std::ostream& out) const;
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
index 11f7b53ba20..296c6f76563 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
@@ -32,6 +32,9 @@ struct NodeMaintenanceStats
&& copyingOut == other.copyingOut
&& total == other.total);
}
+ bool operator!=(const NodeMaintenanceStats& other) const noexcept {
+ return !(*this == other);
+ }
};
std::ostream& operator<<(std::ostream&, const NodeMaintenanceStats&);
diff --git a/storageapi/src/tests/CMakeLists.txt b/storageapi/src/tests/CMakeLists.txt
index ddc43c70004..8b820adb467 100644
--- a/storageapi/src/tests/CMakeLists.txt
+++ b/storageapi/src/tests/CMakeLists.txt
@@ -1,13 +1,12 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-# Runner for unit tests written in gtest.
-# NOTE: All new test classes should be added here.
vespa_add_executable(storageapi_gtest_runner_app TEST
SOURCES
gtest_runner.cpp
DEPENDS
storageapi_testbuckets
storageapi_testmbusprot
+ storageapi_testmessageapi
storageapi
gtest
)
@@ -17,17 +16,3 @@ vespa_add_test(
COMMAND storageapi_gtest_runner_app
)
-# Runner for unit tests written in CppUnit (DEPRECATED).
-vespa_add_executable(storageapi_testrunner_app TEST
- SOURCES
- testrunner.cpp
- DEPENDS
- storageapi_testmessageapi
- storageapi
- vdstestlib
-)
-
-vespa_add_test(
- NAME storageapi_testrunner_app
- COMMAND storageapi_testrunner_app
-)
diff --git a/storageapi/src/tests/testrunner.cpp b/storageapi/src/tests/testrunner.cpp
deleted file mode 100644
index f21df6bdb86..00000000000
--- a/storageapi/src/tests/testrunner.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vdstestlib/cppunit/cppunittestrunner.h>
-
-#include <vespa/log/log.h>
-LOG_SETUP("storageapicppunittestrunner");
-
-int
-main(int argc, const char *argv[])
-{
- vdstestlib::CppUnitTestRunner testRunner;
- return testRunner.run(argc, argv);
-}
diff --git a/storageframework/CMakeLists.txt b/storageframework/CMakeLists.txt
index 2211a4c009d..db6a14d0386 100644
--- a/storageframework/CMakeLists.txt
+++ b/storageframework/CMakeLists.txt
@@ -20,7 +20,6 @@ vespa_define_module(
src/vespa/storageframework/generic/thread
TEST_EXTERNAL_DEPENDS
- cppunit
vdstestlib
TESTS
diff --git a/storageserver/src/tests/testhelper.h b/storageserver/src/tests/testhelper.h
index 7dbaaecdbf4..a0d5da20eb8 100644
--- a/storageserver/src/tests/testhelper.h
+++ b/storageserver/src/tests/testhelper.h
@@ -4,7 +4,7 @@
#include <fstream>
#include <sstream>
#include <vespa/messagebus/testlib/slobrok.h>
-#include <vespa/vdstestlib/cppunit/dirconfig.h>
+#include <vespa/vdstestlib/config/dirconfig.h>
namespace storage {
diff --git a/streamingvisitors/CMakeLists.txt b/streamingvisitors/CMakeLists.txt
index 88278bbc86a..26eb77fa4ce 100644
--- a/streamingvisitors/CMakeLists.txt
+++ b/streamingvisitors/CMakeLists.txt
@@ -12,9 +12,6 @@ vespa_define_module(
vdslib
vsm
- TEST_EXTERNAL_DEPENDS
- cppunit
-
LIBS
src/vespa/searchvisitor
diff --git a/tenant-base/pom.xml b/tenant-base/pom.xml
index 3c48d22085e..f087a8019da 100644
--- a/tenant-base/pom.xml
+++ b/tenant-base/pom.xml
@@ -33,10 +33,9 @@
<properties>
<vespaversion>${project.version}</vespaversion>
- <test_framework_version>${vespaversion}</test_framework_version>
<target_jdk_version>11</target_jdk_version>
<compiler_plugin_version>3.8.0</compiler_plugin_version>
- <surefire_version>2.22.0</surefire_version> <!-- NOTE bjorncs 15.06.2017: Version 2.20 has OoM issues -->
+ <surefire_version>2.22.0</surefire_version>
<endpoint>https://api.vespa-external.aws.oath.cloud:4443</endpoint>
</properties>
@@ -55,21 +54,21 @@
<dependencies>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>tenant-cd</artifactId>
- <version>${test_framework_version}</version>
- <scope>test</scope>
+ <artifactId>container</artifactId>
+ <version>${vespaversion}</version>
+ <scope>provided</scope>
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>container</artifactId>
+ <artifactId>container-test</artifactId>
<version>${vespaversion}</version>
- <scope>provided</scope>
+ <scope>test</scope>
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>container-test</artifactId>
+ <artifactId>tenant-cd</artifactId>
<version>${vespaversion}</version>
<scope>test</scope>
</dependency>
diff --git a/tenant-cd/pom.xml b/tenant-cd/pom.xml
index 18c4084a173..829b1de457b 100644
--- a/tenant-cd/pom.xml
+++ b/tenant-cd/pom.xml
@@ -51,10 +51,8 @@
</dependency>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <version>4.12</version>
- <scope>test</scope>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
</dependency>
</dependencies>
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/FunctionalTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/FunctionalTest.java
deleted file mode 100644
index e6beb313d28..00000000000
--- a/tenant-cd/src/main/java/ai/vespa/hosted/cd/FunctionalTest.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package ai.vespa.hosted.cd;
-
-/**
- * Tests that compare the behaviour of a Vespa application deployment against a fixed specification.
- *
- * These tests are run whenever a change is pushed to a Vespa application, and whenever the Vespa platform
- * is upgraded, and before any deployments to production zones. When these tests fails, the tested change to
- * the Vespa application is not rolled out.
- *
- * A typical functional test is to feed some documents, optionally verifying that the documents have been processed
- * as expected, and then to see that queries give the expected results. Another common use is to verify integration
- * with external services.
- *
- * @author jonmv
- */
-public interface FunctionalTest {
-
- // Want to feed some documents.
- // Want to verify document processing and routing is as expected.
- // Want to check recall on those documents.
- // Want to verify queries give expected documents.
- // Want to verify searchers.
- // Want to verify updates.
- // Want to verify deletion.
- // May want to verify reprocessing.
- // Must likely delete documents between tests.
- // Must be able to feed documents, setting route.
- // Must be able to search.
- // Must be able to visit.
-
-}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java
index 6cf5fb07f58..e9a2be1fb1f 100644
--- a/tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java
@@ -1,6 +1,16 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.cd;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.ANNOTATION_TYPE;
+import static java.lang.annotation.ElementType.TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
/**
* Tests that verify the health of production deployments of Vespa applications.
*
@@ -14,7 +24,10 @@ package ai.vespa.hosted.cd;
*
* @author jonmv
*/
-public interface ProductionTest {
+@Target({TYPE, ANNOTATION_TYPE})
+@Retention(RUNTIME)
+@Tag("ai.vespa.hosted.cd.ProductionTest")
+public @interface ProductionTest {
// Want to verify metrics (Vespa).
// Want to verify external metrics (YAMAS, other).
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java
index 40377da30ef..b0325efa8d3 100644
--- a/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java
@@ -1,6 +1,16 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.cd;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.ANNOTATION_TYPE;
+import static java.lang.annotation.ElementType.TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
/**
* Tests that assert continuity of behaviour for Vespa application deployments, through upgrades.
*
@@ -16,7 +26,12 @@ package ai.vespa.hosted.cd;
*
* @author jonmv
*/
-public interface StagingTest {
+@Target({TYPE, ANNOTATION_TYPE})
+@Retention(RUNTIME)
+@Tag("ai.vespa.hosted.cd.StagingTest")
+public @interface StagingTest {
+
// Want to verify documents are not damaged by upgrade.
// May want to verify metrics during upgrade.
+
}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java
index c67d86fc8de..f27fa01072c 100644
--- a/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java
@@ -1,6 +1,18 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.cd;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.ANNOTATION_TYPE;
+import static java.lang.annotation.ElementType.TYPE;
+import static java.lang.annotation.RetentionPolicy.RUNTIME;
+
/**
* Tests that compare the behaviour of a Vespa application deployment against a fixed specification.
*
@@ -14,7 +26,11 @@ package ai.vespa.hosted.cd;
*
* @author jonmv
*/
-public interface SystemTest {
+@Target({TYPE, ANNOTATION_TYPE})
+@Retention(RUNTIME)
+@Tag("ai.vespa.hosted.cd.SystemTest")
+public @interface SystemTest {
+
// Want to feed some documents.
// Want to verify document processing and routing is as expected.
// Want to check recall on those documents.
@@ -27,4 +43,5 @@ public interface SystemTest {
// Must be able to feed documents, setting route.
// Must be able to search.
// Must be able to visit.
+
}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/UpgradeTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/UpgradeTest.java
deleted file mode 100644
index 32083fbd5f6..00000000000
--- a/tenant-cd/src/main/java/ai/vespa/hosted/cd/UpgradeTest.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package ai.vespa.hosted.cd;
-
-/**
- * Tests that assert continuity of behaviour for Vespa application deployments, through upgrades.
- *
- * These tests are run whenever a change is pushed to a Vespa application, and whenever the Vespa platform
- * is upgraded, and before any deployments to production zones. When these tests fails, the tested change to
- * the Vespa application is not rolled out.
- *
- * A typical upgrade test is to do some operations against a test deployment prior to upgrade, like feed and
- * search for some documents, perhaps recording some metrics from the deployment, and then to upgrade it,
- * repeat the exercise, and compare the results from pre and post upgrade.
- *
- * TODO Split in platform upgrades and application upgrades?
- *
- * @author jonmv
- */
-public interface UpgradeTest {
-
- // Want to verify documents are not damaged by upgrade.
- // May want to verify metrics during upgrade.
-
-}
diff --git a/valgrind-suppressions.txt b/valgrind-suppressions.txt
index 2587552ceff..13be6234a94 100644
--- a/valgrind-suppressions.txt
+++ b/valgrind-suppressions.txt
@@ -24,57 +24,6 @@
fun:(below main)
}
{
- This is a bug in cppunit.
- Memcheck:Leak
- fun:_Znwm
- fun:*addTestsToSuite*CppUnit*TestSuiteBuilderContextBase*
-}
-{
- Bug in cppunit. This suppression is created on CentOS7.
- Memcheck:Leak
- match-leak-kinds: definite
- fun:_Znwm
- fun:addTestsToSuite
- fun:suite
- fun:*makeTest*
- fun:_ZN7CppUnit19TestFactoryRegistry14addTestToSuiteEPNS_9TestSuiteE
- fun:_ZN7CppUnit19TestFactoryRegistry8makeTestEv
- fun:_ZN10vdstestlib17CppUnitTestRunner3runEiPKPKc
- fun:main
-}
-{
- Bug in cppunit. This suppression is created on CentOS7.
- Memcheck:Leak
- match-leak-kinds: definite
- fun:_Znwm
- fun:allocate
- fun:_S_create
- fun:_S_construct<char const*>
- fun:_S_construct_aux<char const*>
- fun:_S_construct<char const*>
- fun:_ZNSsC1EPKcRKSaIcE
- fun:_ZN7CppUnit10TestRunnerC1Ev
- fun:_ZN7CppUnit14TextTestRunnerC1EPNS_9OutputterE
- fun:_ZN10vdstestlib17CppUnitTestRunner3runEiPPKc
- fun:main
-}
-{
- Bug in cppunit. This suppression is created on CentOS7.
- Memcheck:Leak
- match-leak-kinds: definite
- fun:_Znwm
- fun:allocate
- fun:_S_create
- fun:_ZNSs12_S_constructIPKcEEPcT_S3_RKSaIcESt20forward_iterator_tag
- fun:_S_construct_aux<char const*>
- fun:_S_construct<char const*>
- fun:_ZNSsC1EPKcRKSaIcE
- fun:_ZN7CppUnit10TestRunnerC1Ev
- fun:_ZN7CppUnit14TextTestRunnerC1EPNS_9OutputterE
- fun:_ZN10vdstestlib17CppUnitTestRunner3runEiPPKc
- fun:main
-}
-{
RHEL6 strlen is eager and will read 16 bytes blocks.
Memcheck:Cond
fun:__strlen_sse42
diff --git a/vdslib/CMakeLists.txt b/vdslib/CMakeLists.txt
index 0480ea266e5..b997bf5f983 100644
--- a/vdslib/CMakeLists.txt
+++ b/vdslib/CMakeLists.txt
@@ -18,7 +18,6 @@ vespa_define_module(
TEST_DEPENDS
vdstestlib
- cppunit
TESTS
src/tests
diff --git a/vdstestlib/CMakeLists.txt b/vdstestlib/CMakeLists.txt
index 10ee29bdcc5..8fd97fe8168 100644
--- a/vdstestlib/CMakeLists.txt
+++ b/vdstestlib/CMakeLists.txt
@@ -5,10 +5,9 @@ vespa_define_module(
vespalib
TESTS
- src/tests/cppunit
src/tests/dirconfig
LIBS
src/vespa/vdstestlib
- src/vespa/vdstestlib/cppunit
+ src/vespa/vdstestlib/config
)
diff --git a/vdstestlib/src/tests/cppunit/.gitignore b/vdstestlib/src/tests/cppunit/.gitignore
deleted file mode 100644
index ec991c2f0cd..00000000000
--- a/vdstestlib/src/tests/cppunit/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.depend
-Makefile
-testrunner
-vdstestlib_testrunner_app
diff --git a/vdstestlib/src/tests/cppunit/CMakeLists.txt b/vdstestlib/src/tests/cppunit/CMakeLists.txt
deleted file mode 100644
index 1b8e857fe30..00000000000
--- a/vdstestlib/src/tests/cppunit/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vdstestlib_testrunner_app TEST
- SOURCES
- testrunner.cpp
- cppunittest.cpp
- DEPENDS
- vdstestlib
-)
-
-vespa_add_test(
- NAME vdstestlib_testrunner_app
- NO_VALGRIND
- COMMAND vdstestlib_testrunner_app
-)
diff --git a/vdstestlib/src/tests/cppunit/cppunittest.cpp b/vdstestlib/src/tests/cppunit/cppunittest.cpp
deleted file mode 100644
index bf8b38e696e..00000000000
--- a/vdstestlib/src/tests/cppunit/cppunittest.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
-
-namespace vespalib {
-
-struct CppunitTest : public CppUnit::TestFixture {
-
- void testSomething();
-
- CPPUNIT_TEST_SUITE(CppunitTest);
- CPPUNIT_TEST(testSomething);
- CPPUNIT_TEST_SUITE_END();
-
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(CppunitTest);
-
-void
-CppunitTest::testSomething()
-{
- CPPUNIT_ASSERT_EQUAL_MESSAGE("hmm", "foo", "foo");
-}
-
-} // vespalib
diff --git a/vdstestlib/src/tests/cppunit/testrunner.cpp b/vdstestlib/src/tests/cppunit/testrunner.cpp
deleted file mode 100644
index 3b686d132bb..00000000000
--- a/vdstestlib/src/tests/cppunit/testrunner.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vdstestlib/cppunit/cppunittestrunner.h>
-
-int main(int argc, const char *argv[])
-{
- vdstestlib::CppUnitTestRunner testRunner;
- return testRunner.run(argc, argv);
-}
-
diff --git a/vdstestlib/src/tests/dirconfig/dirconfigtest.cpp b/vdstestlib/src/tests/dirconfig/dirconfigtest.cpp
index 9d8e5c47143..f985150e497 100644
--- a/vdstestlib/src/tests/dirconfig/dirconfigtest.cpp
+++ b/vdstestlib/src/tests/dirconfig/dirconfigtest.cpp
@@ -2,7 +2,7 @@
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/exceptions.h>
-#include <vespa/vdstestlib/cppunit/dirconfig.h>
+#include <vespa/vdstestlib/config/dirconfig.h>
#include <fstream>
#include <iostream>
diff --git a/vdstestlib/src/vespa/vdstestlib/CMakeLists.txt b/vdstestlib/src/vespa/vdstestlib/CMakeLists.txt
index ce80f8dd644..86ca3461302 100644
--- a/vdstestlib/src/vespa/vdstestlib/CMakeLists.txt
+++ b/vdstestlib/src/vespa/vdstestlib/CMakeLists.txt
@@ -1,8 +1,6 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(vdstestlib
SOURCES
- $<TARGET_OBJECTS:vdstestlib_vdstestlib_cppunit>
+ $<TARGET_OBJECTS:vdstestlib_vdstestlib_config>
INSTALL lib64
- DEPENDS
- cppunit
)
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/.gitignore b/vdstestlib/src/vespa/vdstestlib/config/.gitignore
index 583460ae288..583460ae288 100644
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/.gitignore
+++ b/vdstestlib/src/vespa/vdstestlib/config/.gitignore
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/CMakeLists.txt b/vdstestlib/src/vespa/vdstestlib/config/CMakeLists.txt
index e7d8705b07d..90b7c95fe50 100644
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/CMakeLists.txt
+++ b/vdstestlib/src/vespa/vdstestlib/config/CMakeLists.txt
@@ -1,7 +1,6 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_library(vdstestlib_vdstestlib_cppunit OBJECT
+vespa_add_library(vdstestlib_vdstestlib_config OBJECT
SOURCES
- cppunittestrunner.cpp
dirconfig.cpp
DEPENDS
)
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.cpp b/vdstestlib/src/vespa/vdstestlib/config/dirconfig.cpp
index 3a26ed9dec8..3a26ed9dec8 100644
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.cpp
+++ b/vdstestlib/src/vespa/vdstestlib/config/dirconfig.cpp
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.h b/vdstestlib/src/vespa/vdstestlib/config/dirconfig.h
index 388348c880e..c0a1571f52c 100644
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.h
+++ b/vdstestlib/src/vespa/vdstestlib/config/dirconfig.h
@@ -1,7 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
/**
* \class vdstestlib::DirConfig
- * \ingroup cppunit
+ * \ingroup config
*
* \brief Helper class for generating dir config
*
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.hpp b/vdstestlib/src/vespa/vdstestlib/config/dirconfig.hpp
index f5d886c089a..5d127879e34 100644
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/dirconfig.hpp
+++ b/vdstestlib/src/vespa/vdstestlib/config/dirconfig.hpp
@@ -2,7 +2,7 @@
#pragma once
-#include <vespa/vdstestlib/cppunit/dirconfig.h>
+#include <vespa/vdstestlib/config/dirconfig.h>
#include <sstream>
#include <boost/lexical_cast.hpp>
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/cppunittestrunner.cpp b/vdstestlib/src/vespa/vdstestlib/cppunit/cppunittestrunner.cpp
deleted file mode 100644
index dbedf1ee900..00000000000
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/cppunittestrunner.cpp
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vdstestlib/cppunit/cppunittestrunner.h>
-
-#include <cppunit/extensions/TestFactoryRegistry.h>
-#include <cppunit/ui/text/TestRunner.h>
-#include <cppunit/TextTestProgressListener.h>
-#include <vespa/log/log.h>
-#include <iostream>
-
-LOG_SETUP(".cppunittestrunner");
-
-using CppUnit::Test;
-using CppUnit::TestSuite;
-
-namespace vdstestlib {
-
-namespace {
- struct WantedTestList : public CppUnit::Test::Filter {
- std::vector<std::string> _wanted;
- bool _includeStressTests;
-
- WantedTestList(int argc, const char *argv[],
- bool includeStressTests)
- : _wanted(),
- _includeStressTests(includeStressTests)
- {
- for (int i=1; i<argc; ++i) {
- if (argv[i][0] != '-') {
- std::cerr << "Running tests matching '*"
- << argv[i] << "*'.\n";
- _wanted.push_back(argv[i]);
- }
- }
- char* testpat = getenv("TEST_SUBSET");
- if (testpat != 0) {
- std::string pat = std::string("*") + testpat;
- if (pat[pat.size() - 1] != '$') pat += "*";
- std::cerr << "Running tests matching '" << pat << "'."
- " (Taken from TEST_SUBSET environment variable)\n";
- _wanted.push_back(testpat);
- }
- if (CppUnit::Test::disabledCount > 0) {
- std::cerr << CppUnit::Test::disabledCount
- << " tests are currently disabled and won't be "
- << "attempted run.\n";
- }
- if (CppUnit::Test::ignoredCount > 0) {
- std::cerr << CppUnit::Test::ignoredCount
- << " tests are currently set to ignore failures.\n";
- }
- }
-
- std::string getWantedString(uint32_t index) const {
- std::string s = _wanted[index];
- if (s[s.size() - 1] == '$') {
- return s.substr(0, s.size() - 1);
- }
- return s;
- }
-
- bool requiresTailMatch(uint32_t index) const {
- std::string s = _wanted[index];
- return (s[s.size() - 1] == '$');
- }
-
- bool include(const std::string& name) const override {
- if ((name.find("stress") != std::string::npos ||
- name.find("Stress") != std::string::npos)
- && !_includeStressTests)
- {
- std::cerr << "Excluding stress test " << name << "\n";
- } else {
- if (_wanted.size() == 0) return true;
- for (uint32_t i=0; i<_wanted.size(); ++i) {
- std::string::size_type pos = name.rfind(getWantedString(i));
- if (pos == std::string::npos) continue;
- if (!requiresTailMatch(i)
- || pos == name.size() - getWantedString(i).size())
- {
- return true;
- }
- }
- }
- return false;
- }
- };
-
- struct LogHook : public CppUnit::TextTestProgressListener::TestStartHook {
- std::string lastTest;
- void startedTest(const std::string& testName) override {
- LOG(info, "Starting test: %s", testName.c_str());
- lastTest = testName;
- }
- void stoppedTest() override {
- LOG(info, "Stopped test: %s", lastTest.c_str());
- }
- };
-}
-
-void CppUnitTestRunner::listTests(const TestSuite *tests) {
- for (const auto & test : tests->getTests()) {
- std::cout << test->getName() << std::endl;
- }
-}
-
-CppUnitTestRunner::CppUnitTestRunner()
-{
- std::ios::sync_with_stdio();
-}
-
-int
-CppUnitTestRunner::run(int argc, const char *argv[])
-{
- CppUnit::TextUi::TestRunner runner;
- CppUnit::TestFactoryRegistry& registry(
- CppUnit::TestFactoryRegistry::getRegistry());
-
- Test *tests = registry.makeTest();
- TestSuite *suite = dynamic_cast<TestSuite *>(tests);
-
- bool includeStressTests = false;
- bool logStartStop = false;
- bool verbose = false;
- if (getenv("TEST_VERBOSE") != 0) {
- verbose = true;
- }
-
- for (int i=1; i<argc; ++i) {
- std::string arg(argv[i]);
- if (arg == "--verbose") {
- verbose = true;
- logStartStop = true;
- } else if (arg == "--includestress") {
- includeStressTests = true;
- } else if (arg == "--list") {
- listTests(suite);
- exit(0);
- } else if (argv[i][0] == '-') {
- std::cerr << "Illegal option " << arg << "\n";
- exit(1);
- } else {
- // Arguments will be passed as patterns
- }
- }
-
- WantedTestList wantedList(argc, argv, includeStressTests);
- suite->filter(wantedList);
- runner.addTest(tests);
- CppUnit::TextTestProgressListener::verboseProgress = verbose;
- if (logStartStop) {
- CppUnit::TextTestProgressListener::startHook.reset(new LogHook);
- }
- return (runner.run("", false) ? 0 : -1);
-}
-
-} // vdstestlib
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/cppunittestrunner.h b/vdstestlib/src/vespa/vdstestlib/cppunit/cppunittestrunner.h
deleted file mode 100644
index 057f116a7d2..00000000000
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/cppunittestrunner.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * @class CppUnitTestRunner
- * @ingroup cppunit
- *
- * @brief Application for running cppunit tests.
- *
- * This is an application to use when running cppunit tests, currently used
- * in document,vdslib,storageapi and storage.
- *
- * It is built like a library, as one need to create one per project, but the
- * cppunit test application file in each project can now only contain a little
- * main method, creating an instance of this class and calling run.
- *
- * See storage/src/cpp/tests/testrunner.h for an example of simple app using
- * this.
- *
- * When using this test binary you have the following options.
- *
- * If the TEST_SUBSET environment variable is set, only tests matching the
- * pattern given in the environment is run. For instance, by doing
- * TEST_SUBSET=foo ./testrunner, only tests that match the regular expression
- * .*foo.* will be run. Optionally you can postfix your expression with a
- * dollar, to only run tests that end with the given string. You can match
- * against any part of the function shown in verbose mode. For instance
- * TEST_SUBSET=foo::bar$ will run tests whose test class ends in foo, and
- * having test name bar.
- *
- * You can specify --verbose mode. In verbose mode, each test name is printed
- * to stdout when started, and after completion, the runtime of the test is
- * shown. This is very useful to identify slow unit tests which should be
- * improved, and also to see in what test the system might be hung up in. In
- * addition, in verbose mode, a vespa log entry is given at the start and end
- * of each test, such that one can identify which parts of the vespa log belongs
- * to each test, in case you are redirecting the log to a file.
- *
- * You can also use the --includestress option to also include tests that match
- * the regular expression '.*[sS]tress.*'. These are excluded by default, such
- * that regular test runs can be quick.
- */
-
-#pragma once
-
-#include <cppunit/TestSuite.h>
-
-namespace vdstestlib {
-
-class CppUnitTestRunner {
-public:
- CppUnitTestRunner();
-
- void listTests(const CppUnit::TestSuite *tests);
- int run(int argc, const char * argv[]);
-
-};
-
-} // vdstestlib
-
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/macros.h b/vdstestlib/src/vespa/vdstestlib/cppunit/macros.h
deleted file mode 100644
index fdde94bbbd6..00000000000
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/macros.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * This file contains additional CPPUNIT macros to simplify tests.
- */
-#pragma once
-#include <cppunit/extensions/HelperMacros.h>
-#include <vespa/vespalib/test/insertion_operators.h>
-
-
-// Wrapper for CPPUNIT_ASSERT_EQUAL_MESSAGE to prevent it from evaluating
-// message if val1 is equal to val2
-#define CPPUNIT_ASSERT_EQUAL_MSG(message, val1, val2) \
- { \
- if (!((val1) == (val2))) { \
- CPPUNIT_ASSERT_EQUAL_MESSAGE(message, val1, val2); \
- } \
- }
-
-#define CPPUNIT_ASSERT_EQUAL_ESCAPED(val1, val2) \
- { \
- if (!((val1) == (val2))) { \
- std::ostringstream out1; \
- std::ostringstream out2; \
- out1 << "[" << val1 << "]"; \
- out2 << "[" << val2 << "]"; \
- CPPUNIT_ASSERT_EQUAL( \
- document::StringUtil::escape(out1.str()), \
- document::StringUtil::escape(out2.str())); \
- } \
- }
-
-// Wrapper for CPPUNIT_ASSERT_MESSAGE to prevent it from evaluating message if
-// val is true
-#define CPPUNIT_ASSERT_MSG(message, val) \
- { \
- if (!(val)) { \
- CPPUNIT_ASSERT_MESSAGE(message, val); \
- } \
- }
-
-// Assert that value starts with prefix
-#define CPPUNIT_ASSERT_PREFIX(prefix, value) \
- { \
- std::ostringstream pre; \
- pre << prefix; \
- std::ostringstream val; \
- val << value; \
- if (val.str().find(pre.str()) != 0) { \
- CPPUNIT_FAIL("Value of '" + val.str() + "' does not contain " \
- "prefix '" + pre.str() + "'."); \
- } \
- }
-
-// Assert that value contains given substring
-#define CPPUNIT_ASSERT_CONTAIN(contained, value) \
- { \
- std::ostringstream cont; \
- cont << contained; \
- std::ostringstream val; \
- val << value; \
- if (val.str().find(cont.str()) == std::string::npos) { \
- CPPUNIT_FAIL("Value of '" + val.str() + "' does not contain '" \
- + cont.str() + "'."); \
- } \
- }
-
-// Assert that value contains given substring, add message to output on error
-#define CPPUNIT_ASSERT_CONTAIN_MESSAGE(message, contained, value) \
- { \
- std::ostringstream cont; \
- cont << contained; \
- std::ostringstream val; \
- val << value; \
- std::string mess = message; \
- if (val.str().find(cont.str()) == std::string::npos) { \
- CPPUNIT_FAIL(mess + ": Value of '" + val.str() \
- + "' does not contain '" + cont.str() + "'."); \
- } \
- }
-
-// Assert that given expression matches the given regular expression.
-#include <vespa/vespalib/util/regexp.h>
-#define CPPUNIT_ASSERT_MATCH_REGEX(expression, value) \
- { \
- std::ostringstream _ost_; \
- _ost_ << value; \
- std::string _s_(_ost_.str()); \
- vespalib::Regexp _myregex_(expression); \
- if (!_myregex_.match(_s_)) { \
- CPPUNIT_FAIL("Value of '" + _s_ + "' does not match regex '" \
- + expression + "'."); \
- } \
- }
-
-// Assert that given expression matches the given regular expression.
-#include <vespa/vespalib/util/regexp.h>
-#define CPPUNIT_ASSERT_MATCH_REGEX_MSG(message, expression, value) \
- { \
- std::ostringstream _ost_; \
- _ost_ << value; \
- std::string _s_(_ost_.str()); \
- vespalib::Regexp _myregex_(expression); \
- std::string mess = message; \
- if (!_myregex_.match(_s_)) { \
- CPPUNIT_FAIL("Value of '" + _s_ + "' does not match regex '" \
- + expression + "'. Message: '" + mess + "'"); \
- } \
- }
-
-#define CPPUNIT_ASSERT_FILE_CONTAINS(expected, filename) \
- { \
- std::ostringstream value; \
- value << expected; \
- std::ostringstream ost; \
- std::string line; \
- std::ifstream input(filename); \
- while (std::getline(input, line, '\n')) { \
- ost << line << '\n'; \
- } \
- CPPUNIT_ASSERT_EQUAL(value.str(), ost.str()); \
- }
-
-#define CPPUNIT_ASSERT_SUBSTRING_COUNT(source, expectedCount, substring) \
- { \
- uint32_t count = 0; \
- std::ostringstream value; /* Let value be non-strings */ \
- value << source; \
- std::string s(value.str()); \
- std::string::size_type pos = s.find(substring); \
- while (pos != std::string::npos) { \
- ++count; \
- pos = s.find(substring, pos+1); \
- } \
- if (count != (uint32_t) expectedCount) { \
- std::ostringstream error; \
- error << "Value of '" << s << "' contained " << count \
- << " instances of substring '" << substring << "', not " \
- << expectedCount << " as expected."; \
- CPPUNIT_FAIL(error.str()); \
- } \
- }
-
-#include <ostream>
-#include <map>
-#include <unordered_map>
-#include <vector>
-
-// Create output operator for containers.
-// Needed so we can use CPPUNIT_ASSERT_EQUAL with them.
-
-template<typename S, typename T>
-std::ostream&
-operator<<(std::ostream& out, const std::unordered_map<S, T>& umap)
-{
- out << "std::unordered_map(" << umap.size() << ") {";
- for (auto keyValue : umap) {
- out << "\n " << keyValue.first << ": " << keyValue.second;
- }
- if (!umap.empty()) {
- out << "\n";
- }
- out << "}";
- return out;
-}
diff --git a/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java b/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
index bc34a4ac3df..3afd84725bf 100644
--- a/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
+++ b/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
@@ -203,7 +203,7 @@ public class DocumentGenMojo extends AbstractMojo {
" * Generated by vespa-documentgen-plugin, do not edit.\n" +
" * Date: "+new Date()+"\n" +
" */\n");
- out.write("@com.yahoo.document.Generated public class ConcreteDocumentFactory extends com.yahoo.docproc.AbstractConcreteDocumentFactory {\n");
+ out.write("@com.yahoo.document.Generated\npublic class ConcreteDocumentFactory extends com.yahoo.docproc.AbstractConcreteDocumentFactory {\n");
out.write(ind()+"private static java.util.Map<java.lang.String, java.lang.Class<? extends com.yahoo.document.Document>> dTypes = new java.util.HashMap<java.lang.String, java.lang.Class<? extends com.yahoo.document.Document>>();\n");
out.write(ind()+"private static java.util.Map<java.lang.String, com.yahoo.document.DocumentType> docTypes = new java.util.HashMap<>();\n");
out.write(ind()+"private static java.util.Map<java.lang.String, java.lang.Class<? extends com.yahoo.document.datatypes.Struct>> sTypes = new java.util.HashMap<java.lang.String, java.lang.Class<? extends com.yahoo.document.datatypes.Struct>>();\n");
@@ -316,7 +316,7 @@ public class DocumentGenMojo extends AbstractMojo {
" * Input annotation type: "+annType.getName()+"\n" +
" * Date: "+new Date()+"\n" +
" */\n" +
- "@com.yahoo.document.Generated public "+annTypeModifier(annType)+"class "+className+" extends "+getParentAnnotationType(annType)+" {\n\n");
+ "@com.yahoo.document.Generated\npublic "+annTypeModifier(annType)+"class "+className+" extends "+getParentAnnotationType(annType)+" {\n\n");
if (annType.getDataType() instanceof StructDataType) {
out.write(ind() + "public "+className+"() {\n" +
ind(2) + "setType(new com.yahoo.document.annotation.AnnotationType(\""+annType.getName()+"\", Fields.type));\n" +
@@ -435,7 +435,7 @@ public class DocumentGenMojo extends AbstractMojo {
" * Input document type: "+docType.getName()+"\n" +
" * Date: "+new Date()+"\n" +
" */\n" +
- "@com.yahoo.document.Generated public class "+className+" extends "+superType+" {\n\n"+
+ "@com.yahoo.document.Generated\npublic class "+className+" extends "+superType+" {\n\n"+
ind(1)+"/** The doc type of this.*/\n" +
ind(1)+"public static final com.yahoo.document.DocumentType type = getDocumentType();\n\n"+
ind(1)+"/** Struct type view of the type of the body of this.*/\n" +
@@ -567,19 +567,13 @@ public class DocumentGenMojo extends AbstractMojo {
out.write(ind(ind)+"public "+className+"(com.yahoo.document.datatypes.StructuredFieldValue src) {\n"+
ind(ind+1)+"super("+className+".type);\n");
}
+ out.write(ind() + "ConcreteDocumentFactory factory = new ConcreteDocumentFactory();");
out.write(
ind(ind+1)+"for (java.util.Iterator<java.util.Map.Entry<com.yahoo.document.Field, com.yahoo.document.datatypes.FieldValue>>i=src.iterator() ; i.hasNext() ; ) {\n" +
ind(ind+2)+"java.util.Map.Entry<com.yahoo.document.Field, com.yahoo.document.datatypes.FieldValue> e = i.next();\n" +
ind(ind+2)+"com.yahoo.document.Field f = e.getKey();\n" +
ind(ind+2)+"com.yahoo.document.datatypes.FieldValue fv = e.getValue();\n" +
- ind(ind+2)+"if (fv instanceof com.yahoo.document.datatypes.StructuredFieldValue) {\n" +
- ind(ind+3)+"try {\n" +
- ind(ind+4)+"com.yahoo.document.datatypes.StructuredFieldValue newVal = ConcreteDocumentFactory.structTypes.get(f.getDataType().getName()).getConstructor(com.yahoo.document.datatypes.StructuredFieldValue.class).newInstance(fv);\n" +
- ind(ind+4)+"setFieldValue(f, newVal);\n" +
- ind(ind+3)+"} catch (java.lang.Exception ex) { throw new java.lang.RuntimeException(ex); }\n" +
- ind(ind+2)+"} else {\n" +
- ind(ind+3)+"setFieldValue(f, fv);\n" +
- ind(ind+2)+"}\n" +
+ ind(ind+2)+"setFieldValue(f, factory.optionallyUpgrade(f, fv));\n" +
ind(ind+1)+"}\n"+
ind(ind)+"}\n\n");
}
diff --git a/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java b/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java
index 221ff2bc9a4..4ebabe63c1d 100644
--- a/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java
+++ b/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/PomXmlGenerator.java
@@ -36,16 +36,22 @@ public class PomXmlGenerator {
" <version>1.0.0</version>\n" +
"\n" +
" <properties>\n" +
- " <maven_version>4.12</maven_version>\n" +
+ " <junit_version>5.4.2</junit_version>\n" +
" <surefire_version>2.22.0</surefire_version>\n" +
"%PROPERTIES%" +
" </properties>\n" +
"\n" +
" <dependencies>\n" +
" <dependency>\n" +
- " <groupId>junit</groupId>\n" +
- " <artifactId>junit</artifactId>\n" +
- " <version>${maven_version}</version>\n" +
+ " <groupId>org.junit.vintage</groupId>\n" +
+ " <artifactId>junit-vintage-engine</artifactId>\n" +
+ " <version>${junit_version}</version>\n" +
+ " <scope>test</scope>\n" +
+ " </dependency>\n" +
+ " <dependency>\n" +
+ " <groupId>org.junit.jupiter</groupId>\n" +
+ " <artifactId>junit-jupiter-engine</artifactId>\n" +
+ " <version>${junit_version}</version>\n" +
" <scope>test</scope>\n" +
" </dependency>\n" +
"%DEPENDENCIES%" +
diff --git a/vespa-testrunner-components/src/test/resources/pom.xml_system_tests b/vespa-testrunner-components/src/test/resources/pom.xml_system_tests
index 86c36afd636..263bd27a4f3 100644
--- a/vespa-testrunner-components/src/test/resources/pom.xml_system_tests
+++ b/vespa-testrunner-components/src/test/resources/pom.xml_system_tests
@@ -6,7 +6,7 @@
<version>1.0.0</version>
<properties>
- <maven_version>4.12</maven_version>
+ <junit_version>5.4.2</junit_version>
<surefire_version>2.22.0</surefire_version>
<my-comp.jar.path>components/my-comp.jar</my-comp.jar.path>
<main.jar.path>main.jar</main.jar.path>
@@ -14,9 +14,15 @@
<dependencies>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <version>${maven_version}</version>
+ <groupId>org.junit.vintage</groupId>
+ <artifactId>junit-vintage-engine</artifactId>
+ <version>${junit_version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
+ <version>${junit_version}</version>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/vespabase/desc.vespa_base_dev b/vespabase/desc.vespa_base_dev
index de19051806b..1f05be94257 100644
--- a/vespabase/desc.vespa_base_dev
+++ b/vespabase/desc.vespa_base_dev
@@ -33,5 +33,3 @@ Development tools and 3rd party libraries:
* vespa_boost_dev: Boost headers,
prepared for use with the Vespa compiler.
- * vespa_cppunit_dev: Headers and libraries prepared with the
- Vespa compiler.
diff --git a/vespajlib/src/main/java/com/yahoo/io/NativeIO.java b/vespajlib/src/main/java/com/yahoo/io/NativeIO.java
index f69bdb4e3dd..109b7ff7943 100644
--- a/vespajlib/src/main/java/com/yahoo/io/NativeIO.java
+++ b/vespajlib/src/main/java/com/yahoo/io/NativeIO.java
@@ -54,19 +54,28 @@ public class NativeIO {
}
/**
- * Will hint the OS that this is will not be accessed again and should hence be dropped from the buffer cache.
+ * Will hint the OS that data read so far will not be accessed again and should hence be dropped from the buffer cache.
* @param fd The file descriptor to drop from buffer cache.
*/
- public void dropFileFromCache(FileDescriptor fd) {
- try {
- fd.sync();
- } catch (SyncFailedException e) {
- logger.warning("Sync failed while dropping cache: " + e.getMessage());
+ public void dropPartialFileFromCache(FileDescriptor fd, long offset, long len, boolean sync) {
+ if (sync) {
+ try {
+ fd.sync();
+ } catch (SyncFailedException e) {
+ logger.warning("Sync failed while dropping cache: " + e.getMessage());
+ }
}
if (initialized) {
- posix_fadvise(getNativeFD(fd), 0, 0, POSIX_FADV_DONTNEED);
+ posix_fadvise(getNativeFD(fd), offset, len, POSIX_FADV_DONTNEED);
}
}
+ /**
+ * Will hint the OS that this will not be accessed again and should hence be dropped from the buffer cache.
+ * @param fd The file descriptor to drop from buffer cache.
+ */
+ public void dropFileFromCache(FileDescriptor fd) {
+ dropPartialFileFromCache(fd, 0, 0, true);
+ }
/**
* Will hint the OS that this is will not be accessed again and should hence be dropped from the buffer cache.
diff --git a/vespalib/src/tests/stllike/asciistream_test.cpp b/vespalib/src/tests/stllike/asciistream_test.cpp
index b1ba70e6ae2..fd362d9c49a 100644
--- a/vespalib/src/tests/stllike/asciistream_test.cpp
+++ b/vespalib/src/tests/stllike/asciistream_test.cpp
@@ -40,10 +40,21 @@ AsciistreamTest::verifyBothWays(T value, const char * expected)
os << value;
EXPECT_EQUAL(os.str(), string(expected));
EXPECT_EQUAL(os.size(), strlen(expected));
- T v;
- os >> v;
- EXPECT_EQUAL(value, v);
- EXPECT_TRUE(os.empty());
+ {
+ T v;
+ os >> v;
+ EXPECT_EQUAL(value, v);
+ EXPECT_TRUE(os.empty());
+ }
+
+ {
+ os << " " << expected;
+ T v;
+ os >> v;
+ EXPECT_EQUAL(value, v);
+ EXPECT_TRUE(os.empty());
+ EXPECT_EQUAL(0u, os.size());
+ }
}
template <typename T>
@@ -72,16 +83,16 @@ AsciistreamTest::testIllegalNumbers()
{
asciistream is("777777777777");
uint16_t s(0);
- EXPECT_EXCEPTION(is >> s, IllegalArgumentException, "An unsigned short can not represent '777777777777'");
+ EXPECT_EXCEPTION(is >> s, IllegalArgumentException, "strToInt value '777777777777' is outside of range");
EXPECT_EQUAL(12u, is.size());
uint32_t i(0);
- EXPECT_EXCEPTION(is >> i, IllegalArgumentException, "An unsigned int can not represent '777777777777'");
+ EXPECT_EXCEPTION(is >> i, IllegalArgumentException, "strToInt value '777777777777' is outside of range");
EXPECT_EQUAL(12u, is.size());
int16_t si(0);
- EXPECT_EXCEPTION(is >> si, IllegalArgumentException, "A short can not represent '777777777777'");
+ EXPECT_EXCEPTION(is >> si, IllegalArgumentException, "strToInt value '777777777777' is outside of range");
EXPECT_EQUAL(12u, is.size());
int32_t ii(0);
- EXPECT_EXCEPTION(is >> ii, IllegalArgumentException, "An int can not represent '777777777777'");
+ EXPECT_EXCEPTION(is >> ii, IllegalArgumentException, "strToInt value '777777777777' is outside of range");
EXPECT_EQUAL(12u, is.size());
is << "777777777777";
EXPECT_EQUAL(24u, is.size());
@@ -95,10 +106,10 @@ AsciistreamTest::testIllegalNumbers()
{
asciistream is("-77");
uint16_t s(0);
- EXPECT_EXCEPTION(is >> s, IllegalArgumentException, "An unsigned short can not represent '-77'");
+ EXPECT_EXCEPTION(is >> s, IllegalArgumentException, "Illegal strToInt value '-77'");
EXPECT_EQUAL(3u, is.size());
uint32_t i(0);
- EXPECT_EXCEPTION(is >> i, IllegalArgumentException, "An unsigned int can not represent '-77'");
+ EXPECT_EXCEPTION(is >> i, IllegalArgumentException, "Illegal strToInt value '-77'");
EXPECT_EQUAL(3u, is.size());
}
{
@@ -131,12 +142,12 @@ AsciistreamTest::testIllegalNumbers()
EXPECT_TRUE(is.empty());
{
uint32_t l(0);
- EXPECT_EXCEPTION(is >> l, IllegalArgumentException, "Failed decoding a unsigned long long from ''.");
+ EXPECT_EXCEPTION(is >> l, IllegalArgumentException, "buffer underflow at pos 0.");
EXPECT_TRUE(is.empty());
}
{
int32_t l(0);
- EXPECT_EXCEPTION(is >> l, IllegalArgumentException, "Failed decoding a long long from ''.");
+ EXPECT_EXCEPTION(is >> l, IllegalArgumentException, "buffer underflow at pos 0");
EXPECT_TRUE(is.empty());
}
{
diff --git a/vespalib/src/tests/testkit-testhook/CMakeLists.txt b/vespalib/src/tests/testkit-testhook/CMakeLists.txt
index c4662adc8a8..4febe6ebb75 100644
--- a/vespalib/src/tests/testkit-testhook/CMakeLists.txt
+++ b/vespalib/src/tests/testkit-testhook/CMakeLists.txt
@@ -4,6 +4,5 @@ vespa_add_executable(vespalib_testkit-testhook_test_app TEST
testkit-testhook_test.cpp
DEPENDS
vespalib
- cppunit
)
vespa_add_test(NAME vespalib_testkit-testhook_test_app COMMAND vespalib_testkit-testhook_test_app)
diff --git a/vespalib/src/tests/testkit-testhook/testkit-testhook_test.cpp b/vespalib/src/tests/testkit-testhook/testkit-testhook_test.cpp
index 164f4cd7247..df7916dd56d 100644
--- a/vespalib/src/tests/testkit-testhook/testkit-testhook_test.cpp
+++ b/vespalib/src/tests/testkit-testhook/testkit-testhook_test.cpp
@@ -1,7 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/test_kit.h>
-
-#include <cppunit/extensions/HelperMacros.h>
#include <stdexcept>
//-----------------------------------------------------------------------------
@@ -150,12 +148,7 @@ IGNORE_TEST("passed tests can also be ignored") {
//-----------------------------------------------------------------------------
-TEST("cppunit unwind will result in 1 failed test and 1 failed check") {
- CPPUNIT_ASSERT_EQUAL_MESSAGE("cppunit happy", 1, 1);
- CPPUNIT_ASSERT_EQUAL_MESSAGE("cppunit not happy", 1, 2);
-}
-
-TEST("std::excpetion unwind will result in 1 failed test and 1 failed check") {
+TEST("std::exception unwind will result in 1 failed test and 1 failed check") {
throw std::runtime_error("something failed");
}
@@ -172,7 +165,7 @@ TEST("verify and ignore check failures from previous tests") {
TEST("verify that all appropriate tests have been executed") {
TEST_FLUSH();
- EXPECT_EQUAL(25u, TEST_MASTER.getProgress().passCnt);
+ EXPECT_EQUAL(24u, TEST_MASTER.getProgress().passCnt);
}
//-----------------------------------------------------------------------------
diff --git a/vespalib/src/vespa/vespalib/stllike/asciistream.cpp b/vespalib/src/vespa/vespalib/stllike/asciistream.cpp
index 7d585cf1cf6..8114923a9fc 100644
--- a/vespalib/src/vespa/vespalib/stllike/asciistream.cpp
+++ b/vespalib/src/vespa/vespalib/stllike/asciistream.cpp
@@ -10,7 +10,8 @@
#include <limits>
#include <stdexcept>
#include <cassert>
-#include <math.h>
+#include <cmath>
+#include <charconv>
#include <vespa/log/log.h>
LOG_SETUP(".vespalib.stllike.asciistream");
@@ -77,9 +78,7 @@ asciistream::asciistream(stringref buf) :
}
}
-asciistream::~asciistream()
-{
-}
+asciistream::~asciistream() = default;
asciistream::asciistream(const asciistream & rhs) :
_rPos(0),
@@ -145,10 +144,11 @@ namespace {
int getValue(double & val, const char *buf) __attribute__((noinline));
int getValue(float & val, const char *buf) __attribute__((noinline));
-int getValue(unsigned long long & val, const char *buf) __attribute__((noinline));
-int getValue(long long & val, const char *buf) __attribute__((noinline));
void throwInputError(int e, const char * t, const char * buf) __attribute__((noinline));
+void throwInputError(std::errc e, const char * t, const char * buf) __attribute__((noinline));
void throwUnderflow(size_t pos) __attribute__((noinline));
+template <typename T>
+T strToInt(T & v, const char *begin, const char *end) __attribute__((noinline));
void throwInputError(int e, const char * t, const char * buf)
{
@@ -163,6 +163,16 @@ void throwInputError(int e, const char * t, const char * buf)
}
}
+void throwInputError(std::errc e, const char * t, const char * buf) {
+ if (e == std::errc::invalid_argument) {
+ throw IllegalArgumentException("Illegal " + string(t) + " value '" + string(buf) + "'.", VESPA_STRLOC);
+ } else if (e == std::errc::result_out_of_range) {
+ throw IllegalArgumentException(string(t) + " value '" + string(buf) + "' is outside of range.", VESPA_STRLOC);
+ } else {
+ throw IllegalArgumentException("Unknown error decoding an " + string(t) + " from '" + string(buf) + "'.", VESPA_STRLOC);
+ }
+}
+
void throwUnderflow(size_t pos)
{
throw IllegalArgumentException(make_string("buffer underflow at pos %ld.", pos), VESPA_STRLOC);
@@ -190,26 +200,28 @@ int getValue(float & val, const char *buf)
return ebuf - buf;
}
-int getValue(unsigned long long & val, const char *buf)
+template <typename T>
+T strToInt(T & v, const char *begin, const char *end)
{
- char *ebuf;
- errno = 0;
- val = strtoull(buf, &ebuf, 0);
- if ((errno != 0) || (buf == ebuf)) {
- throwInputError(errno, "unsigned long long", buf);
- }
- return ebuf - buf;
-}
+ const char * curr = begin;
+ for (;(curr < end) && std::isspace(static_cast<unsigned char>(*curr)); curr++);
-int getValue(long long & val, const char *buf)
-{
- char *ebuf;
- errno = 0;
- val = strtoll(buf, &ebuf, 0);
- if ((errno != 0) || (buf == ebuf)) {
- throwInputError(errno, "long long", buf);
+ std::from_chars_result err;
+ if (((end - curr) > 2) && (curr[0] == '0') && ((curr[1] | 0x20) == 'x')) {
+ err = std::from_chars(curr+2, end, v, 16);
+ } else {
+ err = std::from_chars(curr, end, v, 10);
}
- return ebuf - buf;
+ if (err.ec == std::errc::invalid_argument) {
+ if (err.ptr >= end) {
+ throwUnderflow(err.ptr - begin);
+ }
+ throwInputError(err.ec, "strToInt", begin);
+ } else if (err.ec == std::errc::result_out_of_range) {
+ throwInputError(err.ec, "strToInt", begin);
+ }
+
+ return err.ptr - begin;
}
}
@@ -260,81 +272,49 @@ asciistream & asciistream::operator >> (unsigned char & v)
asciistream & asciistream::operator >> (unsigned short & v)
{
- unsigned long long l(0);
- size_t r = getValue(l, &_rbuf[_rPos]);
- if (l > std::numeric_limits<unsigned short>::max()) {
- throw IllegalArgumentException(make_string("An unsigned short can not represent '%lld'.", l), VESPA_STRLOC);
- }
- _rPos += r;
- v = l;
+ _rPos += strToInt(v, &_rbuf[_rPos], &_rbuf[length()]);
return *this;
}
asciistream & asciistream::operator >> (unsigned int & v)
{
- unsigned long long l(0);
- size_t r = getValue(l, &_rbuf[_rPos]);
- if (l > std::numeric_limits<unsigned int>::max()) {
- throw IllegalArgumentException(make_string("An unsigned int can not represent '%lld'.", l), VESPA_STRLOC);
- }
- _rPos += r;
- v = l;
+ _rPos += strToInt(v, &_rbuf[_rPos], &_rbuf[length()]);
return *this;
}
asciistream & asciistream::operator >> (unsigned long & v)
{
- unsigned long long l(0);
- _rPos += getValue(l, &_rbuf[_rPos]);
- v = l;
+ _rPos += strToInt(v, &_rbuf[_rPos], &_rbuf[length()]);
return *this;
}
asciistream & asciistream::operator >> (unsigned long long & v)
{
- unsigned long long l(0);
- _rPos += getValue(l, &_rbuf[_rPos]);
- v = l;
+ _rPos += strToInt(v, &_rbuf[_rPos], &_rbuf[length()]);
return *this;
}
asciistream & asciistream::operator >> (short & v)
{
- long long l(0);
- size_t r = getValue(l, &_rbuf[_rPos]);
- if ((l < std::numeric_limits<short>::min()) || (l > std::numeric_limits<short>::max())) {
- throw IllegalArgumentException(make_string("A short can not represent '%lld'.", l), VESPA_STRLOC);
- }
- _rPos += r;
- v = l;
+ _rPos += strToInt(v, &_rbuf[_rPos], &_rbuf[length()]);
return *this;
}
asciistream & asciistream::operator >> (int & v)
{
- long long l(0);
- size_t r = getValue(l, &_rbuf[_rPos]);
- if ((l < std::numeric_limits<int>::min()) || (l > std::numeric_limits<int>::max())) {
- throw IllegalArgumentException(make_string("An int can not represent '%lld'.", l), VESPA_STRLOC);
- }
- _rPos += r;
- v = l;
+ _rPos += strToInt(v, &_rbuf[_rPos], &_rbuf[length()]);
return *this;
}
asciistream & asciistream::operator >> (long & v)
{
- long long l(0);
- _rPos += getValue(l, &_rbuf[_rPos]);
- v = l;
+ _rPos += strToInt(v, &_rbuf[_rPos], &_rbuf[length()]);
return *this;
}
asciistream & asciistream::operator >> (long long & v)
{
- long long l(0);
- _rPos += getValue(l, &_rbuf[_rPos]);
- v = l;
+ _rPos += strToInt(v, &_rbuf[_rPos], &_rbuf[length()]);
return *this;
}