author    Jon Bratseth <bratseth@verizonmedia.com>  2019-06-14 13:50:54 +0200
committer Jon Bratseth <bratseth@verizonmedia.com>  2019-06-14 13:50:54 +0200
commit    799acc9335f0dc3eebc747db8397aef0b4d930a9 (patch)
tree      051baee0f938c2b088daa4a0fcb9120993abd95c
parent    7e2d577daf548e171a7d7bf16a6996f9b894c330 (diff)
parent    1b2c6aa193483f9a7eaaf17a5a82037b93bd1749 (diff)

Merge with master
-rw-r--r--application/src/main/java/com/yahoo/application/Application.java2
-rw-r--r--application/src/main/java/com/yahoo/application/container/JDisc.java1
-rw-r--r--athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/ConfigserverSslContextFactoryProvider.java18
-rw-r--r--athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java9
-rw-r--r--athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/CkmsKeyProvider.java4
-rw-r--r--athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/Utils.java7
-rw-r--r--athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/TestUtils.java13
-rw-r--r--athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java2
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java2
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/FeatureNames.java6
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java2
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java16
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java12
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ExpressionTransforms.java3
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/RankProfileTransformContext.java10
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/processing/AddExtraFieldsToDocument.java12
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java25
-rwxr-xr-xconfig-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java8
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java28
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfilesBuilder.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java7
-rw-r--r--config-model/src/test/configmodel/types/documentmanager.cfg6
-rw-r--r--config-model/src/test/configmodel/types/documenttypes.cfg10
-rw-r--r--config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg22
-rw-r--r--config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg6
-rw-r--r--config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg18
-rw-r--r--config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg12
-rw-r--r--config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg10
-rw-r--r--config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg30
-rw-r--r--config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg20
-rw-r--r--config-model/src/test/derived/advanced/documentmanager.cfg14
-rw-r--r--config-model/src/test/derived/advanced/index-info.cfg12
-rw-r--r--config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/annotationsimplicitstruct/index-info.cfg4
-rw-r--r--config-model/src/test/derived/annotationsinheritance/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/annotationsinheritance/index-info.cfg4
-rw-r--r--config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/annotationsinheritance2/index-info.cfg4
-rw-r--r--config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/annotationspolymorphy/index-info.cfg4
-rw-r--r--config-model/src/test/derived/annotationsreference/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/annotationsreference/index-info.cfg4
-rw-r--r--config-model/src/test/derived/annotationssimple/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/annotationssimple/index-info.cfg4
-rw-r--r--config-model/src/test/derived/annotationsstruct/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/annotationsstructarray/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/array_of_struct_attribute/index-info.cfg4
-rw-r--r--config-model/src/test/derived/arrays/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/arrays/index-info.cfg4
-rw-r--r--config-model/src/test/derived/attributeprefetch/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/attributeprefetch/index-info.cfg4
-rw-r--r--config-model/src/test/derived/attributes/index-info.cfg4
-rw-r--r--config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg4
-rw-r--r--config-model/src/test/derived/complex/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/emptydefault/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/emptydefault/index-info.cfg4
-rw-r--r--config-model/src/test/derived/exactmatch/index-info.cfg4
-rw-r--r--config-model/src/test/derived/fieldset/index-info.cfg4
-rw-r--r--config-model/src/test/derived/id/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/id/index-info.cfg4
-rw-r--r--config-model/src/test/derived/imported_position_field/index-info.cfg6
-rw-r--r--config-model/src/test/derived/imported_position_field_summary/index-info.cfg12
-rw-r--r--config-model/src/test/derived/imported_struct_fields/index-info.cfg4
-rw-r--r--config-model/src/test/derived/importedfields/index-info.cfg4
-rw-r--r--config-model/src/test/derived/indexinfo_fieldsets/index-info.cfg4
-rw-r--r--config-model/src/test/derived/indexinfo_lowercase/index-info.cfg4
-rw-r--r--config-model/src/test/derived/indexschema/index-info.cfg12
-rw-r--r--config-model/src/test/derived/indexswitches/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/indexswitches/index-info.cfg4
-rw-r--r--config-model/src/test/derived/inheritance/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/inheritance/index-info.cfg4
-rw-r--r--config-model/src/test/derived/inheritance/mother/documentmanager.cfg16
-rw-r--r--config-model/src/test/derived/inheritdiamond/documentmanager.cfg16
-rw-r--r--config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/inheritfromparent/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/inheritfromparent/documenttypes.cfg10
-rw-r--r--config-model/src/test/derived/inheritstruct/index-info.cfg4
-rw-r--r--config-model/src/test/derived/mail/documentmanager.cfg4
-rw-r--r--config-model/src/test/derived/mail/index-info.cfg4
-rw-r--r--config-model/src/test/derived/map_attribute/index-info.cfg4
-rw-r--r--config-model/src/test/derived/map_of_struct_attribute/index-info.cfg4
-rw-r--r--config-model/src/test/derived/music/index-info.cfg4
-rw-r--r--config-model/src/test/derived/neuralnet/neuralnet.sd238
-rw-r--r--config-model/src/test/derived/neuralnet/query-profiles/default.xml2
-rw-r--r--config-model/src/test/derived/neuralnet/query-profiles/types/DefaultQueryProfileType.xml8
-rw-r--r--config-model/src/test/derived/neuralnet/rank-profiles.cfg198
-rw-r--r--config-model/src/test/derived/newrank/index-info.cfg4
-rw-r--r--config-model/src/test/derived/position_array/index-info.cfg12
-rw-r--r--config-model/src/test/derived/position_attribute/index-info.cfg12
-rw-r--r--config-model/src/test/derived/position_extra/index-info.cfg12
-rw-r--r--config-model/src/test/derived/predicate_attribute/index-info.cfg4
-rw-r--r--config-model/src/test/derived/prefixexactattribute/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/prefixexactattribute/index-info.cfg4
-rw-r--r--config-model/src/test/derived/ranktypes/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/ranktypes/index-info.cfg4
-rw-r--r--config-model/src/test/derived/streamingstruct/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/structanyorder/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/structanyorder/index-info.cfg4
-rw-r--r--config-model/src/test/derived/tensor/documenttypes.cfg10
-rw-r--r--config-model/src/test/derived/twostreamingstructs/documentmanager.cfg8
-rw-r--r--config-model/src/test/derived/types/documentmanager.cfg6
-rw-r--r--config-model/src/test/derived/types/index-info.cfg4
-rw-r--r--config-model/src/test/examples/fieldoftypedocument.cfg12
-rwxr-xr-xconfig-model/src/test/examples/structresult.cfg6
-rw-r--r--config-model/src/test/integration/vespa/models/example.model2
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java40
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java12
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionLoopDetectionTestCase.java6
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java2
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java8
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/derived/NeuralNetTestCase.java16
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitSearchFieldsTestCase.java20
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitStructTypesTestCase.java2
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java4
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java41
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java7
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java54
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorTransformTestCase.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java5
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java6
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfileVariantsTestCase.java23
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfilesTestCase.java57
-rw-r--r--config-provisioning/abi-spec.json4
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java27
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java26
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java7
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java20
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java11
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/SystemNameTest.java4
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java3
-rw-r--r--configdefinitions/src/vespa/athenz-provider-service.def12
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/CompressedApplicationInputStream.java (renamed from configserver/src/main/java/com/yahoo/vespa/config/server/http/CompressedApplicationInputStream.java)9
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java21
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java46
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java35
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java11
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java6
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/application/CompressedApplicationInputStreamTest.java (renamed from configserver/src/test/java/com/yahoo/vespa/config/server/http/CompressedApplicationInputStreamTest.java)6
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java51
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java62
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java5
-rw-r--r--configserver/src/test/resources/deploy/hosted-app/deployment.xml7
-rw-r--r--configserver/src/test/resources/deploy/hosted-app/services.xml6
-rw-r--r--container-search/abi-spec.json1
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/Model.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/config/QueryProfileXMLReader.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileTypeRegistry.java8
-rw-r--r--container-search/src/main/java/com/yahoo/search/querytransform/DefaultPositionSearcher.java1
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java6
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java59
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java4
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/RoutingGenerator.java7
-rw-r--r--controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/role/RoleTest.java16
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java74
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java92
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java11
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializer.java82
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceForwarder.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceQueue.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java109
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java42
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java13
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java7
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java36
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java102
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/RoutingGeneratorMock.java11
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java18
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java17
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-recursive.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1.json (renamed from controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-west-1.json)0
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/test-config.json21
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json2
-rw-r--r--controller-server/src/test/resources/testConfig.json20
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java5
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java7
-rw-r--r--document/src/vespa/document/base/idstring.cpp1
-rw-r--r--document/src/vespa/document/datatype/referencedatatype.cpp1
-rw-r--r--document/src/vespa/document/fieldvalue/arrayfieldvalue.cpp1
-rw-r--r--document/src/vespa/document/fieldvalue/mapfieldvalue.cpp1
-rw-r--r--document/src/vespa/document/fieldvalue/referencefieldvalue.cpp1
-rw-r--r--document/src/vespa/document/fieldvalue/stringfieldvalue.cpp1
-rw-r--r--document/src/vespa/document/fieldvalue/structfieldvalue.cpp1
-rw-r--r--document/src/vespa/document/select/doctype.cpp1
-rw-r--r--document/src/vespa/document/select/operator.cpp1
-rw-r--r--document/src/vespa/document/select/simpleparser.cpp1
-rw-r--r--document/src/vespa/document/select/value.cpp1
-rw-r--r--document/src/vespa/document/update/addvalueupdate.cpp1
-rw-r--r--document/src/vespa/document/update/arithmeticvalueupdate.cpp1
-rw-r--r--document/src/vespa/document/update/assignfieldpathupdate.cpp1
-rw-r--r--document/src/vespa/document/update/assignvalueupdate.cpp1
-rw-r--r--document/src/vespa/document/update/documentupdate.cpp1
-rw-r--r--document/src/vespa/document/update/fieldupdate.cpp1
-rw-r--r--document/src/vespa/document/update/mapvalueupdate.cpp1
-rw-r--r--document/src/vespa/document/update/removevalueupdate.cpp1
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java2
-rw-r--r--eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp373
-rw-r--r--eval/src/apps/make_tensor_binary_format_test_spec/test_spec.json36
-rw-r--r--eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp6
-rw-r--r--eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp33
-rw-r--r--eval/src/vespa/eval/eval/operation.cpp1
-rw-r--r--eval/src/vespa/eval/tensor/default_tensor_engine.cpp14
-rw-r--r--eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp26
-rw-r--r--eval/src/vespa/eval/tensor/serialization/common.h9
-rw-r--r--eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp61
-rw-r--r--eval/src/vespa/eval/tensor/serialization/dense_binary_format.h15
-rw-r--r--eval/src/vespa/eval/tensor/serialization/sparse_binary_format.cpp138
-rw-r--r--eval/src/vespa/eval/tensor/serialization/sparse_binary_format.h7
-rw-r--r--eval/src/vespa/eval/tensor/serialization/typed_binary_format.cpp101
-rw-r--r--eval/src/vespa/eval/tensor/serialization/typed_binary_format.h11
-rw-r--r--eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp11
-rw-r--r--eval/src/vespa/eval/tensor/tensor.cpp3
-rw-r--r--fastos/src/vespa/fastos/unix_file.h1
-rw-r--r--fbench/src/util/clientstatus.h1
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java5
-rw-r--r--hosted-api/src/main/java/ai/vespa/hosted/api/ApiAuthenticator.java8
-rw-r--r--hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java79
-rw-r--r--hosted-api/src/main/java/ai/vespa/hosted/api/EndpointAuthenticator.java20
-rw-r--r--hosted-api/src/main/java/ai/vespa/hosted/api/Properties.java55
-rw-r--r--hosted-api/src/main/java/ai/vespa/hosted/api/RequestVerifier.java1
-rw-r--r--hosted-api/src/main/java/ai/vespa/hosted/api/TestConfig.java67
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java5
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/TestUtil.java2
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/rpc/IntegrationTester.java2
-rw-r--r--metrics/src/vespa/metrics/metricset.cpp1
-rw-r--r--model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java2
-rw-r--r--model-integration/src/test/models/vespa/example.model2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java7
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java16
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java7
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java18
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java1
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java12
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java13
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java6
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ZoneAppMigrationTest.java171
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java2
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java4
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java6
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java9
-rw-r--r--pom.xml1
-rw-r--r--processing/src/main/java/com/yahoo/processing/request/CompoundName.java7
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/fakesearchcontext.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h5
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp3
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/queryenvironment.cpp13
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/queryenvironment.h8
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/querynodes.cpp9
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/querynodes.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/same_element_builder.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp3
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/proton.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/searchcontext.cpp5
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/searchcontext.h6
-rwxr-xr-xsearchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionNode.java6
-rwxr-xr-xsearchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/IfNode.java4
-rwxr-xr-xsearchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/ReferenceNode.java8
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/ExpressionTransformer.java2
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/TensorMaxMinTransformer.java (renamed from config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/TensorTransformer.java)34
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/TransformContext.java12
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/TypeResolutionTestCase.java4
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/ConstantDereferencerTestCase.java3
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/SimplifierTestCase.java8
-rw-r--r--searchlib/src/tests/common/bitvector/bitvector_test.cpp17
-rw-r--r--searchlib/src/tests/diskindex/fusion/.gitignore1
-rw-r--r--searchlib/src/tests/diskindex/fusion/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/diskindex/fusion/fusion_test.cpp290
-rw-r--r--searchlib/src/tests/features/bm25/bm25_test.cpp120
-rw-r--r--searchlib/src/tests/features/imported_dot_product/imported_dot_product_test.cpp16
-rw-r--r--searchlib/src/tests/features/prod_features.cpp6
-rw-r--r--searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp32
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleboolattribute.h1
-rw-r--r--searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp22
-rw-r--r--searchlib/src/vespa/searchlib/common/allocatedbitvector.h2
-rw-r--r--searchlib/src/vespa/searchlib/common/bitvector.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/common/growablebitvector.h3
-rw-r--r--searchlib/src/vespa/searchlib/common/partialbitvector.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.h4
-rw-r--r--searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp69
-rw-r--r--searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h6
-rw-r--r--searchlib/src/vespa/searchlib/features/attributefeature.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/features/attributefeature.h2
-rw-r--r--searchlib/src/vespa/searchlib/features/bm25_feature.cpp90
-rw-r--r--searchlib/src/vespa/searchlib/features/bm25_feature.h12
-rw-r--r--searchlib/src/vespa/searchlib/features/dotproductfeature.cpp330
-rw-r--r--searchlib/src/vespa/searchlib/features/dotproductfeature.h25
-rw-r--r--searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp213
-rw-r--r--searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.h7
-rw-r--r--searchlib/src/vespa/searchlib/features/queryfeature.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/fef/blueprint.cpp11
-rw-r--r--searchlib/src/vespa/searchlib/fef/blueprint.h27
-rw-r--r--searchlib/src/vespa/searchlib/fef/iqueryenvironment.h9
-rw-r--r--searchlib/src/vespa/searchlib/fef/itermfielddata.h17
-rw-r--r--searchlib/src/vespa/searchlib/fef/objectstore.h29
-rw-r--r--searchlib/src/vespa/searchlib/fef/phrasesplitter.h1
-rw-r--r--searchlib/src/vespa/searchlib/fef/simpletermfielddata.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/fef/simpletermfielddata.h40
-rw-r--r--searchlib/src/vespa/searchlib/fef/test/queryenvironment.cpp10
-rw-r--r--searchlib/src/vespa/searchlib/fef/test/queryenvironment.h19
-rw-r--r--searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.cpp24
-rw-r--r--searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.h2
-rw-r--r--searchlib/src/vespa/searchlib/util/url.cpp1
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java20
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/duper/ZoneApplication.java108
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/health/HealthMonitorManager.java18
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthModel.java23
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/model/ApplicationInstanceGenerator.java17
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/duper/TestZoneApplication.java90
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/health/HealthMonitorManagerTest.java60
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/health/StateV1HealthModelTest.java20
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/manager/UnionMonitorManagerTest.java9
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/model/ApplicationInstanceGeneratorTest.java77
-rw-r--r--staging_vespalib/src/vespa/vespalib/util/rusage.cpp1
-rw-r--r--storage/src/tests/CMakeLists.txt4
-rw-r--r--storage/src/tests/bucketdb/CMakeLists.txt12
-rw-r--r--storage/src/tests/bucketdb/bucketinfotest.cpp150
-rw-r--r--storage/src/tests/bucketdb/bucketmanagertest.cpp506
-rw-r--r--storage/src/tests/bucketdb/initializertest.cpp502
-rw-r--r--storage/src/tests/bucketdb/judyarraytest.cpp236
-rw-r--r--storage/src/tests/bucketdb/judymultimaptest.cpp118
-rw-r--r--storage/src/tests/bucketdb/lockablemaptest.cpp817
-rw-r--r--storage/src/tests/common/CMakeLists.txt7
-rw-r--r--storage/src/tests/common/global_bucket_space_distribution_converter_test.cpp80
-rw-r--r--storage/src/tests/common/metricstest.cpp88
-rw-r--r--storage/src/tests/common/storagelinktest.cpp44
-rw-r--r--storage/src/tests/common/storagelinktest.h40
-rw-r--r--storage/src/tests/distributor/messagesenderstub.h1
-rw-r--r--storage/src/tests/frameworkimpl/status/CMakeLists.txt10
-rw-r--r--storage/src/tests/frameworkimpl/status/statustest.cpp62
-rw-r--r--storage/src/tests/persistence/CMakeLists.txt11
-rw-r--r--storage/src/tests/persistence/bucketownershipnotifiertest.cpp58
-rw-r--r--storage/src/tests/persistence/common/CMakeLists.txt1
-rw-r--r--storage/src/tests/persistence/common/filestortestfixture.cpp8
-rw-r--r--storage/src/tests/persistence/common/filestortestfixture.h19
-rw-r--r--storage/src/tests/persistence/diskmoveoperationhandlertest.cpp27
-rw-r--r--storage/src/tests/persistence/filestorage/CMakeLists.txt11
-rw-r--r--storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp25
-rw-r--r--storage/src/tests/persistence/filestorage/deletebuckettest.cpp26
-rw-r--r--storage/src/tests/persistence/filestorage/filestormanagertest.cpp1531
-rw-r--r--storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp51
-rw-r--r--storage/src/tests/persistence/filestorage/mergeblockingtest.cpp160
-rw-r--r--storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp120
-rw-r--r--storage/src/tests/persistence/filestorage/operationabortingtest.cpp172
-rw-r--r--storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp34
-rw-r--r--storage/src/tests/persistence/filestorage/singlebucketjointest.cpp21
-rw-r--r--storage/src/tests/persistence/mergehandlertest.cpp773
-rw-r--r--storage/src/tests/persistence/persistencequeuetest.cpp82
-rw-r--r--storage/src/tests/persistence/persistencetestutils.cpp9
-rw-r--r--storage/src/tests/persistence/persistencetestutils.h7
-rw-r--r--storage/src/tests/persistence/persistencethread_splittest.cpp121
-rw-r--r--storage/src/tests/persistence/processalltest.cpp148
-rw-r--r--storage/src/tests/persistence/provider_error_wrapper_test.cpp61
-rw-r--r--storage/src/tests/persistence/splitbitdetectortest.cpp254
-rw-r--r--storage/src/tests/persistence/testandsettest.cpp153
-rw-r--r--storage/src/tests/storageserver/bucketintegritycheckertest.cpp16
-rw-r--r--storage/src/tests/storageserver/mergethrottlertest.cpp6
-rw-r--r--storage/src/vespa/storage/bucketdb/lockablemap.hpp1
-rw-r--r--storage/src/vespa/storage/bucketdb/stdmapwrapper.h1
-rw-r--r--storage/src/vespa/storage/common/storagelinkqueued.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp1
-rw-r--r--storage/src/vespa/storage/distributor/messagetracker.h1
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp1
-rw-r--r--storage/src/vespa/storage/persistence/diskthread.h1
-rw-r--r--storage/src/vespa/storage/persistence/filestorage/mergestatus.cpp1
-rw-r--r--storage/src/vespa/storage/persistence/messages.cpp1
-rw-r--r--storage/src/vespa/storage/tools/generatedistributionbits.cpp1
-rw-r--r--storage/src/vespa/storage/visiting/commandqueue.h1
-rw-r--r--storage/src/vespa/storage/visiting/visitor.h1
-rw-r--r--storageapi/src/vespa/storageapi/message/datagram.cpp1
-rw-r--r--storageapi/src/vespa/storageapi/message/documentsummary.cpp1
-rw-r--r--storageapi/src/vespa/storageapi/message/queryresult.cpp1
-rw-r--r--storageapi/src/vespa/storageapi/message/searchresult.cpp1
-rw-r--r--storageapi/src/vespa/storageapi/message/visitor.cpp1
-rw-r--r--storageframework/src/vespa/storageframework/generic/clock/time.cpp1
-rw-r--r--streamingvisitors/src/vespa/searchvisitor/queryenvironment.h2
-rw-r--r--tenant-auth/OWNERS1
-rw-r--r--tenant-auth/README.md1
-rw-r--r--tenant-auth/pom.xml40
-rw-r--r--tenant-auth/src/main/java/ai/vespa/hosted/auth/ApiAuthenticator.java16
-rw-r--r--tenant-auth/src/main/java/ai/vespa/hosted/auth/EndpointAuthenticator.java68
-rw-r--r--tenant-auth/src/test/java/ai/vespa/hosted/auth/AuthenticatorTest.java5
-rw-r--r--tenant-base/pom.xml107
-rw-r--r--tenant-cd/pom.xml31
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Deployment.java19
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Digest.java28
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Document.java16
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/DocumentId.java71
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/EmptyGroup.java9
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Endpoint.java25
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Feed.java25
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/FunctionalTest.java31
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java19
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Query.java70
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Search.java32
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Selection.java58
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java4
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java4
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/TestDeployment.java18
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/TestEndpoint.java13
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/TestRuntime.java98
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/UpgradeTest.java23
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/Visit.java17
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/VisitEndpoint.java10
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/http/HttpDeployment.java53
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/http/HttpEndpoint.java97
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Metric.java87
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Metrics.java73
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Space.java44
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Statistic.java68
-rw-r--r--tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Type.java32
-rw-r--r--vbench/src/vbench/http/benchmark_headers.h1
-rw-r--r--vdslib/src/vespa/vdslib/container/parameters.cpp1
-rw-r--r--vespa-documentgen-plugin/etc/complex/music3.sd2
-rw-r--r--vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/DeployMojo.java7
-rw-r--r--vespajlib/abi-spec.json3
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/IndexedDoubleTensor.java8
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java7
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/Tensor.java6
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java166
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/TensorType.java3
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/TensorTypeParser.java5
-rw-r--r--vespajlib/src/main/java/com/yahoo/text/Lowercase.java91
-rw-r--r--vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java51
-rw-r--r--vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java2
-rw-r--r--vespajlib/src/test/java/com/yahoo/text/LowercaseTestCase.java34
-rw-r--r--vespalib/src/tests/stllike/asciistream_test.cpp74
-rw-r--r--vespalib/src/vespa/vespalib/btree/btree.hpp8
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeinserter.cpp7
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeinserter.h28
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeinserter.hpp8
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeiterator.hpp32
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeremover.cpp4
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeremover.h24
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeremover.hpp11
-rw-r--r--vespalib/src/vespa/vespalib/data/databuffer.cpp1
-rw-r--r--vespalib/src/vespa/vespalib/data/slime/json_format.cpp6
-rw-r--r--vespalib/src/vespa/vespalib/locale/c.cpp18
-rw-r--r--vespalib/src/vespa/vespalib/locale/c.h4
-rw-r--r--vespalib/src/vespa/vespalib/stllike/asciistream.cpp7
-rw-r--r--vespalib/src/vespa/vespalib/util/benchmark_timer.h1
-rw-r--r--vespalog/src/test/threads/testthreads.cpp1
-rw-r--r--vespalog/src/vespa/log/log_message.h1
463 files changed, 6939 insertions, 7830 deletions
diff --git a/application/src/main/java/com/yahoo/application/Application.java b/application/src/main/java/com/yahoo/application/Application.java
index f7b5174b0e5..fb812ba6107 100644
--- a/application/src/main/java/com/yahoo/application/Application.java
+++ b/application/src/main/java/com/yahoo/application/Application.java
@@ -46,7 +46,7 @@ public final class Application implements AutoCloseable {
/**
* This system property is set to "true" upon creation of an Application.
- * This is useful for components which are created by dependendcy injection which needs to modify
+ * This is useful for components which are created by dependency injection which needs to modify
* their behavior to function without reliance on any processes outside the JVM.
*/
public static final String vespaLocalProperty = "vespa.local";
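
The javadoc above describes the vespa.local system property, but no consumer appears in this hunk. Below is a minimal sketch of how an injected component might consult it; the class name LocalAwareComponent and its methods are hypothetical, and only Application.vespaLocalProperty and the "true" value come from the source above.

import com.yahoo.application.Application;

// Hypothetical component (not part of this commit): branches on the property
// documented above to avoid reaching outside the JVM when run inside an Application.
public class LocalAwareComponent {

    private final boolean runningInsideApplication;

    public LocalAwareComponent() {
        // Application sets "vespa.local" to "true" when it is created
        this.runningInsideApplication =
                Boolean.parseBoolean(System.getProperty(Application.vespaLocalProperty, "false"));
    }

    public void refresh() {
        if (runningInsideApplication) return; // local/test mode: skip external processes
        // ... contact external services here ...
    }
}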
diff --git a/application/src/main/java/com/yahoo/application/container/JDisc.java b/application/src/main/java/com/yahoo/application/container/JDisc.java
index 10ff84d3ede..ee22c58a56f 100644
--- a/application/src/main/java/com/yahoo/application/container/JDisc.java
+++ b/application/src/main/java/com/yahoo/application/container/JDisc.java
@@ -21,7 +21,6 @@ import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.test.TestDriver;
import com.yahoo.processing.handler.ProcessingHandler;
import com.yahoo.search.handler.SearchHandler;
-import com.yahoo.search.searchchain.ExecutionFactory;
import java.nio.file.Path;
diff --git a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/ConfigserverSslContextFactoryProvider.java b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/ConfigserverSslContextFactoryProvider.java
index bb3216ba3ba..2bda2eb3627 100644
--- a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/ConfigserverSslContextFactoryProvider.java
+++ b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/ConfigserverSslContextFactoryProvider.java
@@ -37,8 +37,6 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
-import static com.yahoo.vespa.hosted.athenz.instanceproviderservice.impl.Utils.getZoneConfig;
-
/**
* Configures the JDisc https connector with the configserver's Athenz provider certificate and private key.
*
@@ -56,7 +54,7 @@ public class ConfigserverSslContextFactoryProvider extends AbstractComponent imp
Executors.newSingleThreadScheduledExecutor(runnable -> new Thread(runnable, "configserver-ssl-context-factory-provider"));
private final ZtsClient ztsClient;
private final KeyProvider keyProvider;
- private final AthenzProviderServiceConfig.Zones zoneConfig;
+ private final AthenzProviderServiceConfig athenzProviderServiceConfig;
private final AthenzService configserverIdentity;
@Inject
@@ -64,14 +62,14 @@ public class ConfigserverSslContextFactoryProvider extends AbstractComponent imp
KeyProvider keyProvider,
AthenzProviderServiceConfig config,
Zone zone) {
- this.zoneConfig = getZoneConfig(config, zone);
- this.ztsClient = new DefaultZtsClient(URI.create(zoneConfig.ztsUrl()), bootstrapIdentity);
+ this.athenzProviderServiceConfig = config;
+ this.ztsClient = new DefaultZtsClient(URI.create(athenzProviderServiceConfig.ztsUrl()), bootstrapIdentity);
this.keyProvider = keyProvider;
- this.configserverIdentity = new AthenzService(zoneConfig.domain(), zoneConfig.serviceName());
+ this.configserverIdentity = new AthenzService(athenzProviderServiceConfig.domain(), athenzProviderServiceConfig.serviceName());
Duration updatePeriod = Duration.ofDays(config.updatePeriodDays());
Path trustStoreFile = Paths.get(config.athenzCaTrustStore());
- this.sslContextFactory = initializeSslContextFactory(keyProvider, trustStoreFile, updatePeriod, configserverIdentity, ztsClient, zoneConfig);
+ this.sslContextFactory = initializeSslContextFactory(keyProvider, trustStoreFile, updatePeriod, configserverIdentity, ztsClient, athenzProviderServiceConfig);
scheduler.scheduleAtFixedRate(new KeystoreUpdater(sslContextFactory),
updatePeriod.toDays()/*initial delay*/,
updatePeriod.toDays(),
@@ -108,7 +106,7 @@ public class ConfigserverSslContextFactoryProvider extends AbstractComponent imp
Duration updatePeriod,
AthenzService configserverIdentity,
ZtsClient ztsClient,
- AthenzProviderServiceConfig.Zones zoneConfig) {
+ AthenzProviderServiceConfig zoneConfig) {
// TODO Use DefaultTlsContext to configure SslContextFactory (ensure that cipher/protocol configuration is same across all TLS endpoints)
@@ -150,7 +148,7 @@ public class ConfigserverSslContextFactoryProvider extends AbstractComponent imp
char[] keystorePwd,
KeyProvider keyProvider,
ZtsClient ztsClient,
- AthenzProviderServiceConfig.Zones zoneConfig) {
+ AthenzProviderServiceConfig zoneConfig) {
PrivateKey privateKey = keyProvider.getPrivateKey(zoneConfig.secretVersion());
PublicKey publicKey = KeyUtils.extractPublicKey(privateKey);
Identity serviceIdentity = ztsClient.getServiceIdentity(configserverIdentity,
@@ -184,7 +182,7 @@ public class ConfigserverSslContextFactoryProvider extends AbstractComponent imp
try {
log.log(LogLevel.INFO, "Updating configserver provider certificate from ZTS");
char[] keystorePwd = generateKeystorePassword();
- KeyStore keyStore = updateKeystore(configserverIdentity, keystorePwd, keyProvider, ztsClient, zoneConfig);
+ KeyStore keyStore = updateKeystore(configserverIdentity, keystorePwd, keyProvider, ztsClient, athenzProviderServiceConfig);
sslContextFactory.reload(scf -> {
scf.setKeyStore(keyStore);
scf.setKeyStorePassword(new String(keystorePwd));
diff --git a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java
index 8d3e37e1ebd..c328b8b6c21 100644
--- a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java
+++ b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java
@@ -11,7 +11,6 @@ import com.yahoo.vespa.athenz.identityprovider.api.VespaUniqueInstanceId;
import com.yahoo.vespa.athenz.identityprovider.client.IdentityDocumentSigner;
import com.yahoo.vespa.hosted.athenz.instanceproviderservice.KeyProvider;
import com.yahoo.vespa.hosted.athenz.instanceproviderservice.config.AthenzProviderServiceConfig;
-import com.yahoo.vespa.hosted.athenz.instanceproviderservice.impl.Utils;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Allocation;
@@ -33,14 +32,14 @@ public class IdentityDocumentGenerator {
private final NodeRepository nodeRepository;
private final Zone zone;
private final KeyProvider keyProvider;
- private final AthenzProviderServiceConfig.Zones zoneConfig;
+ private final AthenzProviderServiceConfig athenzProviderServiceConfig;
@Inject
public IdentityDocumentGenerator(AthenzProviderServiceConfig config,
NodeRepository nodeRepository,
Zone zone,
KeyProvider keyProvider) {
- this.zoneConfig = Utils.getZoneConfig(config, zone);
+ this.athenzProviderServiceConfig = config;
this.nodeRepository = nodeRepository;
this.zone = zone;
this.keyProvider = keyProvider;
@@ -62,8 +61,8 @@ public class IdentityDocumentGenerator {
Set<String> ips = new HashSet<>(node.ipAddresses());
- PrivateKey privateKey = keyProvider.getPrivateKey(zoneConfig.secretVersion());
- AthenzService providerService = new AthenzService(zoneConfig.domain(), zoneConfig.serviceName());
+ PrivateKey privateKey = keyProvider.getPrivateKey(athenzProviderServiceConfig.secretVersion());
+ AthenzService providerService = new AthenzService(athenzProviderServiceConfig.domain(), athenzProviderServiceConfig.serviceName());
String configServerHostname = HostName.getLocalhost();
Instant createdAt = Instant.now();
diff --git a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/CkmsKeyProvider.java b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/CkmsKeyProvider.java
index 40003d4ccf3..bc044f12b15 100644
--- a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/CkmsKeyProvider.java
+++ b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/CkmsKeyProvider.java
@@ -14,8 +14,6 @@ import java.security.PublicKey;
import java.util.HashMap;
import java.util.Map;
-import static com.yahoo.vespa.hosted.athenz.instanceproviderservice.impl.Utils.getZoneConfig;
-
/**
* @author mortent
* @author bjorncs
@@ -32,7 +30,7 @@ public class CkmsKeyProvider implements KeyProvider {
Zone zone,
AthenzProviderServiceConfig config) {
this.secretStore = secretStore;
- this.secretName = getZoneConfig(config, zone).secretName();
+ this.secretName = config.secretName();
this.secrets = new HashMap<>();
}
diff --git a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/Utils.java b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/Utils.java
index ad54aa341bf..f52493375f1 100644
--- a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/Utils.java
+++ b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/impl/Utils.java
@@ -3,8 +3,6 @@ package com.yahoo.vespa.hosted.athenz.instanceproviderservice.impl;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
-import com.yahoo.config.provision.Zone;
-import com.yahoo.vespa.hosted.athenz.instanceproviderservice.config.AthenzProviderServiceConfig;
/**
* @author bjorncs
@@ -23,9 +21,4 @@ public class Utils {
return mapper;
}
- public static AthenzProviderServiceConfig.Zones getZoneConfig(AthenzProviderServiceConfig config, Zone zone) {
- String key = zone.environment().value() + "." + zone.region().value();
- return config.zones(key);
- }
-
}
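
The helper removed above is the clearest record of how zone-specific configuration used to be resolved: one AthenzProviderServiceConfig.Zones entry selected by an "<environment>.<region>" key. A minimal sketch of the before/after access pattern follows, assuming the flattened config carries the same fields directly (as the CkmsKeyProvider change above shows for secretName); the sketch class is hypothetical.

import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.athenz.instanceproviderservice.config.AthenzProviderServiceConfig;

// Editor's sketch, not part of the commit.
class ZoneConfigAccessSketch {

    static String secretName(AthenzProviderServiceConfig config, Zone zone) {
        // Before: config.zones(zone.environment().value() + "." + zone.region().value()).secretName()
        // After:  the config is flat, so the zone is no longer part of the lookup.
        return config.secretName();
    }
}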
diff --git a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/TestUtils.java b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/TestUtils.java
index 9271fa74363..4a97ea7b09c 100644
--- a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/TestUtils.java
+++ b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/TestUtils.java
@@ -1,8 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.athenz.instanceproviderservice;
-import com.google.common.collect.ImmutableMap;
-import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.athenz.instanceproviderservice.config.AthenzProviderServiceConfig;
/**
@@ -12,10 +10,9 @@ public class TestUtils {
public static AthenzProviderServiceConfig getAthenzProviderConfig(String domain,
String service,
- String dnsSuffix,
- Zone zone) {
- AthenzProviderServiceConfig.Zones.Builder zoneConfig =
- new AthenzProviderServiceConfig.Zones.Builder()
+ String dnsSuffix) {
+ AthenzProviderServiceConfig.Builder zoneConfig =
+ new AthenzProviderServiceConfig.Builder()
.serviceName(service)
.secretVersion(0)
.domain(domain)
@@ -23,9 +20,7 @@ public class TestUtils {
.ztsUrl("localhost/zts")
.secretName("s3cr3t");
return new AthenzProviderServiceConfig(
- new AthenzProviderServiceConfig.Builder()
- .zones(ImmutableMap.of(zone.environment().value() + "." + zone.region().value(), zoneConfig))
- .athenzCaTrustStore("/dummy/path/to/athenz-ca.jks"));
+ zoneConfig.athenzCaTrustStore("/dummy/path/to/athenz-ca.jks"));
}
}
diff --git a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java
index 0688981a1c7..f496b177bdd 100644
--- a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java
+++ b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java
@@ -78,7 +78,7 @@ public class IdentityDocumentGeneratorTest {
AutoGeneratedKeyProvider keyProvider = new AutoGeneratedKeyProvider();
String dnsSuffix = "vespa.dns.suffix";
- AthenzProviderServiceConfig config = getAthenzProviderConfig("domain", "service", dnsSuffix, ZONE);
+ AthenzProviderServiceConfig config = getAthenzProviderConfig("domain", "service", dnsSuffix);
IdentityDocumentGenerator identityDocumentGenerator =
new IdentityDocumentGenerator(config, nodeRepository, ZONE, keyProvider);
SignedIdentityDocument signedIdentityDocument = identityDocumentGenerator.generateSignedIdentityDocument(containerHostname, IdentityType.TENANT);
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
index 937d511bb09..c19865fafc9 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
@@ -341,7 +341,7 @@ public class DeployState implements ConfigDefinitionStore {
public DeployState build(ValidationParameters validationParameters) {
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
- QueryProfiles queryProfiles = new QueryProfilesBuilder().build(applicationPackage);
+ QueryProfiles queryProfiles = new QueryProfilesBuilder().build(applicationPackage, logger);
SemanticRules semanticRules = new SemanticRuleBuilder().build(applicationPackage);
SearchDocumentModel searchDocumentModel = createSearchDocumentModel(rankProfileRegistry, logger, queryProfiles, validationParameters);
return new DeployState(applicationPackage,
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/FeatureNames.java b/config-model/src/main/java/com/yahoo/searchdefinition/FeatureNames.java
index 2f41b172ab6..1e133d0b8f4 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/FeatureNames.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/FeatureNames.java
@@ -34,6 +34,12 @@ public class FeatureNames {
return name.equals("attribute") || name.equals("constant") || name.equals("query");
}
+ /** Returns true if this is a constant */
+ public static boolean isConstantFeature(Reference reference) {
+ if ( ! isSimpleFeature(reference)) return false;
+ return reference.name().equals("constant");
+ }
+
/**
* Returns the single argument of the given feature name, without any quotes,
* or empty if it is not a valid query, attribute or constant feature name
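
A brief usage sketch for the isConstantFeature predicate added above; "myWeights" is an assumed example argument, and the sketch assumes the same package and imports as FeatureNames so that Reference resolves the same way. Reference.simple(String) returning Optional<Reference> is taken from its use in RankProfile further down.

import java.util.Optional;

// Editor's sketch, not part of the commit.
class IsConstantFeatureSketch {

    static boolean demo() {
        Optional<Reference> ref = Reference.simple("constant(myWeights)");
        // isSimpleFeature(...) accepts attribute(...), constant(...) and query(...);
        // isConstantFeature(...) narrows that to references named "constant".
        return ref.isPresent() && FeatureNames.isConstantFeature(ref.get());
    }
}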
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
index b3853b36aa5..d738929f721 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/RankProfile.java
@@ -756,7 +756,7 @@ public class RankProfile implements Serializable, Cloneable {
for (FieldDescription field : queryProfileType.declaredFields().values()) {
TensorType type = field.getType().asTensorType();
Optional<Reference> feature = Reference.simple(field.getName());
- if ( ! feature.isPresent() || ! feature.get().name().equals("query")) continue;
+ if ( feature.isEmpty() || ! feature.get().name().equals("query")) continue;
TensorType existingType = context.getType(feature.get());
if ( ! Objects.equals(existingType, context.defaultTypeOf(feature.get())))
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java
index bd4daa58253..6481d42446f 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/SearchBuilder.java
@@ -8,7 +8,9 @@ import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.document.DocumentTypeManager;
import com.yahoo.io.IOUtils;
import com.yahoo.io.reader.NamedReader;
+import com.yahoo.search.query.profile.QueryProfile;
import com.yahoo.search.query.profile.QueryProfileRegistry;
+import com.yahoo.search.query.profile.config.QueryProfileXMLReader;
import com.yahoo.searchdefinition.derived.SearchOrderer;
import com.yahoo.searchdefinition.document.SDDocumentType;
import com.yahoo.searchdefinition.parser.ParseException;
@@ -246,7 +248,7 @@ public class SearchBuilder {
DocumentModelBuilder builder = new DocumentModelBuilder(model);
for (Search search : new SearchOrderer().order(searchList)) {
new FieldOperationApplierForSearch().process(search); // TODO: Why is this not in the regular list?
- process(search, deployLogger, new QueryProfiles(queryProfileRegistry), validate);
+ process(search, deployLogger, new QueryProfiles(queryProfileRegistry, deployLogger), validate);
built.add(search);
}
builder.addToModel(searchList);
@@ -394,7 +396,11 @@ public class SearchBuilder {
}
public static SearchBuilder createFromDirectory(String dir) throws IOException, ParseException {
- return createFromDirectory(dir, new RankProfileRegistry(), new QueryProfileRegistry());
+ return createFromDirectory(dir, new RankProfileRegistry());
+ }
+ public static SearchBuilder createFromDirectory(String dir,
+ RankProfileRegistry rankProfileRegistry) throws IOException, ParseException {
+ return createFromDirectory(dir, rankProfileRegistry, createQueryProfileRegistryFromDirectory(dir));
}
public static SearchBuilder createFromDirectory(String dir,
RankProfileRegistry rankProfileRegistry,
@@ -409,6 +415,12 @@ public class SearchBuilder {
return builder;
}
+ private static QueryProfileRegistry createQueryProfileRegistryFromDirectory(String dir) {
+ File queryProfilesDir = new File(dir, "query-profiles");
+ if ( ! queryProfilesDir.exists()) return new QueryProfileRegistry();
+ return new QueryProfileXMLReader().read(queryProfilesDir.toString());
+ }
+
// TODO: The build methods below just call the create methods above - remove
/**
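As a usage sketch under the new behaviour (the directory path below is a made-up example): createFromDirectory now also loads any query profiles found in a query-profiles/ subdirectory of the given directory, instead of always starting from an empty QueryProfileRegistry.

    // Sketch only, not part of the patch; the path is hypothetical.
    static SearchBuilder buildFromDir() throws IOException, ParseException {
        // If src/test/examples/simple/query-profiles/ exists, its profiles are read into
        // the registry used while processing the schemas; otherwise an empty registry
        // is used, as before.
        return SearchBuilder.createFromDirectory("src/test/examples/simple");
    }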
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java
index caf5f0442eb..6991e2b978b 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ConstantTensorTransformer.java
@@ -49,13 +49,13 @@ public class ConstantTensorTransformer extends ExpressionTransformer<RankProfile
}
private ExpressionNode transformConstantReference(ReferenceNode node, RankProfileTransformContext context) {
- Reference constantReference = FeatureNames.asConstantFeature(node.getName());
+ Reference constantReference = node.reference();
+ if ( ! FeatureNames.isConstantFeature(constantReference) && constantReference.isIdentifier())
+ constantReference = FeatureNames.asConstantFeature(node.getName());
+
Value value = context.constants().get(node.getName());
- if (value == null || value.type().rank() == 0) {
- if (context.rankProfile().rankingConstants().get(node.getName()) != null) // Large constants: Transform reference but don't add value
- return new ReferenceNode(constantReference);
- return node;
- }
+ if (value == null || value.type().rank() == 0) return node;
+
TensorValue tensorValue = (TensorValue)value;
String tensorType = tensorValue.asTensor().type().toString();
context.rankProperties().put(constantReference.toString() + ".value", tensorValue.toString());
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ExpressionTransforms.java b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ExpressionTransforms.java
index cbabfffb7a1..6fdf448a39b 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ExpressionTransforms.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/ExpressionTransforms.java
@@ -6,6 +6,7 @@ import com.yahoo.searchlib.rankingexpression.RankingExpression;
import com.yahoo.searchlib.rankingexpression.transform.ConstantDereferencer;
import com.yahoo.searchlib.rankingexpression.transform.ExpressionTransformer;
import com.yahoo.searchlib.rankingexpression.transform.Simplifier;
+import com.yahoo.searchlib.rankingexpression.transform.TensorMaxMinTransformer;
import java.util.List;
@@ -30,7 +31,7 @@ public class ExpressionTransforms {
new ConstantTensorTransformer(),
new FunctionInliner(),
new FunctionShadower(),
- new TensorTransformer(),
+ new TensorMaxMinTransformer(),
new Simplifier());
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/RankProfileTransformContext.java b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/RankProfileTransformContext.java
index 2c0e1eaa56a..630c8644eb1 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/RankProfileTransformContext.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/RankProfileTransformContext.java
@@ -24,19 +24,17 @@ public class RankProfileTransformContext extends TransformContext {
private final ImportedMlModels importedModels;
private final Map<String, RankProfile.RankingExpressionFunction> inlineFunctions;
private final Map<String, String> rankProperties = new HashMap<>();
- private final MapEvaluationTypeContext types;
public RankProfileTransformContext(RankProfile rankProfile,
QueryProfileRegistry queryProfiles,
ImportedMlModels importedModels,
Map<String, Value> constants,
Map<String, RankProfile.RankingExpressionFunction> inlineFunctions) {
- super(constants);
+ super(constants, rankProfile.typeContext(queryProfiles));
this.rankProfile = rankProfile;
this.queryProfiles = queryProfiles;
this.importedModels = importedModels;
this.inlineFunctions = inlineFunctions;
- this.types = rankProfile.typeContext(queryProfiles);
}
public RankProfile rankProfile() { return rankProfile; }
@@ -45,10 +43,4 @@ public class RankProfileTransformContext extends TransformContext {
public Map<String, RankProfile.RankingExpressionFunction> inlineFunctions() { return inlineFunctions; }
public Map<String, String> rankProperties() { return rankProperties; }
- /**
- * Returns the types known in this context. We may have type information for references
- * for which no value is available
- */
- public MapEvaluationTypeContext types() { return types; }
-
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/AddExtraFieldsToDocument.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/AddExtraFieldsToDocument.java
index 233075155b5..e75547a5bb2 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/AddExtraFieldsToDocument.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/AddExtraFieldsToDocument.java
@@ -21,10 +21,15 @@ import com.yahoo.vespa.model.container.search.QueryProfiles;
*/
public class AddExtraFieldsToDocument extends Processor {
- public AddExtraFieldsToDocument(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
+ AddExtraFieldsToDocument(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
super(search, deployLogger, rankProfileRegistry, queryProfiles);
}
+ //TODO This is a temporary hack to avoid producing illegal code for fields that are not wanted anyway.
+ private boolean dirtyLegalFieldNameCheck(String fieldName) {
+ return ! fieldName.contains(".") && !"rankfeatures".equals(fieldName) && !"summaryfeatures".equals(fieldName);
+ }
+
@Override
public void process(boolean validate, boolean documentsOnly) {
SDDocumentType document = search.getDocument();
@@ -32,8 +37,11 @@ public class AddExtraFieldsToDocument extends Processor {
for (Field field : search.extraFieldList()) {
addSdField(search, document, (SDField)field, validate);
}
+ //TODO In Vespa 8, or sooner, stop adding fields from the 'default' summary to the document type altogether.
for (SummaryField field : search.getSummary("default").getSummaryFields()) {
- addSummaryField(search, document, field, validate);
+ if (dirtyLegalFieldNameCheck(field.getName())) {
+ addSummaryField(search, document, field, validate);
+ }
}
}
}
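The effect of the dirtyLegalFieldNameCheck filter above, illustrated on a few example field names (a sketch only, not part of the patch); this is also why the generated rankfeatures/summaryfeatures fields disappear from the derived .cfg test files further down:

    // Sketch only: example field names, not taken from any real schema.
    assert dirtyLegalFieldNameCheck("title");              // plain names still get added
    assert ! dirtyLegalFieldNameCheck("rankfeatures");     // generated summary field, now skipped
    assert ! dirtyLegalFieldNameCheck("summaryfeatures");  // generated summary field, now skipped
    assert ! dirtyLegalFieldNameCheck("mystruct.member");  // dotted names are skipped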
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
index 44c10b0738b..03c8055dd12 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
@@ -221,7 +221,9 @@ public class Admin extends AbstractConfigProducer implements Serializable {
metricsProxyCluster = new MetricsProxyContainerCluster(this, "metrics", deployState);
int index = 0;
for (var host : hosts) {
- var container = new MetricsProxyContainer(metricsProxyCluster, index++, deployState.isHosted());
+ // Send hostname to be used in configId (instead of index), as the sorting of hosts seems to be unstable
+ // between config changes, even when the set of hosts is unchanged.
+ var container = new MetricsProxyContainer(metricsProxyCluster, host.getHostname(), index, deployState.isHosted());
addAndInitializeService(deployState.getDeployLogger(), host, container);
metricsProxyCluster.addContainer(container);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
index e683b70bbde..3bc38cad1d1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
@@ -45,8 +45,8 @@ public class MetricsProxyContainer extends Container implements
private final boolean isHostedVespa;
- public MetricsProxyContainer(AbstractConfigProducer parent, int index, boolean isHostedVespa) {
- super(parent, "metricsproxy." + index, index);
+ public MetricsProxyContainer(AbstractConfigProducer parent, String hostname, int index, boolean isHostedVespa) {
+ super(parent, hostname, index);
this.isHostedVespa = isHostedVespa;
setProp("clustertype", "admin");
setProp("index", String.valueOf(index));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
index fcc8cc8fa41..aa793b3c6a2 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
@@ -5,7 +5,6 @@ import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.ConfigModelContext;
import com.yahoo.config.model.api.ConfigServerSpec;
import com.yahoo.config.model.deploy.DeployState;
-import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.model.HostResource;
@@ -22,7 +21,6 @@ import org.w3c.dom.Element;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
-import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
@@ -33,8 +31,6 @@ import java.util.stream.Collectors;
*/
public class DomAdminV4Builder extends DomAdminBuilderBase {
- private ApplicationId ZONE_APPLICATION_ID = ApplicationId.from("hosted-vespa", "routing", "default");
-
private final Collection<ContainerModel> containerModels;
private final ConfigModelContext context;
@@ -134,34 +130,17 @@ public class DomAdminV4Builder extends DomAdminBuilderBase {
* @param minHostsPerContainerCluster the desired number of hosts per cluster
*/
private List<HostResource> pickContainerHostsForSlobrok(int count, int minHostsPerContainerCluster) {
- Collection<ContainerModel> containerModelsWithSlobrok = containerModels.stream()
- .filter(this::shouldHaveSlobrok)
- .collect(Collectors.toList());
int hostsPerCluster = (int) Math.max(minHostsPerContainerCluster,
- Math.ceil((double) count / containerModelsWithSlobrok.size()));
+ Math.ceil((double) count / containerModels.size()));
// Pick from all container clusters to make sure we don't lose all nodes at once if some clusters are removed.
// This will overshoot the desired size (due to ceil and picking at least one node per cluster).
List<HostResource> picked = new ArrayList<>();
- for (ContainerModel containerModel : containerModelsWithSlobrok)
+ for (ContainerModel containerModel : containerModels)
picked.addAll(pickContainerHostsFrom(containerModel, hostsPerCluster));
return picked;
}
- private boolean shouldHaveSlobrok(ContainerModel containerModel) {
- // Avoid Slobroks on node-admin container cluster, as node-admin is migrating
- // TODO: Remove after removing tenant hosts from zone-app
-
- ApplicationId applicationId = context.getDeployState().getProperties().applicationId();
- if (!applicationId.equals(ZONE_APPLICATION_ID)) {
- return true;
- }
-
- // aka clustername, aka application-model's ClusterId
- String clustername = containerModel.getCluster().getName();
- return !Objects.equals(clustername, "node-admin");
- }
-
private List<HostResource> pickContainerHostsFrom(ContainerModel model, int count) {
boolean retired = true;
List<HostResource> picked = sortedContainerHostsFrom(model, count, !retired);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
index 47adac637ee..a8f0f5941b0 100755
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
@@ -37,6 +37,7 @@ import com.yahoo.search.config.QrStartConfig;
import com.yahoo.search.pagetemplates.PageTemplatesConfig;
import com.yahoo.search.query.profile.config.QueryProfilesConfig;
import com.yahoo.vespa.configdefinition.IlscriptsConfig;
+import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.model.PortsMeta;
import com.yahoo.vespa.model.Service;
import com.yahoo.vespa.model.admin.monitoring.Monitoring;
@@ -63,6 +64,7 @@ import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -190,6 +192,7 @@ public abstract class ContainerCluster<CONTAINER extends Container>
addSimpleComponent("com.yahoo.container.handler.VipStatus");
addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName());
addJaxProviders();
+ addTestrunnerComponentsIfTester(deployState);
}
public void setZone(Zone zone) {
@@ -204,6 +207,11 @@ public abstract class ContainerCluster<CONTAINER extends Container>
addVipHandler();
}
+ private void addTestrunnerComponentsIfTester(DeployState deployState) {
+ if (deployState.isHosted() && deployState.getProperties().applicationId().instance().isTester())
+ addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("vespa-testrunner-components-jar-with-dependencies.jar")));
+ }
+
public final void addDefaultHandlersExceptStatus() {
addDefaultRootHandler();
addMetricStateHandler();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java b/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java
index 526b2abe1e1..c7114178ad6 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfiles.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container.search;
+import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.search.query.profile.BackedOverridableQueryProfile;
import com.yahoo.search.query.profile.QueryProfile;
import com.yahoo.search.query.profile.QueryProfileRegistry;
@@ -12,6 +13,8 @@ import com.yahoo.search.query.profile.config.QueryProfilesConfig;
import java.io.Serializable;
import java.util.*;
import java.util.Map.Entry;
+import java.util.logging.Level;
+import java.util.stream.Collectors;
/**
* Owns the query profiles and query profile types to be handed to the qrs nodes.
@@ -29,8 +32,9 @@ public class QueryProfiles implements Serializable, QueryProfilesConfig.Producer
* @param registry the registry containing the query profiles and types of this.
* The given registry cannot be frozen on calling this.
*/
- public QueryProfiles(QueryProfileRegistry registry) {
+ public QueryProfiles(QueryProfileRegistry registry, DeployLogger logger) {
this.registry = registry;
+ validate(registry, logger);
}
public QueryProfiles() {
@@ -41,6 +45,28 @@ public class QueryProfiles implements Serializable, QueryProfilesConfig.Producer
return registry;
}
+ /** Emits warnings/hints on some common configuration errors */
+ private void validate(QueryProfileRegistry registry, DeployLogger logger) {
+ Set<String> tensorFields = new HashSet<>();
+ for (QueryProfileType type : registry.getTypeRegistry().allComponents()) {
+ for (var fieldEntry : type.fields().entrySet()) {
+ if (fieldEntry.getValue().getType().asTensorType().rank() > 0)
+ tensorFields.add(fieldEntry.getKey());
+ }
+ }
+
+ if ( registry.getTypeRegistry().hasApplicationTypes() && registry.allComponents().isEmpty()) {
+ logger.log(Level.WARNING, "This application defines query profile types, but has " +
+ "no query profiles referencing them, so they have no effect. " +
+ (tensorFields.isEmpty()
+ ? ""
+ : "In particular, the tensors (" + String.join(", ", tensorFields) +
+ ") will be interpreted as strings, not tensors if sent in requests. ") +
+ "See https://docs.vespa.ai/documentation/query-profiles.html");
+ }
+
+ }
+
@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
for (QueryProfile profile : registry.allComponents()) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfilesBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfilesBuilder.java
index b85cb88bf2e..b832c1bbdcd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfilesBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/search/QueryProfilesBuilder.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container.search;
+import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.io.reader.NamedReader;
import com.yahoo.search.query.profile.config.QueryProfileXMLReader;
import com.yahoo.config.application.api.ApplicationPackage;
@@ -17,13 +18,14 @@ import java.util.List;
public class QueryProfilesBuilder {
/** Build the set of query profiles for an application package */
- public QueryProfiles build(ApplicationPackage applicationPackage) {
+ public QueryProfiles build(ApplicationPackage applicationPackage, DeployLogger logger) {
List<NamedReader> queryProfileTypeFiles = null;
List<NamedReader> queryProfileFiles = null;
try {
queryProfileTypeFiles = applicationPackage.getQueryProfileTypeFiles();
queryProfileFiles = applicationPackage.getQueryProfileFiles();
- return new QueryProfiles(new QueryProfileXMLReader().read(queryProfileTypeFiles, queryProfileFiles));
+ return new QueryProfiles(new QueryProfileXMLReader().read(queryProfileTypeFiles, queryProfileFiles),
+ logger);
}
finally {
NamedReader.closeAll(queryProfileTypeFiles);
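A rough sketch of how the new validation is reached from an application package, using only the signatures in this diff (applicationPackage and deployLogger are assumed to be available from the deploy context):

    // Sketch only, not part of the patch.
    // The builder reads the application's query profile and query profile type files and
    // passes the registry together with the logger to QueryProfiles, whose constructor
    // now runs validate() and may emit the WARNING shown in the QueryProfiles change above.
    QueryProfiles profiles = new QueryProfilesBuilder().build(applicationPackage, deployLogger);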
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index b7c96d63755..642f882f3ed 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -223,13 +223,6 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
});
}
- private boolean zoneHasActiveRotation(Zone zone) {
- return app.getDeployment()
- .map(DeploymentSpec::fromXml)
- .map(spec -> zoneHasActiveRotation(zone, spec))
- .orElse(true);
- }
-
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
return spec.zones().stream()
.anyMatch(declaredZone -> declaredZone.deploysTo(zone.environment(), Optional.of(zone.region())) &&
diff --git a/config-model/src/test/configmodel/types/documentmanager.cfg b/config-model/src/test/configmodel/types/documentmanager.cfg
index a6a56bf65c1..631886181d7 100644
--- a/config-model/src/test/configmodel/types/documentmanager.cfg
+++ b/config-model/src/test/configmodel/types/documentmanager.cfg
@@ -214,12 +214,6 @@ datatype[26].structtype[0].field[26].detailedtype ""
datatype[26].structtype[0].field[27].name "other"
datatype[26].structtype[0].field[27].datatype 4
datatype[26].structtype[0].field[27].detailedtype ""
-datatype[26].structtype[0].field[28].name "rankfeatures"
-datatype[26].structtype[0].field[28].datatype 2
-datatype[26].structtype[0].field[28].detailedtype ""
-datatype[26].structtype[0].field[29].name "summaryfeatures"
-datatype[26].structtype[0].field[29].datatype 2
-datatype[26].structtype[0].field[29].detailedtype ""
datatype[27].id 348447225
datatype[27].structtype[0].name "types.body"
datatype[27].structtype[0].version 0
diff --git a/config-model/src/test/configmodel/types/documenttypes.cfg b/config-model/src/test/configmodel/types/documenttypes.cfg
index 7ff795bc33c..0a78e18dd40 100644
--- a/config-model/src/test/configmodel/types/documenttypes.cfg
+++ b/config-model/src/test/configmodel/types/documenttypes.cfg
@@ -585,16 +585,6 @@ documenttype[0].datatype[25].sstruct.field[27].id 2443357
documenttype[0].datatype[25].sstruct.field[27].id_v6 903806222
documenttype[0].datatype[25].sstruct.field[27].datatype 4
documenttype[0].datatype[25].sstruct.field[27].detailedtype ""
-documenttype[0].datatype[25].sstruct.field[28].name "rankfeatures"
-documenttype[0].datatype[25].sstruct.field[28].id 1883197392
-documenttype[0].datatype[25].sstruct.field[28].id_v6 699950698
-documenttype[0].datatype[25].sstruct.field[28].datatype 2
-documenttype[0].datatype[25].sstruct.field[28].detailedtype ""
-documenttype[0].datatype[25].sstruct.field[29].name "summaryfeatures"
-documenttype[0].datatype[25].sstruct.field[29].id 1840337115
-documenttype[0].datatype[25].sstruct.field[29].id_v6 1981648971
-documenttype[0].datatype[25].sstruct.field[29].datatype 2
-documenttype[0].datatype[25].sstruct.field[29].detailedtype ""
documenttype[0].datatype[26].id 348447225
documenttype[0].datatype[26].type STRUCT
documenttype[0].datatype[26].array.element.id 0
diff --git a/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg b/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg
index 3073dd55fba..a24fa03a834 100644
--- a/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg
+++ b/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg
@@ -20,16 +20,6 @@ documenttype[0].datatype[0].sstruct.compression.type NONE
documenttype[0].datatype[0].sstruct.compression.level 0
documenttype[0].datatype[0].sstruct.compression.threshold 95
documenttype[0].datatype[0].sstruct.compression.minsize 200
-documenttype[0].datatype[0].sstruct.field[0].name "rankfeatures"
-documenttype[0].datatype[0].sstruct.field[0].id 1883197392
-documenttype[0].datatype[0].sstruct.field[0].id_v6 699950698
-documenttype[0].datatype[0].sstruct.field[0].datatype 2
-documenttype[0].datatype[0].sstruct.field[0].detailedtype ""
-documenttype[0].datatype[0].sstruct.field[1].name "summaryfeatures"
-documenttype[0].datatype[0].sstruct.field[1].id 1840337115
-documenttype[0].datatype[0].sstruct.field[1].id_v6 1981648971
-documenttype[0].datatype[0].sstruct.field[1].datatype 2
-documenttype[0].datatype[0].sstruct.field[1].detailedtype ""
documenttype[0].datatype[1].id 549879017
documenttype[0].datatype[1].type STRUCT
documenttype[0].datatype[1].array.element.id 0
@@ -86,16 +76,6 @@ documenttype[1].datatype[1].sstruct.field[0].id 819293364
documenttype[1].datatype[1].sstruct.field[0].id_v6 1634907905
documenttype[1].datatype[1].sstruct.field[0].datatype -1368624373
documenttype[1].datatype[1].sstruct.field[0].detailedtype ""
-documenttype[1].datatype[1].sstruct.field[1].name "rankfeatures"
-documenttype[1].datatype[1].sstruct.field[1].id 1883197392
-documenttype[1].datatype[1].sstruct.field[1].id_v6 699950698
-documenttype[1].datatype[1].sstruct.field[1].datatype 2
-documenttype[1].datatype[1].sstruct.field[1].detailedtype ""
-documenttype[1].datatype[1].sstruct.field[2].name "summaryfeatures"
-documenttype[1].datatype[1].sstruct.field[2].id 1840337115
-documenttype[1].datatype[1].sstruct.field[2].id_v6 1981648971
-documenttype[1].datatype[1].sstruct.field[2].datatype 2
-documenttype[1].datatype[1].sstruct.field[2].detailedtype ""
documenttype[1].datatype[2].id 348447225
documenttype[1].datatype[2].type STRUCT
documenttype[1].datatype[2].array.element.id 0
@@ -111,4 +91,4 @@ documenttype[1].datatype[2].sstruct.compression.type NONE
documenttype[1].datatype[2].sstruct.compression.level 0
documenttype[1].datatype[2].sstruct.compression.threshold 95
documenttype[1].datatype[2].sstruct.compression.minsize 200
-documenttype[1].fieldsets{[document]}.fields[0] "doc_field" \ No newline at end of file
+documenttype[1].fieldsets{[document]}.fields[0] "doc_field"
diff --git a/config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg b/config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg
index 9bfba178915..e624ffdf7f5 100644
--- a/config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg
+++ b/config-model/src/test/configmodel/types/references/documentmanager_ref_to_self_type.cfg
@@ -24,12 +24,6 @@ datatype[].structtype[].compressminsize 800
datatype[].structtype[].field[].name "self_ref"
datatype[].structtype[].field[].datatype -1895788438
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -255288561
datatype[].structtype[].name "ad.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg b/config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg
index 81498f03139..90ddb8c9f8d 100644
--- a/config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg
+++ b/config-model/src/test/configmodel/types/references/documentmanager_refs_to_other_types.cfg
@@ -29,12 +29,6 @@ datatype[3].structtype[0].field[0].detailedtype ""
datatype[3].structtype[0].field[1].name "person_ref"
datatype[3].structtype[0].field[1].datatype 542332920
datatype[3].structtype[0].field[1].detailedtype ""
-datatype[3].structtype[0].field[2].name "rankfeatures"
-datatype[3].structtype[0].field[2].datatype 2
-datatype[3].structtype[0].field[2].detailedtype ""
-datatype[3].structtype[0].field[3].name "summaryfeatures"
-datatype[3].structtype[0].field[3].datatype 2
-datatype[3].structtype[0].field[3].detailedtype ""
datatype[4].id -255288561
datatype[4].structtype[0].name "ad.body"
datatype[4].structtype[0].version 0
@@ -58,12 +52,6 @@ datatype[6].structtype[0].compresstype NONE
datatype[6].structtype[0].compresslevel 0
datatype[6].structtype[0].compressthreshold 95
datatype[6].structtype[0].compressminsize 800
-datatype[6].structtype[0].field[0].name "rankfeatures"
-datatype[6].structtype[0].field[0].datatype 2
-datatype[6].structtype[0].field[0].detailedtype ""
-datatype[6].structtype[0].field[1].name "summaryfeatures"
-datatype[6].structtype[0].field[1].datatype 2
-datatype[6].structtype[0].field[1].detailedtype ""
datatype[7].id 1448849794
datatype[7].structtype[0].name "campaign.body"
datatype[7].structtype[0].version 0
@@ -85,12 +73,6 @@ datatype[9].structtype[0].compresstype NONE
datatype[9].structtype[0].compresslevel 0
datatype[9].structtype[0].compressthreshold 95
datatype[9].structtype[0].compressminsize 800
-datatype[9].structtype[0].field[0].name "rankfeatures"
-datatype[9].structtype[0].field[0].datatype 2
-datatype[9].structtype[0].field[0].detailedtype ""
-datatype[9].structtype[0].field[1].name "summaryfeatures"
-datatype[9].structtype[0].field[1].datatype 2
-datatype[9].structtype[0].field[1].detailedtype ""
datatype[10].id -2003767395
datatype[10].structtype[0].name "person.body"
datatype[10].structtype[0].version 0
diff --git a/config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg b/config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg
index 181da48eac4..1807adeb68d 100644
--- a/config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg
+++ b/config-model/src/test/configmodel/types/references/documentmanager_refs_to_same_type.cfg
@@ -27,12 +27,6 @@ datatype[2].structtype[0].field[0].detailedtype ""
datatype[2].structtype[0].field[1].name "other_campaign_ref"
datatype[2].structtype[0].field[1].datatype 595216861
datatype[2].structtype[0].field[1].detailedtype ""
-datatype[2].structtype[0].field[2].name "rankfeatures"
-datatype[2].structtype[0].field[2].datatype 2
-datatype[2].structtype[0].field[2].detailedtype ""
-datatype[2].structtype[0].field[3].name "summaryfeatures"
-datatype[2].structtype[0].field[3].datatype 2
-datatype[2].structtype[0].field[3].detailedtype ""
datatype[3].id -255288561
datatype[3].structtype[0].name "ad.body"
datatype[3].structtype[0].version 0
@@ -56,12 +50,6 @@ datatype[5].structtype[0].compresstype NONE
datatype[5].structtype[0].compresslevel 0
datatype[5].structtype[0].compressthreshold 95
datatype[5].structtype[0].compressminsize 800
-datatype[5].structtype[0].field[0].name "rankfeatures"
-datatype[5].structtype[0].field[0].datatype 2
-datatype[5].structtype[0].field[0].detailedtype ""
-datatype[5].structtype[0].field[1].name "summaryfeatures"
-datatype[5].structtype[0].field[1].datatype 2
-datatype[5].structtype[0].field[1].detailedtype ""
datatype[6].id 1448849794
datatype[6].structtype[0].name "campaign.body"
datatype[6].structtype[0].version 0
diff --git a/config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg b/config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg
index ca4553f989c..d07b16431be 100644
--- a/config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg
+++ b/config-model/src/test/configmodel/types/references/documenttypes_ref_to_self_type.cfg
@@ -25,16 +25,6 @@ documenttype[].datatype[].sstruct.field[].id 852207313
documenttype[].datatype[].sstruct.field[].id_v6 768042879
documenttype[].datatype[].sstruct.field[].datatype -1895788438
documenttype[].datatype[].sstruct.field[].detailedtype ""
-documenttype[].datatype[].sstruct.field[].name "rankfeatures"
-documenttype[].datatype[].sstruct.field[].id 1883197392
-documenttype[].datatype[].sstruct.field[].id_v6 699950698
-documenttype[].datatype[].sstruct.field[].datatype 2
-documenttype[].datatype[].sstruct.field[].detailedtype ""
-documenttype[].datatype[].sstruct.field[].name "summaryfeatures"
-documenttype[].datatype[].sstruct.field[].id 1840337115
-documenttype[].datatype[].sstruct.field[].id_v6 1981648971
-documenttype[].datatype[].sstruct.field[].datatype 2
-documenttype[].datatype[].sstruct.field[].detailedtype ""
documenttype[].datatype[].id -255288561
documenttype[].datatype[].type STRUCT
documenttype[].datatype[].array.element.id 0
diff --git a/config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg b/config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg
index 3492cd2632b..e3002074d04 100644
--- a/config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg
+++ b/config-model/src/test/configmodel/types/references/documenttypes_refs_to_other_types.cfg
@@ -30,16 +30,6 @@ documenttype[0].datatype[0].sstruct.field[1].id 100779805
documenttype[0].datatype[0].sstruct.field[1].id_v6 907695193
documenttype[0].datatype[0].sstruct.field[1].datatype 542332920
documenttype[0].datatype[0].sstruct.field[1].detailedtype ""
-documenttype[0].datatype[0].sstruct.field[2].name "rankfeatures"
-documenttype[0].datatype[0].sstruct.field[2].id 1883197392
-documenttype[0].datatype[0].sstruct.field[2].id_v6 699950698
-documenttype[0].datatype[0].sstruct.field[2].datatype 2
-documenttype[0].datatype[0].sstruct.field[2].detailedtype ""
-documenttype[0].datatype[0].sstruct.field[3].name "summaryfeatures"
-documenttype[0].datatype[0].sstruct.field[3].id 1840337115
-documenttype[0].datatype[0].sstruct.field[3].id_v6 1981648971
-documenttype[0].datatype[0].sstruct.field[3].datatype 2
-documenttype[0].datatype[0].sstruct.field[3].detailedtype ""
documenttype[0].datatype[1].id -255288561
documenttype[0].datatype[1].type STRUCT
documenttype[0].datatype[1].array.element.id 0
@@ -82,16 +72,6 @@ documenttype[1].datatype[0].sstruct.compression.type NONE
documenttype[1].datatype[0].sstruct.compression.level 0
documenttype[1].datatype[0].sstruct.compression.threshold 95
documenttype[1].datatype[0].sstruct.compression.minsize 200
-documenttype[1].datatype[0].sstruct.field[0].name "rankfeatures"
-documenttype[1].datatype[0].sstruct.field[0].id 1883197392
-documenttype[1].datatype[0].sstruct.field[0].id_v6 699950698
-documenttype[1].datatype[0].sstruct.field[0].datatype 2
-documenttype[1].datatype[0].sstruct.field[0].detailedtype ""
-documenttype[1].datatype[0].sstruct.field[1].name "summaryfeatures"
-documenttype[1].datatype[0].sstruct.field[1].id 1840337115
-documenttype[1].datatype[0].sstruct.field[1].id_v6 1981648971
-documenttype[1].datatype[0].sstruct.field[1].datatype 2
-documenttype[1].datatype[0].sstruct.field[1].detailedtype ""
documenttype[1].datatype[1].id 1448849794
documenttype[1].datatype[1].type STRUCT
documenttype[1].datatype[1].array.element.id 0
@@ -128,16 +108,6 @@ documenttype[2].datatype[0].sstruct.compression.type NONE
documenttype[2].datatype[0].sstruct.compression.level 0
documenttype[2].datatype[0].sstruct.compression.threshold 95
documenttype[2].datatype[0].sstruct.compression.minsize 200
-documenttype[2].datatype[0].sstruct.field[0].name "rankfeatures"
-documenttype[2].datatype[0].sstruct.field[0].id 1883197392
-documenttype[2].datatype[0].sstruct.field[0].id_v6 699950698
-documenttype[2].datatype[0].sstruct.field[0].datatype 2
-documenttype[2].datatype[0].sstruct.field[0].detailedtype ""
-documenttype[2].datatype[0].sstruct.field[1].name "summaryfeatures"
-documenttype[2].datatype[0].sstruct.field[1].id 1840337115
-documenttype[2].datatype[0].sstruct.field[1].id_v6 1981648971
-documenttype[2].datatype[0].sstruct.field[1].datatype 2
-documenttype[2].datatype[0].sstruct.field[1].detailedtype ""
documenttype[2].datatype[1].id -2003767395
documenttype[2].datatype[1].type STRUCT
documenttype[2].datatype[1].array.element.id 0
diff --git a/config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg b/config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg
index 40743213d9e..d2759d86715 100644
--- a/config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg
+++ b/config-model/src/test/configmodel/types/references/documenttypes_refs_to_same_type.cfg
@@ -30,16 +30,6 @@ documenttype[0].datatype[0].sstruct.field[1].id 874751172
documenttype[0].datatype[0].sstruct.field[1].id_v6 895644372
documenttype[0].datatype[0].sstruct.field[1].datatype 595216861
documenttype[0].datatype[0].sstruct.field[1].detailedtype ""
-documenttype[0].datatype[0].sstruct.field[2].name "rankfeatures"
-documenttype[0].datatype[0].sstruct.field[2].id 1883197392
-documenttype[0].datatype[0].sstruct.field[2].id_v6 699950698
-documenttype[0].datatype[0].sstruct.field[2].datatype 2
-documenttype[0].datatype[0].sstruct.field[2].detailedtype ""
-documenttype[0].datatype[0].sstruct.field[3].name "summaryfeatures"
-documenttype[0].datatype[0].sstruct.field[3].id 1840337115
-documenttype[0].datatype[0].sstruct.field[3].id_v6 1981648971
-documenttype[0].datatype[0].sstruct.field[3].datatype 2
-documenttype[0].datatype[0].sstruct.field[3].detailedtype ""
documenttype[0].datatype[1].id -255288561
documenttype[0].datatype[1].type STRUCT
documenttype[0].datatype[1].array.element.id 0
@@ -80,16 +70,6 @@ documenttype[1].datatype[0].sstruct.compression.type NONE
documenttype[1].datatype[0].sstruct.compression.level 0
documenttype[1].datatype[0].sstruct.compression.threshold 95
documenttype[1].datatype[0].sstruct.compression.minsize 200
-documenttype[1].datatype[0].sstruct.field[0].name "rankfeatures"
-documenttype[1].datatype[0].sstruct.field[0].id 1883197392
-documenttype[1].datatype[0].sstruct.field[0].id_v6 699950698
-documenttype[1].datatype[0].sstruct.field[0].datatype 2
-documenttype[1].datatype[0].sstruct.field[0].detailedtype ""
-documenttype[1].datatype[0].sstruct.field[1].name "summaryfeatures"
-documenttype[1].datatype[0].sstruct.field[1].id 1840337115
-documenttype[1].datatype[0].sstruct.field[1].id_v6 1981648971
-documenttype[1].datatype[0].sstruct.field[1].datatype 2
-documenttype[1].datatype[0].sstruct.field[1].detailedtype ""
documenttype[1].datatype[1].id 1448849794
documenttype[1].datatype[1].type STRUCT
documenttype[1].datatype[1].array.element.id 0
diff --git a/config-model/src/test/derived/advanced/documentmanager.cfg b/config-model/src/test/derived/advanced/documentmanager.cfg
index ee425d0d719..a0a59fbf7ac 100644
--- a/config-model/src/test/derived/advanced/documentmanager.cfg
+++ b/config-model/src/test/derived/advanced/documentmanager.cfg
@@ -12,8 +12,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "y"
datatype[].structtype[].field[].datatype 0
datatype[].structtype[].field[].detailedtype ""
-datatype[].id -1486737430
-datatype[].arraytype[].datatype 2
datatype[].id -1337915045
datatype[].structtype[].name "advanced.header"
datatype[].structtype[].version 0
@@ -66,21 +64,9 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "title_s"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "location.position"
-datatype[].structtype[].field[].datatype -1486737430
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "location.distance"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "mysummary"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -704605648
datatype[].structtype[].name "advanced.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/advanced/index-info.cfg b/config-model/src/test/derived/advanced/index-info.cfg
index 07b58814ea3..162a677be67 100644
--- a/config-model/src/test/derived/advanced/index-info.cfg
+++ b/config-model/src/test/derived/advanced/index-info.cfg
@@ -37,14 +37,6 @@ indexinfo[].command[].indexname "location"
indexinfo[].command[].command "default-position"
indexinfo[].command[].indexname "location"
indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "location.distance"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "location.distance"
-indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "location.position"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "location.position"
-indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "location_zcurve"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "location_zcurve"
@@ -79,10 +71,6 @@ indexinfo[].command[].indexname "product3"
indexinfo[].command[].command "stem:BEST"
indexinfo[].command[].indexname "product3"
indexinfo[].command[].command "plain-tokens"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "title"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "title"
diff --git a/config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg b/config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg
index 4d81b2e56ba..fae6bd46ad7 100644
--- a/config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg
+++ b/config-model/src/test/derived/annotationsimplicitstruct/documentmanager.cfg
@@ -29,12 +29,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -1503592268
datatype[].structtype[].name "annotationsimplicitstruct.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/annotationsimplicitstruct/index-info.cfg b/config-model/src/test/derived/annotationsimplicitstruct/index-info.cfg
index 90b9f1bd32c..f1e8326a3a6 100644
--- a/config-model/src/test/derived/annotationsimplicitstruct/index-info.cfg
+++ b/config-model/src/test/derived/annotationsimplicitstruct/index-info.cfg
@@ -3,7 +3,3 @@ indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/annotationsinheritance/documentmanager.cfg b/config-model/src/test/derived/annotationsinheritance/documentmanager.cfg
index 5dd3977e55e..21baed26dbf 100644
--- a/config-model/src/test/derived/annotationsinheritance/documentmanager.cfg
+++ b/config-model/src/test/derived/annotationsinheritance/documentmanager.cfg
@@ -94,12 +94,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 1181354668
datatype[].structtype[].name "annotationsinheritance.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/annotationsinheritance/index-info.cfg b/config-model/src/test/derived/annotationsinheritance/index-info.cfg
index 2c89b66f061..631bebd4468 100644
--- a/config-model/src/test/derived/annotationsinheritance/index-info.cfg
+++ b/config-model/src/test/derived/annotationsinheritance/index-info.cfg
@@ -3,7 +3,3 @@ indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg b/config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg
index 7ca4a4f0d6e..3ef71148f12 100644
--- a/config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg
+++ b/config-model/src/test/derived/annotationsinheritance2/documentmanager.cfg
@@ -57,12 +57,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 1375438150
datatype[].structtype[].name "annotationsinheritance2.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/annotationsinheritance2/index-info.cfg b/config-model/src/test/derived/annotationsinheritance2/index-info.cfg
index 51527ae2ef9..804227cb88f 100644
--- a/config-model/src/test/derived/annotationsinheritance2/index-info.cfg
+++ b/config-model/src/test/derived/annotationsinheritance2/index-info.cfg
@@ -3,7 +3,3 @@ indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg b/config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg
index ffc3d4f439f..e9ec2cb3715 100644
--- a/config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg
+++ b/config-model/src/test/derived/annotationspolymorphy/documentmanager.cfg
@@ -31,12 +31,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -570750959
datatype[].structtype[].name "annotationspolymorphy.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/annotationspolymorphy/index-info.cfg b/config-model/src/test/derived/annotationspolymorphy/index-info.cfg
index 056cc4dc570..be02d9ab314 100644
--- a/config-model/src/test/derived/annotationspolymorphy/index-info.cfg
+++ b/config-model/src/test/derived/annotationspolymorphy/index-info.cfg
@@ -3,7 +3,3 @@ indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/annotationsreference/documentmanager.cfg b/config-model/src/test/derived/annotationsreference/documentmanager.cfg
index c7d2d5c8753..6526f56a906 100644
--- a/config-model/src/test/derived/annotationsreference/documentmanager.cfg
+++ b/config-model/src/test/derived/annotationsreference/documentmanager.cfg
@@ -65,12 +65,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 1692909067
datatype[].structtype[].name "annotationsreference.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/annotationsreference/index-info.cfg b/config-model/src/test/derived/annotationsreference/index-info.cfg
index dafe0e292f1..ab99342f4c0 100644
--- a/config-model/src/test/derived/annotationsreference/index-info.cfg
+++ b/config-model/src/test/derived/annotationsreference/index-info.cfg
@@ -3,7 +3,3 @@ indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/annotationssimple/documentmanager.cfg b/config-model/src/test/derived/annotationssimple/documentmanager.cfg
index 7d00cc4fc1a..d32f0addceb 100644
--- a/config-model/src/test/derived/annotationssimple/documentmanager.cfg
+++ b/config-model/src/test/derived/annotationssimple/documentmanager.cfg
@@ -19,12 +19,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -682121732
datatype[].structtype[].name "annotationssimple.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/annotationssimple/index-info.cfg b/config-model/src/test/derived/annotationssimple/index-info.cfg
index d6dbcba4217..8177896c431 100644
--- a/config-model/src/test/derived/annotationssimple/index-info.cfg
+++ b/config-model/src/test/derived/annotationssimple/index-info.cfg
@@ -3,7 +3,3 @@ indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sddocname"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/annotationsstruct/documentmanager.cfg b/config-model/src/test/derived/annotationsstruct/documentmanager.cfg
index 8677019593c..c91b5c5e97e 100644
--- a/config-model/src/test/derived/annotationsstruct/documentmanager.cfg
+++ b/config-model/src/test/derived/annotationsstruct/documentmanager.cfg
@@ -39,12 +39,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -1180029319
datatype[].structtype[].name "annotationsstruct.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/annotationsstructarray/documentmanager.cfg b/config-model/src/test/derived/annotationsstructarray/documentmanager.cfg
index 28fa9f9dbc0..22b951b1b5d 100644
--- a/config-model/src/test/derived/annotationsstructarray/documentmanager.cfg
+++ b/config-model/src/test/derived/annotationsstructarray/documentmanager.cfg
@@ -41,12 +41,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 1616435858
datatype[].structtype[].name "annotationsstructarray.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/array_of_struct_attribute/index-info.cfg b/config-model/src/test/derived/array_of_struct_attribute/index-info.cfg
index d2989f44e04..99942d83f91 100644
--- a/config-model/src/test/derived/array_of_struct_attribute/index-info.cfg
+++ b/config-model/src/test/derived/array_of_struct_attribute/index-info.cfg
@@ -19,7 +19,3 @@ indexinfo[0].command[8].indexname "elem_array"
indexinfo[0].command[8].command "index"
indexinfo[0].command[9].indexname "elem_array"
indexinfo[0].command[9].command "multivalue"
-indexinfo[0].command[10].indexname "rankfeatures"
-indexinfo[0].command[10].command "index"
-indexinfo[0].command[11].indexname "summaryfeatures"
-indexinfo[0].command[11].command "index" \ No newline at end of file
diff --git a/config-model/src/test/derived/arrays/documentmanager.cfg b/config-model/src/test/derived/arrays/documentmanager.cfg
index 58bdb56a8dc..a2d8e2e78b4 100644
--- a/config-model/src/test/derived/arrays/documentmanager.cfg
+++ b/config-model/src/test/derived/arrays/documentmanager.cfg
@@ -42,12 +42,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "c"
datatype[].structtype[].field[].datatype 1328286588
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -1747896808
datatype[].structtype[].name "arrays.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/arrays/index-info.cfg b/config-model/src/test/derived/arrays/index-info.cfg
index b18f1eaf55f..e2a0ea9dedc 100644
--- a/config-model/src/test/derived/arrays/index-info.cfg
+++ b/config-model/src/test/derived/arrays/index-info.cfg
@@ -55,10 +55,6 @@ indexinfo[].command[].indexname "c"
indexinfo[].command[].command "normalize"
indexinfo[].command[].indexname "c"
indexinfo[].command[].command "plain-tokens"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "default"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "default"
diff --git a/config-model/src/test/derived/attributeprefetch/documentmanager.cfg b/config-model/src/test/derived/attributeprefetch/documentmanager.cfg
index 946003f3aa9..e27c72fbe50 100644
--- a/config-model/src/test/derived/attributeprefetch/documentmanager.cfg
+++ b/config-model/src/test/derived/attributeprefetch/documentmanager.cfg
@@ -109,12 +109,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "wsstring"
datatype[].structtype[].field[].datatype 1328286588
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 932425403
datatype[].structtype[].name "prefetch.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/attributeprefetch/index-info.cfg b/config-model/src/test/derived/attributeprefetch/index-info.cfg
index a54010e24df..a113e1f1b20 100644
--- a/config-model/src/test/derived/attributeprefetch/index-info.cfg
+++ b/config-model/src/test/derived/attributeprefetch/index-info.cfg
@@ -109,7 +109,3 @@ indexinfo[].command[].indexname "wsstring"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "wsstring"
indexinfo[].command[].command "attribute"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/attributes/index-info.cfg b/config-model/src/test/derived/attributes/index-info.cfg
index df33a41b066..4b06e8dec36 100644
--- a/config-model/src/test/derived/attributes/index-info.cfg
+++ b/config-model/src/test/derived/attributes/index-info.cfg
@@ -133,10 +133,6 @@ indexinfo[].command[].indexname "a8_arr"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "a8_arr"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "default"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "default"
diff --git a/config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg b/config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg
index d34b6557e3b..f88524ae220 100644
--- a/config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg
+++ b/config-model/src/test/derived/combinedattributeandindexsearch/index-info.cfg
@@ -31,10 +31,6 @@ indexinfo[].command[].indexname "attribute2"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "attribute2"
indexinfo[].command[].command "attribute"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "default"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "default"
diff --git a/config-model/src/test/derived/complex/documentmanager.cfg b/config-model/src/test/derived/complex/documentmanager.cfg
index cd4ea0b212a..42234e52211 100644
--- a/config-model/src/test/derived/complex/documentmanager.cfg
+++ b/config-model/src/test/derived/complex/documentmanager.cfg
@@ -98,12 +98,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "exact"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -1665926686
datatype[].structtype[].name "complex.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/emptydefault/documentmanager.cfg b/config-model/src/test/derived/emptydefault/documentmanager.cfg
index 20695652e07..b6cb2d06718 100644
--- a/config-model/src/test/derived/emptydefault/documentmanager.cfg
+++ b/config-model/src/test/derived/emptydefault/documentmanager.cfg
@@ -25,12 +25,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "two"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 311791038
datatype[].structtype[].name "emptydefault.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/emptydefault/index-info.cfg b/config-model/src/test/derived/emptydefault/index-info.cfg
index fbb4c3ebbea..a506ef52b52 100644
--- a/config-model/src/test/derived/emptydefault/index-info.cfg
+++ b/config-model/src/test/derived/emptydefault/index-info.cfg
@@ -23,7 +23,3 @@ indexinfo[].command[].indexname "two"
indexinfo[].command[].command "normalize"
indexinfo[].command[].indexname "two"
indexinfo[].command[].command "plain-tokens"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/exactmatch/index-info.cfg b/config-model/src/test/derived/exactmatch/index-info.cfg
index 55b6b2d8fcb..a17ff68642e 100644
--- a/config-model/src/test/derived/exactmatch/index-info.cfg
+++ b/config-model/src/test/derived/exactmatch/index-info.cfg
@@ -15,7 +15,3 @@ indexinfo[].command[].indexname "screweduserids"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "screweduserids"
indexinfo[].command[].command "exact *!!!*"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/fieldset/index-info.cfg b/config-model/src/test/derived/fieldset/index-info.cfg
index 47dcbe58805..25f37750cc9 100644
--- a/config-model/src/test/derived/fieldset/index-info.cfg
+++ b/config-model/src/test/derived/fieldset/index-info.cfg
@@ -19,10 +19,6 @@ indexinfo[].command[].indexname "word2"
indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "word2"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "words"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "words"
diff --git a/config-model/src/test/derived/id/documentmanager.cfg b/config-model/src/test/derived/id/documentmanager.cfg
index dc9ccf94830..5140abc65fa 100644
--- a/config-model/src/test/derived/id/documentmanager.cfg
+++ b/config-model/src/test/derived/id/documentmanager.cfg
@@ -22,12 +22,6 @@ datatype[].structtype[].compressminsize 800
datatype[].structtype[].field[].name "uri"
datatype[].structtype[].field[].datatype 10
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -1830022377
datatype[].structtype[].name "id.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/id/index-info.cfg b/config-model/src/test/derived/id/index-info.cfg
index fe066c7d147..ca8daef9848 100644
--- a/config-model/src/test/derived/id/index-info.cfg
+++ b/config-model/src/test/derived/id/index-info.cfg
@@ -27,10 +27,6 @@ indexinfo[].command[].indexname "uri.hostname"
indexinfo[].command[].command "urlhost"
indexinfo[].command[].indexname "uri.hostname"
indexinfo[].command[].command "lowercase"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "uri.fragment"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "uri.host"
diff --git a/config-model/src/test/derived/imported_position_field/index-info.cfg b/config-model/src/test/derived/imported_position_field/index-info.cfg
index d8fffbdfb2d..4d1f76c9b4c 100644
--- a/config-model/src/test/derived/imported_position_field/index-info.cfg
+++ b/config-model/src/test/derived/imported_position_field/index-info.cfg
@@ -9,10 +9,6 @@ indexinfo[0].command[3].indexname "parent_ref"
indexinfo[0].command[3].command "attribute"
indexinfo[0].command[4].indexname "parent_ref"
indexinfo[0].command[4].command "word"
-indexinfo[0].command[5].indexname "rankfeatures"
-indexinfo[0].command[5].command "index"
-indexinfo[0].command[6].indexname "summaryfeatures"
-indexinfo[0].command[6].command "index"
indexinfo[0].command[7].indexname "my_pos_zcurve"
indexinfo[0].command[7].command "index"
indexinfo[0].command[8].indexname "my_pos_zcurve"
@@ -24,4 +20,4 @@ indexinfo[0].command[10].command "numerical"
indexinfo[0].command[11].indexname "my_pos"
indexinfo[0].command[11].command "default-position"
indexinfo[0].command[12].indexname "my_pos"
-indexinfo[0].command[12].command "index" \ No newline at end of file
+indexinfo[0].command[12].command "index"
diff --git a/config-model/src/test/derived/imported_position_field_summary/index-info.cfg b/config-model/src/test/derived/imported_position_field_summary/index-info.cfg
index 4c8dafdf59b..bf7297851dd 100644
--- a/config-model/src/test/derived/imported_position_field_summary/index-info.cfg
+++ b/config-model/src/test/derived/imported_position_field_summary/index-info.cfg
@@ -9,18 +9,6 @@ indexinfo[].command[].indexname "parent_ref"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "parent_ref"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "my_pos.distance"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "my_pos.distance"
-indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "my_pos.position"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "my_pos.position"
-indexinfo[].command[].command "multivalue"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_pos_zcurve"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_pos_zcurve"
diff --git a/config-model/src/test/derived/imported_struct_fields/index-info.cfg b/config-model/src/test/derived/imported_struct_fields/index-info.cfg
index 8d7f4d4bece..3723b06fcbe 100644
--- a/config-model/src/test/derived/imported_struct_fields/index-info.cfg
+++ b/config-model/src/test/derived/imported_struct_fields/index-info.cfg
@@ -11,10 +11,6 @@ indexinfo[].command[].indexname "parent_ref"
indexinfo[].command[].command "word"
indexinfo[].command[].indexname "documentid"
indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_elem_array.name"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_elem_array.name"
diff --git a/config-model/src/test/derived/importedfields/index-info.cfg b/config-model/src/test/derived/importedfields/index-info.cfg
index ef11247191f..bc688f48fc4 100644
--- a/config-model/src/test/derived/importedfields/index-info.cfg
+++ b/config-model/src/test/derived/importedfields/index-info.cfg
@@ -21,10 +21,6 @@ indexinfo[].command[].indexname "b_ref_with_summary"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "b_ref_with_summary"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_int_field"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "my_int_field"
diff --git a/config-model/src/test/derived/indexinfo_fieldsets/index-info.cfg b/config-model/src/test/derived/indexinfo_fieldsets/index-info.cfg
index 3e0bad5e172..15d50762134 100644
--- a/config-model/src/test/derived/indexinfo_fieldsets/index-info.cfg
+++ b/config-model/src/test/derived/indexinfo_fieldsets/index-info.cfg
@@ -47,10 +47,6 @@ indexinfo[].command[].indexname "exact2"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "exact2"
indexinfo[].command[].command "exact @@"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "nostemming"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "nostemming"
diff --git a/config-model/src/test/derived/indexinfo_lowercase/index-info.cfg b/config-model/src/test/derived/indexinfo_lowercase/index-info.cfg
index d397e37804c..f5815627d2b 100644
--- a/config-model/src/test/derived/indexinfo_lowercase/index-info.cfg
+++ b/config-model/src/test/derived/indexinfo_lowercase/index-info.cfg
@@ -49,10 +49,6 @@ indexinfo[].command[].indexname "lc_summary"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "lc_summary"
indexinfo[].command[].command "lowercase"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "lc_set1"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "lc_set1"
diff --git a/config-model/src/test/derived/indexschema/index-info.cfg b/config-model/src/test/derived/indexschema/index-info.cfg
index 3a420e12a24..46c2c3fc307 100644
--- a/config-model/src/test/derived/indexschema/index-info.cfg
+++ b/config-model/src/test/derived/indexschema/index-info.cfg
@@ -219,14 +219,6 @@ indexinfo[].command[].indexname "f10"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "f10"
indexinfo[].command[].command "multivalue"
-indexinfo[].command[].indexname "pos.distance"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos.distance"
-indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "pos.position"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos.position"
-indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "pos_zcurve"
@@ -235,8 +227,6 @@ indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "fast-search"
indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sd_literal"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sd_literal"
@@ -263,8 +253,6 @@ indexinfo[].command[].indexname "sh.query"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sh.scheme"
indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "sa"
indexinfo[].command[].command "dynteaser"
indexinfo[].command[].indexname "fs1"
diff --git a/config-model/src/test/derived/indexswitches/documentmanager.cfg b/config-model/src/test/derived/indexswitches/documentmanager.cfg
index 64d429cb2de..78dbdb7ae74 100644
--- a/config-model/src/test/derived/indexswitches/documentmanager.cfg
+++ b/config-model/src/test/derived/indexswitches/documentmanager.cfg
@@ -31,12 +31,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "source"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -1892617122
datatype[].structtype[].name "indexswitches.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/indexswitches/index-info.cfg b/config-model/src/test/derived/indexswitches/index-info.cfg
index 3af5f311d8b..1ba38ad554b 100644
--- a/config-model/src/test/derived/indexswitches/index-info.cfg
+++ b/config-model/src/test/derived/indexswitches/index-info.cfg
@@ -25,8 +25,6 @@ indexinfo[].command[].indexname "descr"
indexinfo[].command[].command "plain-tokens"
indexinfo[].command[].indexname "source_src"
indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "source"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "source"
@@ -35,8 +33,6 @@ indexinfo[].command[].indexname "source"
indexinfo[].command[].command "normalize"
indexinfo[].command[].indexname "source"
indexinfo[].command[].command "plain-tokens"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "default"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "default"
diff --git a/config-model/src/test/derived/inheritance/documentmanager.cfg b/config-model/src/test/derived/inheritance/documentmanager.cfg
index 47f697c80f7..b15ef13ed3f 100644
--- a/config-model/src/test/derived/inheritance/documentmanager.cfg
+++ b/config-model/src/test/derived/inheritance/documentmanager.cfg
@@ -25,12 +25,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "overridden"
datatype[].structtype[].field[].datatype 0
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 978262812
datatype[].structtype[].name "grandparent.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/inheritance/index-info.cfg b/config-model/src/test/derived/inheritance/index-info.cfg
index e0be7b7d71f..ff3aad45906 100644
--- a/config-model/src/test/derived/inheritance/index-info.cfg
+++ b/config-model/src/test/derived/inheritance/index-info.cfg
@@ -37,7 +37,3 @@ indexinfo[].command[].indexname "onlychild"
indexinfo[].command[].command "normalize"
indexinfo[].command[].indexname "onlychild"
indexinfo[].command[].command "plain-tokens"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/inheritance/mother/documentmanager.cfg b/config-model/src/test/derived/inheritance/mother/documentmanager.cfg
index 7ecd9be403c..71da9265521 100644
--- a/config-model/src/test/derived/inheritance/mother/documentmanager.cfg
+++ b/config-model/src/test/derived/inheritance/mother/documentmanager.cfg
@@ -48,10 +48,6 @@ datatype[-1962244686].structtype[single].field[onlygrandparent].datatype 0
datatype[-1962244686].structtype[single].field[onlygrandparent].name "onlygrandparent"
datatype[-1962244686].structtype[single].field[overridden].datatype 0
datatype[-1962244686].structtype[single].field[overridden].name "overridden"
-datatype[-1962244686].structtype[single].field[rankfeatures].datatype 147991900
-datatype[-1962244686].structtype[single].field[rankfeatures].name "rankfeatures"
-datatype[-1962244686].structtype[single].field[summaryfeatures].datatype 147991900
-datatype[-1962244686].structtype[single].field[summaryfeatures].name "summaryfeatures"
datatype[-1989003153].id -1989003153
datatype[-1989003153].structtype[single].name "mother.body"
datatype[-1989003153].structtype[single].version 0
@@ -68,10 +64,6 @@ datatype[-205818510].structtype[single].field[onlymother].datatype 2
datatype[-205818510].structtype[single].field[onlymother].name "onlymother"
datatype[-205818510].structtype[single].field[overridden].datatype 0
datatype[-205818510].structtype[single].field[overridden].name "overridden"
-datatype[-205818510].structtype[single].field[rankfeatures].datatype 147991900
-datatype[-205818510].structtype[single].field[rankfeatures].name "rankfeatures"
-datatype[-205818510].structtype[single].field[summaryfeatures].datatype 147991900
-datatype[-205818510].structtype[single].field[summaryfeatures].name "summaryfeatures"
datatype[-384824039].id -384824039
datatype[-384824039].structtype[single].name "mother_search.header"
datatype[-384824039].structtype[single].version 0
@@ -81,10 +73,6 @@ datatype[-384824039].structtype[single].field[onlymother].datatype 2
datatype[-384824039].structtype[single].field[onlymother].name "onlymother"
datatype[-384824039].structtype[single].field[overridden].datatype 0
datatype[-384824039].structtype[single].field[overridden].name "overridden"
-datatype[-384824039].structtype[single].field[rankfeatures].datatype 147991900
-datatype[-384824039].structtype[single].field[rankfeatures].name "rankfeatures"
-datatype[-384824039].structtype[single].field[summaryfeatures].datatype 147991900
-datatype[-384824039].structtype[single].field[summaryfeatures].name "summaryfeatures"
datatype[-52742073].id -52742073
datatype[-52742073].structtype[single].name "father_search.body"
datatype[-52742073].structtype[single].version 0
@@ -119,10 +107,6 @@ datatype[1530060044].structtype[single].field[onlygrandparent].datatype 0
datatype[1530060044].structtype[single].field[onlygrandparent].name "onlygrandparent"
datatype[1530060044].structtype[single].field[overridden].datatype 0
datatype[1530060044].structtype[single].field[overridden].name "overridden"
-datatype[1530060044].structtype[single].field[rankfeatures].datatype 147991900
-datatype[1530060044].structtype[single].field[rankfeatures].name "rankfeatures"
-datatype[1530060044].structtype[single].field[summaryfeatures].datatype 147991900
-datatype[1530060044].structtype[single].field[summaryfeatures].name "summaryfeatures"
datatype[1845861921].id 1845861921
datatype[1845861921].structtype[single].name "grandparent_search.body"
datatype[1845861921].structtype[single].version 0
diff --git a/config-model/src/test/derived/inheritdiamond/documentmanager.cfg b/config-model/src/test/derived/inheritdiamond/documentmanager.cfg
index 36c1098c119..c3ead0d31f8 100644
--- a/config-model/src/test/derived/inheritdiamond/documentmanager.cfg
+++ b/config-model/src/test/derived/inheritdiamond/documentmanager.cfg
@@ -66,10 +66,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
datatype[].id -52742073
datatype[].structtype[].name "father_search.body"
datatype[].structtype[].version 0
@@ -98,10 +94,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
datatype[].id 1561776723
datatype[].structtype[].name "mother_struct"
datatype[].structtype[].version 0
@@ -148,10 +140,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
datatype[].id -1467672569
datatype[].structtype[].name "child_search.body"
datatype[].structtype[].version 0
@@ -212,10 +200,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
datatype[].id 328953555
datatype[].documenttype[].name "grandparent_search"
datatype[].documenttype[].version 0
diff --git a/config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg b/config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg
index 8d5bc57ef31..8e2ee3bbc4e 100644
--- a/config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg
+++ b/config-model/src/test/derived/inheritfromgrandparent/documentmanager.cfg
@@ -29,12 +29,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 978262812
datatype[].structtype[].name "grandparent.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/inheritfromparent/documentmanager.cfg b/config-model/src/test/derived/inheritfromparent/documentmanager.cfg
index 154b6524c33..7c65a7b72f3 100644
--- a/config-model/src/test/derived/inheritfromparent/documentmanager.cfg
+++ b/config-model/src/test/derived/inheritfromparent/documentmanager.cfg
@@ -35,12 +35,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "weight"
datatype[].structtype[].field[].datatype 1
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -389494616
datatype[].structtype[].name "parent.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/inheritfromparent/documenttypes.cfg b/config-model/src/test/derived/inheritfromparent/documenttypes.cfg
index 70c4bc4297c..a5ea3dbf8ed 100644
--- a/config-model/src/test/derived/inheritfromparent/documenttypes.cfg
+++ b/config-model/src/test/derived/inheritfromparent/documenttypes.cfg
@@ -50,16 +50,6 @@ documenttype[].datatype[].sstruct.field[].id 1001392207
documenttype[].datatype[].sstruct.field[].id_v6 1329620545
documenttype[].datatype[].sstruct.field[].datatype 1
documenttype[].datatype[].sstruct.field[].detailedtype ""
-documenttype[].datatype[].sstruct.field[].name "rankfeatures"
-documenttype[].datatype[].sstruct.field[].id 1883197392
-documenttype[].datatype[].sstruct.field[].id_v6 699950698
-documenttype[].datatype[].sstruct.field[].datatype 2
-documenttype[].datatype[].sstruct.field[].detailedtype ""
-documenttype[].datatype[].sstruct.field[].name "summaryfeatures"
-documenttype[].datatype[].sstruct.field[].id 1840337115
-documenttype[].datatype[].sstruct.field[].id_v6 1981648971
-documenttype[].datatype[].sstruct.field[].datatype 2
-documenttype[].datatype[].sstruct.field[].detailedtype ""
documenttype[].datatype[].id -389494616
documenttype[].datatype[].type STRUCT
documenttype[].datatype[].array.element.id 0
diff --git a/config-model/src/test/derived/inheritstruct/index-info.cfg b/config-model/src/test/derived/inheritstruct/index-info.cfg
index e0f3fdb5dde..2d5b832a07f 100644
--- a/config-model/src/test/derived/inheritstruct/index-info.cfg
+++ b/config-model/src/test/derived/inheritstruct/index-info.cfg
@@ -19,7 +19,3 @@ indexinfo[].command[].indexname "child_struct_field"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "child_struct_field"
indexinfo[].command[].command "plain-tokens"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/mail/documentmanager.cfg b/config-model/src/test/derived/mail/documentmanager.cfg
index accc0ba9578..2fa9e5923c9 100644
--- a/config-model/src/test/derived/mail/documentmanager.cfg
+++ b/config-model/src/test/derived/mail/documentmanager.cfg
@@ -37,10 +37,6 @@ datatype[].structtype[].field[].name "subject"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].name "snippet"
datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
datatype[].id -1206550296
datatype[].arraytype[].datatype 12
datatype[].id -953584901
diff --git a/config-model/src/test/derived/mail/index-info.cfg b/config-model/src/test/derived/mail/index-info.cfg
index 0d45f7c120f..39b12b64285 100644
--- a/config-model/src/test/derived/mail/index-info.cfg
+++ b/config-model/src/test/derived/mail/index-info.cfg
@@ -145,11 +145,7 @@ indexinfo[].command[].indexname "attachments"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "attachments"
indexinfo[].command[].command "multivalue"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "snippet"
indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "snippet"
indexinfo[].command[].command "dynteaser"
diff --git a/config-model/src/test/derived/map_attribute/index-info.cfg b/config-model/src/test/derived/map_attribute/index-info.cfg
index 8d9f6a2a36f..1d44648ef6f 100644
--- a/config-model/src/test/derived/map_attribute/index-info.cfg
+++ b/config-model/src/test/derived/map_attribute/index-info.cfg
@@ -31,7 +31,3 @@ indexinfo[0].command[14].indexname "int_map"
indexinfo[0].command[14].command "index"
indexinfo[0].command[15].indexname "int_map"
indexinfo[0].command[15].command "multivalue"
-indexinfo[0].command[16].indexname "rankfeatures"
-indexinfo[0].command[16].command "index"
-indexinfo[0].command[17].indexname "summaryfeatures"
-indexinfo[0].command[17].command "index" \ No newline at end of file
diff --git a/config-model/src/test/derived/map_of_struct_attribute/index-info.cfg b/config-model/src/test/derived/map_of_struct_attribute/index-info.cfg
index 55de436716c..659bd86b9f0 100644
--- a/config-model/src/test/derived/map_of_struct_attribute/index-info.cfg
+++ b/config-model/src/test/derived/map_of_struct_attribute/index-info.cfg
@@ -47,7 +47,3 @@ indexinfo[0].command[22].indexname "int_elem_map"
indexinfo[0].command[22].command "index"
indexinfo[0].command[23].indexname "int_elem_map"
indexinfo[0].command[23].command "multivalue"
-indexinfo[0].command[24].indexname "rankfeatures"
-indexinfo[0].command[24].command "index"
-indexinfo[0].command[25].indexname "summaryfeatures"
-indexinfo[0].command[25].command "index" \ No newline at end of file
diff --git a/config-model/src/test/derived/music/index-info.cfg b/config-model/src/test/derived/music/index-info.cfg
index 5d4cb8133e5..9e26df5fbea 100644
--- a/config-model/src/test/derived/music/index-info.cfg
+++ b/config-model/src/test/derived/music/index-info.cfg
@@ -209,10 +209,6 @@ indexinfo[].command[].indexname "metalvalue_arr"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "metalvalue_arr"
indexinfo[].command[].command "word"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "bgndata"
indexinfo[].command[].command "dynteaser"
indexinfo[].command[].indexname "ew"
diff --git a/config-model/src/test/derived/neuralnet/neuralnet.sd b/config-model/src/test/derived/neuralnet/neuralnet.sd
new file mode 100644
index 00000000000..f916b35cb75
--- /dev/null
+++ b/config-model/src/test/derived/neuralnet/neuralnet.sd
@@ -0,0 +1,238 @@
+search neuralnet {
+
+ document neuralnet {
+
+ field pinned type int {
+ indexing: attribute
+ }
+
+ field createdAt type long {
+ indexing: attribute
+ }
+
+ field updatedAt type long {
+ indexing: attribute
+ }
+
+ field uvCount type int {
+ indexing: attribute
+ }
+
+ field dvCount type int {
+ indexing: attribute
+ }
+
+ field aVoteCount type int {
+ indexing: attribute
+ }
+
+ field rCount type int {
+ indexing: attribute
+ }
+
+ field uniqueRACount type int {
+ indexing: attribute
+ }
+
+ field rTo type string {
+ indexing: attribute
+ }
+
+ field markedAsAAt type long {
+ indexing: attribute
+ }
+
+ field normalizedTextScore type float {
+ indexing: attribute
+ }
+
+ field t type float {
+ indexing: attribute
+ }
+
+ field relevance type float {
+ indexing: attribute
+ }
+
+ field normalizedCS type float {
+ indexing: attribute
+ }
+
+ field laAt type long {
+ indexing: attribute
+ }
+
+ field hsScore type double {
+ indexing: attribute
+ }
+
+ }
+
+ rank-profile defaultRankProfile inherits default {
+
+ constants {
+ maxSignedSixtyFourBitInteger: 9223372036854775807
+ }
+
+ macro log10_1p(x) {
+ expression: log10(x+1)
+ }
+
+ macro textScoreToUse() {
+ expression: if(isNan(attribute(normalizedTextScore)) == 1, 0, attribute(normalizedTextScore))
+ }
+
+ macro rCountToUse() {
+ expression: if(isNan(attribute(rCount)) == 1, 0, if(attribute(rCount) < 0, 0, attribute(rCount)))
+ }
+
+ macro uniqueRCountToUse() {
+ expression: if(isNan(attribute(uniqueRCount)) == 1, 0, if(attribute(uniqueRACount) < 0, 0, attribute(uniqueRACount)))
+ }
+
+ macro uvCountToUse() {
+ expression: if(isNan(attribute(uvCount)) == 1, 0, if(attribute(uvCount) < 0, 0, attribute(uvCount)))
+ }
+
+ macro dvCountToUse() {
+ expression: if(isNan(attribute(dvCount)) == 1, 0, if(attribute(dvCount) < 0, 0, attribute(dvCount)))
+ }
+
+ macro aVoteCountToUse() {
+ expression: if(isNan(attribute(aVoteCount)) == 1, 0, if(attribute(aVoteCount) < 0, 0, attribute(aVoteCount)))
+ }
+
+ macro totalPR() {
+ expression: uniqueRCountToUse + query(voteToRRatio) * (uvCountToUse - dvCountToUse) - aVoteCountToUse
+ }
+
+ macro totalvote() {
+ expression: query(reportaweight) * aVoteCountToUse + dvCountToUse + query(rweight) * uniqueRCountToUse + uvCountToUse
+ }
+
+ macro phat() {
+ expression: if (totalvote == 0, 0, ( query(rweight) * uniqueRCountToUse + uvCountToUse) / totalvote)
+ }
+
+ macro nCScoreToUse() {
+ expression: if (totalPR > 0, log10(totalPR), 0)
+ }
+
+ macro hsScoreToUse() {
+ expression: attribute(hsScore)
+ }
+
+ macro tScoreToUse() {
+ expression: if (isNan(attribute(t)) == 1, 0.6, attribute(t))
+ }
+
+ macro relevanceScoreToUse() {
+ expression: if (isNan(attribute(relevance)) == 1, 0.254, attribute(relevance))
+ }
+
+ macro freshnessToUse() {
+ expression: if (freshness(createdAt).logscale < 0.01, 0.01, freshness(createdAt).logscale)
+ }
+
+ macro rankedAt() {
+ expression: now
+ }
+
+ macro createdAtToUse() {
+ expression: if(isNan(attribute(createdAt)) == 1, rankedAt, attribute(createdAt))
+ }
+
+ macro laAtToUse() {
+ expression: if(isNan(attribute(laAt)) == 1, attribute(createdAt), attribute(laAt))
+ }
+
+ macro markedAsAAtToUse() {
+ expression: if(isNan(attribute(markedAsAAt)) == 1, maxSignedSixtyFourBitInteger, attribute(markedAsAAt))
+ }
+
+ macro tdToUse() {
+ expression: pow(2, 0 - ((rankedAt - createdAtToUse) / query(decay)))
+ }
+
+ macro commentOverallScore() {
+ expression: query(textweight) * textScoreToUse + query(communityweight) * nCScoreToUse
+ }
+
+ macro pinScore() {
+ expression: if(isNan(attribute(pinned)) == 1, 0, query(pinweight) * attribute(pinned))
+ }
+
+ macro freshnessRank() {
+ expression: nativeRank + freshness(createdAt)
+ }
+
+ first-phase {
+ expression: nativeRank
+ }
+
+ }
+
+ rank-profile neuralNetworkProfile inherits defaultRankProfile {
+ macro nn_input() {
+ expression {
+ concat(log10_1p(aVoteCountToUse),
+ concat(log10_1p(dvCountToUse),
+ concat(log10_1p(uniqueRCountToUse),
+ concat(log10_1p(uvCountToUse),
+ concat(phat,
+ concat(log10_1p(totalvote),
+ concat(hsScoreToUse,
+ concat(tdToUse,
+ tScoreToUse, x), x), x), x), x), x), x), x)
+ }
+ }
+
+ macro get_model_weights(field) {
+ expression: if(query(field) == 0, constant(field), query(field))
+ }
+
+ macro layer_0() {
+ expression: elu(xw_plus_b(nn_input, get_model_weights(W_0), get_model_weights(b_0), x))
+ }
+ macro layer_1() {
+ expression: elu(xw_plus_b(layer_0, get_model_weights(W_1), get_model_weights(b_1), hidden))
+ }
+ macro layer_out() {
+ expression: sum(xw_plus_b(layer_1, get_model_weights(W_out), get_model_weights(b_out), out))
+ }
+ first-phase {
+ expression: freshnessRank
+ }
+ second-phase {
+ expression: layer_out
+ rerank-count: 2000
+ }
+
+ }
+
+ constant W_0 {
+ file: neural-network-201805/W_0.json
+ type: tensor(x[9],hidden[9])
+ }
+ constant b_0 {
+ file: neural-network-201805/b_0.json
+ type: tensor(hidden[9])
+ }
+ constant W_1 {
+ file: neural-network-201805/W_1.json
+ type: tensor(hidden[9],out[9])
+ }
+ constant b_1 {
+ file: neural-network-201805/b_1.json
+ type: tensor(out[9])
+ }
+ constant W_out {
+ file: neural-network-201805/W_out.json
+ type: tensor(out[9])
+ }
+ constant b_out {
+ file: neural-network-201805/b_out.json
+ type: tensor(out[1])
+ }
+
+} \ No newline at end of file
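Note: the neuralNetworkProfile in the schema above wires a small feed-forward network out of ranking macros: nn_input concatenates nine scalar signals into a tensor(x[9]), layer_0 and layer_1 apply xw_plus_b followed by elu over the declared weight constants, and layer_out reduces the result to the scalar used as the second-phase score. An illustrative numpy sketch of that computation (a sketch only, not part of the commit; names mirror the schema's constants, and query overrides are assumed to be zero so the constants are used):

    import numpy as np

    def elu(t):
        # elu(x) = x for x > 0, exp(x) - 1 otherwise
        return np.where(t > 0, t, np.exp(t) - 1)

    def second_phase_score(nn_input, W_0, b_0, W_1, b_1, W_out, b_out):
        # nn_input: shape (9,)            ~ tensor(x[9])
        layer_0 = elu(nn_input @ W_0 + b_0)   # shape (9,) ~ tensor(hidden[9])
        layer_1 = elu(layer_0 @ W_1 + b_1)    # shape (9,) ~ tensor(out[9])
        # layer_out: elementwise product with W_out, summed, plus the b_out bias
        return float(np.sum(layer_1 * W_out) + b_out[0])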
diff --git a/config-model/src/test/derived/neuralnet/query-profiles/default.xml b/config-model/src/test/derived/neuralnet/query-profiles/default.xml
new file mode 100644
index 00000000000..eef1aaa7f53
--- /dev/null
+++ b/config-model/src/test/derived/neuralnet/query-profiles/default.xml
@@ -0,0 +1,2 @@
+<query-profile id="default" type="DefaultQueryProfileType">
+</query-profile>
diff --git a/config-model/src/test/derived/neuralnet/query-profiles/types/DefaultQueryProfileType.xml b/config-model/src/test/derived/neuralnet/query-profiles/types/DefaultQueryProfileType.xml
new file mode 100644
index 00000000000..e1659479135
--- /dev/null
+++ b/config-model/src/test/derived/neuralnet/query-profiles/types/DefaultQueryProfileType.xml
@@ -0,0 +1,8 @@
+<query-profile-type id="DefaultQueryProfileType">
+ <field name="ranking.features.query(W_0)" type="tensor(x[9],hidden[9])" />
+ <field name="ranking.features.query(b_0)" type="tensor(hidden[9])" />
+ <field name="ranking.features.query(W_1)" type="tensor(hidden[9],out[9])" />
+ <field name="ranking.features.query(b_1)" type="tensor(out[9])" />
+ <field name="ranking.features.query(W_out)" type="tensor(out[9])" />
+ <field name="ranking.features.query(b_out)" type="tensor(out[1])" />
+</query-profile-type>
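The query profile type above gives each ranking.features.query(...) feature an explicit tensor type, which is what lets the network weights be supplied per query; the get_model_weights macro in neuralnet.sd then falls back to the bundled constants when no override is sent. A minimal sketch of that fallback logic (hypothetical Python, not a Vespa API; an absent query tensor is assumed to evaluate to zero, as in the macro):

    import numpy as np

    def get_model_weights(query_tensor, constant_tensor):
        # mirrors the schema macro: if (query(field) == 0, constant(field), query(field))
        if query_tensor is None or np.all(query_tensor == 0):
            return constant_tensor
        return query_tensor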
diff --git a/config-model/src/test/derived/neuralnet/rank-profiles.cfg b/config-model/src/test/derived/neuralnet/rank-profiles.cfg
new file mode 100644
index 00000000000..4530bff2e20
--- /dev/null
+++ b/config-model/src/test/derived/neuralnet/rank-profiles.cfg
@@ -0,0 +1,198 @@
+rankprofile[].name "default"
+rankprofile[].fef.property[].name "vespa.type.query.b_out"
+rankprofile[].fef.property[].value "tensor(out[1])"
+rankprofile[].fef.property[].name "vespa.type.query.W_out"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.b_0"
+rankprofile[].fef.property[].value "tensor(hidden[9])"
+rankprofile[].fef.property[].name "vespa.type.query.b_1"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.W_1"
+rankprofile[].fef.property[].value "tensor(hidden[9],out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.W_0"
+rankprofile[].fef.property[].value "tensor(hidden[9],x[9])"
+rankprofile[].name "unranked"
+rankprofile[].fef.property[].name "vespa.rank.firstphase"
+rankprofile[].fef.property[].value "value(0)"
+rankprofile[].fef.property[].name "vespa.hitcollector.heapsize"
+rankprofile[].fef.property[].value "0"
+rankprofile[].fef.property[].name "vespa.hitcollector.arraysize"
+rankprofile[].fef.property[].value "0"
+rankprofile[].fef.property[].name "vespa.dump.ignoredefaultfeatures"
+rankprofile[].fef.property[].value "true"
+rankprofile[].fef.property[].name "vespa.type.query.b_out"
+rankprofile[].fef.property[].value "tensor(out[1])"
+rankprofile[].fef.property[].name "vespa.type.query.W_out"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.b_0"
+rankprofile[].fef.property[].value "tensor(hidden[9])"
+rankprofile[].fef.property[].name "vespa.type.query.b_1"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.W_1"
+rankprofile[].fef.property[].value "tensor(hidden[9],out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.W_0"
+rankprofile[].fef.property[].value "tensor(hidden[9],x[9])"
+rankprofile[].name "defaultRankProfile"
+rankprofile[].fef.property[].name "rankingExpression(log10_1p).rankingScript"
+rankprofile[].fef.property[].value "log10(x + 1)"
+rankprofile[].fef.property[].name "rankingExpression(textScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(normalizedTextScore)) == 1, 0, attribute(normalizedTextScore))"
+rankprofile[].fef.property[].name "rankingExpression(rCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(rCount)) == 1, 0, if (attribute(rCount) < 0, 0, attribute(rCount)))"
+rankprofile[].fef.property[].name "rankingExpression(uniqueRCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(uniqueRCount)) == 1, 0, if (attribute(uniqueRACount) < 0, 0, attribute(uniqueRACount)))"
+rankprofile[].fef.property[].name "rankingExpression(uvCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(uvCount)) == 1, 0, if (attribute(uvCount) < 0, 0, attribute(uvCount)))"
+rankprofile[].fef.property[].name "rankingExpression(dvCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(dvCount)) == 1, 0, if (attribute(dvCount) < 0, 0, attribute(dvCount)))"
+rankprofile[].fef.property[].name "rankingExpression(aVoteCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(aVoteCount)) == 1, 0, if (attribute(aVoteCount) < 0, 0, attribute(aVoteCount)))"
+rankprofile[].fef.property[].name "rankingExpression(totalPR).rankingScript"
+rankprofile[].fef.property[].value "rankingExpression(uniqueRCountToUse) + query(voteToRRatio) * (rankingExpression(uvCountToUse) - rankingExpression(dvCountToUse)) - rankingExpression(aVoteCountToUse)"
+rankprofile[].fef.property[].name "rankingExpression(totalvote).rankingScript"
+rankprofile[].fef.property[].value "query(reportaweight) * rankingExpression(aVoteCountToUse) + rankingExpression(dvCountToUse) + query(rweight) * rankingExpression(uniqueRCountToUse) + rankingExpression(uvCountToUse)"
+rankprofile[].fef.property[].name "rankingExpression(phat).rankingScript"
+rankprofile[].fef.property[].value "if (rankingExpression(totalvote) == 0, 0, (query(rweight) * rankingExpression(uniqueRCountToUse) + rankingExpression(uvCountToUse)) / rankingExpression(totalvote))"
+rankprofile[].fef.property[].name "rankingExpression(nCScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "if (rankingExpression(totalPR) > 0, log10(rankingExpression(totalPR)), 0)"
+rankprofile[].fef.property[].name "rankingExpression(hsScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "attribute(hsScore)"
+rankprofile[].fef.property[].name "rankingExpression(tScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(t)) == 1, 0.6, attribute(t))"
+rankprofile[].fef.property[].name "rankingExpression(relevanceScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(relevance)) == 1, 0.254, attribute(relevance))"
+rankprofile[].fef.property[].name "rankingExpression(freshnessToUse).rankingScript"
+rankprofile[].fef.property[].value "if (freshness(createdAt).logscale < 0.01, 0.01, freshness(createdAt).logscale)"
+rankprofile[].fef.property[].name "rankingExpression(rankedAt).rankingScript"
+rankprofile[].fef.property[].value "now"
+rankprofile[].fef.property[].name "rankingExpression(createdAtToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(createdAt)) == 1, rankingExpression(rankedAt), attribute(createdAt))"
+rankprofile[].fef.property[].name "rankingExpression(laAtToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(laAt)) == 1, attribute(createdAt), attribute(laAt))"
+rankprofile[].fef.property[].name "rankingExpression(markedAsAAtToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(markedAsAAt)) == 1, 9.223372036854776E18, attribute(markedAsAAt))"
+rankprofile[].fef.property[].name "rankingExpression(tdToUse).rankingScript"
+rankprofile[].fef.property[].value "pow(2,0 - ((rankingExpression(rankedAt) - rankingExpression(createdAtToUse)) / query(decay)))"
+rankprofile[].fef.property[].name "rankingExpression(commentOverallScore).rankingScript"
+rankprofile[].fef.property[].value "query(textweight) * rankingExpression(textScoreToUse) + query(communityweight) * rankingExpression(nCScoreToUse)"
+rankprofile[].fef.property[].name "rankingExpression(pinScore).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(pinned)) == 1, 0, query(pinweight) * attribute(pinned))"
+rankprofile[].fef.property[].name "rankingExpression(freshnessRank).rankingScript"
+rankprofile[].fef.property[].value "nativeRank + freshness(createdAt)"
+rankprofile[].fef.property[].name "vespa.rank.firstphase"
+rankprofile[].fef.property[].value "nativeRank"
+rankprofile[].fef.property[].name "vespa.type.query.b_out"
+rankprofile[].fef.property[].value "tensor(out[1])"
+rankprofile[].fef.property[].name "vespa.type.query.W_out"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.b_0"
+rankprofile[].fef.property[].value "tensor(hidden[9])"
+rankprofile[].fef.property[].name "vespa.type.query.b_1"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.W_1"
+rankprofile[].fef.property[].value "tensor(hidden[9],out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.W_0"
+rankprofile[].fef.property[].value "tensor(hidden[9],x[9])"
+rankprofile[].name "neuralNetworkProfile"
+rankprofile[].fef.property[].name "rankingExpression(log10_1p).rankingScript"
+rankprofile[].fef.property[].value "log10(x + 1)"
+rankprofile[].fef.property[].name "rankingExpression(textScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(normalizedTextScore)) == 1, 0, attribute(normalizedTextScore))"
+rankprofile[].fef.property[].name "rankingExpression(rCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(rCount)) == 1, 0, if (attribute(rCount) < 0, 0, attribute(rCount)))"
+rankprofile[].fef.property[].name "rankingExpression(uniqueRCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(uniqueRCount)) == 1, 0, if (attribute(uniqueRACount) < 0, 0, attribute(uniqueRACount)))"
+rankprofile[].fef.property[].name "rankingExpression(uvCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(uvCount)) == 1, 0, if (attribute(uvCount) < 0, 0, attribute(uvCount)))"
+rankprofile[].fef.property[].name "rankingExpression(dvCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(dvCount)) == 1, 0, if (attribute(dvCount) < 0, 0, attribute(dvCount)))"
+rankprofile[].fef.property[].name "rankingExpression(aVoteCountToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(aVoteCount)) == 1, 0, if (attribute(aVoteCount) < 0, 0, attribute(aVoteCount)))"
+rankprofile[].fef.property[].name "rankingExpression(totalPR).rankingScript"
+rankprofile[].fef.property[].value "rankingExpression(uniqueRCountToUse) + query(voteToRRatio) * (rankingExpression(uvCountToUse) - rankingExpression(dvCountToUse)) - rankingExpression(aVoteCountToUse)"
+rankprofile[].fef.property[].name "rankingExpression(totalvote).rankingScript"
+rankprofile[].fef.property[].value "query(reportaweight) * rankingExpression(aVoteCountToUse) + rankingExpression(dvCountToUse) + query(rweight) * rankingExpression(uniqueRCountToUse) + rankingExpression(uvCountToUse)"
+rankprofile[].fef.property[].name "rankingExpression(phat).rankingScript"
+rankprofile[].fef.property[].value "if (rankingExpression(totalvote) == 0, 0, (query(rweight) * rankingExpression(uniqueRCountToUse) + rankingExpression(uvCountToUse)) / rankingExpression(totalvote))"
+rankprofile[].fef.property[].name "rankingExpression(nCScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "if (rankingExpression(totalPR) > 0, log10(rankingExpression(totalPR)), 0)"
+rankprofile[].fef.property[].name "rankingExpression(hsScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "attribute(hsScore)"
+rankprofile[].fef.property[].name "rankingExpression(tScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(t)) == 1, 0.6, attribute(t))"
+rankprofile[].fef.property[].name "rankingExpression(relevanceScoreToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(relevance)) == 1, 0.254, attribute(relevance))"
+rankprofile[].fef.property[].name "rankingExpression(freshnessToUse).rankingScript"
+rankprofile[].fef.property[].value "if (freshness(createdAt).logscale < 0.01, 0.01, freshness(createdAt).logscale)"
+rankprofile[].fef.property[].name "rankingExpression(rankedAt).rankingScript"
+rankprofile[].fef.property[].value "now"
+rankprofile[].fef.property[].name "rankingExpression(createdAtToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(createdAt)) == 1, rankingExpression(rankedAt), attribute(createdAt))"
+rankprofile[].fef.property[].name "rankingExpression(laAtToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(laAt)) == 1, attribute(createdAt), attribute(laAt))"
+rankprofile[].fef.property[].name "rankingExpression(markedAsAAtToUse).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(markedAsAAt)) == 1, 9.223372036854776E18, attribute(markedAsAAt))"
+rankprofile[].fef.property[].name "rankingExpression(tdToUse).rankingScript"
+rankprofile[].fef.property[].value "pow(2,0 - ((rankingExpression(rankedAt) - rankingExpression(createdAtToUse)) / query(decay)))"
+rankprofile[].fef.property[].name "rankingExpression(commentOverallScore).rankingScript"
+rankprofile[].fef.property[].value "query(textweight) * rankingExpression(textScoreToUse) + query(communityweight) * rankingExpression(nCScoreToUse)"
+rankprofile[].fef.property[].name "rankingExpression(pinScore).rankingScript"
+rankprofile[].fef.property[].value "if (isNan(attribute(pinned)) == 1, 0, query(pinweight) * attribute(pinned))"
+rankprofile[].fef.property[].name "rankingExpression(freshnessRank).rankingScript"
+rankprofile[].fef.property[].value "nativeRank + freshness(createdAt)"
+rankprofile[].fef.property[].name "rankingExpression(log10_1p@af9a8c53ba738798).rankingScript"
+rankprofile[].fef.property[].value "log10(rankingExpression(aVoteCountToUse) + 1)"
+rankprofile[].fef.property[].name "rankingExpression(log10_1p@6ad21b437fe95dd9).rankingScript"
+rankprofile[].fef.property[].value "log10(rankingExpression(dvCountToUse) + 1)"
+rankprofile[].fef.property[].name "rankingExpression(log10_1p@c05478688f81fe20).rankingScript"
+rankprofile[].fef.property[].value "log10(rankingExpression(uniqueRCountToUse) + 1)"
+rankprofile[].fef.property[].name "rankingExpression(log10_1p@53f0a2c000e82f4).rankingScript"
+rankprofile[].fef.property[].value "log10(rankingExpression(uvCountToUse) + 1)"
+rankprofile[].fef.property[].name "rankingExpression(log10_1p@d7da61ad34902e89).rankingScript"
+rankprofile[].fef.property[].value "log10(rankingExpression(totalvote) + 1)"
+rankprofile[].fef.property[].name "rankingExpression(nn_input).rankingScript"
+rankprofile[].fef.property[].value "concat(rankingExpression(log10_1p@af9a8c53ba738798), concat(rankingExpression(log10_1p@6ad21b437fe95dd9), concat(rankingExpression(log10_1p@c05478688f81fe20), concat(rankingExpression(log10_1p@53f0a2c000e82f4), concat(rankingExpression(phat), concat(rankingExpression(log10_1p@d7da61ad34902e89), concat(rankingExpression(hsScoreToUse), concat(rankingExpression(tdToUse), rankingExpression(tScoreToUse), x), x), x), x), x), x), x), x)"
+rankprofile[].fef.property[].name "rankingExpression(nn_input).type"
+rankprofile[].fef.property[].value "tensor(x[9])"
+rankprofile[].fef.property[].name "rankingExpression(get_model_weights).rankingScript"
+rankprofile[].fef.property[].value "if (query(field) == 0, constant(field), query(field))"
+rankprofile[].fef.property[].name "rankingExpression(get_model_weights@1f2b4afc2c45fbee).rankingScript"
+rankprofile[].fef.property[].value "if (query(W_0) == 0, constant(W_0), query(W_0))"
+rankprofile[].fef.property[].name "rankingExpression(get_model_weights@e752cecc7900ff3e).rankingScript"
+rankprofile[].fef.property[].value "if (query(b_0) == 0, constant(b_0), query(b_0))"
+rankprofile[].fef.property[].name "rankingExpression(layer_0).rankingScript"
+rankprofile[].fef.property[].value "elu(join(reduce(join(rankingExpression(nn_input), rankingExpression(get_model_weights@1f2b4afc2c45fbee), f(a,b)(a * b)), sum, x), rankingExpression(get_model_weights@e752cecc7900ff3e), f(a,b)(a + b)))"
+rankprofile[].fef.property[].name "rankingExpression(layer_0).type"
+rankprofile[].fef.property[].value "tensor(hidden[9])"
+rankprofile[].fef.property[].name "rankingExpression(get_model_weights@eac265fa16b752cf).rankingScript"
+rankprofile[].fef.property[].value "if (query(W_1) == 0, constant(W_1), query(W_1))"
+rankprofile[].fef.property[].name "rankingExpression(get_model_weights@b953c19adb7d2154).rankingScript"
+rankprofile[].fef.property[].value "if (query(b_1) == 0, constant(b_1), query(b_1))"
+rankprofile[].fef.property[].name "rankingExpression(layer_1).rankingScript"
+rankprofile[].fef.property[].value "elu(join(reduce(join(rankingExpression(layer_0), rankingExpression(get_model_weights@eac265fa16b752cf), f(a,b)(a * b)), sum, hidden), rankingExpression(get_model_weights@b953c19adb7d2154), f(a,b)(a + b)))"
+rankprofile[].fef.property[].name "rankingExpression(layer_1).type"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "rankingExpression(get_model_weights@418462473aa32b7d).rankingScript"
+rankprofile[].fef.property[].value "if (query(W_out) == 0, constant(W_out), query(W_out))"
+rankprofile[].fef.property[].name "rankingExpression(get_model_weights@23f46853cab72961).rankingScript"
+rankprofile[].fef.property[].value "if (query(b_out) == 0, constant(b_out), query(b_out))"
+rankprofile[].fef.property[].name "rankingExpression(layer_out).rankingScript"
+rankprofile[].fef.property[].value "reduce(join(reduce(join(rankingExpression(layer_1), rankingExpression(get_model_weights@418462473aa32b7d), f(a,b)(a * b)), sum, out), rankingExpression(get_model_weights@23f46853cab72961), f(a,b)(a + b)), sum)"
+rankprofile[].fef.property[].name "vespa.rank.firstphase"
+rankprofile[].fef.property[].value "rankingExpression(freshnessRank)"
+rankprofile[].fef.property[].name "vespa.rank.secondphase"
+rankprofile[].fef.property[].value "rankingExpression(layer_out)"
+rankprofile[].fef.property[].name "vespa.hitcollector.heapsize"
+rankprofile[].fef.property[].value "2000"
+rankprofile[].fef.property[].name "vespa.type.query.b_out"
+rankprofile[].fef.property[].value "tensor(out[1])"
+rankprofile[].fef.property[].name "vespa.type.query.W_out"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.b_0"
+rankprofile[].fef.property[].value "tensor(hidden[9])"
+rankprofile[].fef.property[].name "vespa.type.query.b_1"
+rankprofile[].fef.property[].value "tensor(out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.W_1"
+rankprofile[].fef.property[].value "tensor(hidden[9],out[9])"
+rankprofile[].fef.property[].name "vespa.type.query.W_0"
+rankprofile[].fef.property[].value "tensor(hidden[9],x[9])"
diff --git a/config-model/src/test/derived/newrank/index-info.cfg b/config-model/src/test/derived/newrank/index-info.cfg
index 96d34ee2548..6967bf43538 100644
--- a/config-model/src/test/derived/newrank/index-info.cfg
+++ b/config-model/src/test/derived/newrank/index-info.cfg
@@ -171,10 +171,6 @@ indexinfo[].command[].indexname "cbid"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "cbid"
indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
indexinfo[].command[].indexname "bgndata"
indexinfo[].command[].command "dynteaser"
indexinfo[].command[].indexname "ew"
diff --git a/config-model/src/test/derived/position_array/index-info.cfg b/config-model/src/test/derived/position_array/index-info.cfg
index 0230db3fa2b..e4b9ebb5008 100644
--- a/config-model/src/test/derived/position_array/index-info.cfg
+++ b/config-model/src/test/derived/position_array/index-info.cfg
@@ -17,14 +17,6 @@ indexinfo[].command[].indexname "pos"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "pos"
indexinfo[].command[].command "multivalue"
-indexinfo[].command[].indexname "pos.distance"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos.distance"
-indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "pos.position"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos.position"
-indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "pos_zcurve"
@@ -33,7 +25,3 @@ indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "attribute"
indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "fast-search"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/position_attribute/index-info.cfg b/config-model/src/test/derived/position_attribute/index-info.cfg
index db61a7fff4f..75a8ada6193 100644
--- a/config-model/src/test/derived/position_attribute/index-info.cfg
+++ b/config-model/src/test/derived/position_attribute/index-info.cfg
@@ -15,14 +15,6 @@ indexinfo[].command[].indexname "pos"
indexinfo[].command[].command "default-position"
indexinfo[].command[].indexname "pos"
indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos.distance"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos.distance"
-indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "pos.position"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos.position"
-indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "pos_zcurve"
@@ -31,7 +23,3 @@ indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "fast-search"
indexinfo[].command[].indexname "pos_zcurve"
indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/position_extra/index-info.cfg b/config-model/src/test/derived/position_extra/index-info.cfg
index f7afda263a1..945508f9518 100644
--- a/config-model/src/test/derived/position_extra/index-info.cfg
+++ b/config-model/src/test/derived/position_extra/index-info.cfg
@@ -9,14 +9,6 @@ indexinfo[].command[].indexname "pos_ext"
indexinfo[].command[].command "default-position"
indexinfo[].command[].indexname "pos_ext"
indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos_ext.distance"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos_ext.distance"
-indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "pos_ext.position"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "pos_ext.position"
-indexinfo[].command[].command "multivalue"
indexinfo[].command[].indexname "pos_ext_zcurve"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "pos_ext_zcurve"
@@ -25,7 +17,3 @@ indexinfo[].command[].indexname "pos_ext_zcurve"
indexinfo[].command[].command "fast-search"
indexinfo[].command[].indexname "pos_ext_zcurve"
indexinfo[].command[].command "numerical"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/predicate_attribute/index-info.cfg b/config-model/src/test/derived/predicate_attribute/index-info.cfg
index 7441ca98daa..3d9f57dd84b 100644
--- a/config-model/src/test/derived/predicate_attribute/index-info.cfg
+++ b/config-model/src/test/derived/predicate_attribute/index-info.cfg
@@ -9,7 +9,3 @@ indexinfo[].command[].indexname "some_predicate_field"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "some_predicate_field"
indexinfo[].command[].command "attribute"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/prefixexactattribute/documentmanager.cfg b/config-model/src/test/derived/prefixexactattribute/documentmanager.cfg
index adc1d7b3de6..060510c3578 100644
--- a/config-model/src/test/derived/prefixexactattribute/documentmanager.cfg
+++ b/config-model/src/test/derived/prefixexactattribute/documentmanager.cfg
@@ -34,12 +34,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "indexfield2"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -480519133
datatype[].structtype[].name "prefixexactattribute.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/prefixexactattribute/index-info.cfg b/config-model/src/test/derived/prefixexactattribute/index-info.cfg
index 7a84f0515f8..e0d5786ef13 100644
--- a/config-model/src/test/derived/prefixexactattribute/index-info.cfg
+++ b/config-model/src/test/derived/prefixexactattribute/index-info.cfg
@@ -37,7 +37,3 @@ indexinfo[].command[].indexname "indexfield2"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "indexfield2"
indexinfo[].command[].command "exact @"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/ranktypes/documentmanager.cfg b/config-model/src/test/derived/ranktypes/documentmanager.cfg
index cba480489b4..072a0fff126 100644
--- a/config-model/src/test/derived/ranktypes/documentmanager.cfg
+++ b/config-model/src/test/derived/ranktypes/documentmanager.cfg
@@ -34,12 +34,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "identity_literal"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id 1374506021
datatype[].structtype[].name "ranktypes.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/ranktypes/index-info.cfg b/config-model/src/test/derived/ranktypes/index-info.cfg
index 0b9777ca488..112183ceac5 100644
--- a/config-model/src/test/derived/ranktypes/index-info.cfg
+++ b/config-model/src/test/derived/ranktypes/index-info.cfg
@@ -51,7 +51,3 @@ indexinfo[].command[].indexname "identity_literal"
indexinfo[].command[].command "lowercase"
indexinfo[].command[].indexname "identity_literal"
indexinfo[].command[].command "plain-tokens"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/streamingstruct/documentmanager.cfg b/config-model/src/test/derived/streamingstruct/documentmanager.cfg
index 28690ba807f..2cd35c7bdfa 100644
--- a/config-model/src/test/derived/streamingstruct/documentmanager.cfg
+++ b/config-model/src/test/derived/streamingstruct/documentmanager.cfg
@@ -113,12 +113,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "anothersummaryfield"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "snippet"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].detailedtype ""
diff --git a/config-model/src/test/derived/structanyorder/documentmanager.cfg b/config-model/src/test/derived/structanyorder/documentmanager.cfg
index 21503c3ad76..c18b1cc11b0 100644
--- a/config-model/src/test/derived/structanyorder/documentmanager.cfg
+++ b/config-model/src/test/derived/structanyorder/documentmanager.cfg
@@ -69,12 +69,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "structarrayfield"
datatype[].structtype[].field[].datatype -1244829667
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -1503592268
datatype[].structtype[].name "annotationsimplicitstruct.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/structanyorder/index-info.cfg b/config-model/src/test/derived/structanyorder/index-info.cfg
index 9cd84d8578f..7a8b06bceec 100644
--- a/config-model/src/test/derived/structanyorder/index-info.cfg
+++ b/config-model/src/test/derived/structanyorder/index-info.cfg
@@ -241,7 +241,3 @@ indexinfo[].command[].indexname "structarrayfield"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "structarrayfield"
indexinfo[].command[].command "multivalue"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/derived/tensor/documenttypes.cfg b/config-model/src/test/derived/tensor/documenttypes.cfg
index 56818298eb8..3b85e9b3a58 100644
--- a/config-model/src/test/derived/tensor/documenttypes.cfg
+++ b/config-model/src/test/derived/tensor/documenttypes.cfg
@@ -40,16 +40,6 @@ documenttype[].datatype[].sstruct.field[].id 1224191509
documenttype[].datatype[].sstruct.field[].id_v6 1039544782
documenttype[].datatype[].sstruct.field[].datatype 21
documenttype[].datatype[].sstruct.field[].detailedtype "tensor(x[10],y[20])"
-documenttype[].datatype[].sstruct.field[].name "rankfeatures"
-documenttype[].datatype[].sstruct.field[].id 1883197392
-documenttype[].datatype[].sstruct.field[].id_v6 699950698
-documenttype[].datatype[].sstruct.field[].datatype 2
-documenttype[].datatype[].sstruct.field[].detailedtype ""
-documenttype[].datatype[].sstruct.field[].name "summaryfeatures"
-documenttype[].datatype[].sstruct.field[].id 1840337115
-documenttype[].datatype[].sstruct.field[].id_v6 1981648971
-documenttype[].datatype[].sstruct.field[].datatype 2
-documenttype[].datatype[].sstruct.field[].detailedtype ""
documenttype[].datatype[].id -1903234535
documenttype[].datatype[].type STRUCT
documenttype[].datatype[].array.element.id 0
diff --git a/config-model/src/test/derived/twostreamingstructs/documentmanager.cfg b/config-model/src/test/derived/twostreamingstructs/documentmanager.cfg
index f74b4e7b458..bb5bb001036 100644
--- a/config-model/src/test/derived/twostreamingstructs/documentmanager.cfg
+++ b/config-model/src/test/derived/twostreamingstructs/documentmanager.cfg
@@ -86,10 +86,6 @@ datatype[].structtype[].field[].name "g"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].name "anothersummaryfield"
datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].name "snippet"
datatype[].structtype[].field[].datatype 2
datatype[].structtype[].field[].name "snippet2"
@@ -143,10 +139,6 @@ datatype[].structtype[].compresstype NONE
datatype[].structtype[].compresslevel 0
datatype[].structtype[].compressthreshold 95
datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
datatype[].id -1417926544
datatype[].structtype[].name "whatever.body"
datatype[].structtype[].version 0
diff --git a/config-model/src/test/derived/types/documentmanager.cfg b/config-model/src/test/derived/types/documentmanager.cfg
index 0644659cae7..a4fcd4f49f6 100644
--- a/config-model/src/test/derived/types/documentmanager.cfg
+++ b/config-model/src/test/derived/types/documentmanager.cfg
@@ -209,12 +209,6 @@ datatype[].structtype[].field[].detailedtype ""
datatype[].structtype[].field[].name "other"
datatype[].structtype[].field[].datatype 4
datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "rankfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "summaryfeatures"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
datatype[].id -372512406
datatype[].maptype[].keytype 0
datatype[].maptype[].valtype 1707615575
diff --git a/config-model/src/test/derived/types/index-info.cfg b/config-model/src/test/derived/types/index-info.cfg
index d6b914e5b1b..6332316e7d0 100644
--- a/config-model/src/test/derived/types/index-info.cfg
+++ b/config-model/src/test/derived/types/index-info.cfg
@@ -439,7 +439,3 @@ indexinfo[].command[].indexname "pst_sta_boldingoff_nomatch_tag_01"
indexinfo[].command[].command "index"
indexinfo[].command[].indexname "pst_sta_boldingoff_nomatch_tag_01"
indexinfo[].command[].command "multivalue"
-indexinfo[].command[].indexname "rankfeatures"
-indexinfo[].command[].command "index"
-indexinfo[].command[].indexname "summaryfeatures"
-indexinfo[].command[].command "index"
diff --git a/config-model/src/test/examples/fieldoftypedocument.cfg b/config-model/src/test/examples/fieldoftypedocument.cfg
index d3dd0d06b66..b7bb444ec93 100644
--- a/config-model/src/test/examples/fieldoftypedocument.cfg
+++ b/config-model/src/test/examples/fieldoftypedocument.cfg
@@ -22,12 +22,6 @@ datatype[1].structtype[0].compressminsize 800
datatype[1].structtype[0].field[0].name "soundtrack"
datatype[1].structtype[0].field[0].datatype 1412693671
datatype[1].structtype[0].field[0].detailedtype ""
-datatype[1].structtype[0].field[1].name "rankfeatures"
-datatype[1].structtype[0].field[1].datatype 2
-datatype[1].structtype[0].field[1].detailedtype ""
-datatype[1].structtype[0].field[2].name "summaryfeatures"
-datatype[1].structtype[0].field[2].datatype 2
-datatype[1].structtype[0].field[2].detailedtype ""
datatype[2].id -820813431
datatype[2].structtype[0].name "book.body"
datatype[2].structtype[0].version 0
@@ -59,12 +53,6 @@ datatype[4].structtype[0].field[1].detailedtype ""
datatype[4].structtype[0].field[2].name "longfield"
datatype[4].structtype[0].field[2].datatype 4
datatype[4].structtype[0].field[2].detailedtype ""
-datatype[4].structtype[0].field[3].name "rankfeatures"
-datatype[4].structtype[0].field[3].datatype 2
-datatype[4].structtype[0].field[3].detailedtype ""
-datatype[4].structtype[0].field[4].name "summaryfeatures"
-datatype[4].structtype[0].field[4].datatype 2
-datatype[4].structtype[0].field[4].detailedtype ""
datatype[5].id 993120973
datatype[5].structtype[0].name "music.body"
datatype[5].structtype[0].version 0
diff --git a/config-model/src/test/examples/structresult.cfg b/config-model/src/test/examples/structresult.cfg
index 075ac54b983..eff48f18914 100755
--- a/config-model/src/test/examples/structresult.cfg
+++ b/config-model/src/test/examples/structresult.cfg
@@ -54,12 +54,6 @@ datatype[4].structtype[0].field[1].detailedtype ""
datatype[4].structtype[0].field[2].name "advanced"
datatype[4].structtype[0].field[2].datatype 93505813
datatype[4].structtype[0].field[2].detailedtype ""
-datatype[4].structtype[0].field[3].name "rankfeatures"
-datatype[4].structtype[0].field[3].datatype 2
-datatype[4].structtype[0].field[3].detailedtype ""
-datatype[4].structtype[0].field[4].name "summaryfeatures"
-datatype[4].structtype[0].field[4].datatype 2
-datatype[4].structtype[0].field[4].detailedtype ""
datatype[5].id 993120973
datatype[5].structtype[0].name "music.body"
datatype[5].structtype[0].version 0
diff --git a/config-model/src/test/integration/vespa/models/example.model b/config-model/src/test/integration/vespa/models/example.model
index 9579be4e44c..e9725d14923 100644
--- a/config-model/src/test/integration/vespa/models/example.model
+++ b/config-model/src/test/integration/vespa/models/example.model
@@ -19,7 +19,7 @@ model example {
}
function foo2() {
- expression: max(sum(input1 * input2, name) * constant1asLarge, x) * constant2
+ expression: max(sum(input1 * input2, name) * constant(constant1asLarge), x) * constant2
}
}
\ No newline at end of file
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index af31a09101e..82841b52984 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -16,7 +16,6 @@ import com.yahoo.container.core.ApplicationMetadataConfig;
import com.yahoo.search.config.QrStartConfig;
import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.config.search.core.ProtonConfig;
-import com.yahoo.vespa.model.AbstractService;
import com.yahoo.vespa.model.HostResource;
import com.yahoo.vespa.model.HostSystem;
import com.yahoo.vespa.model.VespaModel;
@@ -51,9 +50,6 @@ import java.util.stream.Collectors;
import static com.yahoo.config.model.test.TestUtil.joinLines;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.collection.IsIn.isIn;
-import static org.hamcrest.core.Every.everyItem;
-import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
@@ -762,42 +758,6 @@ public class ModelProvisioningTest {
assertEquals("Included in addition because it is retired", "default03", model.getAdmin().getSlobroks().get(5).getHostName());
}
- @Test
- public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
- String services =
- "<?xml version='1.0' encoding='utf-8' ?>\n" +
- "<services>" +
- " <admin version='4.0'/>" +
- " <container version='1.0' id='routing'>" +
- " <nodes count='10'/>" +
- " </container>" +
- " <container version='1.0' id='node-admin'>" +
- " <nodes count='3'/>" +
- " </container>" +
- "</services>";
-
- int numberOfHosts = 13;
- VespaModelTester tester = new VespaModelTester();
- tester.addHosts(numberOfHosts);
- tester.setApplicationId("hosted-vespa", "routing", "default");
- VespaModel model = tester.createModel(services, true);
- assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
-
- Set<String> routingHosts = getClusterHostnames(model, "routing");
- assertEquals(10, routingHosts.size());
-
- Set<String> nodeAdminHosts = getClusterHostnames(model, "node-admin");
- assertEquals(3, nodeAdminHosts.size());
-
- Set<String> slobrokHosts = model.getAdmin().getSlobroks().stream()
- .map(AbstractService::getHostName)
- .collect(Collectors.toSet());
- assertEquals(3, slobrokHosts.size());
-
- assertThat(slobrokHosts, everyItem(isIn(routingHosts)));
- assertThat(slobrokHosts, everyItem(not(isIn(nodeAdminHosts))));
- }
-
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
return model.getHosts().stream()
.filter(host -> host.getServices().stream()
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java
index 3331bf4cdba..9942b563297 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/FieldOfTypeDocumentTestCase.java
@@ -33,7 +33,7 @@ public class FieldOfTypeDocumentTestCase extends SearchDefinitionTestCase {
DocumentType musicType = manager.getDocumentType("music");
- assertEquals(5, musicType.getFieldCount());
+ assertEquals(3, musicType.getFieldCount());
Field intField = musicType.getField("intfield");
assertEquals(DataType.INT, intField.getDataType());
@@ -41,21 +41,13 @@ public class FieldOfTypeDocumentTestCase extends SearchDefinitionTestCase {
assertEquals(DataType.STRING, stringField.getDataType());
Field longField = musicType.getField("longfield");
assertEquals(DataType.LONG, longField.getDataType());
- Field summaryfeatures = musicType.getField("summaryfeatures");
- assertEquals(DataType.STRING, summaryfeatures.getDataType());
- Field rankfeatures = musicType.getField("rankfeatures");
- assertEquals(DataType.STRING, rankfeatures.getDataType());
DocumentType bookType = manager.getDocumentType("book");
- assertEquals(3, bookType.getFieldCount());
+ assertEquals(1, bookType.getFieldCount());
Field musicField = bookType.getField("soundtrack");
assertSame(musicType, musicField.getDataType());
- summaryfeatures = musicType.getField("summaryfeatures");
- assertEquals(DataType.STRING, summaryfeatures.getDataType());
- rankfeatures = musicType.getField("rankfeatures");
- assertEquals(DataType.STRING, rankfeatures.getDataType());
}
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionLoopDetectionTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionLoopDetectionTestCase.java
index 0ff8a5cc7ca..9a0dcc7dd07 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionLoopDetectionTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/RankingExpressionLoopDetectionTestCase.java
@@ -40,7 +40,7 @@ public class RankingExpressionLoopDetectionTestCase {
fail("Excepted exception");
}
catch (IllegalArgumentException e) {
- assertEquals("In search definition 'test', rank profile 'test': The function 'foo' is invalid: Invocation loop: foo -> foo",
+ assertEquals("In search definition 'test', rank profile 'test': The function 'foo' is invalid: foo is invalid: Invocation loop: foo -> foo",
Exceptions.toMessageString(e));
}
}
@@ -75,7 +75,7 @@ public class RankingExpressionLoopDetectionTestCase {
fail("Excepted exception");
}
catch (IllegalArgumentException e) {
- assertEquals("In search definition 'test', rank profile 'test': The function 'foo' is invalid: Invocation loop: arg(5) -> foo -> arg(5)",
+ assertEquals("In search definition 'test', rank profile 'test': The function 'foo' is invalid: arg(5) is invalid: foo is invalid: arg(5) is invalid: Invocation loop: arg(5) -> foo -> arg(5)",
Exceptions.toMessageString(e));
}
}
@@ -110,7 +110,7 @@ public class RankingExpressionLoopDetectionTestCase {
fail("Excepted exception");
}
catch (IllegalArgumentException e) {
- assertEquals("In search definition 'test', rank profile 'test': The function 'foo' is invalid: Invocation loop: arg(foo) -> foo -> arg(foo)",
+ assertEquals("In search definition 'test', rank profile 'test': The function 'foo' is invalid: arg(foo) is invalid: a1 is invalid: foo is invalid: arg(foo) is invalid: Invocation loop: arg(foo) -> foo -> arg(foo)",
Exceptions.toMessageString(e));
}
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java
index 89e551fa789..a94c8e76684 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java
@@ -45,7 +45,7 @@ public class SearchImporterTestCase extends SearchDefinitionTestCase {
SDDocumentType document = search.getDocument();
assertEquals("simple", document.getName());
- assertEquals(25, document.getFieldCount());
+ assertEquals(23, document.getFieldCount());
SDField field;
Attribute attribute;
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java
index ef99ec28686..7fbca88cb61 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/AbstractExportingTestCase.java
@@ -3,6 +3,7 @@ package com.yahoo.searchdefinition.derived;
import com.yahoo.document.DocumenttypesConfig;
import com.yahoo.document.config.DocumentmanagerConfig;
+import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.searchdefinition.Search;
import com.yahoo.searchdefinition.SearchBuilder;
import com.yahoo.searchdefinition.SearchDefinitionTestCase;
@@ -29,11 +30,10 @@ public abstract class AbstractExportingTestCase extends SearchDefinitionTestCase
deleteContent(toDir);
SearchBuilder builder = SearchBuilder.createFromDirectory(searchDefRoot + dirName + "/");
- //SearchBuilder builder = SearchBuilder.createFromFile(searchDefDir + name + ".sd");
return derive(dirName, searchDefinitionName, builder);
}
- protected DerivedConfiguration derive(String dirName, String searchDefinitionName, SearchBuilder builder) throws IOException {
+ private DerivedConfiguration derive(String dirName, String searchDefinitionName, SearchBuilder builder) throws IOException {
DerivedConfiguration config = new DerivedConfiguration(builder.getSearch(searchDefinitionName),
builder.getRankProfileRegistry(),
builder.getQueryProfileRegistry(),
@@ -85,14 +85,14 @@ public abstract class AbstractExportingTestCase extends SearchDefinitionTestCase
* Asserts config is correctly derived given a builder.
* This will fail if the builder contains multiple search definitions.
*/
- protected DerivedConfiguration assertCorrectDeriving(SearchBuilder builder, String dirName) throws IOException, ParseException {
+ protected DerivedConfiguration assertCorrectDeriving(SearchBuilder builder, String dirName) throws IOException {
builder.build();
DerivedConfiguration derived = derive(dirName, null, builder);
assertCorrectConfigFiles(dirName);
return derived;
}
- protected DerivedConfiguration assertCorrectDeriving(SearchBuilder builder, Search search, String name) throws IOException, ParseException {
+ protected DerivedConfiguration assertCorrectDeriving(SearchBuilder builder, Search search, String name) throws IOException {
DerivedConfiguration derived = derive(name, builder, search);
assertCorrectConfigFiles(name);
return derived;
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/NeuralNetTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NeuralNetTestCase.java
new file mode 100644
index 00000000000..b299c7fa299
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NeuralNetTestCase.java
@@ -0,0 +1,16 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.searchdefinition.derived;
+
+import com.yahoo.searchdefinition.parser.ParseException;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class NeuralNetTestCase extends AbstractExportingTestCase {
+
+ @Test
+ public void testNeuralNet() throws IOException, ParseException {
+ assertCorrectDeriving("neuralnet");
+ }
+
+}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitSearchFieldsTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitSearchFieldsTestCase.java
index b66105009cd..52c36ca240c 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitSearchFieldsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitSearchFieldsTestCase.java
@@ -25,11 +25,9 @@ public class ImplicitSearchFieldsTestCase extends SearchDefinitionTestCase {
SDDocumentType docType = search.getDocument();
assertNotNull(docType);
- assertNotNull(docType.getField("rankfeatures"));
- assertNotNull(docType.getField("summaryfeatures"));
assertNotNull(docType.getField("foo"));
assertNotNull(docType.getField("bar"));
- assertEquals(4, docType.getFieldCount());
+ assertEquals(2, docType.getFieldCount());
}
@Test
@@ -39,12 +37,10 @@ public class ImplicitSearchFieldsTestCase extends SearchDefinitionTestCase {
SDDocumentType docType = search.getDocument();
assertNotNull(docType);
- assertNotNull(docType.getField("rankfeatures"));
- assertNotNull(docType.getField("summaryfeatures"));
assertNotNull(docType.getField("foo"));
assertNotNull(docType.getField("bar"));
assertNotNull(docType.getField("cox"));
- assertEquals(5, docType.getFieldCount());
+ assertEquals(3, docType.getFieldCount());
}
@Test
@@ -54,13 +50,11 @@ public class ImplicitSearchFieldsTestCase extends SearchDefinitionTestCase {
SDDocumentType docType = search.getDocument();
assertNotNull(docType);
- assertNotNull(docType.getField("rankfeatures"));
- assertNotNull(docType.getField("summaryfeatures"));
assertNotNull(docType.getField("foo"));
assertNotNull(docType.getField("bar"));
assertNotNull(docType.getField("baz"));
assertNotNull(docType.getField("cox"));
- assertEquals(6, docType.getFieldCount());
+ assertEquals(4, docType.getFieldCount());
}
@Test
@@ -70,12 +64,10 @@ public class ImplicitSearchFieldsTestCase extends SearchDefinitionTestCase {
SDDocumentType docType = search.getDocument();
assertNotNull(docType);
- assertNotNull(docType.getField("rankfeatures"));
- assertNotNull(docType.getField("summaryfeatures"));
assertNotNull(docType.getField("foo"));
assertNotNull(docType.getField("bar"));
assertNotNull(docType.getField("baz"));
- assertEquals(5, docType.getFieldCount());
+ assertEquals(3, docType.getFieldCount());
}
@Test
@@ -85,11 +77,9 @@ public class ImplicitSearchFieldsTestCase extends SearchDefinitionTestCase {
SDDocumentType docType = search.getDocument();
assertNotNull(docType);
- assertNotNull(docType.getField("rankfeatures"));
- assertNotNull(docType.getField("summaryfeatures"));
assertNotNull(docType.getField("foo"));
assertNotNull(docType.getField("bar"));
- assertEquals(4, docType.getFieldCount());
+ assertEquals(2, docType.getFieldCount());
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitStructTypesTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitStructTypesTestCase.java
index 88c85452cb3..f2d81414b5a 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitStructTypesTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ImplicitStructTypesTestCase.java
@@ -35,8 +35,6 @@ public class ImplicitStructTypesTestCase extends SearchDefinitionTestCase {
assertField(docType, "doc_str_sum", DataType.STRING);
assertField(docType, "doc_uri", DataType.URI);
assertField(docType, "docsum_str", DataType.STRING);
- assertField(docType, "rankfeatures", DataType.STRING);
- assertField(docType, "summaryfeatures", DataType.STRING);
}
@SuppressWarnings({ "UnusedDeclaration" })
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
index 1b917b6f3a3..3b3ce712387 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionTypeResolverTestCase.java
@@ -103,7 +103,9 @@ public class RankingExpressionTypeResolverTestCase {
fail("Expected exception");
}
catch (IllegalArgumentException expected) {
- assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[],y[]) while the 'false' type is tensor(z[10])",
+ assertEquals("In search definition 'test', rank profile 'my_rank_profile': The first-phase expression is invalid: An if expression must produce compatible types in both alternatives, but the 'true' type is tensor(x[],y[]) while the 'false' type is tensor(z[10])" +
+ "\n'true' branch: attribute(a)" +
+ "\n'false' branch: attribute(b)",
Exceptions.toMessageString(expected));
}
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java
index 80440ac8eb4..1b03825eef1 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTensorTestCase.java
@@ -2,9 +2,10 @@
package com.yahoo.searchdefinition.processing;
import com.yahoo.searchdefinition.parser.ParseException;
-import org.junit.Rule;
import org.junit.Test;
-import org.junit.rules.ExpectedException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
/**
* @author geirst
@@ -138,23 +139,29 @@ public class RankingExpressionWithTensorTestCase {
f.assertRankProperty("tensor(x{})", "constant(my_tensor).type", "my_profile");
}
- @Rule
- public ExpectedException exception = ExpectedException.none();
-
@Test
public void requireThatInvalidTensorTypeSpecThrowsException() throws ParseException {
- exception.expect(IllegalArgumentException.class);
- exception.expectMessage("For constant tensor 'my_tensor' in rank profile 'my_profile': Illegal tensor type spec: A tensor type spec must be on the form tensor[<valuetype>]?(dimensionidentifier[{}|[length?]*), but was 'tensor(x)'. Dimension 'x' is on the wrong format. Examples: tensor(x[]), tensor<float>(name{}, x[10])");
- RankProfileSearchFixture f = new RankProfileSearchFixture(
- " rank-profile my_profile {\n" +
- " constants {\n" +
- " my_tensor {\n" +
- " value: { {x:1}:1 }\n" +
- " type: tensor(x)\n" +
- " }\n" +
- " }\n" +
- " }");
- f.compileRankProfile("my_profile");
+ try {
+ RankProfileSearchFixture f = new RankProfileSearchFixture(
+ " rank-profile my_profile {\n" +
+ " constants {\n" +
+ " my_tensor {\n" +
+ " value: { {x:1}:1 }\n" +
+ " type: tensor(x)\n" +
+ " }\n" +
+ " }\n" +
+ " }");
+ f.compileRankProfile("my_profile");
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertStartsWith("For constant tensor 'my_tensor' in rank profile 'my_profile': Illegal tensor type spec",
+ e.getMessage());
+ }
+ }
+
+ private void assertStartsWith(String prefix, String string) {
+ assertEquals(prefix, string.substring(0, Math.min(prefix.length(), string.length())));
}
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
index d4fcd09e249..1a7eb96483e 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
@@ -23,8 +23,7 @@ public class RankingExpressionsTestCase extends SearchDefinitionTestCase {
public void testFunctions() throws IOException, ParseException {
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
Search search = SearchBuilder.createFromDirectory("src/test/examples/rankingexpressionfunction",
- rankProfileRegistry,
- new QueryProfileRegistry()).getSearch();
+ rankProfileRegistry).getSearch();
RankProfile functionsRankProfile = rankProfileRegistry.get(search, "macros");
Map<String, RankProfile.RankingExpressionFunction> functions = functionsRankProfile.getFunctions();
assertEquals(2, functions.get("titlematch$").function().arguments().size());
@@ -62,9 +61,7 @@ public class RankingExpressionsTestCase extends SearchDefinitionTestCase {
@Test(expected = IllegalArgumentException.class)
public void testThatIncludingFileInSubdirFails() throws IOException, ParseException {
RankProfileRegistry registry = new RankProfileRegistry();
- Search search = SearchBuilder.createFromDirectory("src/test/examples/rankingexpressioninfile",
- registry,
- new QueryProfileRegistry()).getSearch();
+ Search search = SearchBuilder.createFromDirectory("src/test/examples/rankingexpressioninfile", registry).getSearch();
new DerivedConfiguration(search, registry, new QueryProfileRegistry(), new ImportedMlModels()); // rank profile parsing happens during deriving
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java
index f53ca15635f..b6569357495 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorFieldTestCase.java
@@ -3,48 +3,68 @@ package com.yahoo.searchdefinition.processing;
import com.yahoo.searchdefinition.SearchBuilder;
import com.yahoo.searchdefinition.parser.ParseException;
-import org.junit.Rule;
import org.junit.Test;
-import org.junit.rules.ExpectedException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
/**
* @author geirst
*/
public class TensorFieldTestCase {
- @Rule
- public ExpectedException exception = ExpectedException.none();
-
@Test
public void requireThatTensorFieldCannotBeOfCollectionType() throws ParseException {
- exception.expect(IllegalArgumentException.class);
- exception.expectMessage("For search 'test', field 'f1': A field with collection type of tensor is not supported. Use simple type 'tensor' instead.");
- SearchBuilder.createFromString(getSd("field f1 type array<tensor(x{})> {}"));
+ try {
+ SearchBuilder.createFromString(getSd("field f1 type array<tensor(x{})> {}"));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertEquals("For search 'test', field 'f1': A field with collection type of tensor is not supported. Use simple type 'tensor' instead.",
+ e.getMessage());
+ }
}
@Test
public void requireThatTensorFieldCannotBeIndexField() throws ParseException {
- exception.expect(IllegalArgumentException.class);
- exception.expectMessage("For search 'test', field 'f1': A field of type 'tensor' cannot be specified as an 'index' field.");
- SearchBuilder.createFromString(getSd("field f1 type tensor(x{}) { indexing: index }"));
+ try {
+ SearchBuilder.createFromString(getSd("field f1 type tensor(x{}) { indexing: index }"));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertEquals("For search 'test', field 'f1': A field of type 'tensor' cannot be specified as an 'index' field.",
+ e.getMessage());
+ }
}
@Test
public void requireThatTensorAttributeCannotBeFastSearch() throws ParseException {
- exception.expect(IllegalArgumentException.class);
- exception.expectMessage("For search 'test', field 'f1': An attribute of type 'tensor' cannot be 'fast-search'.");
- SearchBuilder.createFromString(getSd("field f1 type tensor(x{}) { indexing: attribute \n attribute: fast-search }"));
+ try {
+ SearchBuilder.createFromString(getSd("field f1 type tensor(x{}) { indexing: attribute \n attribute: fast-search }"));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertEquals("For search 'test', field 'f1': An attribute of type 'tensor' cannot be 'fast-search'.", e.getMessage());
+ }
}
@Test
public void requireThatIllegalTensorTypeSpecThrowsException() throws ParseException {
- exception.expect(IllegalArgumentException.class);
- exception.expectMessage("Field type: Illegal tensor type spec: A tensor type spec must be on the form tensor[<valuetype>]?(dimensionidentifier[{}|[length?]*), but was 'tensor(invalid)'. Dimension 'invalid' is on the wrong format. Examples: tensor(x[]), tensor<float>(name{}, x[10])");
- SearchBuilder.createFromString(getSd("field f1 type tensor(invalid) { indexing: attribute }"));
+ try {
+ SearchBuilder.createFromString(getSd("field f1 type tensor(invalid) { indexing: attribute }"));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertStartsWith("Field type: Illegal tensor type spec:", e.getMessage());
+ }
}
private static String getSd(String field) {
return "search test {\n document test {\n" + field + "}\n}\n";
}
+ private void assertStartsWith(String prefix, String string) {
+ assertEquals(prefix, string.substring(0, Math.min(prefix.length(), string.length())));
+ }
+
}
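The test changes above replace JUnit's ExpectedException rule with an explicit try/fail/catch block plus a prefix assertion on the exception message. A minimal, self-contained sketch of that pattern, assuming plain JUnit 4 and a hypothetical parse method standing in for SearchBuilder.createFromString:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import org.junit.Test;

public class ExpectedFailurePatternSketch {

    // Hypothetical method under test; stands in for the builders exercised in the diffed tests.
    private static void parse(String input) {
        if (input.isEmpty())
            throw new IllegalArgumentException("Input must not be empty: details vary by build");
    }

    @Test
    public void invalidInputThrowsWithExpectedPrefix() {
        try {
            parse("");
            fail("Expected exception");
        }
        catch (IllegalArgumentException e) {
            // Only the stable prefix is asserted, so a volatile message suffix does not break the test.
            assertStartsWith("Input must not be empty", e.getMessage());
        }
    }

    // Same helper shape as added in the diffed test cases: compare only the prefix of the actual message.
    private static void assertStartsWith(String prefix, String string) {
        assertEquals(prefix, string.substring(0, Math.min(prefix.length(), string.length())));
    }

}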
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorTransformTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorTransformTestCase.java
index fe150b51961..15c1d24ce33 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorTransformTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/TensorTransformTestCase.java
@@ -58,8 +58,8 @@ public class TensorTransformTestCase extends SearchDefinitionTestCase {
"max(attribute(tensor_field_1),x)");
assertTransformedExpression("1+reduce(attribute(tensor_field_1),max,x)",
"1 + max(attribute(tensor_field_1),x)");
- assertTransformedExpression("if(attribute(double_field),1+reduce(attribute(tensor_field_1),max,x),attribute(tensor_field_1))",
- "if(attribute(double_field),1 + max(attribute(tensor_field_1),x),attribute(tensor_field_1))");
+ assertTransformedExpression("if(attribute(double_field),1+reduce(attribute(tensor_field_1),max,x),reduce(attribute(tensor_field_1),sum,x))",
+ "if(attribute(double_field),1 + max(attribute(tensor_field_1),x),reduce(attribute(tensor_field_1), sum, x))");
assertTransformedExpression("reduce(max(attribute(tensor_field_1),attribute(tensor_field_2)),max,x)",
"max(max(attribute(tensor_field_1),attribute(tensor_field_2)),x)");
assertTransformedExpression("reduce(if(attribute(double_field),attribute(tensor_field_2),attribute(tensor_field_2)),max,x)",
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
index a10a5dcf4cc..d2bf4b601a6 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.model.test.VespaModelTester;
import org.junit.Test;
import static com.yahoo.config.model.api.container.ContainerServiceType.METRICS_PROXY_CONTAINER;
+import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.CLUSTER_CONFIG_ID;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.CONTAINER_CONFIG_ID;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.MY_FLAVOR;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getHostedModel;
@@ -101,7 +102,9 @@ public class MetricsProxyContainerTest {
public void hosted_application_propagates_node_dimensions() {
String services = servicesWithContent();
VespaModel hostedModel = getHostedModel(services);
- NodeDimensionsConfig config = getNodeDimensionsConfig(hostedModel);
+ assertEquals(1, hostedModel.getHosts().size());
+ String configId = CLUSTER_CONFIG_ID + "/" + hostedModel.getHosts().iterator().next().getHostname();
+ NodeDimensionsConfig config = getNodeDimensionsConfig(hostedModel, configId);
assertEquals("content", config.dimensions(NodeDimensionNames.CLUSTER_TYPE));
assertEquals("my-content", config.dimensions(NodeDimensionNames.CLUSTER_ID));
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
index 81b06e54585..13589c763e2 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
@@ -32,7 +32,7 @@ class MetricsProxyModelTester {
static final String CLUSTER_CONFIG_ID = "admin/metrics";
// Used for all configs that are produced by the container, not the cluster.
- static final String CONTAINER_CONFIG_ID = CLUSTER_CONFIG_ID + "/metricsproxy.0";
+ static final String CONTAINER_CONFIG_ID = CLUSTER_CONFIG_ID + "/localhost";
static VespaModel getModel(String servicesXml) {
var numberOfHosts = 1;
@@ -87,8 +87,8 @@ class MetricsProxyModelTester {
return new QrStartConfig((QrStartConfig.Builder) model.getConfig(new QrStartConfig.Builder(), CLUSTER_CONFIG_ID));
}
- static NodeDimensionsConfig getNodeDimensionsConfig(VespaModel model) {
- return new NodeDimensionsConfig((NodeDimensionsConfig.Builder) model.getConfig(new NodeDimensionsConfig.Builder(), CONTAINER_CONFIG_ID));
+ static NodeDimensionsConfig getNodeDimensionsConfig(VespaModel model, String configId) {
+ return new NodeDimensionsConfig((NodeDimensionsConfig.Builder) model.getConfig(new NodeDimensionsConfig.Builder(), configId));
}
static VespaServicesConfig getVespaServicesConfig(String servicesXml) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java
index 3fbfbf33fb3..6d9eabf326b 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java
@@ -61,7 +61,7 @@ public class ConfigValueChangeValidatorTest {
assertEquals(3, changes.size());
assertComponentsEquals(changes, "default/container.0", 0);
assertComponentsEquals(changes, "admin/cluster-controllers/0", 1);
- assertComponentsEquals(changes, "admin/metrics/metricsproxy.0", 2);
+ assertComponentsEquals(changes, "admin/metrics/localhost", 2);
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfileVariantsTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfileVariantsTestCase.java
index 5833bc79ebf..5e559b64bd1 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfileVariantsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfileVariantsTestCase.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container.search.test;
+import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.search.query.profile.QueryProfile;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.search.query.profile.config.QueryProfileXMLReader;
@@ -8,6 +9,7 @@ import com.yahoo.vespa.model.container.search.QueryProfiles;
import org.junit.Test;
import java.io.IOException;
+import java.util.logging.Level;
import static helpers.CompareConfigTestHelper.assertSerializedConfigFileEquals;
import static org.junit.Assert.assertEquals;
@@ -22,28 +24,28 @@ public class QueryProfileVariantsTestCase {
@Test
public void testConfigCreation() throws IOException {
QueryProfileRegistry registry = new QueryProfileXMLReader().read(root + "queryprofilevariants");
- QueryProfiles profiles = new QueryProfiles(registry);
+ QueryProfiles profiles = new QueryProfiles(registry, new SilentDeployLogger());
assertSerializedConfigFileEquals(root + "query-profile-variants-configuration.cfg", profiles.getConfig().toString());
}
@Test
public void testConfigCreation2() throws IOException {
QueryProfileRegistry registry = new QueryProfileXMLReader().read("src/test/java/com/yahoo/vespa/model/container/search/test/queryprofilevariants2");
- QueryProfiles profiles = new QueryProfiles(registry);
+ QueryProfiles profiles = new QueryProfiles(registry, new SilentDeployLogger());
assertSerializedConfigFileEquals(root + "query-profile-variants2-configuration.cfg", profiles.getConfig().toString());
}
@Test
public void testConfigCreationNewsBESimple() throws IOException {
QueryProfileRegistry registry = new QueryProfileXMLReader().read(root + "newsbesimple");
- QueryProfiles profiles = new QueryProfiles(registry);
+ QueryProfiles profiles = new QueryProfiles(registry, new SilentDeployLogger());
assertSerializedConfigFileEquals(root + "newsbe-query-profiles-simple.cfg", profiles.getConfig().toString());
}
@Test
public void testConfigCreationNewsFESimple() throws IOException {
QueryProfileRegistry registry = new QueryProfileXMLReader().read(root + "newsfesimple");
- QueryProfiles profiles = new QueryProfiles(registry);
+ QueryProfiles profiles = new QueryProfiles(registry, new SilentDeployLogger());
assertSerializedConfigFileEquals(root + "newsfe-query-profiles-simple.cfg", profiles.getConfig().toString());
}
@@ -63,7 +65,7 @@ public class QueryProfileVariantsTestCase {
registry.register(a1);
registry.register(profile);
- QueryProfiles profiles = new QueryProfiles(registry);
+ QueryProfiles profiles = new QueryProfiles(registry, new SilentDeployLogger());
assertSerializedConfigFileEquals(root + "variants-of-explicit-compound.cfg", profiles.getConfig().toString());
}
@@ -88,7 +90,7 @@ public class QueryProfileVariantsTestCase {
registry.register(a2);
registry.register(profile);
- QueryProfiles profiles = new QueryProfiles(registry);
+ QueryProfiles profiles = new QueryProfiles(registry, new SilentDeployLogger());
assertSerializedConfigFileEquals(root + "variants-of-explicit-compound-with-reference.cfg", profiles.getConfig().toString());
}
@@ -108,8 +110,15 @@ public class QueryProfileVariantsTestCase {
registry.register(a1);
registry.register(profile);
- QueryProfiles profiles = new QueryProfiles(registry);
+ QueryProfiles profiles = new QueryProfiles(registry, new SilentDeployLogger());
assertSerializedConfigFileEquals(root + "explicit-reference-override.cfg", profiles.getConfig().toString());
}
+ private static class SilentDeployLogger implements DeployLogger {
+
+ @Override
+ public void log(Level level, String message) {}
+
+ }
+
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfilesTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfilesTestCase.java
index 8c725ecc43c..746e771667f 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfilesTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/QueryProfilesTestCase.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.model.container.search.test;
import com.yahoo.component.ComponentId;
+import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.search.query.profile.QueryProfile;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry;
@@ -11,12 +12,18 @@ import com.yahoo.search.query.profile.types.FieldDescription;
import com.yahoo.search.query.profile.types.FieldType;
import com.yahoo.search.query.profile.types.QueryProfileType;
import com.yahoo.search.query.profile.types.QueryProfileTypeRegistry;
+import com.yahoo.searchdefinition.SearchBuilder;
+import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.model.container.search.QueryProfiles;
+import com.yahoo.vespa.model.test.utils.DeployLoggerStub;
import org.junit.Test;
import java.io.IOException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import static helpers.CompareConfigTestHelper.assertSerializedConfigFileEquals;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@@ -112,15 +119,63 @@ public class QueryProfilesTestCase {
registry.register(untypedUser);
assertConfig("query-profiles.cfg",registry);
+
+ DeployLoggerStub logger = new DeployLoggerStub();
+ new QueryProfiles(registry, logger);
+ assertTrue(logger.entries.isEmpty());
+ }
+
+ @Test
+ public void testValidation() {
+ QueryProfileRegistry registry = new QueryProfileRegistry();
+ QueryProfileTypeRegistry typeRegistry = registry.getTypeRegistry();
+
+ QueryProfileType userType = new QueryProfileType("user");
+ typeRegistry.register(userType);
+
+ DeployLoggerStub logger = new DeployLoggerStub();
+ new QueryProfiles(registry, logger);
+ assertEquals(1, logger.entries.size());
+ assertEquals("This application define query profile types, but has no query profiles referencing them " +
+ "so they have no effect. " +
+ "See https://docs.vespa.ai/documentation/query-profiles.html",
+ logger.entries.get(0).message);
+ }
+
+ @Test
+ public void testValidationWithTensorFields() {
+ QueryProfileRegistry registry = new QueryProfileRegistry();
+ QueryProfileTypeRegistry typeRegistry = registry.getTypeRegistry();
+
+ QueryProfileType userType = new QueryProfileType("user");
+ userType.addField(new FieldDescription("vector", FieldType.fromString("tensor(x[5])", typeRegistry)));
+ userType.addField(new FieldDescription("matrix", FieldType.fromString("tensor(x[5],y[5])", typeRegistry)));
+ typeRegistry.register(userType);
+
+ DeployLoggerStub logger = new DeployLoggerStub();
+ new QueryProfiles(registry, logger);
+ assertEquals(1, logger.entries.size());
+ assertEquals("This application define query profile types, but has no query profiles referencing them " +
+ "so they have no effect. " +
+ "In particular, the tensors (vector, matrix) will be interpreted as strings, not tensors if sent in requests. " +
+ "See https://docs.vespa.ai/documentation/query-profiles.html",
+ logger.entries.get(0).message);
}
protected void assertConfig(String correctFileName, QueryProfileRegistry check) throws IOException {
assertSerializedConfigFileEquals(root + "/" + correctFileName,
- com.yahoo.text.StringUtilities.implodeMultiline(com.yahoo.config.ConfigInstance.serialize(new QueryProfiles(check).getConfig())));
+ com.yahoo.text.StringUtilities.implodeMultiline(com.yahoo.config.ConfigInstance.serialize(new QueryProfiles(check, new SilentDeployLogger()).getConfig())));
// Also assert that the correct config config can actually be read as a config source
QueryProfileConfigurer configurer = new QueryProfileConfigurer("file:" + root + "empty.cfg");
configurer.shutdown();
}
+ private static class SilentDeployLogger implements DeployLogger {
+
+ @Override
+ public void log(Level level, String message) {}
+
+ }
+
}
diff --git a/config-provisioning/abi-spec.json b/config-provisioning/abi-spec.json
index e88947b3fdb..18f4d317019 100644
--- a/config-provisioning/abi-spec.json
+++ b/config-provisioning/abi-spec.json
@@ -260,7 +260,6 @@
"public com.yahoo.component.Version vespaVersion()",
"public java.util.Optional group()",
"public boolean isExclusive()",
- "public java.util.Set rotations()",
"public com.yahoo.config.provision.ClusterSpec with(java.util.Optional)",
"public com.yahoo.config.provision.ClusterSpec exclusive(boolean)",
"public static com.yahoo.config.provision.ClusterSpec request(com.yahoo.config.provision.ClusterSpec$Type, com.yahoo.config.provision.ClusterSpec$Id, com.yahoo.component.Version, boolean)",
@@ -791,8 +790,7 @@
"public static final enum com.yahoo.config.provision.SystemName main",
"public static final enum com.yahoo.config.provision.SystemName Public",
"public static final enum com.yahoo.config.provision.SystemName PublicCd",
- "public static final enum com.yahoo.config.provision.SystemName dev",
- "public static final enum com.yahoo.config.provision.SystemName vaas"
+ "public static final enum com.yahoo.config.provision.SystemName dev"
]
},
"com.yahoo.config.provision.TenantName": {
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java
index c0099878b45..f041823bf04 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java
@@ -3,14 +3,9 @@ package com.yahoo.config.provision;
import com.yahoo.component.Version;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Set;
-import java.util.stream.Collectors;
-
/**
* A node's membership in a cluster. This is a value object.
- * The format is "clusterType/clusterId/groupId/index[/exclusive][/retired][/rotationId,...]"
+ * The format is "clusterType/clusterId/groupId/index[/exclusive][/retired]"
*
* @author bratseth
*/
@@ -25,26 +20,23 @@ public class ClusterMembership {
private ClusterMembership(String stringValue, Version vespaVersion) {
String[] components = stringValue.split("/");
- if (components.length < 4 || components.length > 7)
+ if (components.length < 4)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
- "Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/rotationId,...]'");
+ "Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive]'");
boolean exclusive = false;
- Set<RotationName> rotations = Collections.emptySet();
if (components.length > 4) {
for (int i = 4; i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive": exclusive = true; break;
case "retired": retired = true; break;
- default: rotations = rotationsFrom(component); break;
}
}
}
this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]),
- ClusterSpec.Group.from(Integer.valueOf(components[2])), vespaVersion, exclusive,
- rotations);
+ ClusterSpec.Group.from(Integer.valueOf(components[2])), vespaVersion, exclusive);
this.index = Integer.parseInt(components[3]);
this.stringValue = toStringValue();
}
@@ -62,8 +54,7 @@ public class ClusterMembership {
(cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
- ( retired ? "/retired" : "") +
- ( !cluster.rotations().isEmpty() ? "/" + rotationsAsString(cluster.rotations()) : "");
+ ( retired ? "/retired" : "");
}
@@ -121,12 +112,4 @@ public class ClusterMembership {
return new ClusterMembership(cluster, index, true);
}
- private static Set<RotationName> rotationsFrom(String s) {
- return Arrays.stream(s.split(",")).map(RotationName::from).collect(Collectors.toUnmodifiableSet());
- }
-
- private static String rotationsAsString(Set<RotationName> rotations) {
- return rotations.stream().map(RotationName::value).collect(Collectors.joining(","));
- }
-
}
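
For orientation, a minimal sketch (not part of this change) of how the trimmed membership string round-trips; the cluster id "search", group 0 and index 3 are made-up values:

    import com.yahoo.component.Vtag;
    import com.yahoo.config.provision.ClusterMembership;

    class ClusterMembershipSketch {
        public static void main(String[] args) {
            // Format after this change: clusterType/clusterId/groupId/index[/exclusive][/retired]
            ClusterMembership member =
                    ClusterMembership.from("container/search/0/3/exclusive/retired", Vtag.currentVersion);
            System.out.println(member.cluster().isExclusive()); // true
            System.out.println(member.retired());               // true
            System.out.println(member.index());                 // 3
            System.out.println(member.stringValue());           // container/search/0/3/exclusive/retired
        }
    }
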
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
index 35ee538178a..8ed56b98705 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.provision;
-import com.google.common.collect.ImmutableSortedSet;
import com.yahoo.component.Version;
import java.util.Objects;
@@ -23,19 +22,13 @@ public final class ClusterSpec {
private final Optional<Group> groupId;
private final Version vespaVersion;
private boolean exclusive;
- private final Set<RotationName> rotations;
- private ClusterSpec(Type type, Id id, Optional<Group> groupId, Version vespaVersion, boolean exclusive,
- Set<RotationName> rotations) {
- if (type != Type.container && !rotations.isEmpty()) {
- throw new IllegalArgumentException("Rotations can only be declared for clusters of type " + Type.container);
- }
+ private ClusterSpec(Type type, Id id, Optional<Group> groupId, Version vespaVersion, boolean exclusive) {
this.type = type;
this.id = id;
this.groupId = groupId;
this.vespaVersion = vespaVersion;
this.exclusive = exclusive;
- this.rotations = ImmutableSortedSet.copyOf(rotations);
}
/** Returns the cluster type */
@@ -57,35 +50,30 @@ public final class ClusterSpec {
*/
public boolean isExclusive() { return exclusive; }
- /** Returns the rotations of which this cluster should be a member */
- public Set<RotationName> rotations() {
- return rotations;
- }
-
public ClusterSpec with(Optional<Group> newGroup) {
- return new ClusterSpec(type, id, newGroup, vespaVersion, exclusive, rotations);
+ return new ClusterSpec(type, id, newGroup, vespaVersion, exclusive);
}
public ClusterSpec exclusive(boolean exclusive) {
- return new ClusterSpec(type, id, groupId, vespaVersion, exclusive, rotations);
+ return new ClusterSpec(type, id, groupId, vespaVersion, exclusive);
}
public static ClusterSpec request(Type type, Id id, Version vespaVersion, boolean exclusive) {
- return new ClusterSpec(type, id, Optional.empty(), vespaVersion, exclusive, Set.of());
+ return new ClusterSpec(type, id, Optional.empty(), vespaVersion, exclusive);
}
// TODO: Remove after June 2019
public static ClusterSpec request(Type type, Id id, Version vespaVersion, boolean exclusive, Set<RotationName> rotations) {
- return new ClusterSpec(type, id, Optional.empty(), vespaVersion, exclusive, rotations);
+ return new ClusterSpec(type, id, Optional.empty(), vespaVersion, exclusive);
}
public static ClusterSpec from(Type type, Id id, Group groupId, Version vespaVersion, boolean exclusive) {
- return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion, exclusive, Set.of());
+ return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion, exclusive);
}
// TODO: Remove after June 2019
public static ClusterSpec from(Type type, Id id, Group groupId, Version vespaVersion, boolean exclusive, Set<RotationName> rotations) {
- return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion, exclusive, rotations);
+ return new ClusterSpec(type, id, Optional.of(groupId), vespaVersion, exclusive);
}
@Override
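
A similarly small sketch of the surviving factory methods, which no longer take a rotation set (the cluster id "search" is made up):

    import com.yahoo.component.Vtag;
    import com.yahoo.config.provision.ClusterSpec;

    class ClusterSpecSketch {
        public static void main(String[] args) {
            ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.container,
                                                   ClusterSpec.Id.from("search"),
                                                   Vtag.currentVersion,
                                                   false);
            System.out.println(spec.isExclusive());                 // false
            System.out.println(spec.exclusive(true).isExclusive()); // true, returns a copy
        }
    }
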
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java b/config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java
index ba462b9eb64..0f6a87020da 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java
@@ -27,10 +27,7 @@ public enum SystemName {
PublicCd(true, true),
/** Local development system */
- dev(false, false),
-
- /** VaaS */
- vaas(true, true); // TODO: Remove this and use public everywhere
+ dev(false, false);
private final boolean isPublic;
private final boolean isCd;
@@ -51,7 +48,6 @@ public enum SystemName {
case "main": return main;
case "public": return Public;
case "publiccd": return PublicCd;
- case "vaas": return vaas;
default: throw new IllegalArgumentException(String.format("'%s' is not a valid system", value));
}
}
@@ -63,7 +59,6 @@ public enum SystemName {
case main: return "main";
case Public: return "public";
case PublicCd: return "publiccd";
- case vaas: return "vaas";
default : throw new IllegalStateException();
}
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java
index c6aaf05492e..5e664e00b4c 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java
@@ -18,17 +18,13 @@ import java.util.Objects;
public class ZoneId {
// TODO: Replace usages of environment + region with usages of this.
- // TODO: Remove static factory methods not specifying cloud and system
-
private final Environment environment;
private final RegionName region;
- private final CloudName cloud;
private final SystemName system;
private ZoneId(Environment environment, RegionName region, CloudName cloud, SystemName system) {
this.environment = Objects.requireNonNull(environment, "environment must be non-null");
this.region = Objects.requireNonNull(region, "region must be non-null");
- this.cloud = Objects.requireNonNull(cloud, "cloud must be non-null");
this.system = Objects.requireNonNull(system, "system must be non-null");
}
@@ -69,10 +65,6 @@ public class ZoneId {
return new ZoneId(environment, region, cloud, SystemName.defaultSystem());
}
- public static ZoneId from(String environment, String region, String cloud) {
- return new ZoneId(Environment.from(environment), RegionName.from(region), CloudName.from(cloud), SystemName.defaultSystem());
- }
-
public static ZoneId from(String environment, String region, String cloud, String system) {
return new ZoneId(Environment.from(environment), RegionName.from(region), CloudName.from(cloud), SystemName.from(system));
}
@@ -89,10 +81,6 @@ public class ZoneId {
return region;
}
- public CloudName cloud() {
- return cloud;
- }
-
public SystemName system() {
return system;
}
@@ -100,20 +88,14 @@ public class ZoneId {
/** Returns the serialised value of this. Inverse of {@code ZoneId.from(String value)}. */
public String value() {
return environment + "." + region;
- // TODO: Change to the below when there only methods use constructor including cloud and system are used and
- // all serialized values contain cloud and system
- // return cloud + "." + system + "." + environment + "." + region;
}
@Override
public String toString() {
- return "zone " + value() + " in " + cloud;
- // TODO: Use the below (need to fix some use of toString() in tests first)
- //return "zone " + cloud + "." + system + "." + environment + "." + region;
+ return value();
}
@Override
- // TODO: Update to check cloud and system when everyone use methods that specify cloud and system
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
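
A short sketch of the resulting ZoneId behaviour: the cloud is still accepted on construction but is no longer exposed or part of the serialized form (all values below are illustrative):

    import com.yahoo.config.provision.zone.ZoneId;

    class ZoneIdSketch {
        public static void main(String[] args) {
            ZoneId zone = ZoneId.from("prod", "us-north-1", "default", "main");
            System.out.println(zone.value());  // prod.us-north-1
            System.out.println(zone);          // toString() is now identical to value()
            System.out.println(zone.system()); // main
        }
    }
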
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
index 9bd0680b691..5eee55a1886 100644
--- a/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
@@ -6,7 +6,6 @@ import com.yahoo.component.Vtag;
import org.junit.Test;
import java.util.Collections;
-import java.util.Set;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -37,26 +36,25 @@ public class ClusterMembershipTest {
assertTrue(instance.cluster().isExclusive());
}
+ // TODO: Remove after June 2019. This ensures stale rotation data is handled
{
ClusterMembership instance = ClusterMembership.from("container/id1/4/37/rotation1,rotation2", Vtag.currentVersion);
assertFalse(instance.retired());
assertFalse(instance.cluster().isExclusive());
- assertEquals(Set.of(RotationName.from("rotation1"), RotationName.from("rotation2")), instance.cluster().rotations());
}
{
ClusterMembership instance = ClusterMembership.from("container/id1/4/37/exclusive/rotation1,rotation2", Vtag.currentVersion);
assertFalse(instance.retired());
assertTrue(instance.cluster().isExclusive());
- assertEquals(Set.of(RotationName.from("rotation1"), RotationName.from("rotation2")), instance.cluster().rotations());
}
{
ClusterMembership instance = ClusterMembership.from("container/id1/4/37/exclusive/retired/rotation1,rotation2", Vtag.currentVersion);
assertTrue(instance.retired());
assertTrue(instance.cluster().isExclusive());
- assertEquals(Set.of(RotationName.from("rotation1"), RotationName.from("rotation2")), instance.cluster().rotations());
}
+ // end TODO
}
@Test
@@ -101,7 +99,6 @@ public class ClusterMembershipTest {
assertFalse(instance.cluster().group().isPresent());
assertEquals(3, instance.index());
assertEquals("container/id1/3", instance.stringValue());
- assertTrue(instance.cluster().rotations().isEmpty());
}
private void assertContentService(ClusterMembership instance) {
@@ -111,7 +108,6 @@ public class ClusterMembershipTest {
assertEquals(37, instance.index());
assertFalse(instance.retired());
assertEquals("content/id1/37", instance.stringValue());
- assertTrue(instance.cluster().rotations().isEmpty());
}
private void assertContentServiceWithGroup(ClusterMembership instance) {
@@ -121,7 +117,6 @@ public class ClusterMembershipTest {
assertEquals(37, instance.index());
assertFalse(instance.retired());
assertEquals("content/id1/4/37", instance.stringValue());
- assertTrue(instance.cluster().rotations().isEmpty());
}
/** Serializing a spec without a group assigned works, but not deserialization */
@@ -131,7 +126,6 @@ public class ClusterMembershipTest {
assertEquals(37, instance.index());
assertTrue(instance.retired());
assertEquals("content/id1/37/retired", instance.stringValue());
- assertTrue(instance.cluster().rotations().isEmpty());
}
private void assertContentServiceWithGroupAndRetire(ClusterMembership instance) {
@@ -141,7 +135,6 @@ public class ClusterMembershipTest {
assertEquals(37, instance.index());
assertTrue(instance.retired());
assertEquals("content/id1/4/37/retired", instance.stringValue());
- assertTrue(instance.cluster().rotations().isEmpty());
}
}
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/SystemNameTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/SystemNameTest.java
index eb066958254..f50005edd52 100644
--- a/config-provisioning/src/test/java/com/yahoo/config/provision/SystemNameTest.java
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/SystemNameTest.java
@@ -20,7 +20,7 @@ public class SystemNameTest {
@Test
public void allOf() {
- assertEquals(Set.of(SystemName.cd, SystemName.PublicCd, SystemName.vaas), SystemName.allOf(SystemName::isCd));
- assertEquals(Set.of(SystemName.PublicCd, SystemName.Public, SystemName.vaas), SystemName.allOf(SystemName::isPublic));
+ assertEquals(Set.of(SystemName.cd, SystemName.PublicCd), SystemName.allOf(SystemName::isCd));
+ assertEquals(Set.of(SystemName.PublicCd, SystemName.Public), SystemName.allOf(SystemName::isPublic));
}
} \ No newline at end of file
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java
index 83f8412f66b..27d45ba7d7d 100644
--- a/config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java
@@ -26,19 +26,16 @@ public class ZoneIdTest {
ZoneId zoneId = ZoneId.from(environment, region);
assertEquals(region, zoneId.region());
assertEquals(environment, zoneId.environment());
- assertEquals(CloudName.defaultName(), zoneId.cloud());
assertEquals(SystemName.defaultSystem(), zoneId.system());
ZoneId zoneIdWithSystem = ZoneId.from(system, environment, region);
assertEquals(region, zoneIdWithSystem.region());
assertEquals(environment, zoneIdWithSystem.environment());
- assertEquals(CloudName.defaultName(), zoneIdWithSystem.cloud());
assertEquals(system, zoneIdWithSystem.system());
ZoneId zoneIdWithCloudAndSystem = ZoneId.from(environment, region, cloud, system);
assertEquals(region, zoneIdWithCloudAndSystem.region());
assertEquals(environment, zoneIdWithCloudAndSystem.environment());
- assertEquals(cloud, zoneIdWithCloudAndSystem.cloud());
assertEquals(system, zoneIdWithCloudAndSystem.system());
}
diff --git a/configdefinitions/src/vespa/athenz-provider-service.def b/configdefinitions/src/vespa/athenz-provider-service.def
index 281db6fb43d..7a06b13d435 100644
--- a/configdefinitions/src/vespa/athenz-provider-service.def
+++ b/configdefinitions/src/vespa/athenz-provider-service.def
@@ -2,22 +2,22 @@
namespace=vespa.hosted.athenz.instanceproviderservice.config
# Athenz domain
-zones{}.domain string
+domain string
# Athenz service name
-zones{}.serviceName string
+serviceName string
# Secret name of private Key
-zones{}.secretName string
+secretName string
# Secret version
-zones{}.secretVersion int
+secretVersion int
# Certificate DNS suffix
-zones{}.certDnsSuffix string
+certDnsSuffix string
# Athenz ZTS server url
-zones{}.ztsUrl string
+ztsUrl string
# Path to Athenz CA JKS trust store
athenzCaTrustStore string
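
A hedged sketch of how a consumer reads the flattened definition, assuming Vespa's usual def-to-Java config generation (the generated class name and accessors are inferred from the .def file and are not shown in this patch):

    import com.yahoo.vespa.hosted.athenz.instanceproviderservice.config.AthenzProviderServiceConfig;

    class ProviderConfigSketch {
        static void configure(AthenzProviderServiceConfig config) {
            String domain = config.domain();        // previously looked up per zone in the zones{} map
            String service = config.serviceName();
            String ztsUrl = config.ztsUrl();
            String trustStore = config.athenzCaTrustStore();
        }
    }
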
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index ecf300359a7..c8640a6e074 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -29,6 +29,7 @@ import com.yahoo.slime.Slime;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.CompressedApplicationInputStream;
import com.yahoo.vespa.config.server.application.ConfigConvergenceChecker;
import com.yahoo.vespa.config.server.application.FileDistributionStatus;
import com.yahoo.vespa.config.server.application.HttpProxy;
@@ -39,7 +40,6 @@ import com.yahoo.vespa.config.server.configchange.RestartActions;
import com.yahoo.vespa.config.server.deploy.DeployHandlerLogger;
import com.yahoo.vespa.config.server.deploy.Deployment;
import com.yahoo.vespa.config.server.deploy.InfraDeployerProvider;
-import com.yahoo.vespa.config.server.http.CompressedApplicationInputStream;
import com.yahoo.vespa.config.server.http.LogRetriever;
import com.yahoo.vespa.config.server.http.SimpleHttpFetcher;
import com.yahoo.vespa.config.server.http.v2.PrepareResult;
@@ -693,11 +693,6 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
}
}
- private List<ApplicationId> listApplicationIds(Tenant tenant) {
- TenantApplications applicationRepo = tenant.getApplicationRepo();
- return applicationRepo.activeApplications();
- }
-
private void cleanupTempDirectory(File tempDir) {
logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'");
if (!IOUtils.recursiveDeleteDir(tempDir)) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/CompressedApplicationInputStream.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/CompressedApplicationInputStream.java
index 8619435389a..c94f739b958 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/CompressedApplicationInputStream.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/CompressedApplicationInputStream.java
@@ -1,9 +1,11 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.config.server.http;
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.application;
import com.google.common.io.ByteStreams;
import com.google.common.io.Files;
import com.yahoo.log.LogLevel;
+import com.yahoo.vespa.config.server.http.BadRequestException;
+import com.yahoo.vespa.config.server.http.InternalServerException;
import com.yahoo.vespa.config.server.http.v2.ApplicationApiHandler;
import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveInputStream;
@@ -18,7 +20,6 @@ import java.util.zip.GZIPInputStream;
* A compressed application points to an application package that can be decompressed.
*
* @author Ulf Lilleengen
- * @since 5.1
*/
public class CompressedApplicationInputStream implements AutoCloseable {
@@ -41,7 +42,7 @@ public class CompressedApplicationInputStream implements AutoCloseable {
}
}
- public static CompressedApplicationInputStream createFromCompressedStream(ArchiveInputStream ais) {
+ static CompressedApplicationInputStream createFromCompressedStream(ArchiveInputStream ais) {
return new CompressedApplicationInputStream(ais);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
index 74a1eb6391b..4d0df545c39 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigConvergenceChecker.java
@@ -7,7 +7,6 @@ import com.yahoo.component.AbstractComponent;
import com.yahoo.config.model.api.HostInfo;
import com.yahoo.config.model.api.PortInfo;
import com.yahoo.config.model.api.ServiceInfo;
-import com.yahoo.config.provision.ApplicationId;
import com.yahoo.log.LogLevel;
import com.yahoo.slime.Cursor;
import com.yahoo.vespa.config.server.http.JSONResponse;
@@ -23,13 +22,12 @@ import javax.ws.rs.client.WebTarget;
import java.net.URI;
import java.time.Duration;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.logging.Logger;
import java.util.stream.Collectors;
import static com.yahoo.config.model.api.container.ContainerServiceType.CONTAINER;
@@ -44,18 +42,17 @@ import static com.yahoo.config.model.api.container.ContainerServiceType.QRSERVER
*/
public class ConfigConvergenceChecker extends AbstractComponent {
- private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(ConfigConvergenceChecker.class.getName());
- private static final ApplicationId routingApplicationId = ApplicationId.from("hosted-vespa", "routing", "default");
+ private static final Logger log = Logger.getLogger(ConfigConvergenceChecker.class.getName());
private static final String statePath = "/state/v1/";
private static final String configSubPath = "config";
- private final static Set<String> serviceTypesToCheck = new HashSet<>(Arrays.asList(
+ private final static Set<String> serviceTypesToCheck = Set.of(
CONTAINER.serviceName,
QRSERVER.serviceName,
LOGSERVER_CONTAINER.serviceName,
"searchnode",
"storagenode",
"distributor"
- ));
+ );
private final StateApiFactory stateApiFactory;
@@ -75,9 +72,6 @@ public class ConfigConvergenceChecker extends AbstractComponent {
application.getModel().getHosts()
.forEach(host -> host.getServices().stream()
.filter(service -> serviceTypesToCheck.contains(service.getServiceType()))
-
- // TODO: Remove after removing tenant hosts from zone-app
- .filter(service -> ! isHostAdminService(application.getId(), service))
.forEach(service -> getStatePort(service).ifPresent(port -> servicesToCheck.add(service))));
Map<ServiceInfo, Long> currentGenerations = getServiceGenerations(servicesToCheck, timeoutPerService);
@@ -181,13 +175,6 @@ public class ConfigConvergenceChecker extends AbstractComponent {
return WebResourceFactory.newResource(StateApi.class, target);
}
- private static boolean isHostAdminService(ApplicationId id, ServiceInfo service) {
- return routingApplicationId.equals(id)
- && service.getProperty("clustername")
- .map("node-admin"::equals)
- .orElse(false);
- }
-
private static class ServiceListResponse extends JSONResponse {
// Pre-condition: servicesToCheck has a state port
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
index 7f409e4c8fa..3caaa7693a9 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
@@ -8,7 +8,7 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.vespa.config.server.ApplicationRepository;
-import com.yahoo.vespa.config.server.http.CompressedApplicationInputStream;
+import com.yahoo.vespa.config.server.application.CompressedApplicationInputStream;
import com.yahoo.vespa.config.server.http.SessionHandler;
import com.yahoo.vespa.config.server.http.Utils;
import com.yahoo.vespa.config.server.session.PrepareParams;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
index a5e74d3b85f..4cabf39edcc 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
@@ -6,12 +6,17 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Rotation;
import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.slime.Slime;
+import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.config.server.TimeoutBudget;
import com.yahoo.vespa.config.server.http.SessionHandler;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpointSerializer;
import java.time.Clock;
import java.time.Duration;
import java.util.LinkedHashSet;
+import java.util.List;
import java.util.Optional;
import java.util.Set;
@@ -29,6 +34,7 @@ public final class PrepareParams {
static final String VERBOSE_PARAM_NAME = "verbose";
static final String VESPA_VERSION_PARAM_NAME = "vespaVersion";
static final String ROTATIONS_PARAM_NAME = "rotations";
+ static final String CONTAINER_ENDPOINTS_PARAM_NAME = "containerEndpoints";
private final ApplicationId applicationId;
private final TimeoutBudget timeoutBudget;
@@ -38,9 +44,11 @@ public final class PrepareParams {
private final boolean isBootstrap;
private final Optional<Version> vespaVersion;
private final Set<Rotation> rotations;
+ private final List<ContainerEndpoint> containerEndpoints;
private PrepareParams(ApplicationId applicationId, TimeoutBudget timeoutBudget, boolean ignoreValidationErrors,
- boolean dryRun, boolean verbose, boolean isBootstrap, Optional<Version> vespaVersion, Set<Rotation> rotations) {
+ boolean dryRun, boolean verbose, boolean isBootstrap, Optional<Version> vespaVersion,
+ Set<Rotation> rotations, List<ContainerEndpoint> containerEndpoints) {
this.timeoutBudget = timeoutBudget;
this.applicationId = applicationId;
this.ignoreValidationErrors = ignoreValidationErrors;
@@ -49,6 +57,10 @@ public final class PrepareParams {
this.isBootstrap = isBootstrap;
this.vespaVersion = vespaVersion;
this.rotations = rotations;
+ this.containerEndpoints = containerEndpoints;
+ if ((rotations != null && !rotations.isEmpty()) && !containerEndpoints.isEmpty()) {
+ throw new IllegalArgumentException("Cannot set both rotations and containerEndpoints");
+ }
}
public static class Builder {
@@ -61,6 +73,7 @@ public final class PrepareParams {
private TimeoutBudget timeoutBudget = new TimeoutBudget(Clock.systemUTC(), Duration.ofSeconds(30));
private Optional<Version> vespaVersion = Optional.empty();
private Set<Rotation> rotations;
+ private List<ContainerEndpoint> containerEndpoints = List.of();
public Builder() { }
@@ -119,22 +132,30 @@ public final class PrepareParams {
return this;
}
+ public Builder containerEndpoints(String serialized) {
+ if (serialized == null) return this;
+ Slime slime = SlimeUtils.jsonToSlime(serialized);
+ containerEndpoints = ContainerEndpointSerializer.endpointListFromSlime(slime);
+ return this;
+ }
+
public PrepareParams build() {
return new PrepareParams(applicationId, timeoutBudget, ignoreValidationErrors, dryRun,
- verbose, isBootstrap, vespaVersion, rotations);
+ verbose, isBootstrap, vespaVersion, rotations, containerEndpoints);
}
}
public static PrepareParams fromHttpRequest(HttpRequest request, TenantName tenant, Duration barrierTimeout) {
return new Builder().ignoreValidationErrors(request.getBooleanProperty(IGNORE_VALIDATION_PARAM_NAME))
- .dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
- .verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
- .timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
- .applicationId(createApplicationId(request, tenant))
- .vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
- .rotations(request.getProperty(ROTATIONS_PARAM_NAME))
- .build();
+ .dryRun(request.getBooleanProperty(DRY_RUN_PARAM_NAME))
+ .verbose(request.getBooleanProperty(VERBOSE_PARAM_NAME))
+ .timeoutBudget(SessionHandler.getTimeoutBudget(request, barrierTimeout))
+ .applicationId(createApplicationId(request, tenant))
+ .vespaVersion(request.getProperty(VESPA_VERSION_PARAM_NAME))
+ .rotations(request.getProperty(ROTATIONS_PARAM_NAME))
+ .containerEndpoints(request.getProperty(CONTAINER_ENDPOINTS_PARAM_NAME))
+ .build();
}
private static ApplicationId createApplicationId(HttpRequest request, TenantName tenant) {
@@ -164,8 +185,15 @@ public final class PrepareParams {
/** Returns the Vespa version the nodes running the prepared system should have, or empty to use the system version */
public Optional<Version> vespaVersion() { return vespaVersion; }
+ /** Returns the global rotations that should be made available for this deployment */
+ // TODO: Remove this once all applications have switched to containerEndpoints
public Set<Rotation> rotations() { return rotations; }
+ /** Returns the container endpoints that should be made available for this deployment. One per cluster */
+ public List<ContainerEndpoint> containerEndpoints() {
+ return containerEndpoints;
+ }
+
public boolean ignoreValidationErrors() {
return ignoreValidationErrors;
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index 0f24c3026d2..7af61a6efc1 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -8,18 +8,19 @@ import com.yahoo.component.Version;
import com.yahoo.component.Vtag;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.provision.AllocatedHosts;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.HostName;
-import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.config.provision.Rotation;
import com.yahoo.config.provision.Zone;
import com.yahoo.lang.SettableOptional;
import com.yahoo.log.LogLevel;
import com.yahoo.path.Path;
+import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.config.server.ConfigServerSpec;
import com.yahoo.vespa.config.server.application.ApplicationSet;
import com.yahoo.vespa.config.server.application.PermanentApplicationPackage;
@@ -30,6 +31,8 @@ import com.yahoo.vespa.config.server.http.InvalidApplicationException;
import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
import com.yahoo.vespa.config.server.modelfactory.PreparedModelsBuilder;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache;
import com.yahoo.vespa.config.server.tenant.Rotations;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.flags.FlagSource;
@@ -108,6 +111,10 @@ public class SessionPreparer {
if ( ! params.isDryRun()) {
preparation.writeStateZK();
preparation.writeRotZK();
+ var globalServiceId = context.getApplicationPackage().getDeployment()
+ .map(DeploymentSpec::fromXml)
+ .flatMap(DeploymentSpec::globalServiceId);
+ preparation.writeContainerEndpointsZK(globalServiceId);
preparation.distribute();
}
log.log(LogLevel.DEBUG, () -> "time used " + params.getTimeoutBudget().timesUsed() +
@@ -132,7 +139,8 @@ public class SessionPreparer {
/** The version of Vespa the application to be prepared specifies for its nodes */
final com.yahoo.component.Version vespaVersion;
- final Rotations rotations;
+ final Rotations rotations; // TODO: Remove this once we have migrated fully to container endpoints
+ final ContainerEndpointsCache containerEndpoints;
final Set<Rotation> rotationsSet;
final ModelContext.Properties properties;
@@ -153,6 +161,7 @@ public class SessionPreparer {
this.applicationId = params.getApplicationId();
this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion);
this.rotations = new Rotations(curator, tenantPath);
+ this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator);
this.rotationsSet = getRotations(params.rotations());
this.properties = new ModelContextImpl.Properties(params.getApplicationId(),
configserverConfig.multitenant(),
@@ -225,6 +234,21 @@ public class SessionPreparer {
checkTimeout("write rotations to zookeeper");
}
+ void writeContainerEndpointsZK(Optional<String> globalServiceId) {
+ if (!params.containerEndpoints().isEmpty()) { // Use endpoints from parameter when explicitly given
+ containerEndpoints.write(applicationId, params.containerEndpoints());
+ } else { // Fall back to writing rotations as container endpoints
+ if (!rotationsSet.isEmpty()) {
+ if (globalServiceId.isEmpty()) {
+ log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen");
+ return;
+ }
+ containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet));
+ }
+ }
+ checkTimeout("write container endpoints to zookeeper");
+ }
+
void distribute() {
prepareResult.asList().forEach(modelResult -> modelResult.model
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution()));
@@ -244,6 +268,13 @@ public class SessionPreparer {
}
+ private static List<ContainerEndpoint> toContainerEndpoints(String globalServiceId, Set<Rotation> rotations) {
+ return List.of(new ContainerEndpoint(new ClusterId(globalServiceId),
+ rotations.stream()
+ .map(Rotation::getId)
+ .collect(Collectors.toUnmodifiableList())));
+ }
+
private void writeStateToZooKeeper(SessionZooKeeperClient zooKeeperClient,
ApplicationPackage applicationPackage,
ApplicationId applicationId,
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java
index fb1035b5a03..b0fd3a81732 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java
@@ -8,16 +8,17 @@ import java.util.Objects;
/**
* ContainerEndpoint tracks the service names that a Container Cluster should be
- * known as. This is used during request routing both for regular requests and
+ * known as. This is used during request routing both for regular requests and
* for health checks in traffic distribution.
*
* @author ogronnesby
*/
public class ContainerEndpoint {
+
private final ClusterId clusterId;
private final List<String> names;
- ContainerEndpoint(ClusterId clusterId, List<String> names) {
+ public ContainerEndpoint(ClusterId clusterId, List<String> names) {
this.clusterId = Objects.requireNonNull(clusterId);
this.names = List.copyOf(Objects.requireNonNull(names));
}
@@ -46,9 +47,7 @@ public class ContainerEndpoint {
@Override
public String toString() {
- return "ContainerEndpoint{" +
- "clusterId=" + clusterId +
- ", names=" + names +
- '}';
+ return String.format("container endpoint %s -> %s", clusterId, names);
}
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
index 83d65f5b38b..379af7f71ea 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java
@@ -17,9 +17,12 @@ import java.util.List;
* @author ogronnesby
*/
public class ContainerEndpointSerializer {
+
private static final String clusterIdField = "clusterId";
private static final String namesField = "names";
+ private ContainerEndpointSerializer() {}
+
public static ContainerEndpoint endpointFromSlime(Inspector inspector) {
final var clusterId = inspector.field(clusterIdField).asString();
final var namesInspector = inspector.field(namesField);
@@ -73,4 +76,5 @@ public class ContainerEndpointSerializer {
return slime;
}
+
}
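
A minimal sketch, mirroring what PrepareParams.Builder does above, of the JSON shape this serializer reads; the hostname is made up:

    import com.yahoo.slime.Slime;
    import com.yahoo.vespa.config.SlimeUtils;
    import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
    import com.yahoo.vespa.config.server.tenant.ContainerEndpointSerializer;

    import java.util.List;

    class EndpointJsonSketch {
        public static void main(String[] args) {
            // A JSON array of {clusterId, names} objects, as also stored by ContainerEndpointsCache
            String json = "[{\"clusterId\": \"qrs\", \"names\": [\"app1.tenant1.global.vespa.example.com\"]}]";
            Slime slime = SlimeUtils.jsonToSlime(json);
            List<ContainerEndpoint> endpoints = ContainerEndpointSerializer.endpointListFromSlime(slime);
            System.out.println(endpoints.size()); // 1
        }
    }
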
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java
index 06f93f2006f..7e29f9abc1d 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java
@@ -24,10 +24,11 @@ import java.util.List;
* @author ogronnesby
*/
public class ContainerEndpointsCache {
+
private final Path cachePath;
private final Curator curator;
- ContainerEndpointsCache(Path tenantPath, Curator curator) {
+ public ContainerEndpointsCache(Path tenantPath, Curator curator) {
this.cachePath = tenantPath.append("containerEndpointsCache/");
this.curator = curator;
}
@@ -37,7 +38,7 @@ public class ContainerEndpointsCache {
return optionalData
.map(SlimeUtils::jsonToSlime)
.map(ContainerEndpointSerializer::endpointListFromSlime)
- .orElse(List.of());
+ .orElseGet(List::of);
}
public void write(ApplicationId applicationId, List<ContainerEndpoint> endpoints) {
@@ -56,4 +57,5 @@ public class ContainerEndpointsCache {
private Path applicationPath(ApplicationId applicationId) {
return cachePath.append(applicationId.serializedForm());
}
+
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/CompressedApplicationInputStreamTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/CompressedApplicationInputStreamTest.java
index 7faf33b2ff2..496da2cf809 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/CompressedApplicationInputStreamTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/CompressedApplicationInputStreamTest.java
@@ -1,8 +1,9 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.config.server.http;
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.config.server.application;
import com.google.common.collect.ImmutableList;
import com.google.common.io.ByteStreams;
+import com.yahoo.vespa.config.server.application.CompressedApplicationInputStream;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
@@ -26,7 +27,6 @@ import static org.junit.Assert.assertTrue;
/**
* @author Ulf Lilleengen
- * @since 5.1
*/
public class CompressedApplicationInputStreamTest {
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java
index 81dd41ac714..ac4b4ea9005 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionCreateHandlerTest.java
@@ -11,7 +11,7 @@ import com.yahoo.vespa.config.server.MockReloadHandler;
import com.yahoo.vespa.config.server.TestComponentRegistry;
import com.yahoo.vespa.config.server.application.OrchestratorMock;
import com.yahoo.vespa.config.server.application.TenantApplications;
-import com.yahoo.vespa.config.server.http.CompressedApplicationInputStreamTest;
+import com.yahoo.vespa.config.server.application.CompressedApplicationInputStreamTest;
import com.yahoo.vespa.config.server.http.HandlerTest;
import com.yahoo.vespa.config.server.http.HttpErrorResponse;
import com.yahoo.vespa.config.server.http.SessionHandlerTest;
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
index f2fb4aa1c40..6eba85af37e 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
@@ -6,10 +6,14 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Rotation;
import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
-
+import com.yahoo.vespa.applicationmodel.ClusterId;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
import org.junit.Test;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
import java.time.Duration;
+import java.util.List;
import java.util.Optional;
import java.util.Set;
@@ -17,6 +21,7 @@ import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
@@ -26,6 +31,15 @@ import static org.junit.Assert.assertTrue;
*/
public class PrepareParamsTest {
+ private static final String rotation = "rotation-042.vespa.a02.yahoodns.net";
+ private static final String vespaVersion = "6.37.49";
+ private static final String request = "http://foo:19071/application/v2/tenant/foo/application/bar?" +
+ PrepareParams.DRY_RUN_PARAM_NAME + "=true&" +
+ PrepareParams.VERBOSE_PARAM_NAME+ "=true&" +
+ PrepareParams.IGNORE_VALIDATION_PARAM_NAME + "=false&" +
+ PrepareParams.APPLICATION_NAME_PARAM_NAME + "=baz&" +
+ PrepareParams.VESPA_VERSION_PARAM_NAME + "=" + vespaVersion;
+
@Test
public void testCorrectParsing() {
PrepareParams prepareParams = createParams("http://foo:19071/application/v2/", TenantName.defaultName());
@@ -38,15 +52,6 @@ public class PrepareParamsTest {
assertTrue(prepareParams.getTimeoutBudget().hasTimeLeft());
assertThat(prepareParams.rotations().size(), is(0));
}
-
- private static final String rotation = "rotation-042.vespa.a02.yahoodns.net";
- private static final String vespaVersion = "6.37.49";
- private static final String request = "http://foo:19071/application/v2/tenant/foo/application/bar?" +
- PrepareParams.DRY_RUN_PARAM_NAME + "=true&" +
- PrepareParams.VERBOSE_PARAM_NAME+ "=true&" +
- PrepareParams.IGNORE_VALIDATION_PARAM_NAME + "=false&" +
- PrepareParams.APPLICATION_NAME_PARAM_NAME + "=baz&" +
- PrepareParams.VESPA_VERSION_PARAM_NAME + "=" + vespaVersion;
@Test
public void testCorrectParsingWithRotation() {
@@ -77,6 +82,31 @@ public class PrepareParamsTest {
assertThat(rotations, containsInAnyOrder(new Rotation(rotation), new Rotation(rotationTwo)));
}
+ @Test
+ public void testCorrectParsingWithContainerEndpoints() {
+ var endpoints = List.of(new ContainerEndpoint(new ClusterId("qrs1"),
+ List.of("c1.example.com",
+ "c2.example.com")),
+ new ContainerEndpoint(new ClusterId("qrs2"),
+ List.of("c3.example.com",
+ "c4.example.com")));
+ var param = "[\n" +
+ " {\n" +
+ " \"clusterId\": \"qrs1\",\n" +
+ " \"names\": [\"c1.example.com\", \"c2.example.com\"]\n" +
+ " },\n" +
+ " {\n" +
+ " \"clusterId\": \"qrs2\",\n" +
+ " \"names\": [\"c3.example.com\", \"c4.example.com\"]\n" +
+ " }\n" +
+ "]";
+
+ var encoded = URLEncoder.encode(param, StandardCharsets.UTF_8);
+ var prepareParams = createParams(request + "&" + PrepareParams.CONTAINER_ENDPOINTS_PARAM_NAME +
+ "=" + encoded, TenantName.from("foo"));
+ assertEquals(endpoints, prepareParams.containerEndpoints());
+ }
+
// Create PrepareParams from a request (based on uri and tenant name)
private static PrepareParams createParams(String uri, TenantName tenantName) {
return PrepareParams.fromHttpRequest(
@@ -84,4 +114,5 @@ public class PrepareParamsTest {
tenantName,
Duration.ofSeconds(60));
}
+
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
index 072dcf0e26f..6b2810af66c 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
@@ -1,22 +1,22 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.session;
+import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.application.provider.FilesApplicationPackage;
+import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.Rotation;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.component.Version;
import com.yahoo.io.IOUtils;
import com.yahoo.log.LogLevel;
import com.yahoo.path.Path;
import com.yahoo.slime.Slime;
-import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.config.server.MockReloadHandler;
-import com.yahoo.vespa.config.server.SuperModelGenerationCounter;
import com.yahoo.vespa.config.server.TestComponentRegistry;
import com.yahoo.vespa.config.server.TimeoutBudgetTest;
import com.yahoo.vespa.config.server.application.PermanentApplicationPackage;
@@ -27,9 +27,10 @@ import com.yahoo.vespa.config.server.http.InvalidApplicationException;
import com.yahoo.vespa.config.server.model.TestModelFactory;
import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpoint;
+import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache;
import com.yahoo.vespa.config.server.tenant.Rotations;
import com.yahoo.vespa.config.server.zookeeper.ConfigCurator;
-
import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import org.junit.Before;
@@ -42,6 +43,7 @@ import java.io.IOException;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
+import java.util.List;
import java.util.Optional;
import java.util.Set;
@@ -49,8 +51,8 @@ import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.contains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
/**
* @author Ulf Lilleengen
@@ -170,6 +172,10 @@ public class SessionPreparerTest {
assertThat(zkc.readApplicationId(), is(origId));
}
+ private List<ContainerEndpoint> readContainerEndpoints(ApplicationId application) {
+ return new ContainerEndpointsCache(tenantPath, curator).read(application);
+ }
+
private Set<Rotation> readRotationsFromZK(ApplicationId applicationId) {
return new Rotations(curator, tenantPath).readRotationsFromZooKeeper(applicationId);
}
@@ -205,6 +211,52 @@ public class SessionPreparerTest {
assertThat(readRotationsFromZK(applicationId), contains(new Rotation(rotations)));
}
+ @Test
+ public void require_that_rotations_are_written_as_container_endpoints() throws Exception {
+ var rotations = "app1.tenant1.global.vespa.example.com,rotation-042.vespa.global.routing";
+ var applicationId = applicationId("test");
+ var params = new PrepareParams.Builder().applicationId(applicationId).rotations(rotations).build();
+ prepare(new File("src/test/resources/deploy/hosted-app"), params);
+
+ var expected = List.of(new ContainerEndpoint(new ClusterId("qrs"),
+ List.of("app1.tenant1.global.vespa.example.com",
+ "rotation-042.vespa.global.routing")));
+ assertEquals(expected, readContainerEndpoints(applicationId));
+ }
+
+ @Test
+ public void require_that_container_endpoints_are_written() throws Exception {
+ var endpoints = "[\n" +
+ " {\n" +
+ " \"clusterId\": \"foo\",\n" +
+ " \"names\": [\n" +
+ " \"foo.app1.tenant1.global.vespa.example.com\",\n" +
+ " \"rotation-042.vespa.global.routing\"\n" +
+ " ]\n" +
+ " },\n" +
+ " {\n" +
+ " \"clusterId\": \"bar\",\n" +
+ " \"names\": [\n" +
+ " \"bar.app1.tenant1.global.vespa.example.com\",\n" +
+ " \"rotation-043.vespa.global.routing\"\n" +
+ " ]\n" +
+ " }\n" +
+ "]";
+ var applicationId = applicationId("test");
+ var params = new PrepareParams.Builder().applicationId(applicationId)
+ .containerEndpoints(endpoints)
+ .build();
+ prepare(new File("src/test/resources/deploy/hosted-app"), params);
+
+ var expected = List.of(new ContainerEndpoint(new ClusterId("foo"),
+ List.of("foo.app1.tenant1.global.vespa.example.com",
+ "rotation-042.vespa.global.routing")),
+ new ContainerEndpoint(new ClusterId("bar"),
+ List.of("bar.app1.tenant1.global.vespa.example.com",
+ "rotation-043.vespa.global.routing")));
+ assertEquals(expected, readContainerEndpoints(applicationId));
+ }
+
private void prepare(File app) throws IOException {
prepare(app, new PrepareParams.Builder().build());
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java
index b4d52e6d37c..aac0b6d1a16 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java
@@ -8,7 +8,11 @@ import java.util.List;
import static org.junit.Assert.assertEquals;
+/**
+ * @author ogronnesby
+ */
public class ContainerEndpointSerializerTest {
+
@Test
public void readSingleEndpoint() {
final var slime = new Slime();
@@ -42,4 +46,5 @@ public class ContainerEndpointSerializerTest {
assertEquals(endpoints, deserialized);
}
+
}
diff --git a/configserver/src/test/resources/deploy/hosted-app/deployment.xml b/configserver/src/test/resources/deploy/hosted-app/deployment.xml
new file mode 100644
index 00000000000..a92404c161d
--- /dev/null
+++ b/configserver/src/test/resources/deploy/hosted-app/deployment.xml
@@ -0,0 +1,7 @@
+<!-- Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<deployment version='1.0'>
+ <prod global-service-id="qrs">
+ <region active="true">us-north-1</region>
+ <region active="true">us-north-2</region>
+ </prod>
+</deployment>
diff --git a/configserver/src/test/resources/deploy/hosted-app/services.xml b/configserver/src/test/resources/deploy/hosted-app/services.xml
new file mode 100644
index 00000000000..57bee6ce9c9
--- /dev/null
+++ b/configserver/src/test/resources/deploy/hosted-app/services.xml
@@ -0,0 +1,6 @@
+<!-- Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<services version="1.0">
+ <container id="qrs" version="1.0">
+ <nodes count="2"/>
+ </container>
+</services>
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index 80038da6750..38ce2aa2cf2 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -6177,6 +6177,7 @@
"methods": [
"public void <init>()",
"public void register(com.yahoo.search.query.profile.types.QueryProfileType)",
+ "public boolean hasApplicationTypes()",
"public void freeze()",
"public static com.yahoo.search.query.profile.types.QueryProfileTypeRegistry emptyFrozen()"
],
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java b/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java
index 457e587da40..bf0abb1132f 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedSearchInvoker.java
@@ -124,7 +124,7 @@ public class InterleavedSearchInvoker extends SearchInvoker implements ResponseM
}
private void trimResult(Execution execution) {
- if (trimResult) {
+ if (trimResult || result.hits().size() > query.getHits()) {
result.hits().trim(query.getOffset(), query.getHits());
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/Model.java b/container-search/src/main/java/com/yahoo/search/query/Model.java
index d2f59e0710e..5ed58a74627 100644
--- a/container-search/src/main/java/com/yahoo/search/query/Model.java
+++ b/container-search/src/main/java/com/yahoo/search/query/Model.java
@@ -377,7 +377,7 @@ public class Model implements Cloneable {
* from a sources
*/
public void setRestrict(String restrictString) {
- setFromString(restrictString,restrict);
+ setFromString(restrictString, restrict);
}
/**
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/config/QueryProfileXMLReader.java b/container-search/src/main/java/com/yahoo/search/query/profile/config/QueryProfileXMLReader.java
index eb4a0ad6be4..210b4899c58 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/config/QueryProfileXMLReader.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/config/QueryProfileXMLReader.java
@@ -23,7 +23,7 @@ import java.util.Collections;
import java.util.List;
/**
- * A class which imports query profiles and types from XML files
+ * Importer of query profiles and types from XML files
*
* @author bratseth
*/
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileTypeRegistry.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileTypeRegistry.java
index ff8c4845845..b76ae88ede1 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileTypeRegistry.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileTypeRegistry.java
@@ -12,8 +12,11 @@ import com.yahoo.search.query.profile.QueryProfileRegistry;
*/
public class QueryProfileTypeRegistry extends ComponentRegistry<QueryProfileType> {
+ private final int nativeProfileCount;
+
public QueryProfileTypeRegistry() {
Query.addNativeQueryProfileTypesTo(this);
+ nativeProfileCount = allComponents().size();
}
/** Register this type by its id */
@@ -21,6 +24,11 @@ public class QueryProfileTypeRegistry extends ComponentRegistry<QueryProfileType
super.register(type.getId(), type);
}
+ /** Returns true if this has types in addition to the native Vespa types */
+ public boolean hasApplicationTypes() {
+ return allComponents().size() > nativeProfileCount;
+ }
+
@Override
public void freeze() {
if (isFrozen()) return;
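
A quick sketch of the new check: a fresh registry contains only the native Vespa types, so hasApplicationTypes() turns true once the application registers one (the "user" type is just an example):

    import com.yahoo.search.query.profile.types.QueryProfileType;
    import com.yahoo.search.query.profile.types.QueryProfileTypeRegistry;

    class RegistrySketch {
        public static void main(String[] args) {
            QueryProfileTypeRegistry registry = new QueryProfileTypeRegistry();
            System.out.println(registry.hasApplicationTypes()); // false: only native types so far
            registry.register(new QueryProfileType("user"));    // an application-defined type
            System.out.println(registry.hasApplicationTypes()); // true
        }
    }
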
diff --git a/container-search/src/main/java/com/yahoo/search/querytransform/DefaultPositionSearcher.java b/container-search/src/main/java/com/yahoo/search/querytransform/DefaultPositionSearcher.java
index e68368f1eba..0a89565ed4d 100644
--- a/container-search/src/main/java/com/yahoo/search/querytransform/DefaultPositionSearcher.java
+++ b/container-search/src/main/java/com/yahoo/search/querytransform/DefaultPositionSearcher.java
@@ -13,7 +13,6 @@ import com.yahoo.search.searchchain.Execution;
import com.yahoo.search.searchchain.PhaseNames;
import java.util.List;
-import java.util.Set;
/**
* If default position has not been set, it will be set here.
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
index 03b3d586b73..20469e6449a 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServer.java
@@ -25,13 +25,11 @@ import java.util.Set;
public interface ConfigServer {
interface PreparedApplication {
- // TODO: Remove the two methods below
- void activate();
- List<Log> messages();
PrepareResponse prepareResponse();
}
- PreparedApplication deploy(DeploymentId deployment, DeployOptions deployOptions, Set<String> rotationCnames, Set<String> rotationNames, byte[] content);
+ PreparedApplication deploy(DeploymentId deployment, DeployOptions deployOptions, Set<String> rotationNames,
+ List<ContainerEndpoint> containerEndpoints, byte[] content);
void restart(DeploymentId deployment, Optional<Hostname> hostname);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java
new file mode 100644
index 00000000000..2134320bdc1
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java
@@ -0,0 +1,59 @@
+// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.configserver;
+
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * This represents a list of one or more names for a container cluster.
+ *
+ * @author mpolden
+ */
+public class ContainerEndpoint {
+
+ private final String clusterId;
+ private final List<String> names;
+
+ public ContainerEndpoint(String clusterId, List<String> names) {
+ this.clusterId = nonEmpty(clusterId, "clusterId must be non-empty");
+ this.names = List.copyOf(Objects.requireNonNull(names, "names must be non-null"));
+ }
+
+ /** ID of the cluster to which this points */
+ public String clusterId() {
+ return clusterId;
+ }
+
+ /**
+ * All valid DNS names for this endpoint. This can contain both proper DNS names and synthetic identifiers used for
+ * routing, such as a Host header value that is not necessarily a proper DNS name.
+ */
+ public List<String> names() {
+ return names;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ContainerEndpoint that = (ContainerEndpoint) o;
+ return clusterId.equals(that.clusterId) &&
+ names.equals(that.names);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(clusterId, names);
+ }
+
+ @Override
+ public String toString() {
+ return "container endpoint for " + clusterId + " " + names;
+ }
+
+ private static String nonEmpty(String s, String message) {
+ if (s == null || s.isBlank()) throw new IllegalArgumentException(message);
+ return s;
+ }
+
+}
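A small usage sketch of the new ContainerEndpoint value class; the cluster id and the second name below are hypothetical, the first name follows the DNS naming seen in ControllerTest further down:

import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;

import java.util.List;

class ContainerEndpointExample {

    public static void main(String[] args) {
        // One endpoint carrying both a proper DNS name and a synthetic routing name (e.g. a rotation id).
        ContainerEndpoint endpoint = new ContainerEndpoint("default",
                                                           List.of("app1--tenant1.global.vespa.oath.cloud",
                                                                   "rotation-42"));
        System.out.println(endpoint.clusterId()); // default
        System.out.println(endpoint.names());     // [app1--tenant1.global.vespa.oath.cloud, rotation-42]
    }

}

Note that in this change ApplicationController still passes List.of() for the containerEndpoints argument to ConfigServer.deploy, so the class is introduced here but not yet populated by the controller.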
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
index 585a8f84fb2..94e111455ac 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
@@ -15,7 +15,6 @@ import java.util.stream.Stream;
import static com.yahoo.config.provision.SystemName.PublicCd;
import static com.yahoo.config.provision.SystemName.cd;
import static com.yahoo.config.provision.SystemName.main;
-import static com.yahoo.config.provision.SystemName.vaas;
/** Job types that exist in the build system */
public enum JobType {
@@ -88,9 +87,6 @@ public enum JobType {
devCdUsCentral1 ("dev-cd-us-central-1",
Map.of(cd , ZoneId.from("dev" , "cd-us-central-1"))),
- devAwsUsEast1b ("dev-aws-us-east-1b",
- Map.of(vaas, ZoneId.from("dev" , "vaas-aws-us-east-1b"))),
-
devAwsUsEast1c ("dev-aws-us-east-1c",
Map.of(PublicCd, ZoneId.from("dev", "aws-us-east-1c"))),
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/RoutingGenerator.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/RoutingGenerator.java
index 276e19da8f6..f5c82018ac6 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/RoutingGenerator.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/RoutingGenerator.java
@@ -1,9 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.routing;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import java.net.URI;
import java.util.List;
+import java.util.Map;
/**
* @author bratseth
@@ -16,4 +19,8 @@ public interface RoutingGenerator {
* @return List of endpoints for that deploymentId
*/
List<RoutingEndpoint> endpoints(DeploymentId deploymentId);
+
+ /** Returns the zone-level endpoint of each cluster in the given deployment; global endpoints are not included. */
+ Map<ClusterSpec.Id, URI> clusterEndpoints(DeploymentId deploymentId);
+
}
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/role/RoleTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/role/RoleTest.java
index 4c11da3b697..d141ef6c73e 100644
--- a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/role/RoleTest.java
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/role/RoleTest.java
@@ -17,7 +17,7 @@ import static org.junit.Assert.assertTrue;
public class RoleTest {
private static final Enforcer mainEnforcer = new Enforcer(SystemName.main);
- private static final Enforcer vaasEnforcer = new Enforcer(SystemName.vaas);
+ private static final Enforcer publicEnforcer = new Enforcer(SystemName.Public);
@Test
public void operator_membership() {
@@ -40,18 +40,18 @@ public class RoleTest {
assertTrue(mainEnforcer.allows(role, Action.update, URI.create("/application/v4/tenant/t1/application/a1")));
Role publicSystem = Role.athenzTenantAdmin(TenantName.from("t1"));
- assertFalse(vaasEnforcer.allows(publicSystem, Action.read, URI.create("/controller/v1/foo")));
- assertTrue(vaasEnforcer.allows(publicSystem, Action.read, URI.create("/badge/v1/badge")));
- assertTrue(vaasEnforcer.allows(publicSystem, Action.update, URI.create("/application/v4/tenant/t1/application/a1")));
+ assertFalse(publicEnforcer.allows(publicSystem, Action.read, URI.create("/controller/v1/foo")));
+ assertTrue(publicEnforcer.allows(publicSystem, Action.read, URI.create("/badge/v1/badge")));
+ assertTrue(publicEnforcer.allows(publicSystem, Action.update, URI.create("/application/v4/tenant/t1/application/a1")));
}
@Test
public void build_service_membership() {
Role role = Role.tenantPipeline(TenantName.from("t1"), ApplicationName.from("a1"));
- assertFalse(vaasEnforcer.allows(role, Action.create, URI.create("/not/explicitly/defined")));
- assertFalse(vaasEnforcer.allows(role, Action.update, URI.create("/application/v4/tenant/t1/application/a1")));
- assertTrue(vaasEnforcer.allows(role, Action.create, URI.create("/application/v4/tenant/t1/application/a1/jobreport")));
- assertFalse("No global read access", vaasEnforcer.allows(role, Action.read, URI.create("/controller/v1/foo")));
+ assertFalse(publicEnforcer.allows(role, Action.create, URI.create("/not/explicitly/defined")));
+ assertFalse(publicEnforcer.allows(role, Action.update, URI.create("/application/v4/tenant/t1/application/a1")));
+ assertTrue(publicEnforcer.allows(role, Action.create, URI.create("/application/v4/tenant/t1/application/a1/jobreport")));
+ assertFalse("No global read access", publicEnforcer.allows(role, Action.read, URI.create("/controller/v1/foo")));
}
@Test
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index c3d5788ccb3..b9b808d573e 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -7,6 +7,7 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationId;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
@@ -76,6 +77,7 @@ import java.security.Principal;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
+import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
@@ -86,6 +88,7 @@ import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
+import java.util.TreeMap;
import java.util.function.Consumer;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -208,7 +211,7 @@ public class ApplicationController {
return findGlobalEndpoint(deployment).map(endpoint -> {
try {
EndpointStatus status = configServer.getGlobalRotationStatus(deployment, endpoint.upstreamName());
- return Collections.singletonMap(endpoint, status);
+ return Map.of(endpoint, status);
} catch (IOException e) {
throw new UncheckedIOException("Failed to get rotation status of " + deployment, e);
}
@@ -282,7 +285,6 @@ public class ApplicationController {
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
Set<String> rotationNames = new HashSet<>();
- Set<String> cnames;
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(require(applicationId), lock);
@@ -324,9 +326,9 @@ public class ApplicationController {
application = withRotation(application, zone);
Application app = application.get();
// Include global DNS names
- cnames = app.endpointsIn(controller.system()).asList().stream().map(Endpoint::dnsName).collect(Collectors.toSet());
+ app.endpointsIn(controller.system()).asList().stream().map(Endpoint::dnsName).forEach(rotationNames::add);
// Include rotation ID to ensure that deployment can respond to health checks with rotation ID as Host header
- app.rotations().stream().map(RotationId::asString).forEach(cnames::add);
+ app.rotations().stream().map(RotationId::asString).forEach(rotationNames::add);
// Update application with information from application package
if ( ! preferOldestVersion
@@ -338,7 +340,7 @@ public class ApplicationController {
// Carry out deployment without holding the application lock.
options = withVersion(platformVersion, options);
- ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames, cnames);
+ ActivateResult result = deploy(applicationId, applicationPackage, zone, options, rotationNames);
lockOrThrow(applicationId, application ->
store(application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant(),
@@ -405,7 +407,7 @@ public class ApplicationController {
artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
);
DeployOptions options = withVersion(version, DeployOptions.none());
- return deploy(application.id(), applicationPackage, zone, options, Set.of(), Set.of());
+ return deploy(application.id(), applicationPackage, zone, options, Set.of());
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
@@ -413,16 +415,15 @@ public class ApplicationController {
/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, DeployOptions options) {
- return deploy(tester.id(), applicationPackage, zone, options, Collections.emptySet(), Collections.emptySet());
+ return deploy(tester.id(), applicationPackage, zone, options, Set.of());
}
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
ZoneId zone, DeployOptions deployOptions,
- Set<String> rotationNames, Set<String> cnames) {
+ Set<String> rotationNames) {
DeploymentId deploymentId = new DeploymentId(application, zone);
ConfigServer.PreparedApplication preparedApplication =
- configServer.deploy(deploymentId, deployOptions, cnames, rotationNames,
- applicationPackage.zippedContent());
+ configServer.deploy(deploymentId, deployOptions, rotationNames, List.of(), applicationPackage.zippedContent());
// Refresh routing policies on successful deployment. At this point we can safely assume that the config server
// has allocated load balancers for the deployment.
@@ -466,8 +467,8 @@ public class ApplicationController {
logEntry.message = "Ignoring deployment of application '" + application + "' to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
- prepareResponse.log = Collections.singletonList(logEntry);
- prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
+ prepareResponse.log = List.of(logEntry);
+ prepareResponse.configChangeActions = new ConfigChangeActions(List.of(), List.of());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
@@ -518,26 +519,59 @@ public class ApplicationController {
controller.nameServiceForwarder().createCname(RecordName.from(name), RecordData.fqdn(targetName), Priority.normal);
}
- /** Returns the endpoints of the deployment, or an empty list if the request fails */
- public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
+ /** Returns the endpoints of the deployment, or an empty list if the request fails */
+ public List<URI> getDeploymentEndpoints(DeploymentId deploymentId) {
if ( ! get(deploymentId.applicationId())
.map(application -> application.deployments().containsKey(deploymentId.zoneId()))
.orElse(deploymentId.applicationId().instance().isTester()))
throw new NotExistsException("Deployment", deploymentId.toString());
try {
- return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
- .map(RoutingEndpoint::endpoint)
- .map(URI::create)
- .iterator()));
+ return ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
+ .map(RoutingEndpoint::endpoint)
+ .map(URI::create)
+ .iterator());
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
- return Optional.empty();
+ return Collections.emptyList();
}
}
+ /** Returns the endpoints per cluster in the given deployment, or an empty map if no endpoints can be found. */
+ public Map<ClusterSpec.Id, URI> clusterEndpoints(DeploymentId id) {
+ if ( ! get(id.applicationId())
+ .map(application -> application.deployments().containsKey(id.zoneId()))
+ .orElse(id.applicationId().instance().isTester()))
+ throw new NotExistsException("Deployment", id.toString());
+
+ try {
+ return Optional.of(routingGenerator.clusterEndpoints(id))
+ .filter(endpoints -> ! endpoints.isEmpty())
+ .orElseGet(() -> routingPolicies.get(id).stream()
+ .filter(policy -> policy.endpointIn(controller.system()).scope() == Endpoint.Scope.zone)
+ .collect(Collectors.toUnmodifiableMap(policy -> policy.cluster(),
+ policy -> policy.endpointIn(controller.system()).url())));
+ }
+ catch (RuntimeException e) {
+ log.log(Level.WARNING, "Failed to get endpoint information for " + id + ": "
+ + Exceptions.toMessageString(e));
+ return Collections.emptyMap();
+ }
+ }
+
+ /** Returns all zone-specific cluster endpoints for the given application, in the given zones. */
+ public Map<ZoneId, Map<ClusterSpec.Id, URI>> clusterEndpoints(ApplicationId id, Collection<ZoneId> zones) {
+ Map<ZoneId, Map<ClusterSpec.Id, URI>> deployments = new TreeMap<>(Comparator.comparing(ZoneId::value));
+ for (ZoneId zone : zones) {
+ var endpoints = clusterEndpoints(new DeploymentId(id, zone));
+ if ( ! endpoints.isEmpty())
+ deployments.put(zone, endpoints);
+ }
+ return Collections.unmodifiableMap(deployments);
+ }
+
/**
* Deletes the given application. All known instances of the application will be deleted,
* including PR instances.
@@ -778,7 +812,7 @@ public class ApplicationController {
if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
}
- return Collections.unmodifiableMap(warnings);
+ return Map.copyOf(warnings);
}
}
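A minimal sketch of consuming the zone-to-cluster endpoint structure returned by the new clusterEndpoints(ApplicationId, Collection&lt;ZoneId&gt;) method; it mirrors the logging done in InternalStepRunner below, and the class name is illustrative:

import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.zone.ZoneId;

import java.net.URI;
import java.util.Map;

class EndpointPrinter {

    // Prints one line per zone and one indented line per cluster endpoint.
    static void print(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints) {
        endpoints.forEach((zone, clusters) -> {
            System.out.println("- " + zone);
            clusters.forEach((cluster, uri) -> System.out.println("  |-- " + uri + " (" + cluster + ")"));
        });
    }

}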
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
index 6c9b8dd0784..5026ca75a83 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
@@ -152,7 +152,6 @@ public class Endpoint {
if (legacy) return YAHOO_DNS_SUFFIX;
return OATH_DNS_SUFFIX;
case Public:
- case vaas:
return PUBLIC_DNS_SUFFIX;
case PublicCd:
return PUBLIC_CD_DNS_SUFFIX;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index 891a696be9c..bf2460284ab 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -11,13 +11,11 @@ import com.yahoo.config.application.api.Notifications.When;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.AthenzDomain;
import com.yahoo.config.provision.AthenzService;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.io.IOUtils;
import com.yahoo.log.LogLevel;
-import com.yahoo.slime.Cursor;
-import com.yahoo.slime.Slime;
-import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.ActivateResult;
@@ -28,7 +26,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.LogEntry;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.PrepareResponse;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.ServiceConvergence;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
@@ -41,7 +38,6 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobReport;
import com.yahoo.yolean.Exceptions;
import java.io.ByteArrayOutputStream;
-import java.io.IOException;
import java.io.PrintStream;
import java.io.UncheckedIOException;
import java.net.URI;
@@ -57,7 +53,6 @@ import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
-import java.util.stream.Stream;
import static com.yahoo.config.application.api.Notifications.Role.author;
import static com.yahoo.config.application.api.Notifications.When.failing;
@@ -99,10 +94,12 @@ public class InternalStepRunner implements StepRunner {
static final Duration installationTimeout = Duration.ofMinutes(150);
private final Controller controller;
+ private final TestConfigSerializer testConfigSerializer;
private final DeploymentFailureMails mails;
public InternalStepRunner(Controller controller) {
this.controller = controller;
+ this.testConfigSerializer = new TestConfigSerializer(controller.system());
this.mails = new DeploymentFailureMails(controller.zoneRegistry());
}
@@ -320,19 +317,23 @@ public class InternalStepRunner implements StepRunner {
private boolean endpointsAvailable(ApplicationId id, ZoneId zoneId, DualLogger logger) {
logger.log("Attempting to find deployment endpoints ...");
- Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id, Set.of(zoneId));
+ var endpoints = controller.applications().clusterEndpoints(id, Set.of(zoneId));
if ( ! endpoints.containsKey(zoneId)) {
logger.log("Endpoints not yet ready.");
return false;
}
+ logEndpoints(endpoints, logger);
+ return true;
+ }
+
+ private void logEndpoints(Map<ZoneId, Map<ClusterSpec.Id, URI>> endpoints, DualLogger logger) {
List<String> messages = new ArrayList<>();
messages.add("Found endpoints:");
endpoints.forEach((zone, uris) -> {
messages.add("- " + zone);
- uris.forEach(uri -> messages.add(" |-- " + uri));
+ uris.forEach((cluster, uri) -> messages.add(" |-- " + uri + " (" + cluster + ")"));
});
logger.log(messages);
- return true;
}
private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) {
@@ -384,21 +385,15 @@ public class InternalStepRunner implements StepRunner {
return Optional.of(aborted);
}
- Set<ZoneId> zones = testedZoneAndProductionZones(id);
+ Set<ZoneId> zones = controller.jobController().testedZoneAndProductionZones(id.application(), id.type());
logger.log("Attempting to find endpoints ...");
- Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones);
+ var endpoints = controller.applications().clusterEndpoints(id.application(), zones);
if ( ! endpoints.containsKey(id.type().zone(controller.system())) && timedOut(deployment.get(), endpointTimeout)) {
logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!");
return Optional.of(error);
}
- List<String> messages = new ArrayList<>();
- messages.add("Found endpoints:");
- endpoints.forEach((zone, uris) -> {
- messages.add("- " + zone);
- uris.forEach(uri -> messages.add(" |-- " + uri));
- });
- logger.log(messages);
+ logEndpoints(endpoints, logger);
Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id);
if (testerEndpoint.isEmpty() && timedOut(deployment.get(), endpointTimeout)) {
@@ -410,9 +405,10 @@ public class InternalStepRunner implements StepRunner {
logger.log("Starting tests ...");
controller.jobController().cloud().startTests(testerEndpoint.get(),
TesterCloud.Suite.of(id.type()),
- testConfig(id.application(), id.type().zone(controller.system()),
- controller.system(), endpoints,
- listClusters(id.application(), zones)));
+ testConfigSerializer.configJson(id.application(),
+ id.type(),
+ endpoints,
+ listClusters(id.application(), zones)));
return Optional.of(running);
}
@@ -612,28 +608,6 @@ public class InternalStepRunner implements StepRunner {
throw new IllegalStateException("No step deploys to the zone this run is for!");
}
- /** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. */
- private Set<ZoneId> testedZoneAndProductionZones(RunId id) {
- return Stream.concat(Stream.of(id.type().zone(controller.system())),
- application(id.application()).productionDeployments().keySet().stream())
- .collect(Collectors.toSet());
- }
-
- /** Returns all endpoints for all current deployments of the given real application. */
- private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) {
- ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder();
- for (ZoneId zone : zones) {
- controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone))
- .filter(endpoints -> ! endpoints.isEmpty())
- .or(() -> Optional.of(controller.applications().routingPolicies().get(id, zone).stream()
- .map(policy -> policy.endpointIn(controller.system()).url())
- .collect(Collectors.toUnmodifiableList()))
- .filter(endpoints -> ! endpoints.isEmpty()))
- .ifPresent(endpoints -> deployments.put(zone, endpoints));
- }
- return deployments.build();
- }
-
/** Returns all content clusters in all current deployments of the given real application. */
private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) {
ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder();
@@ -712,38 +686,6 @@ public class InternalStepRunner implements StepRunner {
return deploymentSpec.getBytes(StandardCharsets.UTF_8);
}
- /** Returns the config for the tests to run for the given job. */
- private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system,
- Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) {
- Slime slime = new Slime();
- Cursor root = slime.setObject();
-
- root.setString("application", id.serializedForm());
- root.setString("zone", testerZone.value());
- root.setString("system", system.value());
-
- Cursor endpointsObject = root.setObject("endpoints");
- deployments.forEach((zone, endpoints) -> {
- Cursor endpointArray = endpointsObject.setArray(zone.value());
- for (URI endpoint : endpoints)
- endpointArray.addString(endpoint.toString());
- });
-
- Cursor clustersObject = root.setObject("clusters");
- clusters.forEach((zone, clusterList) -> {
- Cursor clusterArray = clustersObject.setArray(zone.value());
- for (String cluster : clusterList)
- clusterArray.addString(cluster);
- });
-
- try {
- return SlimeUtils.toJsonBytes(slime);
- }
- catch (IOException e) {
- throw new UncheckedIOException(e);
- }
- }
-
/** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */
private class DualLogger {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
index 5bb9686ce0f..c644af2e554 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.controller.deployment;
import com.google.common.collect.ImmutableMap;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -41,6 +42,7 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.UnaryOperator;
import java.util.logging.Level;
+import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.google.common.collect.ImmutableList.copyOf;
@@ -408,12 +410,19 @@ public class JobController {
Optional<URI> testerEndpoint(RunId id) {
DeploymentId testerId = new DeploymentId(id.tester().id(), id.type().zone(controller.system()));
return controller.applications().getDeploymentEndpoints(testerId)
- .flatMap(uris -> uris.stream().findAny())
+ .stream().findAny()
.or(() -> controller.applications().routingPolicies().get(testerId).stream()
.findAny()
.map(policy -> policy.endpointIn(controller.system()).url()));
}
+ /** Returns the zone tested by the given job type, together with all production zones of the given application. */
+ public Set<ZoneId> testedZoneAndProductionZones(ApplicationId id, JobType type) {
+ return Stream.concat(Stream.of(type.zone(controller.system())),
+ controller.applications().require(id).productionDeployments().keySet().stream())
+ .collect(Collectors.toSet());
+ }
+
// TODO jvenstad: Find a more appropriate way of doing this, at least when this is the only build service.
private long nextBuild(ApplicationId id) {
return 1 + controller.applications().require(id).deploymentJobs()
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializer.java
new file mode 100644
index 00000000000..e79692d34ed
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializer.java
@@ -0,0 +1,82 @@
+package com.yahoo.vespa.hosted.controller.deployment;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Slime;
+import com.yahoo.vespa.config.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.net.URI;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Serializes config for integration tests against Vespa deployments.
+ *
+ * @author jonmv
+ */
+public class TestConfigSerializer {
+
+ private final SystemName system;
+
+ public TestConfigSerializer(SystemName system) {
+ this.system = system;
+ }
+
+ public Slime configSlime(ApplicationId id,
+ JobType type,
+ Map<ZoneId, Map<ClusterSpec.Id, URI>> deployments,
+ Map<ZoneId, List<String>> clusters) {
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+
+ root.setString("application", id.serializedForm());
+ root.setString("zone", type.zone(system).value());
+ root.setString("system", system.value());
+
+ Cursor endpointsObject = root.setObject("endpoints"); // TODO jvenstad: remove.
+ deployments.forEach((zone, endpoints) -> {
+ Cursor endpointArray = endpointsObject.setArray(zone.value());
+ for (URI endpoint : endpoints.values())
+ endpointArray.addString(endpoint.toString());
+ });
+
+ Cursor zoneEndpointsObject = root.setObject("zoneEndpoints");
+ deployments.forEach((zone, endpoints) -> {
+ Cursor clusterEndpointsObject = zoneEndpointsObject.setObject(zone.value());
+ endpoints.forEach((cluster, endpoint) -> {
+ clusterEndpointsObject.setString(cluster.value(), endpoint.toString());
+ });
+ });
+
+ if ( ! clusters.isEmpty()) {
+ Cursor clustersObject = root.setObject("clusters");
+ clusters.forEach((zone, clusterList) -> {
+ Cursor clusterArray = clustersObject.setArray(zone.value());
+ for (String cluster : clusterList)
+ clusterArray.addString(cluster);
+ });
+ }
+
+ return slime;
+ }
+
+ /** Returns the config for the tests to run for the given job. */
+ public byte[] configJson(ApplicationId id,
+ JobType type,
+ Map<ZoneId, Map<ClusterSpec.Id, URI>> deployments,
+ Map<ZoneId, List<String>> clusters) {
+ try {
+ return SlimeUtils.toJsonBytes(configSlime(id, type, deployments, clusters));
+ }
+ catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+}
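A usage sketch of the new TestConfigSerializer, along the lines of the test added further down; the application id, cluster id, URL and cluster name are placeholders:

import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.deployment.TestConfigSerializer;

import java.net.URI;
import java.util.List;
import java.util.Map;

class TestConfigExample {

    public static void main(String[] args) {
        ZoneId zone = JobType.systemTest.zone(SystemName.PublicCd);
        byte[] json = new TestConfigSerializer(SystemName.PublicCd)
                .configJson(ApplicationId.from("tenant", "application", "default"),
                            JobType.systemTest,
                            Map.of(zone, Map.of(ClusterSpec.Id.from("container"), URI.create("https://server/"))),
                            Map.of(zone, List.of("content")));
        // The JSON contains "application", "zone", "system", the legacy "endpoints" object,
        // the new per-cluster "zoneEndpoints" object, and "clusters".
        System.out.println(new String(json));
    }

}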
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceForwarder.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceForwarder.java
index 299ea168c7a..4e461534cc0 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceForwarder.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceForwarder.java
@@ -78,6 +78,7 @@ public class NameServiceForwarder {
"requests. This likely means that the name service is not successfully " +
"executing requests");
}
+ log.log(LogLevel.INFO, "Queueing name service request: " + request);
db.writeNameServiceQueue(queue.with(request, priority).last(maxQueuedRequests));
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceQueue.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceQueue.java
index 684fb091d92..4768577aa7b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceQueue.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/dns/NameServiceQueue.java
@@ -74,6 +74,7 @@ public class NameServiceQueue {
var queue = new NameServiceQueue(requests);
for (int i = 0; i < n && !queue.requests.isEmpty(); i++) {
var request = queue.requests.peek();
+ log.log(LogLevel.INFO, "Dispatching name service request: " + request);
try {
request.dispatchTo(nameService);
queue.requests.poll();
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java
index 508401b0e14..4a98cb49227 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPolicies.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
@@ -74,46 +75,44 @@ public class RoutingPolicies {
if (!controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) return;
var lbs = new LoadBalancers(application, zone, controller.applications().configServer()
.getLoadBalancers(application, zone));
- removeObsoleteEndpointsFromDns(lbs);
- storePoliciesOf(lbs);
- removeObsoletePolicies(lbs);
- registerEndpointsInDns(lbs);
+ try (var lock = db.lockRoutingPolicies()) {
+ removeObsoleteEndpointsFromDns(lbs, lock);
+ storePoliciesOf(lbs, lock);
+ removeObsoletePolicies(lbs, lock);
+ registerEndpointsInDns(lbs, lock);
+ }
}
/** Create global endpoints for given route, if any */
- private void registerEndpointsInDns(LoadBalancers loadBalancers) {
- try (var lock = db.lockRoutingPolicies()) {
- Map<RoutingId, List<RoutingPolicy>> routingTable = routingTableFrom(get(loadBalancers.application));
-
- // Create DNS record for each routing ID
- for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
- Endpoint endpoint = RoutingPolicy.endpointOf(routeEntry.getKey().application(), routeEntry.getKey().rotation(),
- controller.system());
- Set<AliasTarget> targets = routeEntry.getValue()
- .stream()
- .filter(policy -> policy.dnsZone().isPresent())
- .map(policy -> new AliasTarget(policy.canonicalName(),
- policy.dnsZone().get(),
- policy.zone()))
- .collect(Collectors.toSet());
- controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), targets, Priority.normal);
- }
+ private void registerEndpointsInDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
+ Map<RoutingId, List<RoutingPolicy>> routingTable = routingTableFrom(get(loadBalancers.application));
+
+ // Create DNS record for each routing ID
+ for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
+ Endpoint endpoint = RoutingPolicy.endpointOf(routeEntry.getKey().application(), routeEntry.getKey().rotation(),
+ controller.system());
+ Set<AliasTarget> targets = routeEntry.getValue()
+ .stream()
+ .filter(policy -> policy.dnsZone().isPresent())
+ .map(policy -> new AliasTarget(policy.canonicalName(),
+ policy.dnsZone().get(),
+ policy.zone()))
+ .collect(Collectors.toSet());
+ controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), targets, Priority.normal);
}
}
/** Store routing policies for given route */
- private void storePoliciesOf(LoadBalancers loadBalancers) {
- try (var lock = db.lockRoutingPolicies()) {
- Set<RoutingPolicy> policies = new LinkedHashSet<>(get(loadBalancers.application));
- for (LoadBalancer loadBalancer : loadBalancers.list) {
- RoutingPolicy policy = createPolicy(loadBalancers.application, loadBalancers.zone, loadBalancer);
- if (!policies.add(policy)) {
- policies.remove(policy);
- policies.add(policy);
- }
+ private void storePoliciesOf(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
+ Set<RoutingPolicy> policies = new LinkedHashSet<>(get(loadBalancers.application));
+ for (LoadBalancer loadBalancer : loadBalancers.list) {
+ RoutingPolicy policy = createPolicy(loadBalancers.application, loadBalancers.zone, loadBalancer);
+ if (!policies.add(policy)) {
+ policies.remove(policy);
+ policies.add(policy);
}
- db.writeRoutingPolicies(loadBalancers.application, policies);
}
+ db.writeRoutingPolicies(loadBalancers.application, policies);
}
/** Create a policy for given load balancer and register a CNAME for it */
@@ -128,36 +127,32 @@ public class RoutingPolicies {
}
/** Remove obsolete policies for given route and their CNAME records */
- private void removeObsoletePolicies(LoadBalancers loadBalancers) {
- try (var lock = db.lockRoutingPolicies()) {
- var allPolicies = new LinkedHashSet<>(get(loadBalancers.application));
- var removalCandidates = new HashSet<>(allPolicies);
- var activeLoadBalancers = loadBalancers.list.stream()
- .map(LoadBalancer::hostname)
- .collect(Collectors.toSet());
- // Remove active load balancers and irrelevant zones from candidates
- removalCandidates.removeIf(policy -> activeLoadBalancers.contains(policy.canonicalName()) ||
- !policy.zone().equals(loadBalancers.zone));
- for (var policy : removalCandidates) {
- var dnsName = policy.endpointIn(controller.system()).dnsName();
- controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(dnsName), Priority.normal);
- allPolicies.remove(policy);
- }
- db.writeRoutingPolicies(loadBalancers.application, allPolicies);
+ private void removeObsoletePolicies(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
+ var allPolicies = new LinkedHashSet<>(get(loadBalancers.application));
+ var removalCandidates = new HashSet<>(allPolicies);
+ var activeLoadBalancers = loadBalancers.list.stream()
+ .map(LoadBalancer::hostname)
+ .collect(Collectors.toSet());
+ // Remove active load balancers and irrelevant zones from candidates
+ removalCandidates.removeIf(policy -> activeLoadBalancers.contains(policy.canonicalName()) ||
+ !policy.zone().equals(loadBalancers.zone));
+ for (var policy : removalCandidates) {
+ var dnsName = policy.endpointIn(controller.system()).dnsName();
+ controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(dnsName), Priority.normal);
+ allPolicies.remove(policy);
}
+ db.writeRoutingPolicies(loadBalancers.application, allPolicies);
}
/** Remove unreferenced global endpoints for given route from DNS */
- private void removeObsoleteEndpointsFromDns(LoadBalancers loadBalancers) {
- try (var lock = db.lockRoutingPolicies()) {
- var zonePolicies = get(loadBalancers.application, loadBalancers.zone);
- var removalCandidates = routingTableFrom(zonePolicies).keySet();
- var activeRoutingIds = routingIdsFrom(loadBalancers.list);
- removalCandidates.removeAll(activeRoutingIds);
- for (var id : removalCandidates) {
- Endpoint endpoint = RoutingPolicy.endpointOf(id.application(), id.rotation(), controller.system());
- controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()), Priority.normal);
- }
+ private void removeObsoleteEndpointsFromDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) {
+ var zonePolicies = get(loadBalancers.application, loadBalancers.zone);
+ var removalCandidates = routingTableFrom(zonePolicies).keySet();
+ var activeRoutingIds = routingIdsFrom(loadBalancers.list);
+ removalCandidates.removeAll(activeRoutingIds);
+ for (var id : removalCandidates) {
+ Endpoint endpoint = RoutingPolicy.endpointOf(id.application(), id.rotation(), controller.system());
+ controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()), Priority.normal);
}
}
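The refactoring above takes db.lockRoutingPolicies() once in the calling method instead of separately in each helper, and each private helper now accepts a Lock parameter, marked @SuppressWarnings("unused"), purely as compile-time evidence that its caller holds the lock. A generic sketch of that pattern, with hypothetical names and a stand-in lock type:

import java.util.ArrayList;
import java.util.List;

class LockWitnessSketch {

    // Stand-in for the curator lock returned by db.lockRoutingPolicies().
    static class Lock implements AutoCloseable {
        @Override public void close() { }
    }

    private final List<String> policies = new ArrayList<>();

    private Lock lockPolicies() { return new Lock(); }

    // The entry point acquires the lock once ...
    void refresh(String policy) {
        try (Lock lock = lockPolicies()) {
            store(policy, lock);
            prune(lock);
        }
    }

    // ... and each helper requires a Lock argument it never reads, so it cannot be
    // called without the caller first holding the lock.
    private void store(String policy, @SuppressWarnings("unused") Lock lock) {
        policies.add(policy);
    }

    private void prune(@SuppressWarnings("unused") Lock lock) {
        policies.removeIf(String::isBlank);
    }

}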
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index a2e5a0c78f7..d704d701cf0 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -106,6 +106,10 @@ public class CuratorDb {
CuratorDb(Curator curator, Duration tryLockTimeout) {
this.curator = curator;
this.tryLockTimeout = tryLockTimeout;
+
+ // TODO: Remove after 7.60
+ curator.delete(root.append("openStackServerPool"));
+ curator.delete(root.append("vespaServerPool"));
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
@@ -168,16 +172,6 @@ public class CuratorDb {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
- @SuppressWarnings("unused") // Called by internal code
- public Lock lockVespaServerPool() {
- return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
- }
-
- @SuppressWarnings("unused") // Called by internal code
- public Lock lockOpenStackServerPool() {
- return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
- }
-
public Lock lockOsVersions() {
return lock(lockRoot.append("osTargetVersion"), defaultLockTimeout);
}
@@ -469,26 +463,6 @@ public class CuratorDb {
return curator.getChildren(provisionStatePath());
}
- @SuppressWarnings("unused")
- public Optional<byte[]> readVespaServerPool() {
- return curator.getData(vespaServerPoolPath());
- }
-
- @SuppressWarnings("unused")
- public void writeVespaServerPool(byte[] data) {
- curator.set(vespaServerPoolPath(), data);
- }
-
- @SuppressWarnings("unused")
- public Optional<byte[]> readOpenStackServerPool() {
- return curator.getData(openStackServerPoolPath());
- }
-
- @SuppressWarnings("unused")
- public void writeOpenStackServerPool(byte[] data) {
- curator.set(openStackServerPoolPath(), data);
- }
-
// -------------- Routing policies ----------------------------------------
public void writeRoutingPolicies(ApplicationId application, Set<RoutingPolicy> policies) {
@@ -589,14 +563,6 @@ public class CuratorDb {
return provisionStatePath().append(provisionId);
}
- private static Path vespaServerPoolPath() {
- return root.append("vespaServerPool");
- }
-
- private static Path openStackServerPoolPath() {
- return root.append("openStackServerPool");
- }
-
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index b4483010176..3db8c447572 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -66,6 +66,7 @@ import com.yahoo.vespa.hosted.controller.application.RoutingPolicy;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTrigger.ChangesToCancel;
+import com.yahoo.vespa.hosted.controller.deployment.TestConfigSerializer;
import com.yahoo.vespa.hosted.controller.restapi.ErrorResponse;
import com.yahoo.vespa.hosted.controller.restapi.MessageResponse;
import com.yahoo.vespa.hosted.controller.restapi.ResourceResponse;
@@ -95,6 +96,7 @@ import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Base64;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -186,6 +188,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), path.get("instance"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)), request.getUri());
+ if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
@@ -629,7 +632,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
// ask the routing layer here
Cursor serviceUrlArray = response.setArray("serviceUrls");
controller.applications().getDeploymentEndpoints(deploymentId)
- .ifPresent(endpoints -> endpoints.forEach(endpoint -> serviceUrlArray.addString(endpoint.toString())));
+ .forEach(endpoint -> serviceUrlArray.addString(endpoint.toString()));
response.setString("nodes", withPath("/zone/v2/" + deploymentId.zoneId().environment() + "/" + deploymentId.zoneId().region() + "/nodes/v2/node/?&recursive=true&application=" + deploymentId.applicationId().tenant() + "." + deploymentId.applicationId().application() + "." + deploymentId.applicationId().instance(), request.getUri()).toString());
response.setString("yamasUrl", monitoringSystemUri(deploymentId).toString());
@@ -1139,6 +1142,14 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
}
+ private HttpResponse testConfig(ApplicationId id, JobType type) {
+ var endpoints = controller.applications().clusterEndpoints(id, controller.jobController().testedZoneAndProductionZones(id, type));
+ return new SlimeJsonResponse(new TestConfigSerializer(controller.system()).configSlime(id,
+ type,
+ endpoints,
+ Collections.emptyMap()));
+ }
+
private static DeploymentJobs.JobReport toJobReport(String tenantName, String applicationName, Inspector report) {
Optional<DeploymentJobs.JobError> jobError = Optional.empty();
if (report.field("jobError").valid()) {
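The new test-config route registered above can be exercised with a plain HTTP GET; a sketch using the JDK HTTP client, where the controller host, tenant, application, instance and job name are all placeholders:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

class TestConfigFetch {

    public static void main(String[] args) throws Exception {
        URI uri = URI.create("https://controller.example.com/application/v4/tenant/t1/application/a1" +
                             "/instance/default/job/system-test/test-config");
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(HttpRequest.newBuilder(uri).GET().build(), HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // JSON produced by TestConfigSerializer.configSlime(...)
    }

}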
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index 3fca94ef21f..de31f1f67f9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -293,7 +293,7 @@ public class ControllerTest {
"app1--tenant1.global.vespa.oath.cloud",
"app1.tenant1.global.vespa.yahooapis.com",
"app1--tenant1.global.vespa.yahooapis.com"),
- tester.configServer().rotationCnames().get(new DeploymentId(application.id(), deployment.zone())));
+ tester.configServer().rotationNames().get(new DeploymentId(application.id(), deployment.zone())));
}
tester.flushDnsRequests();
assertEquals(3, tester.controllerTester().nameService().records().size());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
index 370fd03d9e7..095651df033 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
@@ -3,12 +3,15 @@ package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeploymentSpec;
+import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Inspector;
+import com.yahoo.slime.JsonFormat;
+import com.yahoo.slime.ObjectTraverser;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.configserverbindings.ConfigChangeActions;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.configserverbindings.RefeedAction;
@@ -25,6 +28,7 @@ import com.yahoo.vespa.hosted.controller.application.RoutingPolicy;
import org.junit.Before;
import org.junit.Test;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URI;
@@ -34,6 +38,7 @@ import java.nio.file.Paths;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Executors;
@@ -369,7 +374,7 @@ public class InternalStepRunnerTest {
tester.runner().run();
assertEquals(failed, tester.jobs().run(id).get().steps().get(Step.endTests));
assertTestLogEntries(id, Step.copyVespaLogs,
- new LogEntry(lastId + 2, tester.clock().millis(), debug, "Copying Vespa log from nodes of tenant.application in zone test.us-east-1 in default ..."),
+ new LogEntry(lastId + 2, tester.clock().millis(), debug, "Copying Vespa log from nodes of tenant.application in test.us-east-1 ..."),
new LogEntry(lastId + 3, 1554970337084L, info,
"17480180-v6-3.ostk.bm2.prod.ne1.yahoo.com\tcontainer\tContainer.com.yahoo.container.jdisc.ConfiguredApplication\n" +
"Switching to the latest deployed set of configurations and components. Application switch number: 2"),
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java
new file mode 100644
index 00000000000..bc411d4377d
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/TestConfigSerializerTest.java
@@ -0,0 +1,36 @@
+package com.yahoo.vespa.hosted.controller.deployment;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.config.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.List;
+import java.util.Map;
+
+import static com.yahoo.vespa.hosted.controller.deployment.InternalDeploymentTester.appId;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author jonmv
+ */
+public class TestConfigSerializerTest {
+
+ @Test
+ public void testConfig() throws IOException {
+ ZoneId zone = JobType.systemTest.zone(SystemName.PublicCd);
+ byte[] json = new TestConfigSerializer(SystemName.PublicCd).configJson(appId,
+ JobType.systemTest,
+ Map.of(zone, Map.of(ClusterSpec.Id.from("ai"),
+ URI.create("https://server/"))),
+ Map.of(zone, List.of("facts")));
+ byte[] expected = InternalStepRunnerTest.class.getResourceAsStream("/testConfig.json").readAllBytes();
+ assertEquals(new String(SlimeUtils.toJsonBytes(SlimeUtils.jsonToSlime(expected))),
+ new String(json));
+ }
+
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index 1894a51adc3..d4df9c20ead 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -16,6 +16,7 @@ import com.yahoo.vespa.hosted.controller.api.identifiers.Hostname;
import com.yahoo.vespa.hosted.controller.api.identifiers.Identifier;
import com.yahoo.vespa.hosted.controller.api.identifiers.TenantId;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Log;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
@@ -59,7 +60,7 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
private final Set<DeploymentId> suspendedApplications = new HashSet<>();
private final Map<ZoneId, List<LoadBalancer>> loadBalancers = new HashMap<>();
private final Map<DeploymentId, List<Log>> warnings = new HashMap<>();
- private final Map<DeploymentId, Set<String>> rotationCnames = new HashMap<>();
+ private final Map<DeploymentId, Set<String>> rotationNames = new HashMap<>();
private Version lastPrepareVersion = null;
private RuntimeException prepareException = null;
@@ -180,8 +181,8 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
warnings.put(deployment, List.copyOf(logs));
}
- public Map<DeploymentId, Set<String>> rotationCnames() {
- return Collections.unmodifiableMap(rotationCnames);
+ public Map<DeploymentId, Set<String>> rotationNames() {
+ return Collections.unmodifiableMap(rotationNames);
}
@Override
@@ -223,8 +224,8 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
}
@Override
- public PreparedApplication deploy(DeploymentId deployment, DeployOptions deployOptions, Set<String> rotationCnames,
- Set<String> rotationNames, byte[] content) {
+ public PreparedApplication deploy(DeploymentId deployment, DeployOptions deployOptions, Set<String> rotationNames,
+ List<ContainerEndpoint> containerEndpoints, byte[] content) {
lastPrepareVersion = deployOptions.vespaVersion.map(Version::fromString).orElse(null);
if (prepareException != null) {
RuntimeException prepareException = this.prepareException;
@@ -236,64 +237,41 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
if (nodeRepository().list(deployment.zoneId(), deployment.applicationId()).isEmpty())
provision(deployment.zoneId(), deployment.applicationId());
- this.rotationCnames.put(deployment, Set.copyOf(rotationCnames));
-
- return new PreparedApplication() {
-
- // TODO: Remove when no longer part of interface
- public void activate() {}
-
- // TODO: Remove when no longer part of interface
- public List<Log> messages() {
- Log warning = new Log();
- warning.level = "WARNING";
- warning.time = 1;
- warning.message = "The warning";
-
- Log info = new Log();
- info.level = "INFO";
- info.time = 2;
- info.message = "The info";
-
- return List.of(warning, info);
- }
-
- @Override
- public PrepareResponse prepareResponse() {
- Application application = applications.get(deployment.applicationId());
- application.activate();
- List<Node> nodes = nodeRepository.list(deployment.zoneId(), deployment.applicationId());
- for (Node node : nodes) {
- nodeRepository.putByHostname(deployment.zoneId(), new Node(node.hostname(),
- Node.State.active,
- node.type(),
- node.owner(),
- node.currentVersion(),
- application.version().get()));
- }
- serviceStatus.put(deployment, new ServiceConvergence(deployment.applicationId(),
- deployment.zoneId(),
- false,
- 2,
- nodes.stream()
- .map(node -> new ServiceConvergence.Status(node.hostname(),
- 43,
- "container",
- 1))
- .collect(Collectors.toList())));
-
- PrepareResponse prepareResponse = new PrepareResponse();
- prepareResponse.message = "foo";
- prepareResponse.configChangeActions = configChangeActions != null
- ? configChangeActions
- : new ConfigChangeActions(Collections.emptyList(),
- Collections.emptyList());
- setConfigChangeActions(null);
- prepareResponse.tenant = new TenantId("tenant");
- prepareResponse.log = warnings.getOrDefault(deployment, Collections.emptyList());
- return prepareResponse;
+ this.rotationNames.put(deployment, Set.copyOf(rotationNames));
+
+ return () -> {
+ Application application = applications.get(deployment.applicationId());
+ application.activate();
+ List<Node> nodes = nodeRepository.list(deployment.zoneId(), deployment.applicationId());
+ for (Node node : nodes) {
+ nodeRepository.putByHostname(deployment.zoneId(), new Node(node.hostname(),
+ Node.State.active,
+ node.type(),
+ node.owner(),
+ node.currentVersion(),
+ application.version().get()));
}
-
+ serviceStatus.put(deployment, new ServiceConvergence(deployment.applicationId(),
+ deployment.zoneId(),
+ false,
+ 2,
+ nodes.stream()
+ .map(node -> new ServiceConvergence.Status(node.hostname(),
+ 43,
+ "container",
+ 1))
+ .collect(Collectors.toList())));
+
+ PrepareResponse prepareResponse = new PrepareResponse();
+ prepareResponse.message = "foo";
+ prepareResponse.configChangeActions = configChangeActions != null
+ ? configChangeActions
+ : new ConfigChangeActions(Collections.emptyList(),
+ Collections.emptyList());
+ setConfigChangeActions(null);
+ prepareResponse.tenant = new TenantId("tenant");
+ prepareResponse.log = warnings.getOrDefault(deployment, Collections.emptyList());
+ return prepareResponse;
};
}
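
Note: the hunk above replaces ConfigServerMock's anonymous PreparedApplication with a lambda, which only compiles because the interface is now effectively down to the single prepareResponse() method. A minimal stand-alone sketch of that refactoring pattern, using hypothetical Prepared/Response types rather than the real Vespa interfaces:

    // Hypothetical names; they stand in for the real PreparedApplication/PrepareResponse.
    interface Prepared {
        Response prepareResponse();
    }

    class Response {
        public String message;
    }

    class PreparedFactory {
        static Prepared prepare() {
            // An anonymous "new Prepared() { ... }" can become a lambda once
            // prepareResponse() is the only abstract method left on the interface.
            return () -> {
                Response response = new Response();
                response.message = "prepared";
                return response;
            };
        }
    }

The body runs the same either way; the lambda form just drops the boilerplate of the anonymous class and the now-removed TODO methods.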
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/RoutingGeneratorMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/RoutingGeneratorMock.java
index 410d7950e97..98b2dd2f7f3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/RoutingGeneratorMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/RoutingGeneratorMock.java
@@ -1,14 +1,17 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.integration;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerator;
+import java.net.URI;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
/**
 * Returns a default set of endpoints for every query when it has no mappings; otherwise, returns the endpoints added by the user.
@@ -34,6 +37,14 @@ public class RoutingGeneratorMock implements RoutingGenerator {
: routingTable.getOrDefault(deployment, Collections.emptyList());
}
+ @Override
+ public Map<ClusterSpec.Id, URI> clusterEndpoints(DeploymentId deployment) {
+ return endpoints(deployment).stream()
+ .limit(1)
+ .collect(Collectors.toMap(__ -> ClusterSpec.Id.from("default"),
+ endpoint -> URI.create(endpoint.endpoint())));
+ }
+
public void putEndpoints(DeploymentId deployment, List<RoutingEndpoint> endpoints) {
routingTable.put(deployment, endpoints);
}
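
Note: the added clusterEndpoints() override maps at most one endpoint to the fixed cluster id "default". A likely reason for the limit(1) is that Collectors.toMap throws on duplicate keys, so a constant key only works with a single element; a small illustration with plain strings (names here are placeholders, not the controller API):

    import java.net.URI;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    class FirstEndpointAsDefault {
        // With a constant key, a second element would make Collectors.toMap throw
        // IllegalStateException on the duplicate key, hence the limit(1).
        static Map<String, URI> firstAsDefault(List<String> endpoints) {
            return endpoints.stream()
                            .limit(1)
                            .collect(Collectors.toMap(endpoint -> "default", URI::create));
        }

        public static void main(String[] args) {
            System.out.println(firstAsDefault(List.of("https://c0.example.test/", "https://c1.example.test/")));
            // prints {default=https://c0.example.test/}
        }
    }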
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java
index 5cf8268c18e..700e6e9cb42 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java
@@ -43,7 +43,7 @@ public class ZoneFilterMock implements ZoneList {
@Override
public ZoneList all() {
- return filter(zoneId -> true);
+ return filter(zone -> true);
}
@Override
@@ -63,17 +63,17 @@ public class ZoneFilterMock implements ZoneList {
@Override
public ZoneList in(Environment... environments) {
- return filter(zoneId -> new HashSet<>(Arrays.asList(environments)).contains(zoneId.environment()));
+ return filter(zone -> new HashSet<>(Arrays.asList(environments)).contains(zone.getEnvironment()));
}
@Override
public ZoneList in(RegionName... regions) {
- return filter(zoneId -> new HashSet<>(Arrays.asList(regions)).contains(zoneId.region()));
+ return filter(zone -> new HashSet<>(Arrays.asList(regions)).contains(zone.getRegionName()));
}
@Override
public ZoneList among(ZoneId... zones) {
- return filter(zoneId -> new HashSet<>(Arrays.asList(zones)).contains(zoneId));
+ return filter(zone -> new HashSet<>(Arrays.asList(zones)).contains(zone.getId()));
}
@Override
@@ -88,15 +88,15 @@ public class ZoneFilterMock implements ZoneList {
@Override
public ZoneList ofCloud(CloudName cloud) {
- return filter(zoneId -> zoneId.cloud().equals(cloud));
+ return filter(zone -> zone.getCloudName().equals(cloud));
}
- private ZoneFilterMock filter(Predicate<ZoneId> condition) {
+ private ZoneFilterMock filter(Predicate<ZoneApi> condition) {
return new ZoneFilterMock(
zones.stream()
- .filter(zoneApi -> negate ?
- condition.negate().test(zoneApi.toDeprecatedId()) :
- condition.test(zoneApi.toDeprecatedId()))
+ .filter(zone -> negate ?
+ condition.negate().test(zone) :
+ condition.test(zone))
.collect(Collectors.toList()),
false);
}
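
Note: the filter helper now takes a Predicate<ZoneApi> and applies it, or its negation, directly to each zone instead of to zone.toDeprecatedId(). A generic sketch of that negatable-filter pattern (illustrative only):

    import java.util.List;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    class NegatableFilter {
        // One predicate drives both the normal and the negated filter, mirroring how the
        // mock applies either condition or condition.negate() to every zone.
        static <T> List<T> filter(List<T> items, Predicate<T> condition, boolean negate) {
            Predicate<T> effective = negate ? condition.negate() : condition;
            return items.stream().filter(effective).collect(Collectors.toList());
        }
    }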
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
index d94c40e6b99..449ca509ee4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RoutingPoliciesTest.java
@@ -9,7 +9,9 @@ import com.yahoo.config.provision.RotationName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
@@ -18,6 +20,7 @@ import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import org.junit.Test;
+import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -195,6 +198,20 @@ public class RoutingPoliciesTest {
assertEquals("Keeps routing policies for " + app1, 4, tester.controller().applications().routingPolicies().get(app1.id()).size());
}
+ @Test
+ public void cluster_endpoints_resolve_from_policies() {
+ provisionLoadBalancers(3, app1.id(), zone1);
+ tester.deployCompletely(app1, applicationPackage);
+ tester.controllerTester().routingGenerator().putEndpoints(new DeploymentId(app1.id(), zone1), Collections.emptyList());
+ assertEquals(Map.of(ClusterSpec.Id.from("c0"),
+ URI.create("https://c0.app1.tenant1.us-west-1.vespa.oath.cloud/"),
+ ClusterSpec.Id.from("c1"),
+ URI.create("https://c1.app1.tenant1.us-west-1.vespa.oath.cloud/"),
+ ClusterSpec.Id.from("c2"),
+ URI.create("https://c2.app1.tenant1.us-west-1.vespa.oath.cloud/")),
+ tester.controller().applications().clusterEndpoints(new DeploymentId(app1.id(), zone1)));
+ }
+
private Set<RoutingPolicy> policies(Application application) {
return tester.controller().curator().readRoutingPolicies(application.id());
}
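
Note: the expected URIs in the new test follow the pattern https://<cluster>.<application>.<tenant>.<region>.vespa.oath.cloud/. A tiny sketch that assembles such a URI from its parts; this is only a format illustration, not the controller's actual endpoint construction:

    import java.net.URI;

    class EndpointUriSketch {
        // Placeholder builder; the real controller derives these from routing policies.
        static URI clusterEndpoint(String cluster, String application, String tenant, String region) {
            return URI.create(String.format("https://%s.%s.%s.%s.vespa.oath.cloud/",
                                            cluster, application, tenant, region));
        }

        public static void main(String[] args) {
            // Matches the first expectation in the test above.
            System.out.println(clusterEndpoint("c0", "app1", "tenant1", "us-west-1"));
        }
    }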
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 2c17abf9e90..71c4b41a276 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -709,14 +709,14 @@ public class ApplicationApiTest extends ControllerContainerTest {
// Invalid deployment fails
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/global-rotation", GET)
.userIdentity(USER_ID),
- "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in zone prod.us-east-3 in default\"}",
+ "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-east-3\"}",
404);
// Change status of non-existing deployment fails
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-east-3/global-rotation/override", PUT)
.userIdentity(USER_ID)
.data("{\"reason\":\"unit-test\"}"),
- "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in zone prod.us-east-3 in default\"}",
+ "{\"error-code\":\"NOT_FOUND\",\"message\":\"application 'tenant1.application1.instance1' has no deployment in prod.us-east-3\"}",
404);
// GET global rotation status
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-recursive.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-recursive.json
index c90d5759791..c0e9d10a40c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-recursive.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application1-recursive.json
@@ -223,7 +223,7 @@
],
"rotationId": "rotation-id-1",
"instances": [
- @include(dev-us-west-1.json),
+ @include(dev-us-east-1.json),
@include(prod-us-central-1.json)
],
"metrics": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json
index b051fb38a41..ad7e4f00027 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/delete-with-active-deployments.json
@@ -1,4 +1,4 @@
{
"error-code": "BAD_REQUEST",
- "message": "Could not delete 'application 'tenant1.application1.instance1'': It has active deployments in: zone dev.us-west-1 in default, zone prod.us-central-1 in default"
+ "message": "Could not delete 'application 'tenant1.application1.instance1'': It has active deployments in: dev.us-west-1, prod.us-central-1"
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-west-1.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1.json
index 1a2025e4de2..1a2025e4de2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-west-1.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1.json
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/test-config.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/test-config.json
new file mode 100644
index 00000000000..fef3cf6a372
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/test-config.json
@@ -0,0 +1,21 @@
+{
+ "application": "tenant1:application1:default",
+ "zone": "dev.us-east-1",
+ "system": "main",
+ "endpoints": {
+ "dev.us-east-1": [
+ "http://old-endpoint.vespa.yahooapis.com:4080"
+ ],
+ "prod.us-central-1": [
+ "http://old-endpoint.vespa.yahooapis.com:4080"
+ ]
+ },
+ "zoneEndpoints": {
+ "dev.us-east-1": {
+ "default": "http://old-endpoint.vespa.yahooapis.com:4080"
+ },
+ "prod.us-central-1": {
+ "default": "http://old-endpoint.vespa.yahooapis.com:4080"
+ }
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json
index 77d5b3479be..390024fe33d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/us-east-3-log-without-first.json
@@ -12,7 +12,7 @@
{
"at": 1000,
"type": "debug",
- "message": "Deactivating tester of tenant.application in zone prod.us-east-3 in default ..."
+ "message": "Deactivating tester of tenant.application in prod.us-east-3 ..."
}
]
},
diff --git a/controller-server/src/test/resources/testConfig.json b/controller-server/src/test/resources/testConfig.json
new file mode 100644
index 00000000000..4145d863995
--- /dev/null
+++ b/controller-server/src/test/resources/testConfig.json
@@ -0,0 +1,20 @@
+{
+ "application": "tenant:application:default",
+ "zone": "test.aws-us-east-1c",
+ "system": "publiccd",
+ "endpoints": {
+ "test.aws-us-east-1c": [
+ "https://server/"
+ ]
+ },
+ "zoneEndpoints": {
+ "test.aws-us-east-1c": {
+ "ai": "https://server/"
+ }
+ },
+ "clusters": {
+ "test.aws-us-east-1c": [
+ "facts"
+ ]
+ }
+}
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java
index 8344830e229..4e7ef5a1ff6 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java
@@ -81,9 +81,8 @@ public interface Docker {
*/
boolean pullImageAsyncIfNeeded(DockerImage image);
- /**
- * Deletes the local images that are currently not in use by any container and not recently used.
- */
+ boolean noManagedContainersRunning(String manager);
+
boolean deleteUnusedDockerImages(List<DockerImage> excludes, Duration minImageAgeToDelete);
/**
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
index 9299cedaf21..8cdb0bee7c2 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
@@ -296,6 +296,13 @@ public class DockerImpl implements Docker {
return encodedContainerName.substring(FRAMEWORK_CONTAINER_PREFIX.length());
}
+ @Override
+ public boolean noManagedContainersRunning(String manager) {
+ return listAllContainers().stream()
+ .filter(container -> isManagedBy(container, manager))
+ .noneMatch(container -> "running".equalsIgnoreCase(container.getState()));
+ }
+
List<com.github.dockerjava.api.model.Container> listAllContainers() {
try {
return dockerClient.listContainersCmd().withShowAll(true).exec();
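
Note: noManagedContainersRunning(manager) filters the full container list down to those managed by the given manager and returns true only when none of them report state "running". The same stream check over a placeholder data model (illustrative, not the docker-java API):

    import java.util.List;
    import java.util.Map;

    class ManagedContainerCheck {
        // Placeholder model: each container is a map with "manager" and "state" keys.
        static boolean noManagedContainersRunning(List<Map<String, String>> containers, String manager) {
            return containers.stream()
                             .filter(container -> manager.equals(container.get("manager")))
                             .noneMatch(container -> "running".equalsIgnoreCase(container.get("state")));
        }
    }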
diff --git a/document/src/vespa/document/base/idstring.cpp b/document/src/vespa/document/base/idstring.cpp
index 7606ec58f9f..223baa6fd8d 100644
--- a/document/src/vespa/document/base/idstring.cpp
+++ b/document/src/vespa/document/base/idstring.cpp
@@ -6,6 +6,7 @@
#include <vespa/vespalib/util/md5.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <limits>
+#include <cerrno>
using vespalib::string;
using vespalib::stringref;
diff --git a/document/src/vespa/document/datatype/referencedatatype.cpp b/document/src/vespa/document/datatype/referencedatatype.cpp
index bc91f6b30ed..6caa6fdf7a9 100644
--- a/document/src/vespa/document/datatype/referencedatatype.cpp
+++ b/document/src/vespa/document/datatype/referencedatatype.cpp
@@ -3,6 +3,7 @@
#include "referencedatatype.h"
#include <vespa/document/fieldvalue/referencefieldvalue.h>
#include <vespa/vespalib/util/exceptions.h>
+#include <ostream>
using vespalib::make_string;
using vespalib::IllegalArgumentException;
diff --git a/document/src/vespa/document/fieldvalue/arrayfieldvalue.cpp b/document/src/vespa/document/fieldvalue/arrayfieldvalue.cpp
index 8d928f7fb12..2f7f2208cbe 100644
--- a/document/src/vespa/document/fieldvalue/arrayfieldvalue.cpp
+++ b/document/src/vespa/document/fieldvalue/arrayfieldvalue.cpp
@@ -8,6 +8,7 @@
#include <vespa/vespalib/util/polymorphicarrays.h>
#include <vespa/vespalib/util/xmlstream.h>
#include <vespa/log/log.h>
+#include <ostream>
LOG_SETUP(".document.fieldvalue.array");
diff --git a/document/src/vespa/document/fieldvalue/mapfieldvalue.cpp b/document/src/vespa/document/fieldvalue/mapfieldvalue.cpp
index 9c1b101e4ab..b5464401df2 100644
--- a/document/src/vespa/document/fieldvalue/mapfieldvalue.cpp
+++ b/document/src/vespa/document/fieldvalue/mapfieldvalue.cpp
@@ -9,6 +9,7 @@
#include <vespa/vespalib/stllike/hash_set.hpp>
#include <cassert>
#include <algorithm>
+#include <ostream>
#include <vespa/log/log.h>
LOG_SETUP(".document.fieldvalue.map");
diff --git a/document/src/vespa/document/fieldvalue/referencefieldvalue.cpp b/document/src/vespa/document/fieldvalue/referencefieldvalue.cpp
index dcda102f656..281161fccbf 100644
--- a/document/src/vespa/document/fieldvalue/referencefieldvalue.cpp
+++ b/document/src/vespa/document/fieldvalue/referencefieldvalue.cpp
@@ -4,6 +4,7 @@
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <cassert>
+#include <ostream>
using vespalib::IllegalArgumentException;
using vespalib::make_string;
diff --git a/document/src/vespa/document/fieldvalue/stringfieldvalue.cpp b/document/src/vespa/document/fieldvalue/stringfieldvalue.cpp
index d79535d54cd..1d38653dba2 100644
--- a/document/src/vespa/document/fieldvalue/stringfieldvalue.cpp
+++ b/document/src/vespa/document/fieldvalue/stringfieldvalue.cpp
@@ -11,6 +11,7 @@
#include <vespa/document/serialization/annotationdeserializer.h>
#include <vespa/document/repo/fixedtyperepo.h>
#include <vespa/document/serialization/vespadocumentserializer.h>
+#include <ostream>
using vespalib::nbostream;
using vespalib::ConstBufferRef;
diff --git a/document/src/vespa/document/fieldvalue/structfieldvalue.cpp b/document/src/vespa/document/fieldvalue/structfieldvalue.cpp
index ac37970213c..078e4d0ec21 100644
--- a/document/src/vespa/document/fieldvalue/structfieldvalue.cpp
+++ b/document/src/vespa/document/fieldvalue/structfieldvalue.cpp
@@ -14,6 +14,7 @@
#include <vespa/document/util/bytebuffer.h>
#include <vespa/vespalib/util/xmlstream.h>
#include <algorithm>
+#include <ostream>
#include <vespa/log/log.h>
LOG_SETUP(".document.structfieldvalue");
diff --git a/document/src/vespa/document/select/doctype.cpp b/document/src/vespa/document/select/doctype.cpp
index ba0338b9b61..0b55407092e 100644
--- a/document/src/vespa/document/select/doctype.cpp
+++ b/document/src/vespa/document/select/doctype.cpp
@@ -6,6 +6,7 @@
#include <vespa/document/update/documentupdate.h>
#include <vespa/document/fieldvalue/document.h>
#include <vespa/document/datatype/documenttype.h>
+#include <ostream>
namespace document::select {
diff --git a/document/src/vespa/document/select/operator.cpp b/document/src/vespa/document/select/operator.cpp
index 85dbef5ad9b..eaa795549bf 100644
--- a/document/src/vespa/document/select/operator.cpp
+++ b/document/src/vespa/document/select/operator.cpp
@@ -5,6 +5,7 @@
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <cassert>
+#include <ostream>
#include <vespa/log/log.h>
LOG_SETUP(".document.select.operator");
diff --git a/document/src/vespa/document/select/simpleparser.cpp b/document/src/vespa/document/select/simpleparser.cpp
index a19d6086abe..879a1f195e8 100644
--- a/document/src/vespa/document/select/simpleparser.cpp
+++ b/document/src/vespa/document/select/simpleparser.cpp
@@ -2,6 +2,7 @@
#include "simpleparser.h"
#include "compare.h"
+#include <cerrno>
namespace document {
diff --git a/document/src/vespa/document/select/value.cpp b/document/src/vespa/document/select/value.cpp
index 6b4bf15fc3b..5ebf527b82b 100644
--- a/document/src/vespa/document/select/value.cpp
+++ b/document/src/vespa/document/select/value.cpp
@@ -3,6 +3,7 @@
#include "value.h"
#include "operator.h"
#include <vespa/document/fieldvalue/fieldvalue.h>
+#include <ostream>
namespace document {
namespace select {
diff --git a/document/src/vespa/document/update/addvalueupdate.cpp b/document/src/vespa/document/update/addvalueupdate.cpp
index e5a99b49a9e..e1132b2b571 100644
--- a/document/src/vespa/document/update/addvalueupdate.cpp
+++ b/document/src/vespa/document/update/addvalueupdate.cpp
@@ -7,6 +7,7 @@
#include <vespa/document/util/serializableexceptions.h>
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/util/xmlstream.h>
+#include <ostream>
using vespalib::IllegalArgumentException;
using vespalib::IllegalStateException;
diff --git a/document/src/vespa/document/update/arithmeticvalueupdate.cpp b/document/src/vespa/document/update/arithmeticvalueupdate.cpp
index 3af9350062e..90286da521a 100644
--- a/document/src/vespa/document/update/arithmeticvalueupdate.cpp
+++ b/document/src/vespa/document/update/arithmeticvalueupdate.cpp
@@ -5,6 +5,7 @@
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/xmlstream.h>
+#include <ostream>
using vespalib::IllegalArgumentException;
using vespalib::IllegalStateException;
diff --git a/document/src/vespa/document/update/assignfieldpathupdate.cpp b/document/src/vespa/document/update/assignfieldpathupdate.cpp
index bec717874dc..63e61ed3221 100644
--- a/document/src/vespa/document/update/assignfieldpathupdate.cpp
+++ b/document/src/vespa/document/update/assignfieldpathupdate.cpp
@@ -9,6 +9,7 @@
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/util/exceptions.h>
#include <boost/numeric/conversion/cast.hpp>
+#include <ostream>
#include <vespa/log/log.h>
LOG_SETUP(".document.update.fieldpathupdate");
diff --git a/document/src/vespa/document/update/assignvalueupdate.cpp b/document/src/vespa/document/update/assignvalueupdate.cpp
index cfedc1eb01b..48b30f13437 100644
--- a/document/src/vespa/document/update/assignvalueupdate.cpp
+++ b/document/src/vespa/document/update/assignvalueupdate.cpp
@@ -7,6 +7,7 @@
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/xmlstream.h>
+#include <ostream>
using vespalib::IllegalArgumentException;
using vespalib::IllegalStateException;
diff --git a/document/src/vespa/document/update/documentupdate.cpp b/document/src/vespa/document/update/documentupdate.cpp
index 48f32cb7d65..f2b8d40e0a3 100644
--- a/document/src/vespa/document/update/documentupdate.cpp
+++ b/document/src/vespa/document/update/documentupdate.cpp
@@ -12,6 +12,7 @@
#include <vespa/document/datatype/documenttype.h>
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/vespalib/util/xmlstream.h>
+#include <ostream>
using vespalib::IllegalArgumentException;
using vespalib::IllegalStateException;
diff --git a/document/src/vespa/document/update/fieldupdate.cpp b/document/src/vespa/document/update/fieldupdate.cpp
index d36a134ecaa..3498d14d96e 100644
--- a/document/src/vespa/document/update/fieldupdate.cpp
+++ b/document/src/vespa/document/update/fieldupdate.cpp
@@ -5,6 +5,7 @@
#include <vespa/document/fieldvalue/document.h>
#include <vespa/document/datatype/documenttype.h>
#include <vespa/vespalib/objects/nbostream.h>
+#include <ostream>
namespace document {
diff --git a/document/src/vespa/document/update/mapvalueupdate.cpp b/document/src/vespa/document/update/mapvalueupdate.cpp
index be970b3c30a..0837615e4fb 100644
--- a/document/src/vespa/document/update/mapvalueupdate.cpp
+++ b/document/src/vespa/document/update/mapvalueupdate.cpp
@@ -7,6 +7,7 @@
#include <vespa/document/util/serializableexceptions.h>
#include <vespa/vespalib/util/xmlstream.h>
#include <vespa/vespalib/objects/nbostream.h>
+#include <ostream>
using vespalib::IllegalArgumentException;
using vespalib::IllegalStateException;
diff --git a/document/src/vespa/document/update/removevalueupdate.cpp b/document/src/vespa/document/update/removevalueupdate.cpp
index fdbee3cb394..60176d93dce 100644
--- a/document/src/vespa/document/update/removevalueupdate.cpp
+++ b/document/src/vespa/document/update/removevalueupdate.cpp
@@ -8,6 +8,7 @@
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/document/util/serializableexceptions.h>
#include <vespa/vespalib/util/xmlstream.h>
+#include <ostream>
using vespalib::IllegalArgumentException;
using vespalib::IllegalStateException;
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java b/documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java
index 82e7a87e95b..df0e0f0abdd 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java
@@ -36,7 +36,7 @@ public class VisitorParameters extends Parameters {
private String remoteDataHandler = null;
private VisitorDataHandler localDataHandler;
private VisitorControlHandler controlHandler;
- private Map<String, byte []> libraryParameters = new TreeMap<String, byte []>();
+ private Map<String, byte []> libraryParameters = new TreeMap<>();
private Route visitRoute = null;
private float weight = 1;
private long maxFirstPassHits = -1;
diff --git a/eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp b/eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp
index 6b10d2782f2..b4b4b628ee2 100644
--- a/eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp
+++ b/eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp
@@ -17,21 +17,48 @@ using Dict = std::vector<vespalib::string>;
//-----------------------------------------------------------------------------
-nbostream make_sparse() {
+template <typename T> std::vector<bool> with_cell_type_opts();
+template <> std::vector<bool> with_cell_type_opts<double>() { return {false, true}; }
+template <> std::vector<bool> with_cell_type_opts<float>() { return {true}; }
+
+template <typename T> uint8_t cell_type();
+template <> uint8_t cell_type<double>() { return 0; }
+template <> uint8_t cell_type<float>() { return 1; }
+
+template <typename T> const char *cell_type_str();
+template <> const char *cell_type_str<double>() { return ""; }
+template <> const char *cell_type_str<float>() { return "<float>"; }
+
+template <typename T> nbostream make_sparse(bool with_cell_type) {
nbostream data;
- data << uint8_t(0x1);
+ if (with_cell_type) {
+ data << uint8_t(0x5);
+ data << cell_type<T>();
+ } else {
+ data << uint8_t(0x1);
+ }
return data;
}
-nbostream make_dense() {
+template <typename T> nbostream make_dense(bool with_cell_type) {
nbostream data;
- data << uint8_t(0x2);
+ if (with_cell_type) {
+ data << uint8_t(0x6);
+ data << cell_type<T>();
+ } else {
+ data << uint8_t(0x2);
+ }
return data;
}
-nbostream make_mixed() {
+template <typename T> nbostream make_mixed(bool with_cell_type) {
nbostream data;
- data << uint8_t(0x3);
+ if (with_cell_type) {
+ data << uint8_t(0x7);
+ data << cell_type<T>();
+ } else {
+ data << uint8_t(0x3);
+ }
return data;
}
@@ -113,204 +140,226 @@ double mix(std::initializer_list<double> vals) {
//-----------------------------------------------------------------------------
void make_number_test(Cursor &test, double value) {
- TensorSpec spec("double");
- spec.add({{}}, value);
- nbostream sparse = make_sparse();
- sparse.putInt1_4Bytes(0);
- sparse.putInt1_4Bytes(1);
- sparse << value;
- nbostream dense = make_dense();
- dense.putInt1_4Bytes(0);
- dense << value;
- nbostream mixed = make_mixed();
- mixed.putInt1_4Bytes(0);
- mixed.putInt1_4Bytes(0);
- mixed << value;
- set_tensor(test, spec);
- add_binary(test, {sparse, dense, mixed});
- if (value == 0.0) {
- nbostream empty = make_sparse();
- empty.putInt1_4Bytes(0);
- empty.putInt1_4Bytes(0);
- add_binary(test, empty);
+ for (bool with_cell_type: with_cell_type_opts<double>()) {
+ TensorSpec spec("double");
+ spec.add({{}}, value);
+ nbostream sparse = make_sparse<double>(with_cell_type);
+ sparse.putInt1_4Bytes(0);
+ sparse.putInt1_4Bytes(1);
+ sparse << value;
+ nbostream dense = make_dense<double>(with_cell_type);
+ dense.putInt1_4Bytes(0);
+ dense << value;
+ nbostream mixed = make_mixed<double>(with_cell_type);
+ mixed.putInt1_4Bytes(0);
+ mixed.putInt1_4Bytes(0);
+ mixed << value;
+ set_tensor(test, spec);
+ add_binary(test, {sparse, dense, mixed});
+ if (value == 0.0) {
+ nbostream empty = make_sparse<double>(with_cell_type);
+ empty.putInt1_4Bytes(0);
+ empty.putInt1_4Bytes(0);
+ add_binary(test, empty);
+ }
}
}
//-----------------------------------------------------------------------------
+template <typename T>
void make_vector_test(Cursor &test, size_t x_size) {
- TensorSpec spec(vespalib::make_string("tensor(x[%zu])", x_size));
- nbostream dense = make_dense();
- dense.putInt1_4Bytes(1);
- dense.writeSmallString("x");
- dense.putInt1_4Bytes(x_size);
- nbostream mixed = make_mixed();
- mixed.putInt1_4Bytes(0);
- mixed.putInt1_4Bytes(1);
- mixed.writeSmallString("x");
- mixed.putInt1_4Bytes(x_size);
- for (size_t x = 0; x < x_size; ++x) {
- double value = val(x);
- spec.add({{"x", x}}, value);
- dense << value;
- mixed << value;
+ for (bool with_cell_type: with_cell_type_opts<T>()) {
+ TensorSpec spec(vespalib::make_string("tensor%s(x[%zu])", cell_type_str<T>(), x_size));
+ nbostream dense = make_dense<T>(with_cell_type);
+ dense.putInt1_4Bytes(1);
+ dense.writeSmallString("x");
+ dense.putInt1_4Bytes(x_size);
+ nbostream mixed = make_mixed<T>(with_cell_type);
+ mixed.putInt1_4Bytes(0);
+ mixed.putInt1_4Bytes(1);
+ mixed.writeSmallString("x");
+ mixed.putInt1_4Bytes(x_size);
+ for (size_t x = 0; x < x_size; ++x) {
+ double value = val(x);
+ spec.add({{"x", x}}, value);
+ dense << static_cast<T>(value);
+ mixed << static_cast<T>(value);
+ }
+ set_tensor(test, spec);
+ add_binary(test, {dense, mixed});
}
- set_tensor(test, spec);
- add_binary(test, {dense, mixed});
}
+template <typename T>
void make_matrix_test(Cursor &test, size_t x_size, size_t y_size) {
- TensorSpec spec(vespalib::make_string("tensor(x[%zu],y[%zu])", x_size, y_size));
- nbostream dense = make_dense();
- dense.putInt1_4Bytes(2);
- dense.writeSmallString("x");
- dense.putInt1_4Bytes(x_size);
- dense.writeSmallString("y");
- dense.putInt1_4Bytes(y_size);
- nbostream mixed = make_mixed();
- mixed.putInt1_4Bytes(0);
- mixed.putInt1_4Bytes(2);
- mixed.writeSmallString("x");
- mixed.putInt1_4Bytes(x_size);
- mixed.writeSmallString("y");
- mixed.putInt1_4Bytes(y_size);
- for (size_t x = 0; x < x_size; ++x) {
- for (size_t y = 0; y < y_size; ++y) {
- double value = mix({val(x), val(y)});
- spec.add({{"x", x}, {"y", y}}, value);
- dense << value;
- mixed << value;
+ for (bool with_cell_type: with_cell_type_opts<T>()) {
+ TensorSpec spec(vespalib::make_string("tensor%s(x[%zu],y[%zu])", cell_type_str<T>(), x_size, y_size));
+ nbostream dense = make_dense<T>(with_cell_type);
+ dense.putInt1_4Bytes(2);
+ dense.writeSmallString("x");
+ dense.putInt1_4Bytes(x_size);
+ dense.writeSmallString("y");
+ dense.putInt1_4Bytes(y_size);
+ nbostream mixed = make_mixed<T>(with_cell_type);
+ mixed.putInt1_4Bytes(0);
+ mixed.putInt1_4Bytes(2);
+ mixed.writeSmallString("x");
+ mixed.putInt1_4Bytes(x_size);
+ mixed.writeSmallString("y");
+ mixed.putInt1_4Bytes(y_size);
+ for (size_t x = 0; x < x_size; ++x) {
+ for (size_t y = 0; y < y_size; ++y) {
+ double value = mix({val(x), val(y)});
+ spec.add({{"x", x}, {"y", y}}, value);
+ dense << static_cast<T>(value);
+ mixed << static_cast<T>(value);
+ }
}
+ set_tensor(test, spec);
+ add_binary(test, {dense, mixed});
}
- set_tensor(test, spec);
- add_binary(test, {dense, mixed});
}
//-----------------------------------------------------------------------------
+template <typename T>
void make_map_test(Cursor &test, const Dict &x_dict_in) {
- nbostream sparse_base = make_sparse();
- sparse_base.putInt1_4Bytes(1);
- sparse_base.writeSmallString("x");
- sparse_base.putInt1_4Bytes(x_dict_in.size());
- nbostream mixed_base = make_mixed();
- mixed_base.putInt1_4Bytes(1);
- mixed_base.writeSmallString("x");
- mixed_base.putInt1_4Bytes(0);
- mixed_base.putInt1_4Bytes(x_dict_in.size());
- auto x_perm = make_permutations(x_dict_in);
- for (const Dict &x_dict: x_perm) {
- TensorSpec spec("tensor(x{})");
- nbostream sparse = sparse_base;
- nbostream mixed = mixed_base;
- for (vespalib::string x: x_dict) {
- double value = val(x);
- spec.add({{"x", x}}, value);
- sparse.writeSmallString(x);
- mixed.writeSmallString(x);
- sparse << value;
- mixed << value;
+ for (bool with_cell_type: with_cell_type_opts<T>()) {
+ nbostream sparse_base = make_sparse<T>(with_cell_type);
+ sparse_base.putInt1_4Bytes(1);
+ sparse_base.writeSmallString("x");
+ sparse_base.putInt1_4Bytes(x_dict_in.size());
+ nbostream mixed_base = make_mixed<T>(with_cell_type);
+ mixed_base.putInt1_4Bytes(1);
+ mixed_base.writeSmallString("x");
+ mixed_base.putInt1_4Bytes(0);
+ mixed_base.putInt1_4Bytes(x_dict_in.size());
+ auto x_perm = make_permutations(x_dict_in);
+ for (const Dict &x_dict: x_perm) {
+ TensorSpec spec(vespalib::make_string("tensor%s(x{})", cell_type_str<T>()));
+ nbostream sparse = sparse_base;
+ nbostream mixed = mixed_base;
+ for (vespalib::string x: x_dict) {
+ double value = val(x);
+ spec.add({{"x", x}}, value);
+ sparse.writeSmallString(x);
+ mixed.writeSmallString(x);
+ sparse << static_cast<T>(value);
+ mixed << static_cast<T>(value);
+ }
+ set_tensor(test, spec);
+ add_binary(test, {sparse, mixed});
+ }
+ if (x_dict_in.empty()) {
+ TensorSpec spec(vespalib::make_string("tensor%s(x{})", cell_type_str<T>()));
+ set_tensor(test, spec);
+ add_binary(test, {sparse_base, mixed_base});
}
- set_tensor(test, spec);
- add_binary(test, {sparse, mixed});
- }
- if (x_dict_in.empty()) {
- TensorSpec spec("tensor(x{})");
- set_tensor(test, spec);
- add_binary(test, {sparse_base, mixed_base});
}
}
+template <typename T>
void make_mesh_test(Cursor &test, const Dict &x_dict_in, const vespalib::string &y) {
- nbostream sparse_base = make_sparse();
- sparse_base.putInt1_4Bytes(2);
- sparse_base.writeSmallString("x");
- sparse_base.writeSmallString("y");
- sparse_base.putInt1_4Bytes(x_dict_in.size() * 1);
- nbostream mixed_base = make_mixed();
- mixed_base.putInt1_4Bytes(2);
- mixed_base.writeSmallString("x");
- mixed_base.writeSmallString("y");
- mixed_base.putInt1_4Bytes(0);
- mixed_base.putInt1_4Bytes(x_dict_in.size() * 1);
- auto x_perm = make_permutations(x_dict_in);
- for (const Dict &x_dict: x_perm) {
- TensorSpec spec("tensor(x{},y{})");
- nbostream sparse = sparse_base;
- nbostream mixed = mixed_base;
- for (vespalib::string x: x_dict) {
- double value = mix({val(x), val(y)});
- spec.add({{"x", x}, {"y", y}}, value);
- sparse.writeSmallString(x);
- sparse.writeSmallString(y);
- mixed.writeSmallString(x);
- mixed.writeSmallString(y);
- sparse << value;
- mixed << value;
+ for (bool with_cell_type: with_cell_type_opts<T>()) {
+ nbostream sparse_base = make_sparse<T>(with_cell_type);
+ sparse_base.putInt1_4Bytes(2);
+ sparse_base.writeSmallString("x");
+ sparse_base.writeSmallString("y");
+ sparse_base.putInt1_4Bytes(x_dict_in.size() * 1);
+ nbostream mixed_base = make_mixed<T>(with_cell_type);
+ mixed_base.putInt1_4Bytes(2);
+ mixed_base.writeSmallString("x");
+ mixed_base.writeSmallString("y");
+ mixed_base.putInt1_4Bytes(0);
+ mixed_base.putInt1_4Bytes(x_dict_in.size() * 1);
+ auto x_perm = make_permutations(x_dict_in);
+ for (const Dict &x_dict: x_perm) {
+ TensorSpec spec(vespalib::make_string("tensor%s(x{},y{})", cell_type_str<T>()));
+ nbostream sparse = sparse_base;
+ nbostream mixed = mixed_base;
+ for (vespalib::string x: x_dict) {
+ double value = mix({val(x), val(y)});
+ spec.add({{"x", x}, {"y", y}}, value);
+ sparse.writeSmallString(x);
+ sparse.writeSmallString(y);
+ mixed.writeSmallString(x);
+ mixed.writeSmallString(y);
+ sparse << static_cast<T>(value);
+ mixed << static_cast<T>(value);
+ }
+ set_tensor(test, spec);
+ add_binary(test, {sparse, mixed});
+ }
+ if (x_dict_in.empty()) {
+ TensorSpec spec(vespalib::make_string("tensor%s(x{},y{})", cell_type_str<T>()));
+ set_tensor(test, spec);
+ add_binary(test, {sparse_base, mixed_base});
}
- set_tensor(test, spec);
- add_binary(test, {sparse, mixed});
- }
- if (x_dict_in.empty()) {
- TensorSpec spec("tensor(x{},y{})");
- set_tensor(test, spec);
- add_binary(test, {sparse_base, mixed_base});
}
}
//-----------------------------------------------------------------------------
+template <typename T>
void make_vector_map_test(Cursor &test,
const vespalib::string &mapped_name, const Dict &mapped_dict,
const vespalib::string &indexed_name, size_t indexed_size)
{
- auto type_str = vespalib::make_string("tensor(%s{},%s[%zu])",
- mapped_name.c_str(), indexed_name.c_str(), indexed_size);
- ValueType type = ValueType::from_spec(type_str);
- nbostream mixed_base = make_mixed();
- mixed_base.putInt1_4Bytes(1);
- mixed_base.writeSmallString(mapped_name);
- mixed_base.putInt1_4Bytes(1);
- mixed_base.writeSmallString(indexed_name);
- mixed_base.putInt1_4Bytes(indexed_size);
- mixed_base.putInt1_4Bytes(mapped_dict.size());
- auto mapped_perm = make_permutations(mapped_dict);
- for (const Dict &dict: mapped_perm) {
- TensorSpec spec(type.to_spec()); // ensures type string is normalized
- nbostream mixed = mixed_base;
- for (vespalib::string label: dict) {
- mixed.writeSmallString(label);
- for (size_t idx = 0; idx < indexed_size; ++idx) {
- double value = mix({val(label), val(idx)});
- spec.add({{mapped_name, label}, {indexed_name, idx}}, value);
- mixed << value;
+ for (bool with_cell_type: with_cell_type_opts<T>()) {
+ auto type_str = vespalib::make_string("tensor%s(%s{},%s[%zu])", cell_type_str<T>(),
+ mapped_name.c_str(), indexed_name.c_str(), indexed_size);
+ ValueType type = ValueType::from_spec(type_str);
+ nbostream mixed_base = make_mixed<T>(with_cell_type);
+ mixed_base.putInt1_4Bytes(1);
+ mixed_base.writeSmallString(mapped_name);
+ mixed_base.putInt1_4Bytes(1);
+ mixed_base.writeSmallString(indexed_name);
+ mixed_base.putInt1_4Bytes(indexed_size);
+ mixed_base.putInt1_4Bytes(mapped_dict.size());
+ auto mapped_perm = make_permutations(mapped_dict);
+ for (const Dict &dict: mapped_perm) {
+ TensorSpec spec(type.to_spec()); // ensures type string is normalized
+ nbostream mixed = mixed_base;
+ for (vespalib::string label: dict) {
+ mixed.writeSmallString(label);
+ for (size_t idx = 0; idx < indexed_size; ++idx) {
+ double value = mix({val(label), val(idx)});
+ spec.add({{mapped_name, label}, {indexed_name, idx}}, value);
+ mixed << static_cast<T>(value);
+ }
}
+ set_tensor(test, spec);
+ add_binary(test, mixed);
+ }
+ if (mapped_dict.empty()) {
+ TensorSpec spec(type.to_spec()); // ensures type string is normalized
+ set_tensor(test, spec);
+ add_binary(test, mixed_base);
}
- set_tensor(test, spec);
- add_binary(test, mixed);
- }
- if (mapped_dict.empty()) {
- TensorSpec spec(type.to_spec()); // ensures type string is normalized
- set_tensor(test, spec);
- add_binary(test, mixed_base);
}
}
//-----------------------------------------------------------------------------
+template <typename T> void make_typed_tests(test::TestWriter &writer) {
+ make_vector_test<T>(writer.create(), 3);
+ make_matrix_test<T>(writer.create(), 2, 3);
+ make_map_test<T>(writer.create(), {});
+ make_map_test<T>(writer.create(), {"a", "b", "c"});
+ make_mesh_test<T>(writer.create(), {}, "a");
+ make_mesh_test<T>(writer.create(), {"foo", "bar"}, "a");
+ make_vector_map_test<T>(writer.create(), "x", {}, "y", 10);
+ make_vector_map_test<T>(writer.create(), "y", {}, "x", 10);
+ make_vector_map_test<T>(writer.create(), "x", {"a", "b"}, "y", 3);
+ make_vector_map_test<T>(writer.create(), "y", {"a", "b"}, "x", 3);
+}
+
void make_tests(test::TestWriter &writer) {
make_number_test(writer.create(), 0.0);
make_number_test(writer.create(), 42.0);
- make_vector_test(writer.create(), 3);
- make_matrix_test(writer.create(), 2, 3);
- make_map_test(writer.create(), {});
- make_map_test(writer.create(), {"a", "b", "c"});
- make_mesh_test(writer.create(), {}, "a");
- make_mesh_test(writer.create(), {"foo", "bar"}, "a");
- make_vector_map_test(writer.create(), "x", {}, "y", 10);
- make_vector_map_test(writer.create(), "y", {}, "x", 10);
- make_vector_map_test(writer.create(), "x", {"a", "b"}, "y", 3);
- make_vector_map_test(writer.create(), "y", {"a", "b"}, "x", 3);
+ make_typed_tests<double>(writer);
+ make_typed_tests<float>(writer);
}
int main(int, char **) {
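
Note: as the generator above shows, the legacy headers are 0x1 (sparse), 0x2 (dense) and 0x3 (mixed); the with_cell_type variants write 0x5/0x6/0x7 followed by a cell-type byte, 0 for double and 1 for float. A small sketch of just that header encoding, with made-up helper names for illustration:

    import java.io.ByteArrayOutputStream;

    class TensorHeaderSketch {
        // sparse=0x1, dense=0x2, mixed=0x3; OR-ing in 0x4 gives 0x5/0x6/0x7 and signals
        // that a cell-type byte follows (0 = double, 1 = float).
        static byte[] header(int baseFormat, boolean withCellType, int cellType) {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            if (withCellType) {
                out.write(baseFormat | 0x4);
                out.write(cellType);
            } else {
                out.write(baseFormat);
            }
            return out.toByteArray();
        }

        public static void main(String[] args) {
            for (byte b : header(0x1, true, 1)) {
                System.out.printf("%02x ", b & 0xff);   // 05 01 — the sparse<float> prefix
            }
        }
    }

For example, header(0x1, true, 1) yields the 0x05 0x01 prefix that starts every sparse float entry in the regenerated test_spec.json below.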
diff --git a/eval/src/apps/make_tensor_binary_format_test_spec/test_spec.json b/eval/src/apps/make_tensor_binary_format_test_spec/test_spec.json
index 701b829e5bc..f6b535e071a 100644
--- a/eval/src/apps/make_tensor_binary_format_test_spec/test_spec.json
+++ b/eval/src/apps/make_tensor_binary_format_test_spec/test_spec.json
@@ -1,13 +1,23 @@
-{"tensor":{"type":"double","cells":[{"address":{},"value":0}]},"binary":["0x0100010000000000000000","0x02000000000000000000","0x0300000000000000000000","0x010000"]}
-{"tensor":{"type":"double","cells":[{"address":{},"value":42}]},"binary":["0x0100014045000000000000","0x02004045000000000000","0x0300004045000000000000"]}
-{"tensor":{"type":"tensor(x[3])","cells":[{"address":{"x":0},"value":1},{"address":{"x":1},"value":2},{"address":{"x":2},"value":3}]},"binary":["0x02010178033FF000000000000040000000000000004008000000000000","0x0300010178033FF000000000000040000000000000004008000000000000"]}
-{"tensor":{"type":"tensor(x[2],y[3])","cells":[{"address":{"x":0,"y":0},"value":11},{"address":{"x":0,"y":1},"value":12},{"address":{"x":0,"y":2},"value":13},{"address":{"x":1,"y":0},"value":21},{"address":{"x":1,"y":1},"value":22},{"address":{"x":1,"y":2},"value":23}]},"binary":["0x020201780201790340260000000000004028000000000000402A000000000000403500000000000040360000000000004037000000000000","0x03000201780201790340260000000000004028000000000000402A000000000000403500000000000040360000000000004037000000000000"]}
-{"tensor":{"type":"tensor(x{})","cells":[]},"binary":["0x0101017800","0x030101780000"]}
-{"tensor":{"type":"tensor(x{})","cells":[{"address":{"x":"a"},"value":1},{"address":{"x":"b"},"value":2},{"address":{"x":"c"},"value":3}]},"binary":["0x010101780301613FF00000000000000162400000000000000001634008000000000000","0x03010178000301613FF00000000000000162400000000000000001634008000000000000","0x010101780301613FF00000000000000163400800000000000001624000000000000000","0x03010178000301613FF00000000000000163400800000000000001624000000000000000","0x01010178030162400000000000000001613FF000000000000001634008000000000000","0x0301017800030162400000000000000001613FF000000000000001634008000000000000","0x0101017803016240000000000000000163400800000000000001613FF0000000000000","0x030101780003016240000000000000000163400800000000000001613FF0000000000000","0x01010178030163400800000000000001613FF000000000000001624000000000000000","0x0301017800030163400800000000000001613FF000000000000001624000000000000000","0x0101017803016340080000000000000162400000000000000001613FF0000000000000","0x030101780003016340080000000000000162400000000000000001613FF0000000000000"]}
-{"tensor":{"type":"tensor(x{},y{})","cells":[]},"binary":["0x01020178017900","0x0302017801790000"]}
-{"tensor":{"type":"tensor(x{},y{})","cells":[{"address":{"x":"bar","y":"a"},"value":21},{"address":{"x":"foo","y":"a"},"value":11}]},"binary":["0x0102017801790203666F6F016140260000000000000362617201614035000000000000","0x030201780179000203666F6F016140260000000000000362617201614035000000000000","0x01020178017902036261720161403500000000000003666F6F01614026000000000000","0x0302017801790002036261720161403500000000000003666F6F01614026000000000000"]}
-{"tensor":{"type":"tensor(x{},y[10])","cells":[]},"binary":["0x030101780101790A00"]}
-{"tensor":{"type":"tensor(x[10],y{})","cells":[]},"binary":["0x030101790101780A00"]}
-{"tensor":{"type":"tensor(x{},y[3])","cells":[{"address":{"x":"a","y":0},"value":11},{"address":{"x":"a","y":1},"value":12},{"address":{"x":"a","y":2},"value":13},{"address":{"x":"b","y":0},"value":21},{"address":{"x":"b","y":1},"value":22},{"address":{"x":"b","y":2},"value":23}]},"binary":["0x030101780101790302016140260000000000004028000000000000402A0000000000000162403500000000000040360000000000004037000000000000","0x0301017801017903020162403500000000000040360000000000004037000000000000016140260000000000004028000000000000402A000000000000"]}
-{"tensor":{"type":"tensor(x[3],y{})","cells":[{"address":{"x":0,"y":"a"},"value":11},{"address":{"x":0,"y":"b"},"value":21},{"address":{"x":1,"y":"a"},"value":12},{"address":{"x":1,"y":"b"},"value":22},{"address":{"x":2,"y":"a"},"value":13},{"address":{"x":2,"y":"b"},"value":23}]},"binary":["0x030101790101780302016140260000000000004028000000000000402A0000000000000162403500000000000040360000000000004037000000000000","0x0301017901017803020162403500000000000040360000000000004037000000000000016140260000000000004028000000000000402A000000000000"]}
-{"num_tests":12}
+{"tensor":{"type":"double","cells":[{"address":{},"value":0}]},"binary":["0x0100010000000000000000","0x02000000000000000000","0x0300000000000000000000","0x010000","0x050000010000000000000000","0x0600000000000000000000","0x070000000000000000000000","0x05000000"]}
+{"tensor":{"type":"double","cells":[{"address":{},"value":42}]},"binary":["0x0100014045000000000000","0x02004045000000000000","0x0300004045000000000000","0x050000014045000000000000","0x0600004045000000000000","0x070000004045000000000000"]}
+{"tensor":{"type":"tensor(x[3])","cells":[{"address":{"x":0},"value":1},{"address":{"x":1},"value":2},{"address":{"x":2},"value":3}]},"binary":["0x02010178033FF000000000000040000000000000004008000000000000","0x0300010178033FF000000000000040000000000000004008000000000000","0x0600010178033FF000000000000040000000000000004008000000000000","0x070000010178033FF000000000000040000000000000004008000000000000"]}
+{"tensor":{"type":"tensor(x[2],y[3])","cells":[{"address":{"x":0,"y":0},"value":11},{"address":{"x":0,"y":1},"value":12},{"address":{"x":0,"y":2},"value":13},{"address":{"x":1,"y":0},"value":21},{"address":{"x":1,"y":1},"value":22},{"address":{"x":1,"y":2},"value":23}]},"binary":["0x020201780201790340260000000000004028000000000000402A000000000000403500000000000040360000000000004037000000000000","0x03000201780201790340260000000000004028000000000000402A000000000000403500000000000040360000000000004037000000000000","0x06000201780201790340260000000000004028000000000000402A000000000000403500000000000040360000000000004037000000000000","0x0700000201780201790340260000000000004028000000000000402A000000000000403500000000000040360000000000004037000000000000"]}
+{"tensor":{"type":"tensor(x{})","cells":[]},"binary":["0x0101017800","0x030101780000","0x050001017800","0x07000101780000"]}
+{"tensor":{"type":"tensor(x{})","cells":[{"address":{"x":"a"},"value":1},{"address":{"x":"b"},"value":2},{"address":{"x":"c"},"value":3}]},"binary":["0x010101780301613FF00000000000000162400000000000000001634008000000000000","0x03010178000301613FF00000000000000162400000000000000001634008000000000000","0x010101780301613FF00000000000000163400800000000000001624000000000000000","0x03010178000301613FF00000000000000163400800000000000001624000000000000000","0x01010178030162400000000000000001613FF000000000000001634008000000000000","0x0301017800030162400000000000000001613FF000000000000001634008000000000000","0x0101017803016240000000000000000163400800000000000001613FF0000000000000","0x030101780003016240000000000000000163400800000000000001613FF0000000000000","0x01010178030163400800000000000001613FF000000000000001624000000000000000","0x0301017800030163400800000000000001613FF000000000000001624000000000000000","0x0101017803016340080000000000000162400000000000000001613FF0000000000000","0x030101780003016340080000000000000162400000000000000001613FF0000000000000","0x05000101780301613FF00000000000000162400000000000000001634008000000000000","0x0700010178000301613FF00000000000000162400000000000000001634008000000000000","0x05000101780301613FF00000000000000163400800000000000001624000000000000000","0x0700010178000301613FF00000000000000163400800000000000001624000000000000000","0x0500010178030162400000000000000001613FF000000000000001634008000000000000","0x070001017800030162400000000000000001613FF000000000000001634008000000000000","0x050001017803016240000000000000000163400800000000000001613FF0000000000000","0x07000101780003016240000000000000000163400800000000000001613FF0000000000000","0x0500010178030163400800000000000001613FF000000000000001624000000000000000","0x070001017800030163400800000000000001613FF000000000000001624000000000000000","0x050001017803016340080000000000000162400000000000000001613FF0000000000000","0x07000101780003016340080000000000000162400000000000000001613FF0000000000000"]}
+{"tensor":{"type":"tensor(x{},y{})","cells":[]},"binary":["0x01020178017900","0x0302017801790000","0x0500020178017900","0x070002017801790000"]}
+{"tensor":{"type":"tensor(x{},y{})","cells":[{"address":{"x":"bar","y":"a"},"value":21},{"address":{"x":"foo","y":"a"},"value":11}]},"binary":["0x0102017801790203666F6F016140260000000000000362617201614035000000000000","0x030201780179000203666F6F016140260000000000000362617201614035000000000000","0x01020178017902036261720161403500000000000003666F6F01614026000000000000","0x0302017801790002036261720161403500000000000003666F6F01614026000000000000","0x050002017801790203666F6F016140260000000000000362617201614035000000000000","0x07000201780179000203666F6F016140260000000000000362617201614035000000000000","0x0500020178017902036261720161403500000000000003666F6F01614026000000000000","0x070002017801790002036261720161403500000000000003666F6F01614026000000000000"]}
+{"tensor":{"type":"tensor(x{},y[10])","cells":[]},"binary":["0x030101780101790A00","0x07000101780101790A00"]}
+{"tensor":{"type":"tensor(x[10],y{})","cells":[]},"binary":["0x030101790101780A00","0x07000101790101780A00"]}
+{"tensor":{"type":"tensor(x{},y[3])","cells":[{"address":{"x":"a","y":0},"value":11},{"address":{"x":"a","y":1},"value":12},{"address":{"x":"a","y":2},"value":13},{"address":{"x":"b","y":0},"value":21},{"address":{"x":"b","y":1},"value":22},{"address":{"x":"b","y":2},"value":23}]},"binary":["0x030101780101790302016140260000000000004028000000000000402A0000000000000162403500000000000040360000000000004037000000000000","0x0301017801017903020162403500000000000040360000000000004037000000000000016140260000000000004028000000000000402A000000000000","0x07000101780101790302016140260000000000004028000000000000402A0000000000000162403500000000000040360000000000004037000000000000","0x070001017801017903020162403500000000000040360000000000004037000000000000016140260000000000004028000000000000402A000000000000"]}
+{"tensor":{"type":"tensor(x[3],y{})","cells":[{"address":{"x":0,"y":"a"},"value":11},{"address":{"x":0,"y":"b"},"value":21},{"address":{"x":1,"y":"a"},"value":12},{"address":{"x":1,"y":"b"},"value":22},{"address":{"x":2,"y":"a"},"value":13},{"address":{"x":2,"y":"b"},"value":23}]},"binary":["0x030101790101780302016140260000000000004028000000000000402A0000000000000162403500000000000040360000000000004037000000000000","0x0301017901017803020162403500000000000040360000000000004037000000000000016140260000000000004028000000000000402A000000000000","0x07000101790101780302016140260000000000004028000000000000402A0000000000000162403500000000000040360000000000004037000000000000","0x070001017901017803020162403500000000000040360000000000004037000000000000016140260000000000004028000000000000402A000000000000"]}
+{"tensor":{"type":"tensor<float>(x[3])","cells":[{"address":{"x":0},"value":1},{"address":{"x":1},"value":2},{"address":{"x":2},"value":3}]},"binary":["0x0601010178033F8000004000000040400000","0x070100010178033F8000004000000040400000"]}
+{"tensor":{"type":"tensor<float>(x[2],y[3])","cells":[{"address":{"x":0,"y":0},"value":11},{"address":{"x":0,"y":1},"value":12},{"address":{"x":0,"y":2},"value":13},{"address":{"x":1,"y":0},"value":21},{"address":{"x":1,"y":1},"value":22},{"address":{"x":1,"y":2},"value":23}]},"binary":["0x06010201780201790341300000414000004150000041A8000041B0000041B80000","0x0701000201780201790341300000414000004150000041A8000041B0000041B80000"]}
+{"tensor":{"type":"tensor<float>(x{})","cells":[]},"binary":["0x050101017800","0x07010101780000"]}
+{"tensor":{"type":"tensor<float>(x{})","cells":[{"address":{"x":"a"},"value":1},{"address":{"x":"b"},"value":2},{"address":{"x":"c"},"value":3}]},"binary":["0x05010101780301613F800000016240000000016340400000","0x0701010178000301613F800000016240000000016340400000","0x05010101780301613F800000016340400000016240000000","0x0701010178000301613F800000016340400000016240000000","0x05010101780301624000000001613F800000016340400000","0x0701010178000301624000000001613F800000016340400000","0x05010101780301624000000001634040000001613F800000","0x0701010178000301624000000001634040000001613F800000","0x05010101780301634040000001613F800000016240000000","0x0701010178000301634040000001613F800000016240000000","0x05010101780301634040000001624000000001613F800000","0x0701010178000301634040000001624000000001613F800000"]}
+{"tensor":{"type":"tensor<float>(x{},y{})","cells":[]},"binary":["0x0501020178017900","0x070102017801790000"]}
+{"tensor":{"type":"tensor<float>(x{},y{})","cells":[{"address":{"x":"bar","y":"a"},"value":21},{"address":{"x":"foo","y":"a"},"value":11}]},"binary":["0x050102017801790203666F6F01614130000003626172016141A80000","0x07010201780179000203666F6F01614130000003626172016141A80000","0x050102017801790203626172016141A8000003666F6F016141300000","0x07010201780179000203626172016141A8000003666F6F016141300000"]}
+{"tensor":{"type":"tensor<float>(x{},y[10])","cells":[]},"binary":["0x07010101780101790A00"]}
+{"tensor":{"type":"tensor<float>(x[10],y{})","cells":[]},"binary":["0x07010101790101780A00"]}
+{"tensor":{"type":"tensor<float>(x{},y[3])","cells":[{"address":{"x":"a","y":0},"value":11},{"address":{"x":"a","y":1},"value":12},{"address":{"x":"a","y":2},"value":13},{"address":{"x":"b","y":0},"value":21},{"address":{"x":"b","y":1},"value":22},{"address":{"x":"b","y":2},"value":23}]},"binary":["0x070101017801017903020161413000004140000041500000016241A8000041B0000041B80000","0x07010101780101790302016241A8000041B0000041B800000161413000004140000041500000"]}
+{"tensor":{"type":"tensor<float>(x[3],y{})","cells":[{"address":{"x":0,"y":"a"},"value":11},{"address":{"x":0,"y":"b"},"value":21},{"address":{"x":1,"y":"a"},"value":12},{"address":{"x":1,"y":"b"},"value":22},{"address":{"x":2,"y":"a"},"value":13},{"address":{"x":2,"y":"b"},"value":23}]},"binary":["0x070101017901017803020161413000004140000041500000016241A8000041B0000041B80000","0x07010101790101780302016241A8000041B0000041B800000161413000004140000041500000"]}
+{"num_tests":22}
diff --git a/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp b/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
index c8e57f970e3..91a6087ea3a 100644
--- a/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
+++ b/eval/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp
@@ -20,12 +20,12 @@ TEST("require that dimensions can be combined")
{
EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}}, {{"b", 5}}));
EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 5}}, {{"b", 5}}));
- EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 7}}, {{"b", 5}}));
+ EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 5}}, {{"b", 5}}));
EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
combine({{"a", 3}, {"c", 5}, {"d", 7}},
- {{"b", 11}, {"c", 13}, {"e", 17}}));
+ {{"b", 11}, {"c", 5}, {"e", 17}}));
EXPECT_EQUAL(ValueType::tensor_type({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}),
- combine({{"b", 11}, {"c", 13}, {"e", 17}},
+ combine({{"b", 11}, {"c", 5}, {"e", 17}},
{{"a", 3}, {"c", 5}, {"d", 7}}));
}
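
Note: the corrected expectations give every dimension that appears in both operands the same size, which suggests the combiner assumes shared dense dimensions agree. A small stand-alone check of that property over plain maps (hypothetical helper, not the Vespa tensor API):

    import java.util.Map;

    class DimensionSizeCheck {
        // Shared dense dimensions must agree on size before combining.
        static boolean canCombine(Map<String, Integer> left, Map<String, Integer> right) {
            return left.entrySet().stream()
                       .filter(entry -> right.containsKey(entry.getKey()))
                       .allMatch(entry -> entry.getValue().equals(right.get(entry.getKey())));
        }

        public static void main(String[] args) {
            System.out.println(canCombine(Map.of("a", 3, "b", 5), Map.of("b", 5))); // true
            System.out.println(canCombine(Map.of("a", 3, "b", 7), Map.of("b", 5))); // false
        }
    }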
diff --git a/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp b/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp
index b43a127bc60..d1491e4f758 100644
--- a/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp
+++ b/eval/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp
@@ -76,8 +76,8 @@ TEST("test tensor serialization for SparseTensor") {
TensorSpec("tensor(x{})")
.add({{"x", "1"}}, 3)));
TEST_DO(verify_serialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x00,
- 0x00, 0x40, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00 },
+ 0x00, 0x40, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00 },
TensorSpec("tensor(x{},y{})")
.add({{"x", ""}, {"y", ""}}, 3)));
TEST_DO(verify_serialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x01,
@@ -85,23 +85,32 @@ TEST("test tensor serialization for SparseTensor") {
0x00, 0x00 },
TensorSpec("tensor(x{},y{})")
.add({{"x", "1"}, {"y", ""}}, 3)));
- TEST_DO(verify_serialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x00,
- 0x01, 0x33, 0x40, 0x08, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00 },
+ TEST_DO(verify_serialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x00,
+ 0x01, 0x33, 0x40, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
TensorSpec("tensor(x{},y{})")
.add({{"x", ""}, {"y", "3"}}, 3)));
- TEST_DO(verify_serialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x01,
- 0x32, 0x01, 0x34, 0x40, 0x08, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00 },
+ TEST_DO(verify_serialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79, 0x01, 0x01,
+ 0x32, 0x01, 0x34, 0x40, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 },
TensorSpec("tensor(x{},y{})")
.add({{"x", "2"}, {"y", "4"}}, 3)));
- TEST_DO(verify_serialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79,
- 0x01, 0x01, 0x31, 0x00, 0x40, 0x08,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ TEST_DO(verify_serialized({ 0x01, 0x02, 0x01, 0x78, 0x01, 0x79,
+ 0x01, 0x01, 0x31, 0x00, 0x40, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
TensorSpec("tensor(x{},y{})")
.add({{"x", "1"}, {"y", ""}}, 3)));
}
+TEST("test float cells from sparse tensor") {
+ TEST_DO(verify_serialized({ 0x05, 0x01,
+ 0x02, 0x01, 0x78, 0x01, 0x79,
+ 0x01, 0x01, 0x31, 0x00,
+ 0x40, 0x40, 0x00, 0x00 },
+ TensorSpec("tensor<float>(x{},y{})")
+ .add({{"x", "1"}, {"y", ""}}, 3)));
+}
+
TEST("test tensor serialization for DenseTensor") {
TEST_DO(verify_serialized({0x02, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -187,7 +196,7 @@ TEST("test tensor serialization for DenseTensor") {
.add({{"x", 2}, {"y", 4}}, 3)));
}
-TEST("test 'float' cells") {
+TEST("test float cells for dense tensor") {
TEST_DO(verify_serialized({0x06, 0x01, 0x02, 0x01, 0x78, 0x03,
0x01, 0x79, 0x05,
0x00, 0x00, 0x00, 0x00,
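
The expected bytes in the new "float cells from sparse tensor" case above follow the extended header this commit introduces: a format id announcing that cell-type information follows, one cell-type byte, then the usual dimension names, cell count, addresses and values, with values narrowed to 32-bit floats. Below is a minimal standalone decode of that exact byte sequence (no Vespa dependencies; the network byte order of the float is inferred from the expected bytes, so treat it as a sketch rather than the library's own reader):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
        // Expected serialization of tensor<float>(x{},y{}) with cell {x:"1", y:""} = 3,
        // copied from the new "test float cells from sparse tensor" case above.
        const unsigned char buf[] = { 0x05,              // format id: sparse with cell type
                                      0x01,              // cell type: 1 = float (0 = double)
                                      0x02,              // number of mapped dimensions
                                      0x01, 0x78,        // dimension "x" (length-prefixed name)
                                      0x01, 0x79,        // dimension "y"
                                      0x01,              // number of cells
                                      0x01, 0x31,        // label "1" for dimension x
                                      0x00,              // empty label for dimension y
                                      0x40, 0x40, 0x00, 0x00 }; // 3.0f in network byte order
        // Reassemble the single float cell from the last four bytes.
        uint32_t bits = (uint32_t(buf[11]) << 24) | (uint32_t(buf[12]) << 16) |
                        (uint32_t(buf[13]) << 8)  |  uint32_t(buf[14]);
        float value;
        std::memcpy(&value, &bits, sizeof(value));
        assert(value == 3.0f);
        return 0;
    }
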
diff --git a/eval/src/vespa/eval/eval/operation.cpp b/eval/src/vespa/eval/eval/operation.cpp
index 2c10e070bbd..fa0a99de461 100644
--- a/eval/src/vespa/eval/eval/operation.cpp
+++ b/eval/src/vespa/eval/eval/operation.cpp
@@ -2,6 +2,7 @@
#include "operation.h"
#include <vespa/vespalib/util/approx.h>
+#include <algorithm>
namespace vespalib::eval::operation {
diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
index 6210295ebd4..2206cde49a9 100644
--- a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
+++ b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
@@ -170,8 +170,11 @@ Value::UP
DefaultTensorEngine::from_spec(const TensorSpec &spec) const
{
ValueType type = ValueType::from_spec(spec.type());
- if (!tensor::Tensor::supported({type})) {
- return std::make_unique<WrappedSimpleTensor>(eval::SimpleTensor::create(spec));
+ if (type.is_error()) {
+ return std::make_unique<ErrorValue>();
+ } else if (type.is_double()) {
+ double value = spec.cells().empty() ? 0.0 : spec.cells().begin()->second.value;
+ return std::make_unique<DoubleValue>(value);
} else if (type.is_dense()) {
DirectDenseTensorBuilder builder(type);
for (const auto &cell: spec.cells()) {
@@ -195,13 +198,8 @@ DefaultTensorEngine::from_spec(const TensorSpec &spec) const
}
}
return builder.build();
- } else if (type.is_double()) {
- double value = spec.cells().empty() ? 0.0 : spec.cells().begin()->second.value;
- return std::make_unique<DoubleValue>(value);
- } else {
- assert(type.is_error());
- return std::make_unique<ErrorValue>();
}
+ return std::make_unique<WrappedSimpleTensor>(eval::SimpleTensor::create(spec));
}
struct CellFunctionFunAdapter : tensor::CellFunction {
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp
index df6ac162d7f..b5c5d9b6a04 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_address_combiner.cpp
@@ -57,31 +57,7 @@ AddressContext::~AddressContext() = default;
eval::ValueType
DenseTensorAddressCombiner::combineDimensions(const eval::ValueType &lhs, const eval::ValueType &rhs)
{
- // NOTE: both lhs and rhs are sorted according to dimension names.
- std::vector<eval::ValueType::Dimension> result;
- auto lhsItr = lhs.dimensions().cbegin();
- auto rhsItr = rhs.dimensions().cbegin();
- while (lhsItr != lhs.dimensions().end() &&
- rhsItr != rhs.dimensions().end()) {
- if (lhsItr->name == rhsItr->name) {
- result.emplace_back(lhsItr->name, std::min(lhsItr->size, rhsItr->size));
- ++lhsItr;
- ++rhsItr;
- } else if (lhsItr->name < rhsItr->name) {
- result.emplace_back(*lhsItr++);
- } else {
- result.emplace_back(*rhsItr++);
- }
- }
- while (lhsItr != lhs.dimensions().end()) {
- result.emplace_back(*lhsItr++);
- }
- while (rhsItr != rhs.dimensions().end()) {
- result.emplace_back(*rhsItr++);
- }
- return (result.empty() ?
- eval::ValueType::double_type() :
- eval::ValueType::tensor_type(std::move(result)));
+ return eval::ValueType::join(lhs, rhs);
}
}
diff --git a/eval/src/vespa/eval/tensor/serialization/common.h b/eval/src/vespa/eval/tensor/serialization/common.h
deleted file mode 100644
index 40b1840be6e..00000000000
--- a/eval/src/vespa/eval/tensor/serialization/common.h
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib::tensor {
-
-enum class SerializeFormat {FLOAT, DOUBLE};
-
-}
diff --git a/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp b/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
index 4b1ccc8db5d..677fb40b0f4 100644
--- a/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
+++ b/eval/src/vespa/eval/tensor/serialization/dense_binary_format.cpp
@@ -7,6 +7,8 @@
#include <cassert>
using vespalib::nbostream;
+using vespalib::eval::ValueType;
+using CellType = vespalib::eval::ValueType::CellType;
namespace vespalib::tensor {
@@ -15,15 +17,7 @@ using Dimension = eval::ValueType::Dimension;
namespace {
-eval::ValueType
-makeValueType(std::vector<Dimension> &&dimensions) {
- return (dimensions.empty() ?
- eval::ValueType::double_type() :
- eval::ValueType::tensor_type(std::move(dimensions)));
-}
-
-size_t
-encodeDimensions(nbostream &stream, const eval::ValueType & type) {
+size_t encodeDimensions(nbostream &stream, const eval::ValueType & type) {
stream.putInt1_4Bytes(type.dimensions().size());
size_t cellsSize = 1;
for (const auto &dimension : type.dimensions()) {
@@ -35,15 +29,13 @@ encodeDimensions(nbostream &stream, const eval::ValueType & type) {
}
template<typename T>
-void
-encodeCells(nbostream &stream, DenseTensorView::CellsRef cells) {
+void encodeCells(nbostream &stream, DenseTensorView::CellsRef cells) {
for (const auto &value : cells) {
stream << static_cast<T>(value);
}
}
-size_t
-decodeDimensions(nbostream & stream, std::vector<Dimension> & dimensions) {
+size_t decodeDimensions(nbostream & stream, std::vector<Dimension> & dimensions) {
vespalib::string dimensionName;
size_t dimensionsSize = stream.getInt1_4Bytes();
size_t dimensionSize;
@@ -58,8 +50,7 @@ decodeDimensions(nbostream & stream, std::vector<Dimension> & dimensions) {
}
template<typename T, typename V>
-void
-decodeCells(nbostream &stream, size_t cellsSize, V & cells) {
+void decodeCells(nbostream &stream, size_t cellsSize, V &cells) {
T cellValue = 0.0;
for (size_t i = 0; i < cellsSize; ++i) {
stream >> cellValue;
@@ -68,13 +59,12 @@ decodeCells(nbostream &stream, size_t cellsSize, V & cells) {
}
template <typename V>
-void decodeCells(SerializeFormat format, nbostream &stream, size_t cellsSize, V & cells)
-{
- switch (format) {
- case SerializeFormat::DOUBLE:
+void decodeCells(CellType cell_type, nbostream &stream, size_t cellsSize, V &cells) {
+ switch (cell_type) {
+ case CellType::DOUBLE:
decodeCells<double>(stream, cellsSize, cells);
break;
- case SerializeFormat::FLOAT:
+ case CellType::FLOAT:
decodeCells<float>(stream, cellsSize, cells);
break;
}
@@ -86,44 +76,41 @@ void
DenseBinaryFormat::serialize(nbostream &stream, const DenseTensorView &tensor)
{
size_t cellsSize = encodeDimensions(stream, tensor.fast_type());
-
DenseTensorView::CellsRef cells = tensor.cellsRef();
assert(cells.size() == cellsSize);
- switch (_format) {
- case SerializeFormat::DOUBLE:
- encodeCells<double>(stream, cells);
- break;
- case SerializeFormat::FLOAT:
- encodeCells<float>(stream, cells);
- break;
+ switch (tensor.fast_type().cell_type()) {
+ case CellType::DOUBLE:
+ encodeCells<double>(stream, cells);
+ break;
+ case CellType::FLOAT:
+ encodeCells<float>(stream, cells);
+ break;
}
}
std::unique_ptr<DenseTensor>
-DenseBinaryFormat::deserialize(nbostream &stream)
+DenseBinaryFormat::deserialize(nbostream &stream, CellType cell_type)
{
std::vector<Dimension> dimensions;
size_t cellsSize = decodeDimensions(stream,dimensions);
DenseTensor::Cells cells;
cells.reserve(cellsSize);
-
- decodeCells(_format, stream, cellsSize, cells);
-
- return std::make_unique<DenseTensor>(makeValueType(std::move(dimensions)), std::move(cells));
+ decodeCells(cell_type, stream, cellsSize, cells);
+ return std::make_unique<DenseTensor>(ValueType::tensor_type(std::move(dimensions), cell_type), std::move(cells));
}
template <typename T>
void
-DenseBinaryFormat::deserializeCellsOnly(nbostream &stream, std::vector<T> & cells)
+DenseBinaryFormat::deserializeCellsOnly(nbostream &stream, std::vector<T> &cells, CellType cell_type)
{
std::vector<Dimension> dimensions;
size_t cellsSize = decodeDimensions(stream,dimensions);
cells.clear();
cells.reserve(cellsSize);
- decodeCells(_format, stream, cellsSize, cells);
+ decodeCells(cell_type, stream, cellsSize, cells);
}
-template void DenseBinaryFormat::deserializeCellsOnly(nbostream &stream, std::vector<double> & cells);
-template void DenseBinaryFormat::deserializeCellsOnly(nbostream &stream, std::vector<float> & cells);
+template void DenseBinaryFormat::deserializeCellsOnly(nbostream &stream, std::vector<double> &cells, CellType cell_type);
+template void DenseBinaryFormat::deserializeCellsOnly(nbostream &stream, std::vector<float> &cells, CellType cell_type);
}
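
The reworked dense format no longer carries a SerializeFormat member: serialize() reads the cell type off the tensor's own value type and dispatches to a templated writer. A minimal standalone sketch of that dispatch shape follows; plain std::vector and std::ostream stand in for Vespa's CellsRef and nbostream, and byte-order handling is omitted, so this only illustrates the pattern, not the wire format:

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <vector>

    enum class CellType { DOUBLE, FLOAT };

    // Mirrors encodeCells<T>: write every cell narrowed to T.
    template <typename T>
    void encode_cells(std::ostream &out, const std::vector<double> &cells) {
        for (double cell : cells) {
            T narrowed = static_cast<T>(cell);
            out.write(reinterpret_cast<const char *>(&narrowed), sizeof(narrowed));
        }
    }

    // Mirrors the switch in DenseBinaryFormat::serialize: the cell type picks the width.
    void encode_cells(std::ostream &out, const std::vector<double> &cells, CellType cell_type) {
        switch (cell_type) {
        case CellType::DOUBLE: encode_cells<double>(out, cells); break;
        case CellType::FLOAT:  encode_cells<float>(out, cells);  break;
        }
    }

    int main() {
        std::ostringstream as_double, as_float;
        std::vector<double> cells = {1.0, 2.0, 3.0};
        encode_cells(as_double, cells, CellType::DOUBLE);
        encode_cells(as_float, cells, CellType::FLOAT);
        std::cout << as_double.str().size() << " vs " << as_float.str().size() << " bytes\n"; // 24 vs 12
        return 0;
    }
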
diff --git a/eval/src/vespa/eval/tensor/serialization/dense_binary_format.h b/eval/src/vespa/eval/tensor/serialization/dense_binary_format.h
index f9847d37784..9e860b3c1e4 100644
--- a/eval/src/vespa/eval/tensor/serialization/dense_binary_format.h
+++ b/eval/src/vespa/eval/tensor/serialization/dense_binary_format.h
@@ -2,9 +2,9 @@
#pragma once
-#include "common.h"
#include <memory>
#include <vector>
+#include <vespa/eval/eval/value_type.h>
namespace vespalib { class nbostream; }
@@ -19,15 +19,14 @@ class DenseTensorView;
class DenseBinaryFormat
{
public:
- DenseBinaryFormat(SerializeFormat format) : _format(format) { }
- void serialize(nbostream &stream, const DenseTensorView &tensor);
- std::unique_ptr<DenseTensor> deserialize(nbostream &stream);
-
+ using CellType = eval::ValueType::CellType;
+
+ static void serialize(nbostream &stream, const DenseTensorView &tensor);
+ static std::unique_ptr<DenseTensor> deserialize(nbostream &stream, CellType cell_type);
+
// This is a temporary method until we get full support for typed tensors
template <typename T>
- void deserializeCellsOnly(nbostream &stream, std::vector<T> & cells);
-private:
- SerializeFormat _format;
+ static void deserializeCellsOnly(nbostream &stream, std::vector<T> &cells, CellType cell_type);
};
}
diff --git a/eval/src/vespa/eval/tensor/serialization/sparse_binary_format.cpp b/eval/src/vespa/eval/tensor/serialization/sparse_binary_format.cpp
index cca310176f4..06e3f63c8da 100644
--- a/eval/src/vespa/eval/tensor/serialization/sparse_binary_format.cpp
+++ b/eval/src/vespa/eval/tensor/serialization/sparse_binary_format.cpp
@@ -13,6 +13,7 @@
using vespalib::nbostream;
using vespalib::eval::ValueType;
+using CellType = vespalib::eval::ValueType::CellType;
namespace vespalib::tensor {
@@ -20,10 +21,9 @@ namespace {
vespalib::string undefinedLabel("");
-void
-writeTensorAddress(nbostream &output,
- const eval::ValueType &type,
- const TensorAddress &value)
+void writeTensorAddress(nbostream &output,
+ const eval::ValueType &type,
+ const TensorAddress &value)
{
auto elemItr = value.elements().cbegin();
auto elemItrEnd = value.elements().cend();
@@ -38,78 +38,71 @@ writeTensorAddress(nbostream &output,
assert(elemItr == elemItrEnd);
}
-}
-
+template <typename T>
class SparseBinaryFormatSerializer : public TensorVisitor
{
- uint32_t _numCells;
- nbostream _cells;
- eval::ValueType _type;
-
+private:
+ uint32_t _num_cells;
+ nbostream &_cells;
+ const ValueType &_type;
public:
- SparseBinaryFormatSerializer();
+ SparseBinaryFormatSerializer(nbostream &cells, const ValueType &type);
+ size_t num_cells() const { return _num_cells; }
virtual ~SparseBinaryFormatSerializer() override;
virtual void visit(const TensorAddress &address, double value) override;
- void serialize(nbostream &stream, const Tensor &tensor);
};
-SparseBinaryFormatSerializer::SparseBinaryFormatSerializer()
- : _numCells(0u),
- _cells(),
- _type(eval::ValueType::error_type())
+template <typename T>
+SparseBinaryFormatSerializer<T>::SparseBinaryFormatSerializer(nbostream &cells, const ValueType &type)
+ : _num_cells(0),
+ _cells(cells),
+ _type(type)
{
}
+template <typename T>
+SparseBinaryFormatSerializer<T>::~SparseBinaryFormatSerializer() = default;
-SparseBinaryFormatSerializer::~SparseBinaryFormatSerializer() = default;
-
+template <typename T>
void
-SparseBinaryFormatSerializer::visit(const TensorAddress &address, double value)
+SparseBinaryFormatSerializer<T>::visit(const TensorAddress &address, double value)
{
- ++_numCells;
+ ++_num_cells;
writeTensorAddress(_cells, _type, address);
- _cells << value;
+ _cells << static_cast<T>(value);
}
-
-void
-SparseBinaryFormatSerializer::serialize(nbostream &stream, const Tensor &tensor)
-{
- _type = tensor.type();
- tensor.accept(*this);
- stream.putInt1_4Bytes(_type.dimensions().size());
- for (const auto &dimension : _type.dimensions()) {
+void encodeDimensions(nbostream &stream, const eval::ValueType &type) {
+ stream.putInt1_4Bytes(type.dimensions().size());
+ for (const auto &dimension : type.dimensions()) {
stream.writeSmallString(dimension.name);
}
- stream.putInt1_4Bytes(_numCells);
- stream.write(_cells.peek(), _cells.size());
}
-
-void
-SparseBinaryFormat::serialize(nbostream &stream, const Tensor &tensor)
-{
- SparseBinaryFormatSerializer serializer;
- serializer.serialize(stream, tensor);
+template <typename T>
+size_t encodeCells(nbostream &stream, const Tensor &tensor) {
+ SparseBinaryFormatSerializer<T> serializer(stream, tensor.type());
+ tensor.accept(serializer);
+ return serializer.num_cells();
}
+size_t encodeCells(nbostream &stream, const Tensor &tensor, CellType cell_type) {
+ switch (cell_type) {
+ case CellType::DOUBLE:
+ return encodeCells<double>(stream, tensor);
+ break;
+ case CellType::FLOAT:
+ return encodeCells<float>(stream, tensor);
+ break;
+ }
+ return 0;
+}
-std::unique_ptr<Tensor>
-SparseBinaryFormat::deserialize(nbostream &stream)
-{
+template<typename T>
+void decodeCells(nbostream &stream, size_t dimensionsSize, size_t cellsSize, DirectSparseTensorBuilder &builder) {
+ T cellValue = 0.0;
vespalib::string str;
- size_t dimensionsSize = stream.getInt1_4Bytes();
- std::vector<ValueType::Dimension> dimensions;
- while (dimensions.size() < dimensionsSize) {
- stream.readSmallString(str);
- dimensions.emplace_back(str);
- }
- ValueType type = ValueType::tensor_type(std::move(dimensions));
- DirectSparseTensorBuilder builder(type);
SparseTensorAddressBuilder address;
-
- size_t cellsSize = stream.getInt1_4Bytes();
- double cellValue = 0.0;
for (size_t cellIdx = 0; cellIdx < cellsSize; ++cellIdx) {
address.clear();
for (size_t dimension = 0; dimension < dimensionsSize; ++dimension) {
@@ -121,10 +114,49 @@ SparseBinaryFormat::deserialize(nbostream &stream)
}
}
stream >> cellValue;
- builder.insertCell(address, cellValue);
+ builder.insertCell(address, cellValue, [](double, double v){ return v; });
}
- return builder.build();
}
+void decodeCells(CellType cell_type, nbostream &stream, size_t dimensionsSize, size_t cellsSize, DirectSparseTensorBuilder &builder) {
+ switch (cell_type) {
+ case CellType::DOUBLE:
+ decodeCells<double>(stream, dimensionsSize, cellsSize, builder);
+ break;
+ case CellType::FLOAT:
+ decodeCells<float>(stream, dimensionsSize, cellsSize, builder);
+ break;
+ }
+}
+
+}
+
+void
+SparseBinaryFormat::serialize(nbostream &stream, const Tensor &tensor)
+{
+ const auto &type = tensor.type();
+ encodeDimensions(stream, type);
+ nbostream cells;
+ size_t numCells = encodeCells(cells, tensor, type.cell_type());
+ stream.putInt1_4Bytes(numCells);
+ stream.write(cells.peek(), cells.size());
+}
+
+std::unique_ptr<Tensor>
+SparseBinaryFormat::deserialize(nbostream &stream, CellType cell_type)
+{
+ vespalib::string str;
+ size_t dimensionsSize = stream.getInt1_4Bytes();
+ std::vector<ValueType::Dimension> dimensions;
+ while (dimensions.size() < dimensionsSize) {
+ stream.readSmallString(str);
+ dimensions.emplace_back(str);
+ }
+ ValueType type = ValueType::tensor_type(std::move(dimensions), cell_type);
+ DirectSparseTensorBuilder builder(type);
+ size_t cellsSize = stream.getInt1_4Bytes();
+ decodeCells(cell_type, stream, dimensionsSize, cellsSize, builder);
+ return builder.build();
+}
}
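
SparseBinaryFormat::serialize now writes the dimension header straight to the output, but it still has to visit every cell before it knows how many there are, so the cells go into a side nbostream first and the count plus the buffered bytes are appended afterwards. A minimal standalone sketch of that two-pass shape (a toy one-dimensional "tensor" and std::ostringstream stand in for the real visitor and nbostream; addresses and the real binary encoding are omitted):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    // Toy sparse tensor: label in a single mapped dimension -> value.
    using ToySparse = std::map<std::string, double>;

    void serialize(std::ostream &out, const ToySparse &tensor) {
        out << "dims=1 ";              // stand-in for the dimension header written up front
        std::ostringstream cells;      // side buffer: the cell count must precede the cell bytes
        uint32_t num_cells = 0;
        for (const auto &[label, value] : tensor) {  // plays the role of tensor.accept(serializer)
            cells << label << '=' << value << ' ';
            ++num_cells;
        }
        out << "cells=" << num_cells << ' ' << cells.str();
    }

    int main() {
        ToySparse t{{"a", 1.0}, {"b", 2.0}};
        serialize(std::cout, t);       // prints: dims=1 cells=2 a=1 b=2
        std::cout << '\n';
        return 0;
    }
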
diff --git a/eval/src/vespa/eval/tensor/serialization/sparse_binary_format.h b/eval/src/vespa/eval/tensor/serialization/sparse_binary_format.h
index cd68e7eeda4..0611d7d5a23 100644
--- a/eval/src/vespa/eval/tensor/serialization/sparse_binary_format.h
+++ b/eval/src/vespa/eval/tensor/serialization/sparse_binary_format.h
@@ -3,6 +3,7 @@
#pragma once
#include <memory>
+#include <vespa/eval/eval/value_type.h>
namespace vespalib { class nbostream; }
@@ -11,13 +12,15 @@ namespace vespalib::tensor {
class Tensor;
/**
- * Class for serializing a tensor.
+ * Class for serializing a sparse tensor.
*/
class SparseBinaryFormat
{
public:
+ using CellType = eval::ValueType::CellType;
+
static void serialize(nbostream &stream, const Tensor &tensor);
- static std::unique_ptr<Tensor> deserialize(nbostream &stream);
+ static std::unique_ptr<Tensor> deserialize(nbostream &stream, CellType cell_type);
};
}
diff --git a/eval/src/vespa/eval/tensor/serialization/typed_binary_format.cpp b/eval/src/vespa/eval/tensor/serialization/typed_binary_format.cpp
index 23179d4b908..8d9767374a2 100644
--- a/eval/src/vespa/eval/tensor/serialization/typed_binary_format.cpp
+++ b/eval/src/vespa/eval/tensor/serialization/typed_binary_format.cpp
@@ -16,6 +16,8 @@
LOG_SETUP(".eval.tensor.serialization.typed_binary_format");
using vespalib::nbostream;
+using vespalib::eval::ValueType;
+using CellType = vespalib::eval::ValueType::CellType;
namespace vespalib::tensor {
@@ -31,47 +33,52 @@ constexpr uint32_t MIXED_BINARY_FORMAT_WITH_CELLTYPE = 7u;
constexpr uint32_t DOUBLE_VALUE_TYPE = 0;
constexpr uint32_t FLOAT_VALUE_TYPE = 1;
-uint32_t
-format2Encoding(SerializeFormat format) {
- switch (format) {
- case SerializeFormat::DOUBLE:
- return DOUBLE_VALUE_TYPE;
- case SerializeFormat::FLOAT:
- return FLOAT_VALUE_TYPE;
+uint32_t cell_type_to_encoding(CellType cell_type) {
+ switch (cell_type) {
+ case CellType::DOUBLE:
+ return DOUBLE_VALUE_TYPE;
+ case CellType::FLOAT:
+ return FLOAT_VALUE_TYPE;
}
abort();
}
-SerializeFormat
-encoding2Format(uint32_t serializedType) {
- switch (serializedType) {
- case DOUBLE_VALUE_TYPE:
- return SerializeFormat::DOUBLE;
- case FLOAT_VALUE_TYPE:
- return SerializeFormat::FLOAT;
- default:
- throw IllegalArgumentException(make_string("Received unknown tensor value type = %u. Only 0(double), or 1(float) are legal.", serializedType));
+CellType
+encoding_to_cell_type(uint32_t cell_encoding) {
+ switch (cell_encoding) {
+ case DOUBLE_VALUE_TYPE:
+ return CellType::DOUBLE;
+ case FLOAT_VALUE_TYPE:
+ return CellType::FLOAT;
+ default:
+ throw IllegalArgumentException(make_string("Received unknown tensor value type = %u. Only 0(double), or 1(float) are legal.", cell_encoding));
}
}
}
void
-TypedBinaryFormat::serialize(nbostream &stream, const Tensor &tensor, SerializeFormat format)
+TypedBinaryFormat::serialize(nbostream &stream, const Tensor &tensor)
{
+ auto cell_type = tensor.type().cell_type();
+ bool default_cell_type = (cell_type == CellType::DOUBLE);
if (auto denseTensor = dynamic_cast<const DenseTensorView *>(&tensor)) {
- if (format != SerializeFormat::DOUBLE) {
- stream.putInt1_4Bytes(DENSE_BINARY_FORMAT_WITH_CELLTYPE);
- stream.putInt1_4Bytes(format2Encoding(format));
- DenseBinaryFormat(format).serialize(stream, *denseTensor);
- } else {
+ if (default_cell_type) {
stream.putInt1_4Bytes(DENSE_BINARY_FORMAT_TYPE);
- DenseBinaryFormat(SerializeFormat::DOUBLE).serialize(stream, *denseTensor);
+ } else {
+ stream.putInt1_4Bytes(DENSE_BINARY_FORMAT_WITH_CELLTYPE);
+ stream.putInt1_4Bytes(cell_type_to_encoding(cell_type));
}
+ DenseBinaryFormat::serialize(stream, *denseTensor);
} else if (auto wrapped = dynamic_cast<const WrappedSimpleTensor *>(&tensor)) {
eval::SimpleTensor::encode(wrapped->get(), stream);
} else {
- stream.putInt1_4Bytes(SPARSE_BINARY_FORMAT_TYPE);
+ if (default_cell_type) {
+ stream.putInt1_4Bytes(SPARSE_BINARY_FORMAT_TYPE);
+ } else {
+ stream.putInt1_4Bytes(SPARSE_BINARY_FORMAT_WITH_CELLTYPE);
+ stream.putInt1_4Bytes(cell_type_to_encoding(cell_type));
+ }
SparseBinaryFormat::serialize(stream, tensor);
}
}
@@ -80,40 +87,46 @@ TypedBinaryFormat::serialize(nbostream &stream, const Tensor &tensor, SerializeF
std::unique_ptr<Tensor>
TypedBinaryFormat::deserialize(nbostream &stream)
{
+ auto cell_type = CellType::DOUBLE;
auto read_pos = stream.rp();
auto formatId = stream.getInt1_4Bytes();
- if (formatId == SPARSE_BINARY_FORMAT_TYPE) {
- return SparseBinaryFormat::deserialize(stream);
- }
- if (formatId == DENSE_BINARY_FORMAT_TYPE) {
- return DenseBinaryFormat(SerializeFormat::DOUBLE).deserialize(stream);
- }
- if ((formatId == SPARSE_BINARY_FORMAT_WITH_CELLTYPE) ||
- (formatId == DENSE_BINARY_FORMAT_WITH_CELLTYPE) ||
- (formatId == MIXED_BINARY_FORMAT_TYPE) ||
- (formatId == MIXED_BINARY_FORMAT_WITH_CELLTYPE))
- {
+ switch (formatId) {
+ case SPARSE_BINARY_FORMAT_WITH_CELLTYPE:
+ cell_type = encoding_to_cell_type(stream.getInt1_4Bytes());
+ [[fallthrough]];
+ case SPARSE_BINARY_FORMAT_TYPE:
+ return SparseBinaryFormat::deserialize(stream, cell_type);
+ case DENSE_BINARY_FORMAT_WITH_CELLTYPE:
+ cell_type = encoding_to_cell_type(stream.getInt1_4Bytes());
+ [[fallthrough]];
+ case DENSE_BINARY_FORMAT_TYPE:
+ return DenseBinaryFormat::deserialize(stream, cell_type);
+ case MIXED_BINARY_FORMAT_TYPE:
+ case MIXED_BINARY_FORMAT_WITH_CELLTYPE:
stream.adjustReadPos(read_pos - stream.rp());
return std::make_unique<WrappedSimpleTensor>(eval::SimpleTensor::decode(stream));
+ default:
+ throw IllegalArgumentException(make_string("Received unknown tensor format type = %du.", formatId));
}
- throw IllegalArgumentException(make_string("Received unknown tensor format type = %du.", formatId));
}
template <typename T>
void
-TypedBinaryFormat::deserializeCellsOnlyFromDenseTensors(nbostream &stream, std::vector<T> & cells)
+TypedBinaryFormat::deserializeCellsOnlyFromDenseTensors(nbostream &stream, std::vector<T> &cells)
{
+ auto cell_type = CellType::DOUBLE;
auto formatId = stream.getInt1_4Bytes();
- if (formatId == DENSE_BINARY_FORMAT_TYPE) {
- return DenseBinaryFormat(SerializeFormat::DOUBLE).deserializeCellsOnly(stream, cells);
- }
- if (formatId == DENSE_BINARY_FORMAT_WITH_CELLTYPE) {
- return DenseBinaryFormat(encoding2Format(stream.getInt1_4Bytes())).deserializeCellsOnly(stream, cells);
+ switch (formatId) {
+ case DENSE_BINARY_FORMAT_WITH_CELLTYPE:
+ cell_type = encoding_to_cell_type(stream.getInt1_4Bytes());
+ [[fallthrough]];
+ case DENSE_BINARY_FORMAT_TYPE:
+ return DenseBinaryFormat::deserializeCellsOnly(stream, cells, cell_type);
}
abort();
}
-template void TypedBinaryFormat::deserializeCellsOnlyFromDenseTensors(nbostream &stream, std::vector<double> & cells);
-template void TypedBinaryFormat::deserializeCellsOnlyFromDenseTensors(nbostream &stream, std::vector<float> & cells);
+template void TypedBinaryFormat::deserializeCellsOnlyFromDenseTensors(nbostream &stream, std::vector<double> &cells);
+template void TypedBinaryFormat::deserializeCellsOnlyFromDenseTensors(nbostream &stream, std::vector<float> &cells);
}
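
Deserialization now keys everything off the leading format id: the *_WITH_CELLTYPE variants read one extra cell-type byte and then fall through into the same decode path as their double-cell counterparts. The sketch below parses just that header, using the numeric ids visible in the test expectations (1 and 2 for the classic sparse and dense formats, 5 and 6 for the cell-typed variants); the mixed formats, which the real code rewinds and hands to SimpleTensor wholesale, are left out, so this is an illustration of the header layout rather than a drop-in replacement:

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>
    #include <vector>

    enum class CellType { DOUBLE, FLOAT };

    struct Header {
        uint32_t format_id;
        CellType cell_type; // DOUBLE unless the format id says a cell-type byte follows
    };

    // Cell-typed format ids are followed by one byte: 0 = double, 1 = float.
    Header parse_header(const std::vector<uint8_t> &buf) {
        Header h{buf.at(0), CellType::DOUBLE};
        switch (h.format_id) {
        case 5: case 6: {                       // sparse/dense "with cell type"
            uint8_t enc = buf.at(1);
            if (enc == 0)      h.cell_type = CellType::DOUBLE;
            else if (enc == 1) h.cell_type = CellType::FLOAT;
            else throw std::invalid_argument("unknown cell type encoding");
            break;
        }
        case 1: case 2:                         // classic formats: double cells, no extra byte
            break;
        default:
            throw std::invalid_argument("unknown tensor format id");
        }
        return h;
    }

    int main() {
        // First bytes of the float-cell sparse example from the tests above.
        Header h = parse_header({0x05, 0x01});
        std::cout << "format " << h.format_id << ", float cells: "
                  << (h.cell_type == CellType::FLOAT) << '\n'; // format 5, float cells: 1
        return 0;
    }
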
diff --git a/eval/src/vespa/eval/tensor/serialization/typed_binary_format.h b/eval/src/vespa/eval/tensor/serialization/typed_binary_format.h
index 717d51effef..198b09ae336 100644
--- a/eval/src/vespa/eval/tensor/serialization/typed_binary_format.h
+++ b/eval/src/vespa/eval/tensor/serialization/typed_binary_format.h
@@ -2,7 +2,6 @@
#pragma once
-#include "common.h"
#include <memory>
#include <vector>
@@ -18,16 +17,12 @@ class Tensor;
class TypedBinaryFormat
{
public:
- static void serialize(nbostream &stream, const Tensor &tensor, SerializeFormat format);
- static void serialize(nbostream &stream, const Tensor &tensor) {
- serialize(stream, tensor, SerializeFormat::DOUBLE);
- }
-
+ static void serialize(nbostream &stream, const Tensor &tensor);
static std::unique_ptr<Tensor> deserialize(nbostream &stream);
-
+
// This is a temporary method until we get full support for typed tensors
template <typename T>
- static void deserializeCellsOnlyFromDenseTensors(nbostream &stream, std::vector<T> & cells);
+ static void deserializeCellsOnlyFromDenseTensors(nbostream &stream, std::vector<T> &cells);
};
}
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp
index ded9310b450..bcfbc851e6d 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor.cpp
@@ -64,16 +64,7 @@ SparseTensor::operator==(const SparseTensor &rhs) const
eval::ValueType
SparseTensor::combineDimensionsWith(const SparseTensor &rhs) const
{
- std::vector<eval::ValueType::Dimension> result;
- std::set_union(_type.dimensions().cbegin(), _type.dimensions().cend(),
- rhs._type.dimensions().cbegin(), rhs._type.dimensions().cend(),
- std::back_inserter(result),
- [](const eval::ValueType::Dimension &lhsDim,
- const eval::ValueType::Dimension &rhsDim)
- { return lhsDim.name < rhsDim.name; });
- return (result.empty() ?
- eval::ValueType::double_type() :
- eval::ValueType::tensor_type(std::move(result)));
+ return eval::ValueType::join(_type, rhs._type);
}
const eval::ValueType &
diff --git a/eval/src/vespa/eval/tensor/tensor.cpp b/eval/src/vespa/eval/tensor/tensor.cpp
index 5697458f3ca..51c94aab5b0 100644
--- a/eval/src/vespa/eval/tensor/tensor.cpp
+++ b/eval/src/vespa/eval/tensor/tensor.cpp
@@ -17,9 +17,6 @@ Tensor::supported(TypeList types)
bool sparse = false;
bool dense = false;
for (const eval::ValueType &type: types) {
- if (type.cell_type() != eval::ValueType::CellType::DOUBLE) {
- return false; // non-double cell types not supported
- }
dense = (dense || type.is_double());
for (const auto &dim: type.dimensions()) {
dense = (dense || dim.is_indexed());
diff --git a/fastos/src/vespa/fastos/unix_file.h b/fastos/src/vespa/fastos/unix_file.h
index 3bca340cb90..3dffe1fc089 100644
--- a/fastos/src/vespa/fastos/unix_file.h
+++ b/fastos/src/vespa/fastos/unix_file.h
@@ -10,6 +10,7 @@
#pragma once
#include <vespa/fastos/file.h>
+#include <cerrno>
/**
* This is the generic UNIX implementation of @ref FastOS_FileInterface.
diff --git a/fbench/src/util/clientstatus.h b/fbench/src/util/clientstatus.h
index 9b15cdf4095..f8a223e121a 100644
--- a/fbench/src/util/clientstatus.h
+++ b/fbench/src/util/clientstatus.h
@@ -3,6 +3,7 @@
#include <map>
#include <vector>
+#include <string>
/**
* This is a helper struct that is used by the @ref Client class to
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index f7068e7147e..906a56d3f34 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -145,11 +145,6 @@ public class Flags {
"Configserver RPC authorizer. Allowed values: ['disable', 'log-only', 'enforce']",
"Takes effect on restart of configserver");
- public static final UnboundBooleanFlag ENABLE_TENANT_HOST_APP = defineFeatureFlag(
- "enable-tenant-host-app", false,
- "Enable tenant host infrastructure application",
- "Takes effect immediately");
-
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, String description,
diff --git a/hosted-api/src/main/java/ai/vespa/hosted/api/ApiAuthenticator.java b/hosted-api/src/main/java/ai/vespa/hosted/api/ApiAuthenticator.java
new file mode 100644
index 00000000000..674faaf0ee7
--- /dev/null
+++ b/hosted-api/src/main/java/ai/vespa/hosted/api/ApiAuthenticator.java
@@ -0,0 +1,8 @@
+package ai.vespa.hosted.api;
+
+public interface ApiAuthenticator {
+
+ /** Returns a client authenticated to talk to the hosted Vespa API. */
+ ControllerHttpClient controller();
+
+}
diff --git a/hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java b/hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java
index 5a38154b7c0..3d85d531f73 100644
--- a/hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java
+++ b/hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java
@@ -6,7 +6,9 @@ import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.security.KeyUtils;
import com.yahoo.security.SslContextBuilder;
+import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
@@ -63,11 +65,21 @@ public abstract class ControllerHttpClient {
}
/** Creates an HTTP client against the given endpoint, which uses the given key to authenticate as the given application. */
+ public static ControllerHttpClient withSignatureKey(URI endpoint, String privateKey, ApplicationId id) {
+ return new SigningControllerHttpClient(endpoint, privateKey, id);
+ }
+
+ /** Creates an HTTP client against the given endpoint, which uses the given key to authenticate as the given application. */
public static ControllerHttpClient withSignatureKey(URI endpoint, Path privateKeyFile, ApplicationId id) {
return new SigningControllerHttpClient(endpoint, privateKeyFile, id);
}
/** Creates an HTTP client against the given endpoint, which uses the given private key and certificate identity. */
+ public static ControllerHttpClient withKeyAndCertificate(URI endpoint, String privateKey, String certificate) {
+ return new MutualTlsControllerHttpClient(endpoint, privateKey, certificate);
+ }
+
+ /** Creates an HTTP client against the given endpoint, which uses the given private key and certificate identity. */
public static ControllerHttpClient withKeyAndCertificate(URI endpoint, Path privateKeyFile, Path certificateFile) {
return new MutualTlsControllerHttpClient(endpoint, privateKeyFile, certificateFile);
}
@@ -92,14 +104,14 @@ public abstract class ControllerHttpClient {
/** Deactivates the deployment of the given application in the given zone. */
public String deactivate(ApplicationId id, ZoneId zone) {
- return asText(send(request(HttpRequest.newBuilder(deploymentPath(id, zone))
- .timeout(Duration.ofSeconds(10)),
- DELETE)));
+ return asString(send(request(HttpRequest.newBuilder(deploymentPath(id, zone))
+ .timeout(Duration.ofSeconds(10)),
+ DELETE)));
}
- /** Returns the default {@link Environment#dev} {@link ZoneId}, to use for development deployments. */
- public ZoneId devZone() {
- Inspector rootObject = toInspector(send(request(HttpRequest.newBuilder(defaultRegionPath())
+ /** Returns the default {@link ZoneId} for the given environment, if any. */
+ public ZoneId defaultZone(Environment environment) {
+ Inspector rootObject = toInspector(send(request(HttpRequest.newBuilder(defaultRegionPath(environment))
.timeout(Duration.ofSeconds(10)),
GET)));
return ZoneId.from("dev", rootObject.field("name").asString());
@@ -113,6 +125,11 @@ public abstract class ControllerHttpClient {
.field("compileVersion").asString();
}
+ /** Returns the test config for functional and verification tests of the indicated Vespa deployment. */
+ public TestConfig testConfig(ApplicationId id, ZoneId zone) {
+ return TestConfig.fromJson(asBytes(send(request(HttpRequest.newBuilder(testConfigPath(id, zone)), GET))));
+ }
+
/** Returns the sorted list of log entries after the given after from the deployment job of the given ids. */
public DeploymentLog deploymentLog(ApplicationId id, ZoneId zone, long run, long after) {
return toDeploymentLog(send(request(HttpRequest.newBuilder(runPath(id, zone, run, after))
@@ -125,6 +142,7 @@ public abstract class ControllerHttpClient {
return deploymentLog(id, zone, run, -1);
}
+ /** Returns an authenticated request from the given input. Override this for, e.g., request signing. */
protected HttpRequest request(HttpRequest.Builder request, Method method, Supplier<InputStream> data) {
return request.method(method.name(), ofInputStream(data)).build();
}
@@ -169,15 +187,22 @@ public abstract class ControllerHttpClient {
"deploy", jobNameOf(zone));
}
+ private URI jobPath(ApplicationId id, ZoneId zone) {
+ return concatenated(instancePath(id), "job", jobNameOf(zone));
+ }
+
+ private URI testConfigPath(ApplicationId id, ZoneId zone) {
+ return concatenated(jobPath(id, zone), "test-config");
+ }
+
private URI runPath(ApplicationId id, ZoneId zone, long run, long after) {
- return withQuery(concatenated(instancePath(id),
- "job", jobNameOf(zone),
+ return withQuery(concatenated(jobPath(id, zone),
"run", Long.toString(run)),
"after", Long.toString(after));
}
- private URI defaultRegionPath() {
- return concatenated(endpoint, "zone", "v1", "environment", Environment.dev.value(), "default");
+ private URI defaultRegionPath(Environment environment) {
+ return concatenated(endpoint, "zone", "v1", "environment", environment.value(), "default");
}
private static URI concatenated(URI base, String... parts) {
@@ -189,9 +214,8 @@ public abstract class ControllerHttpClient {
+ URLEncoder.encode(name, UTF_8) + "=" + URLEncoder.encode(value, UTF_8));
}
- // TODO jvenstad: remove when vaas is no longer part of region names.
private static String jobNameOf(ZoneId zone) {
- return zone.environment().value() + "-" + zone.region().value().replaceAll("vaas-", "");
+ return zone.environment().value() + "-" + zone.region().value();
}
private HttpResponse<byte[]> send(HttpRequest request) {
@@ -236,9 +260,15 @@ public abstract class ControllerHttpClient {
return streamer;
}
- private static String asText(HttpResponse<byte[]> response) {
+ /** Returns the response body as a String, or throws if the status code is non-2XX. */
+ private static String asString(HttpResponse<byte[]> response) {
+ return new String(asBytes(response), UTF_8);
+ }
+
+ /** Returns the response body as a byte array, or throws if the status code is non-2XX. */
+ private static byte[] asBytes(HttpResponse<byte[]> response) {
toInspector(response);
- return new String(response.body(), UTF_8);
+ return response.body();
}
/** Returns an {@link Inspector} for the assumed JSON formatted response, or throws if the status code is non-2XX. */
@@ -299,9 +329,13 @@ public abstract class ControllerHttpClient {
private final RequestSigner signer;
- private SigningControllerHttpClient(URI endpoint, Path privateKeyFile, ApplicationId id) {
+ private SigningControllerHttpClient(URI endpoint, String privateKey, ApplicationId id) {
super(endpoint, HttpClient.newBuilder());
- this.signer = new RequestSigner(unchecked(() -> Files.readString(privateKeyFile, UTF_8)), id.serializedForm());
+ this.signer = new RequestSigner(privateKey, id.serializedForm());
+ }
+
+ private SigningControllerHttpClient(URI endpoint, Path privateKeyFile, ApplicationId id) {
+ this(endpoint, unchecked(() -> Files.readString(privateKeyFile, UTF_8)), id);
}
@Override
@@ -317,7 +351,18 @@ public abstract class ControllerHttpClient {
private MutualTlsControllerHttpClient(URI endpoint, Path privateKeyFile, Path certificateFile) {
super(endpoint,
- HttpClient.newBuilder().sslContext(new SslContextBuilder().withKeyStore(privateKeyFile, certificateFile).build()));
+ HttpClient.newBuilder()
+ .sslContext(new SslContextBuilder().withKeyStore(privateKeyFile,
+ certificateFile)
+ .build()));
+ }
+
+ private MutualTlsControllerHttpClient(URI endpoint, String privateKey, String certificate) {
+ super(endpoint,
+ HttpClient.newBuilder()
+ .sslContext(new SslContextBuilder().withKeyStore(KeyUtils.fromPemEncodedPrivateKey(privateKey),
+ X509CertificateUtils.certificateListFromPem(certificate))
+ .build()));
}
}
diff --git a/hosted-api/src/main/java/ai/vespa/hosted/api/EndpointAuthenticator.java b/hosted-api/src/main/java/ai/vespa/hosted/api/EndpointAuthenticator.java
new file mode 100644
index 00000000000..62b1d2b4c92
--- /dev/null
+++ b/hosted-api/src/main/java/ai/vespa/hosted/api/EndpointAuthenticator.java
@@ -0,0 +1,20 @@
+package ai.vespa.hosted.api;
+
+import javax.net.ssl.SSLContext;
+import java.net.http.HttpRequest;
+import java.util.Optional;
+
+/**
+ * Adds environment-dependent authentication to HTTP requests against Vespa deployments.
+ *
+ * @author jonmv
+ */
+public interface EndpointAuthenticator {
+
+ /** Returns an SSLContext which provides authentication against a Vespa endpoint. */
+ SSLContext sslContext();
+
+ /** Adds necessary authentication to the given HTTP request builder, to pass the data plane of a Vespa endpoint. */
+ HttpRequest.Builder authenticated(HttpRequest.Builder request);
+
+}
diff --git a/hosted-api/src/main/java/ai/vespa/hosted/api/Properties.java b/hosted-api/src/main/java/ai/vespa/hosted/api/Properties.java
new file mode 100644
index 00000000000..61893a30e7e
--- /dev/null
+++ b/hosted-api/src/main/java/ai/vespa/hosted/api/Properties.java
@@ -0,0 +1,55 @@
+package ai.vespa.hosted.api;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.RegionName;
+
+import java.net.URI;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Optional;
+
+/**
+ * Utilities and common definitions of system properties defining a Vespa application project.
+ *
+ * @author jonmv
+ */
+public class Properties {
+
+ public static ApplicationId application() {
+ return ApplicationId.from(requireNonBlankProperty("tenant"),
+ requireNonBlankProperty("application"),
+ getNonBlankProperty("instance").orElse("default"));
+ }
+
+ public static Optional<Environment> environment() {
+ return getNonBlankProperty("environment").map(Environment::from);
+ }
+
+ public static Optional<RegionName> region() {
+ return getNonBlankProperty("region").map(RegionName::from);
+ }
+
+ public static URI endpoint() {
+ return URI.create(requireNonBlankProperty("endpoint"));
+ }
+
+ public static Path privateKeyFile() {
+ return Paths.get(requireNonBlankProperty("privateKeyFile"));
+ }
+
+ public static Path certificateFile() {
+ return Paths.get(requireNonBlankProperty("certificateFile"));
+ }
+
+ /** Returns the system property with the given name if it is set, or empty. */
+ public static Optional<String> getNonBlankProperty(String name) {
+ return Optional.ofNullable(System.getProperty(name)).filter(value -> ! value.isBlank());
+ }
+
+ /** Returns the system property with the given name if it is set, or throws. */
+ public static String requireNonBlankProperty(String name) {
+ return getNonBlankProperty(name).orElseThrow(() -> new IllegalStateException("Missing required property '" + name + "'"));
+ }
+
+}
diff --git a/hosted-api/src/main/java/ai/vespa/hosted/api/RequestVerifier.java b/hosted-api/src/main/java/ai/vespa/hosted/api/RequestVerifier.java
index dc53439ef3b..9d85ec9bf6b 100644
--- a/hosted-api/src/main/java/ai/vespa/hosted/api/RequestVerifier.java
+++ b/hosted-api/src/main/java/ai/vespa/hosted/api/RequestVerifier.java
@@ -29,6 +29,7 @@ public class RequestVerifier {
this(pemPublicKey, Clock.systemUTC());
}
+ /** Creates a new request verifier from the given PEM encoded ECDSA public key, with the given clock. */
public RequestVerifier(String pemPublicKey, Clock clock) {
this.verifier = SignatureUtils.createVerifier(KeyUtils.fromPemEncodedPublicKey(pemPublicKey), SHA256_WITH_ECDSA);
this.clock = clock;
diff --git a/hosted-api/src/main/java/ai/vespa/hosted/api/TestConfig.java b/hosted-api/src/main/java/ai/vespa/hosted/api/TestConfig.java
new file mode 100644
index 00000000000..897b5d3236d
--- /dev/null
+++ b/hosted-api/src/main/java/ai/vespa/hosted/api/TestConfig.java
@@ -0,0 +1,67 @@
+package ai.vespa.hosted.api;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.JsonDecoder;
+import com.yahoo.slime.ObjectTraverser;
+import com.yahoo.slime.Slime;
+
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Config required to run a functional or verification test of a Vespa deployment.
+ *
+ * @author jvenstad
+ */
+public class TestConfig {
+
+ private final ApplicationId application;
+ private final ZoneId zone;
+ private final SystemName system;
+ private final Map<ZoneId, Map<String, URI>> deployments;
+
+ public TestConfig(ApplicationId application, ZoneId zone, SystemName system, Map<ZoneId, Map<String, URI>> deployments) {
+ if ( ! deployments.containsKey(zone))
+ throw new IllegalArgumentException("Config must contain a deployment for its zone, but only does for " + deployments.keySet());
+ this.application = requireNonNull(application);
+ this.zone = requireNonNull(zone);
+ this.system = requireNonNull(system);
+ this.deployments = deployments.entrySet().stream()
+ .collect(Collectors.toUnmodifiableMap(entry -> entry.getKey(),
+ entry -> Map.copyOf(entry.getValue())));
+ }
+
+ public static TestConfig fromJson(byte[] jsonBytes) {
+ Inspector config = new JsonDecoder().decode(new Slime(), jsonBytes).get();
+ ApplicationId application = ApplicationId.fromSerializedForm(config.field("application").asString());
+ ZoneId zone = ZoneId.from(config.field("zone").asString());
+ SystemName system = SystemName.from(config.field("system").asString());
+ Map<ZoneId, Map<String, URI>> deployments = new HashMap<>();
+ config.field("clusterEndpoints").traverse((ObjectTraverser) (zoneId, endpointsObject) -> {
+ Map<String, URI> endpoints = new HashMap<>();
+ endpointsObject.traverse((ObjectTraverser) (cluster, uri) -> endpoints.put(cluster, URI.create(uri.asString())));
+ deployments.put(ZoneId.from(zoneId), endpoints);
+ });
+ return new TestConfig(application, zone, system, deployments);
+ }
+
+ /** Returns the full id of the application to test. */
+ public ApplicationId application() { return application; }
+
+ /** Returns the zone of the deployment to test. */
+ public ZoneId zone() { return zone; }
+
+ /** Returns an immutable view of deployments, per zone, of the application to test. */
+ public Map<ZoneId, Map<String, URI>> deployments() { return deployments; }
+
+ /** Returns the hosted Vespa system this is run against. */
+ public SystemName system() { return system; }
+
+}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java
index becfd9a54ce..054fa704ecb 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/core/VespaMetrics.java
@@ -41,19 +41,16 @@ import static com.yahoo.log.LogLevel.DEBUG;
public class VespaMetrics {
private static final Logger log = Logger.getLogger(VespaMetrics.class.getPackage().getName());
- // MUST be the same as the constant defined in config-model
public static final ConsumerId VESPA_CONSUMER_ID = toConsumerId("Vespa");
public static final DimensionId METRIC_TYPE_DIMENSION_ID = toDimensionId("metrictype");
public static final DimensionId INSTANCE_DIMENSION_ID = toDimensionId("instance");
- private static final Set<ConsumerId> DEFAULT_CONSUMERS = Collections.singleton(VESPA_CONSUMER_ID);
-
private final MetricsConsumers metricsConsumers;
private static final MetricsFormatter formatter = new MetricsFormatter(false, false);
- public VespaMetrics(MetricsConsumers metricsConsumers, VespaServices vespaServices) {
+ public VespaMetrics(MetricsConsumers metricsConsumers) {
this.metricsConsumers = metricsConsumers;
}
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/TestUtil.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/TestUtil.java
index 6f86be3aa30..5e8322c4c01 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/TestUtil.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/TestUtil.java
@@ -31,7 +31,7 @@ public class TestUtil {
MetricsConsumers consumers,
ApplicationDimensions applicationDimensions,
NodeDimensions nodeDimensions) {
- VespaMetrics metrics = new VespaMetrics(consumers, vespaServices);
+ VespaMetrics metrics = new VespaMetrics(consumers);
return new MetricsManager(vespaServices, metrics, new ExternalMetrics(consumers),
applicationDimensions, nodeDimensions);
}
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/rpc/IntegrationTester.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/rpc/IntegrationTester.java
index 9507f01491e..df1ef9e5035 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/rpc/IntegrationTester.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/rpc/IntegrationTester.java
@@ -68,7 +68,7 @@ public class IntegrationTester implements AutoCloseable {
vespaServices = new VespaServices(servicesConfig(), monitoringConfig(), null);
MetricsConsumers consumers = new MetricsConsumers(consumersConfig());
- VespaMetrics vespaMetrics = new VespaMetrics(consumers, vespaServices);
+ VespaMetrics vespaMetrics = new VespaMetrics(consumers);
ExternalMetrics externalMetrics = new ExternalMetrics(consumers);
ApplicationDimensions appDimensions = new ApplicationDimensions(applicationDimensionsConfig());
NodeDimensions nodeDimensions = new NodeDimensions(nodeDimensionsConfig());
diff --git a/metrics/src/vespa/metrics/metricset.cpp b/metrics/src/vespa/metrics/metricset.cpp
index 9fb731d7583..b4238dc0a7c 100644
--- a/metrics/src/vespa/metrics/metricset.cpp
+++ b/metrics/src/vespa/metrics/metricset.cpp
@@ -8,6 +8,7 @@
#include <list>
#include <cassert>
#include <algorithm>
+#include <ostream>
#include <vespa/log/log.h>
LOG_SETUP(".metrics.metricsset");
diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java
index 25a24792432..c7210e6710a 100644
--- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java
+++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java
@@ -40,7 +40,7 @@ public class VespaImportTestCase {
assertEquals(2, model.expressions().size());
assertEquals("reduce(reduce(input1 * input2, sum, name) * constant1, max, x) * constant2",
model.expressions().get("foo1").getRoot().toString());
- assertEquals("reduce(reduce(input1 * input2, sum, name) * constant1asLarge, max, x) * constant2",
+ assertEquals("reduce(reduce(input1 * input2, sum, name) * constant(constant1asLarge), max, x) * constant2",
model.expressions().get("foo2").getRoot().toString());
List<ImportedMlFunction> functions = model.outputExpressions();
diff --git a/model-integration/src/test/models/vespa/example.model b/model-integration/src/test/models/vespa/example.model
index 6d660732db9..269ed83b695 100644
--- a/model-integration/src/test/models/vespa/example.model
+++ b/model-integration/src/test/models/vespa/example.model
@@ -19,7 +19,7 @@ model example {
}
function foo2() {
- expression: reduce(sum(input1 * input2, name) * constant1asLarge, max, x) * constant2
+ expression: reduce(sum(input1 * input2, name) * constant(constant1asLarge), max, x) * constant2
}
} \ No newline at end of file
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java
index 6b3c09e812a..2a40428cad2 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java
@@ -9,6 +9,8 @@ import com.yahoo.vespa.hosted.dockerapi.ProcessResult;
import com.yahoo.vespa.hosted.node.admin.nodeagent.ContainerData;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
+import java.time.Duration;
+import java.util.List;
import java.util.Optional;
public interface DockerOperations {
@@ -48,4 +50,9 @@ public interface DockerOperations {
void stopServices(NodeAgentContext context);
Optional<ContainerStats> getContainerStats(NodeAgentContext context);
+
+ boolean noManagedContainersRunning();
+
+ /** Deletes the local images that are currently not in use by any container and not recently used. */
+ boolean deleteUnusedDockerImages(List<DockerImage> excludes, Duration minImageAgeToDelete);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
index 2f5d6ae60d8..954ba25895a 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
@@ -15,12 +15,14 @@ import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeMembers
import com.yahoo.vespa.hosted.node.admin.nodeagent.ContainerData;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
import com.yahoo.vespa.hosted.node.admin.task.util.network.IPAddresses;
+import com.yahoo.vespa.hosted.node.admin.task.util.network.IPAddressesImpl;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -47,6 +49,10 @@ public class DockerOperationsImpl implements DockerOperations {
private final ProcessExecuter processExecuter;
private final IPAddresses ipAddresses;
+ public DockerOperationsImpl(Docker docker) {
+ this(docker, new ProcessExecuter(), new IPAddressesImpl());
+ }
+
public DockerOperationsImpl(Docker docker, ProcessExecuter processExecuter, IPAddresses ipAddresses) {
this.docker = docker;
this.processExecuter = processExecuter;
@@ -323,6 +329,16 @@ public class DockerOperationsImpl implements DockerOperations {
command.withSharedVolume(Paths.get("/var/zpe"), context.pathInNodeUnderVespaHome("var/zpe"));
}
+ @Override
+ public boolean noManagedContainersRunning() {
+ return docker.noManagedContainersRunning(MANAGER_NAME);
+ }
+
+ @Override
+ public boolean deleteUnusedDockerImages(List<DockerImage> excludes, Duration minImageAgeToDelete) {
+ return docker.deleteUnusedDockerImages(excludes, minImageAgeToDelete);
+ }
+
/** Returns whether given nodeType is a Docker host for infrastructure nodes */
private static boolean isInfrastructureHost(NodeType nodeType) {
return nodeType == NodeType.config ||
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
index ca9083e9d27..00ec985ba0c 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
@@ -9,7 +9,7 @@ import java.util.Set;
/**
* NodeAdmin manages the life cycle of NodeAgents.
- * @author dybis
+ * @author Haakon Dybdahl
*/
public interface NodeAdmin {
@@ -19,6 +19,9 @@ public interface NodeAdmin {
/** Gather node agent and its docker container metrics and forward them to the {@code MetricReceiverWrapper} */
void updateNodeAgentMetrics();
+ /** Gather node admin metrics and forward them to the {@code MetricReceiverWrapper} */
+ void updateNodeAdminMetrics();
+
/**
* Attempts to freeze/unfreeze all NodeAgents and itself. To freeze a NodeAgent means that
* they will not pick up any changes from NodeRepository.
@@ -29,7 +32,7 @@ public interface NodeAdmin {
boolean setFrozen(boolean frozen);
/**
- * Returns whether the NodeAdmin itself is currently frozen, meaning it will not pick up any changes
+ * Returns whether NodeAdmin itself is currently frozen, meaning it will not pick up any changes
* from NodeRepository.
*/
boolean isFrozen();
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
index 5b5d13ca346..0d520241ac8 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
@@ -43,6 +43,9 @@ public class NodeAdminImpl implements NodeAdmin {
private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>();
private final GaugeWrapper numberOfContainersInLoadImageState;
+ private final GaugeWrapper jvmHeapUsed;
+ private final GaugeWrapper jvmHeapFree;
+ private final GaugeWrapper jvmHeapTotal;
private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock) {
@@ -70,6 +73,10 @@ public class NodeAdminImpl implements NodeAdmin {
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
+
+ this.jvmHeapUsed = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST, new Dimensions.Builder().build(), "mem.heap.used");
+ this.jvmHeapFree = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST, new Dimensions.Builder().build(), "mem.heap.free");
+ this.jvmHeapTotal = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST, new Dimensions.Builder().build(), "mem.heap.total");
}
@Override
@@ -113,6 +120,17 @@ public class NodeAdminImpl implements NodeAdmin {
}
@Override
+ public void updateNodeAdminMetrics() {
+ Runtime runtime = Runtime.getRuntime();
+ long freeMemory = runtime.freeMemory();
+ long totalMemory = runtime.totalMemory();
+ long usedMemory = totalMemory - freeMemory;
+ jvmHeapFree.sample(freeMemory);
+ jvmHeapUsed.sample(usedMemory);
+ jvmHeapTotal.sample(totalMemory);
+ }
+
+ @Override
public boolean setFrozen(boolean wantFrozen) {
if (wantFrozen != previousWantFrozen) {
if (wantFrozen) {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
index 41c4544c533..2cd15a3ebe4 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
@@ -81,6 +81,7 @@ public class NodeAdminStateUpdater {
try {
if (suspendedStates.contains(currentState)) return;
nodeAdmin.updateNodeAgentMetrics();
+ nodeAdmin.updateNodeAdminMetrics();
} catch (Throwable e) {
log.log(Level.WARNING, "Metric fetcher scheduler failed", e);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java
index 77510f7b6ef..17d8126a5c9 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/systemd/SystemCtl.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.node.admin.task.util.systemd;
import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
+import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import com.yahoo.vespa.hosted.node.admin.task.util.process.Terminal;
import java.util.Objects;
@@ -58,7 +59,7 @@ public class SystemCtl {
public boolean serviceExists(TaskContext context, String unit) {
return terminal.newCommandLine(context)
- .add("systemctl").add("list-unit-files").add(unit + ".service").executeSilently()
+ .add("systemctl", "list-unit-files", unit + ".service").executeSilently()
.mapOutput(output -> {
// Last line of the form: "1 unit files listed."
Matcher matcher = UNIT_FILES_LISTED_PATTERN.matcher(output);
@@ -70,6 +71,15 @@ public class SystemCtl {
});
}
+ /** Returns true if the unit exists and is active (i.e. running). unit is e.g. "docker". */
+ public boolean isActive(TaskContext context, String unit) {
+ return terminal.newCommandLine(context)
+ .add("systemctl", "--quiet", "is-active", unit + ".service")
+ .ignoreExitCode()
+ .executeSilently()
+ .map(CommandResult::getExitCode) == 0;
+ }
+
public class SystemCtlEnable extends SystemCtlCommand {
private SystemCtlEnable(String unit) {
super("enable", unit);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java
index 31e64dc886f..e2ad9e3de97 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java
@@ -91,6 +91,10 @@ public class DockerMock implements Docker {
return new ProcessResult(0, null, "");
}
+ @Override
+ public boolean noManagedContainersRunning(String manager) {
+ return false;
+ }
public class StartContainerCommandMock implements CreateContainerCommand {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
index a6d311604fd..58c576d3f44 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
@@ -1,12 +1,9 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.lb;
-import com.google.common.collect.ImmutableSortedSet;
-import com.yahoo.config.provision.RotationName;
import com.yahoo.vespa.hosted.provision.maintenance.LoadBalancerExpirer;
import java.util.Objects;
-import java.util.Set;
/**
* Represents a load balancer for an application's cluster. This is immutable.
@@ -17,13 +14,11 @@ public class LoadBalancer {
private final LoadBalancerId id;
private final LoadBalancerInstance instance;
- private final Set<RotationName> rotations;
private final boolean inactive;
- public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, Set<RotationName> rotations, boolean inactive) {
+ public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, boolean inactive) {
this.id = Objects.requireNonNull(id, "id must be non-null");
this.instance = Objects.requireNonNull(instance, "instance must be non-null");
- this.rotations = ImmutableSortedSet.copyOf(Objects.requireNonNull(rotations, "rotations must be non-null"));
this.inactive = inactive;
}
@@ -32,11 +27,6 @@ public class LoadBalancer {
return id;
}
- /** The rotations of which this is a member */
- public Set<RotationName> rotations() {
- return rotations;
- }
-
/** The instance associated with this */
public LoadBalancerInstance instance() {
return instance;
@@ -52,7 +42,7 @@ public class LoadBalancer {
/** Return a copy of this that is set inactive */
public LoadBalancer deactivate() {
- return new LoadBalancer(id, instance, rotations, true);
+ return new LoadBalancer(id, instance, true);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java
index 9efde8cf673..013fd169f45 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
-import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
@@ -51,8 +50,7 @@ public class InactiveExpirer extends Expirer {
@Override
protected boolean isExpired(Node node) {
return super.isExpired(node)
- || node.allocation().get().owner().instance().isTester()
- || node.type() == NodeType.host; // TODO: Remove after removing tenant hosts from zone-app
+ || node.allocation().get().owner().instance().isTester();
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 18ae7e17d6d..25549abe9ed 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -174,7 +174,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
rebootInterval = Duration.ofDays(30);
nodeRetirerInterval = Duration.ofMinutes(30);
metricsInterval = Duration.ofMinutes(1);
- infrastructureProvisionInterval = Duration.ofMinutes(3);
+ infrastructureProvisionInterval = Duration.ofMinutes(1);
throttlePolicy = NodeFailer.ThrottlePolicy.hosted;
loadBalancerExpiry = Duration.ofHours(1);
reservationExpiry = Duration.ofMinutes(20); // Need to be long enough for deployment to be finished for all config model versions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java
index 4c5310d69b6..46571fd0deb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java
@@ -13,7 +13,6 @@ import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.LinkedHashSet;
-import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
@@ -30,8 +29,6 @@ import java.util.stream.Collectors;
*/
public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
- private static final ApplicationId ZONE_APPLICATION_ID = ApplicationId.from("hosted-vespa", "routing", "default");
-
private final Clock clock;
private Instant previousRun;
@@ -47,9 +44,9 @@ public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
Instant windowEnd = clock.instant();
Instant windowStart = previousRun;
previousRun = windowEnd;
- return nodeRepository().getNodes().stream()
+ return nodeRepository().getNodes(NodeType.tenant).stream()
.filter(node -> hasManualStateChangeSince(windowStart, node))
- .flatMap(node -> owner(node).stream())
+ .flatMap(node -> node.allocation().map(Allocation::owner).stream())
.collect(Collectors.toCollection(LinkedHashSet::new));
}
@@ -58,13 +55,6 @@ public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
.anyMatch(event -> event.agent() == Agent.operator && event.at().isAfter(instant));
}
- private Optional<ApplicationId> owner(Node node) {
- if (node.allocation().isPresent()) return node.allocation().map(Allocation::owner);
-
- // TODO: Remove after removing tenant hosts from zone-app
- return node.type() == NodeType.host ? Optional.of(ZONE_APPLICATION_ID) : Optional.empty();
- }
-
/**
* Deploy in the maintenance thread to avoid scheduling multiple deployments of the same application if it takes
* longer to deploy than the (short) maintenance interval of this
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
index 17f2d7364a6..a4b915a6128 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.hosted.provision.persistence;
import com.yahoo.config.provision.HostName;
-import com.yahoo.config.provision.RotationName;
import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
@@ -35,7 +34,6 @@ public class LoadBalancerSerializer {
private static final String portsField = "ports";
private static final String networksField = "networks";
private static final String realsField = "reals";
- private static final String rotationsField = "rotations";
private static final String nameField = "name";
private static final String ipAddressField = "ipAddress";
private static final String portField = "port";
@@ -58,11 +56,6 @@ public class LoadBalancerSerializer {
realObject.setString(ipAddressField, real.ipAddress());
realObject.setLong(portField, real.port());
});
- Cursor rotationArray = root.setArray(rotationsField);
- loadBalancer.rotations().forEach(rotation -> {
- Cursor rotationObject = rotationArray.addObject();
- rotationObject.setString(nameField, rotation.value());
- });
root.setBool(inactiveField, loadBalancer.inactive());
try {
@@ -89,11 +82,6 @@ public class LoadBalancerSerializer {
Set<String> networks = new LinkedHashSet<>();
object.field(networksField).traverse((ArrayTraverser) (i, network) -> networks.add(network.asString()));
- Set<RotationName> rotations = new LinkedHashSet<>();
- object.field(rotationsField).traverse((ArrayTraverser) (i, rotation) -> {
- rotations.add(RotationName.from(rotation.field(nameField).asString()));
- });
-
return new LoadBalancer(LoadBalancerId.fromSerializedForm(object.field(idField).asString()),
new LoadBalancerInstance(
HostName.from(object.field(hostnameField).asString()),
@@ -102,7 +90,6 @@ public class LoadBalancerSerializer {
networks,
reals
),
- rotations,
object.field(inactiveField).asBool());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index f5f8ed53d2a..372dca84a53 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -57,7 +57,7 @@ public class LoadBalancerProvisioner {
LoadBalancerInstance instance = create(application, kv.getKey().id(), kv.getValue());
// Load balancer is always re-activated here to avoid reallocation if an application/cluster is
// deleted and then redeployed.
- LoadBalancer loadBalancer = new LoadBalancer(id, instance, kv.getKey().rotations(), false);
+ LoadBalancer loadBalancer = new LoadBalancer(id, instance, false);
loadBalancers.put(loadBalancer.id(), loadBalancer);
db.writeLoadBalancer(loadBalancer);
}
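With rotations gone, LoadBalancer is reduced to id, instance and the inactive flag: provisioning always writes it with inactive=false (re-activating it on redeploy, per the comment above), and deactivate() returns a copy with the flag set. A trimmed-down illustration of that immutable state flip (simplified names, not the actual class):

    // Illustration only: the activate/deactivate pattern LoadBalancer follows.
    public final class LoadBalancerSketch {
        private final String id;
        private final boolean inactive;

        LoadBalancerSketch(String id, boolean inactive) {
            this.id = id;
            this.inactive = inactive;
        }

        // Returns a copy of this that is set inactive, like LoadBalancer.deactivate().
        LoadBalancerSketch deactivate() {
            return new LoadBalancerSketch(id, true);
        }

        public static void main(String[] args) {
            LoadBalancerSketch provisioned = new LoadBalancerSketch("tenant:app:default/web", false);
            LoadBalancerSketch removed = provisioned.deactivate();
            System.out.println(provisioned.inactive + " -> " + removed.inactive); // false -> true
        }
    }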
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
index 69e13f77a09..d31834567ab 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
@@ -76,11 +76,7 @@ public class LoadBalancersResponse extends HttpResponse {
realObject.setLong("port", real.port());
});
- Cursor rotationArray = lbObject.setArray("rotations");
- lb.rotations().forEach(rotation -> {
- Cursor rotationObject = rotationArray.addObject();
- rotationObject.setString("name", rotation.value());
- });
+ lbObject.setArray("rotations"); // To avoid changing the API. This can be removed when clients stop expecting this
lbObject.setBool("inactive", lb.inactive());
});
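The response keeps emitting a "rotations" field so the JSON shape seen by clients does not change, but the array is now always empty. Illustrative payloads only (field names taken from the serializer above, values made up):

    // Illustration of the wire-format difference; not produced by real code.
    public class RotationsFieldExample {
        public static void main(String[] args) {
            String before = "{\"id\":\"tenant:app:default\",\"rotations\":[{\"name\":\"eu-cluster\"}],\"inactive\":false}";
            String after  = "{\"id\":\"tenant:app:default\",\"rotations\":[],\"inactive\":false}";
            System.out.println(before);
            System.out.println(after);
        }
    }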
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ZoneAppMigrationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ZoneAppMigrationTest.java
deleted file mode 100644
index ffdcea973e5..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ZoneAppMigrationTest.java
+++ /dev/null
@@ -1,171 +0,0 @@
-package com.yahoo.vespa.hosted.provision.maintenance;
-
-import com.yahoo.component.Version;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Capacity;
-import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.HostSpec;
-import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.test.ManualClock;
-import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.node.Agent;
-import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-import java.util.stream.Collector;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static com.yahoo.config.provision.ClusterSpec.Type.container;
-import static com.yahoo.config.provision.ClusterSpec.Type.content;
-import static org.junit.Assert.assertEquals;
-
-/**
- * This is a temporary test to verify the requirements needed for a successful migration of tenant
- * host nodes out of the zone-application.
- *
- * TODO: Remove after removing tenant hosts from zone-app
- *
- * @author freva
- */
-public class ZoneAppMigrationTest {
-
- private final ManualClock clock = new ManualClock();
- private final ProvisioningTester tester = new ProvisioningTester.Builder().build();
- private final InactiveExpirer inactiveExpirer = new InactiveExpirer(tester.nodeRepository(), clock, Duration.ofDays(99));
-
- private final Version version = Version.fromString("7.42.23");
-
- private final ApplicationId zoneApp = ApplicationId.from("hosted-vespa", "routing", "default");
- private final ApplicationId proxyHostApp = ApplicationId.from("hosted-vespa", "proxy-host", "default");
- private final ApplicationId tenantHostApp = ApplicationId.from("hosted-vespa", "tenant-host", "default");
- private final ApplicationId app1 = tester.makeApplicationId();
- private final ApplicationId app2 = tester.makeApplicationId();
-
-
- @Test
- public void tenant_host_deallocation_test() {
- assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size());
- assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size());
- assertEquals(15, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active).size());
-
- Set<Node> tenantNodes = Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant));
-
- // Activate zone-app with only proxy nodes, all tenant hosts become inactive, no change to other nodes
- tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing"));
- assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size());
- assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.inactive).size());
- assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant)));
-
- // All tenant hosts become dirty, no change to other nodes
- inactiveExpirer.maintain();
- assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size());
- assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.dirty).size());
- assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant)));
- // No reboot generation incrementation
- assertEquals(0, tester.nodeRepository().getNodes(NodeType.host).stream().mapToLong(node -> node.status().reboot().wanted()).sum());
-
- tester.nodeRepository().getNodes(NodeType.host)
- .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin"));
- assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size());
- assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.ready).size());
- assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant)));
-
- tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host"));
- assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size());
- assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size());
- assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant)));
-
- // All tenant hosts are allocated to tenant host application
- assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.host)),
- Set.copyOf(tester.nodeRepository().getNodes(tenantHostApp)));
-
- // All proxy nodes are still allocated to zone-app
- assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.proxy)),
- Set.copyOf(tester.nodeRepository().getNodes(zoneApp)));
- }
-
- @Test
- public void conflicting_type_allocation_test() {
- // Re-allocate tenant host from zone-app to tenant-host app
- tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing"));
- inactiveExpirer.maintain();
- tester.nodeRepository().getNodes(NodeType.host)
- .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin"));
- tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host"));
-
- // Re-deploying zone-app with both type proxy and host has no effect (no tenant hosts are re-allocated from tenant-host app)
- Set<Node> allNodes = Set.copyOf(tester.nodeRepository().getNodes());
- List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing");
- List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin");
- List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList());
- tester.activate(zoneApp, zoneAppHostSpecs);
- assertEquals(0, nodeAdminHostSpecs.size());
- assertEquals(allNodes, Set.copyOf(tester.nodeRepository().getNodes()));
-
- // Provision another host and redeploy zone-app
- Node newHost = tester.makeReadyNodes(1, "large", NodeType.host).get(0);
- proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing");
- nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin");
- zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList());
- tester.activate(zoneApp, zoneAppHostSpecs);
-
- assertEquals(1, nodeAdminHostSpecs.size()); // The newly provisioned host is prepared
- newHost = tester.nodeRepository().getNode(newHost.hostname()).orElseThrow(); // Update newHost after it has been allocated
- Set<Node> allNodesWithNewHost = concat(allNodes, Set.of(newHost), Collectors.toSet());
- assertEquals(allNodesWithNewHost, Set.copyOf(tester.nodeRepository().getNodes()));
- // The new host is allocated to zone-app, while the old ones are still allocated to tenant-host app
- assertEquals(zoneApp, newHost.allocation().get().owner());
- }
-
- @Before
- public void setup() {
- tester.makeReadyNodes(5, "large", NodeType.proxyhost);
- tester.makeReadyNodes(5, "large", NodeType.proxy);
- tester.makeReadyNodes(20, "large", NodeType.host, 3);
-
- tester.activate(proxyHostApp, prepareSystemApplication(proxyHostApp, NodeType.proxyhost, "proxy-host"));
- List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing");
- List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin");
- List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList());
- tester.activate(zoneApp, zoneAppHostSpecs);
-
- activateTenantApplication(app1, 3, 4);
- activateTenantApplication(app2, 5, 3);
- }
-
- private List<HostSpec> prepareSystemApplication(ApplicationId applicationId, NodeType nodeType, String clusterId) {
- return tester.prepare(applicationId,
- ClusterSpec.request(container, ClusterSpec.Id.from(clusterId), version, false, Set.of()),
- Capacity.fromRequiredNodeType(nodeType),
- 1);
- }
-
- private void activateTenantApplication(ApplicationId app, int numContainerNodes, int numContentNodes) {
- List<HostSpec> combinedHostSpecs = new ArrayList<>(numContainerNodes + numContentNodes);
-
- combinedHostSpecs.addAll(tester.prepare(app,
- ClusterSpec.request(container, ClusterSpec.Id.from("web"), version, false, Set.of()),
- Capacity.fromCount(numContainerNodes, new NodeResources(2, 2, 50)),
- 1));
-
- combinedHostSpecs.addAll(tester.prepare(app,
- ClusterSpec.request(content, ClusterSpec.Id.from("store"), version, false, Set.of()),
- Capacity.fromCount(numContentNodes, new NodeResources(1, 4, 50)),
- 1));
-
- tester.activate(app, combinedHostSpecs);
- }
-
- private <T, R, A> R concat(Collection<T> c1, Collection<T> c2, Collector<? super T, A, R> collector) {
- return Stream.concat(c1.stream(), c2.stream())
- .collect(collector);
- }
-}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
index 6de93c2ae65..460764b50db 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java
@@ -5,7 +5,6 @@ import com.google.common.collect.ImmutableSet;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
-import com.yahoo.config.provision.RotationName;
import com.yahoo.vespa.hosted.provision.lb.DnsZone;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId;
@@ -39,8 +38,6 @@ public class LoadBalancerSerializerTest {
new Real(HostName.from("real-2"),
"127.0.0.2",
4080))),
- ImmutableSet.of(RotationName.from("eu-cluster"),
- RotationName.from("us-cluster")),
false);
LoadBalancer serialized = LoadBalancerSerializer.fromJson(LoadBalancerSerializer.toJson(loadBalancer));
@@ -49,7 +46,6 @@ public class LoadBalancerSerializerTest {
assertEquals(loadBalancer.instance().dnsZone(), serialized.instance().dnsZone());
assertEquals(loadBalancer.instance().ports(), serialized.instance().ports());
assertEquals(loadBalancer.instance().networks(), serialized.instance().networks());
- assertEquals(loadBalancer.rotations(), serialized.rotations());
assertEquals(loadBalancer.inactive(), serialized.inactive());
assertEquals(loadBalancer.instance().reals(), serialized.instance().reals());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index 58c0b3ed9cc..f97460713a5 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -61,7 +61,6 @@ public class LoadBalancerProvisionerTest {
assertEquals(4080, get(loadBalancers.get().get(0).instance().reals(), 0).port());
assertEquals("127.0.0.2", get(loadBalancers.get().get(0).instance().reals(), 1).ipAddress());
assertEquals(4080, get(loadBalancers.get().get(0).instance().reals(), 1).port());
- assertEquals(rotationsCluster1, loadBalancers.get().get(0).rotations());
// A container is failed
Supplier<List<Node>> containers = () -> tester.getNodes(app1).type(ClusterSpec.Type.container).asList();
@@ -105,7 +104,6 @@ public class LoadBalancerProvisionerTest {
.map(Real::hostname)
.sorted()
.collect(Collectors.toList());
- assertEquals(rotationsCluster2, loadBalancers.get().get(1).rotations());
assertEquals(activeContainers, reals);
// Application is removed and load balancer is deactivated
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java
index db9fe76dc62..c53d35c16dc 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java
@@ -35,11 +35,7 @@ public class VespaModelUtil {
public static final ApplicationId TENANT_HOST_APPLICATION_ID =
ApplicationId.from("hosted-vespa", "tenant-host", "default");
- // TODO: Remove after removing tenant hosts from zone-app
- public static final ApplicationId ZONE_APPLICATION_ID =
- ApplicationId.from("hosted-vespa", "routing", "default");
public static final ClusterId ADMIN_CLUSTER_ID = new ClusterId("admin");
- public static final ClusterId NODE_ADMIN_CLUSTER_ID = new ClusterId("node-admin");
public static final ServiceType SLOBROK_SERVICE_TYPE = new ServiceType("slobrok");
public static final ServiceType CLUSTER_CONTROLLER_SERVICE_TYPE = new ServiceType("container-clustercontroller");
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
index 7190473dd41..065defec1cd 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
@@ -84,12 +84,6 @@ public class HostedVespaClusterPolicy implements ClusterPolicy {
return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
}
- // TODO: Remove after removing tenant hosts from zone-app
- if (clusterApi.getApplication().applicationId().equals(VespaModelUtil.ZONE_APPLICATION_ID) &&
- clusterApi.clusterId().equals(VespaModelUtil.NODE_ADMIN_CLUSTER_ID)) {
- return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
- }
-
return ConcurrentSuspensionLimitForCluster.TEN_PERCENT;
}
}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
index 6fc826c1b5f..d834034c9a8 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
@@ -52,15 +52,6 @@ public class HostedVespaClusterPolicyTest {
policy.getConcurrentSuspensionLimit(clusterApi));
}
- @Test // TODO: Remove after removing tenant hosts from zone-app
- public void testNodeAdminSuspensionLimit() {
- when(applicationApi.applicationId()).thenReturn(VespaModelUtil.ZONE_APPLICATION_ID);
- when(clusterApi.clusterId()).thenReturn(VespaModelUtil.NODE_ADMIN_CLUSTER_ID);
- when(clusterApi.isStorageCluster()).thenReturn(false);
- assertEquals(ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT,
- policy.getConcurrentSuspensionLimit(clusterApi));
- }
-
@Test
public void testTenantHostSuspensionLimit() {
when(applicationApi.applicationId()).thenReturn(VespaModelUtil.TENANT_HOST_APPLICATION_ID);
diff --git a/pom.xml b/pom.xml
index 120547fbfed..984c0cdf7a2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -120,6 +120,7 @@
<module>standalone-container</module>
<module>statistics</module>
<module>storage</module>
+ <module>tenant-auth</module>
<module>tenant-base</module>
<module>tenant-cd</module>
<module>testutil</module>
diff --git a/processing/src/main/java/com/yahoo/processing/request/CompoundName.java b/processing/src/main/java/com/yahoo/processing/request/CompoundName.java
index 43add084542..976fb3e2796 100644
--- a/processing/src/main/java/com/yahoo/processing/request/CompoundName.java
+++ b/processing/src/main/java/com/yahoo/processing/request/CompoundName.java
@@ -5,6 +5,7 @@ import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import static com.yahoo.text.Lowercase.toLowerCase;
@@ -79,17 +80,19 @@ public final class CompoundName {
}
private static List<String> parse(String s) {
- ArrayList<String> l = new ArrayList<>();
+ ArrayList<String> l = null;
+
int p = 0;
final int m = s.length();
for (int i = 0; i < m; i++) {
if (s.charAt(i) == '.') {
+ if (l == null) l = new ArrayList<>(8);
l.add(s.substring(p, i));
p = i + 1;
}
}
if (p == 0) {
- l.add(s);
+ return ImmutableList.of(s);
} else if (p < m) {
l.add(s.substring(p, m));
} else {
diff --git a/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp b/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
index 268fe63ba4c..672e7f78784 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/blueprintbuilder.cpp
@@ -4,6 +4,7 @@
#include "blueprintbuilder.h"
#include "termdatafromnode.h"
#include "same_element_builder.h"
+#include <vespa/searchcorespi/index/indexsearchable.h>
#include <vespa/searchlib/query/tree/customtypevisitor.h>
#include <vespa/searchlib/queryeval/leaf_blueprints.h>
#include <vespa/searchlib/queryeval/intermediate_blueprints.h>
diff --git a/searchcore/src/vespa/searchcore/proton/matching/fakesearchcontext.h b/searchcore/src/vespa/searchcore/proton/matching/fakesearchcontext.h
index 02aedf15d6e..fe9c20112f4 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/fakesearchcontext.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/fakesearchcontext.h
@@ -52,7 +52,7 @@ public:
search::queryeval::ISourceSelector &selector() { return *_selector; }
// Implements ISearchContext
- search::queryeval::Searchable &getIndexes() override {
+ IndexSearchable &getIndexes() override {
return *_indexes;
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h b/searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h
index 2965e3796bf..dc840dc79ff 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/isearchcontext.h
@@ -6,6 +6,8 @@
#include <memory>
+namespace searchcorespi { class IndexSearchable; }
+
namespace proton::matching {
/**
@@ -31,13 +33,14 @@ public:
ISearchContext & operator = (const ISearchContext &) = delete;
typedef search::queryeval::Searchable Searchable;
+ using IndexSearchable = searchcorespi::IndexSearchable;
/**
* Obtain the index fields searchable.
*
* @return index fields searchable.
**/
- virtual Searchable &getIndexes() = 0;
+ virtual IndexSearchable &getIndexes() = 0;
/**
* Obtain the attribute fields searchable.
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
index 4a944dc3214..5d1e2212c83 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
@@ -9,6 +9,7 @@
#include <vespa/log/log.h>
LOG_SETUP(".proton.matching.match_tools");
#include <vespa/searchlib/query/tree/querytreecreator.h>
+#include <vespa/searchcorespi/index/indexsearchable.h>
using search::attribute::IAttributeContext;
using search::queryeval::IRequestContext;
@@ -158,7 +159,7 @@ MatchToolsFactory(QueryLimiter & queryLimiter,
_hardDoom(hardDoom),
_query(),
_match_limiter(),
- _queryEnv(indexEnv, attributeContext, rankProperties),
+ _queryEnv(indexEnv, attributeContext, rankProperties, searchContext.getIndexes()),
_mdl(),
_rankSetup(rankSetup),
_featureOverrides(featureOverrides),
diff --git a/searchcore/src/vespa/searchcore/proton/matching/queryenvironment.cpp b/searchcore/src/vespa/searchcore/proton/matching/queryenvironment.cpp
index d4320c87ab2..ec48ee7164b 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/queryenvironment.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/queryenvironment.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "queryenvironment.h"
+#include <vespa/searchlib/index/i_field_length_inspector.h>
using search::attribute::IAttributeContext;
using search::fef::IIndexEnvironment;
@@ -11,12 +12,14 @@ namespace proton::matching {
QueryEnvironment::QueryEnvironment(const IIndexEnvironment &indexEnv,
const IAttributeContext &attrContext,
- const Properties &properties)
+ const Properties &properties,
+ const search::index::IFieldLengthInspector &field_length_inspector)
: _indexEnv(indexEnv),
_attrContext(attrContext),
_properties(properties),
_locations(1),
- _terms()
+ _terms(),
+ _field_length_inspector(field_length_inspector)
{
}
@@ -53,6 +56,12 @@ QueryEnvironment::getAttributeContext() const
return _attrContext;
}
+double
+QueryEnvironment::get_average_field_length(const vespalib::string &field_name) const
+{
+ return _field_length_inspector.get_field_length_info(field_name).get_average_field_length();
+}
+
const search::fef::IIndexEnvironment &
QueryEnvironment::getIndexEnvironment() const
{
diff --git a/searchcore/src/vespa/searchcore/proton/matching/queryenvironment.h b/searchcore/src/vespa/searchcore/proton/matching/queryenvironment.h
index d79ba1796f7..8f958870d52 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/queryenvironment.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/queryenvironment.h
@@ -6,6 +6,8 @@
#include <vespa/searchlib/fef/properties.h>
#include <vespa/searchlib/fef/location.h>
+namespace search::index { class IFieldLengthInspector; }
+
namespace proton::matching {
/**
@@ -19,6 +21,7 @@ private:
search::fef::Properties _properties;
std::vector<const search::fef::Location *> _locations;
std::vector<const search::fef::ITermData *> _terms;
+ const search::index::IFieldLengthInspector &_field_length_inspector;
QueryEnvironment(const QueryEnvironment &);
QueryEnvironment &operator=(const QueryEnvironment &);
@@ -33,7 +36,8 @@ public:
**/
QueryEnvironment(const search::fef::IIndexEnvironment &indexEnv,
const search::attribute::IAttributeContext &attrContext,
- const search::fef::Properties &properties);
+ const search::fef::Properties &properties,
+ const search::index::IFieldLengthInspector &field_length_inspector);
/**
* Used to edit the list of terms by the one setting up this query
@@ -71,6 +75,8 @@ public:
// inherited from search::fef::IQueryEnvironment
const search::attribute::IAttributeContext & getAttributeContext() const override;
+ double get_average_field_length(const vespalib::string &field_name) const override;
+
// inherited from search::fef::IQueryEnvironment
const search::fef::IIndexEnvironment & getIndexEnvironment() const override;
diff --git a/searchcore/src/vespa/searchcore/proton/matching/querynodes.cpp b/searchcore/src/vespa/searchcore/proton/matching/querynodes.cpp
index 6d810594aa7..bb8a669f91a 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/querynodes.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/querynodes.cpp
@@ -34,10 +34,10 @@ ProtonTermData & ProtonTermData::operator = (const ProtonTermData &) = default;
ProtonTermData::~ProtonTermData() = default;
void
-ProtonTermData::setDocumentFrequency(double freq)
+ProtonTermData::propagate_document_frequency(uint32_t matching_doc_count, uint32_t total_doc_count)
{
for (size_t i = 0; i < _fields.size(); ++i) {
- _fields[i].setDocFreq(freq);
+ _fields[i].setDocFreq(matching_doc_count, total_doc_count);
}
}
@@ -97,10 +97,9 @@ void
ProtonTermData::setDocumentFrequency(uint32_t estHits, uint32_t docIdLimit)
{
if (docIdLimit > 1) {
- double hits = estHits;
- setDocumentFrequency(hits / (docIdLimit - 1));
+ propagate_document_frequency(estHits, docIdLimit - 1);
} else {
- setDocumentFrequency(0.0);
+ propagate_document_frequency(0, 1);
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/querynodes.h b/searchcore/src/vespa/searchcore/proton/matching/querynodes.h
index 8cf65c1e67b..6454845b247 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/querynodes.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/querynodes.h
@@ -47,7 +47,7 @@ public:
private:
std::vector<FieldEntry> _fields;
- void setDocumentFrequency(double docFreq);
+ void propagate_document_frequency(uint32_t matching_doc_count, uint32_t total_doc_count);
protected:
void resolve(const ViewResolver &resolver,
diff --git a/searchcore/src/vespa/searchcore/proton/matching/same_element_builder.cpp b/searchcore/src/vespa/searchcore/proton/matching/same_element_builder.cpp
index d3a0ec4726f..16c86e8a4f5 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/same_element_builder.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/same_element_builder.cpp
@@ -4,6 +4,7 @@
#include "querynodes.h"
#include <vespa/searchlib/query/tree/customtypevisitor.h>
#include <vespa/searchlib/queryeval/leaf_blueprints.h>
+#include <vespa/searchcorespi/index/indexsearchable.h>
using search::queryeval::Blueprint;
using search::queryeval::EmptyBlueprint;
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp
index d53adf19f76..9445a0a5206 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp
@@ -2,9 +2,10 @@
#include "disk_mem_usage_filter.h"
#include "i_disk_mem_usage_listener.h"
-#include <vespa/log/log.h>
#include <vespa/searchcore/proton/common/hw_info.h>
+#include <sstream>
+#include <vespa/log/log.h>
LOG_SETUP(".proton.server.disk_mem_usage_filter");
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.cpp b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
index 772be9049db..06f19eb06cc 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
@@ -37,6 +37,7 @@
#include <vespa/searchlib/aggregation/forcelink.hpp>
#include <vespa/searchlib/expression/forcelink.hpp>
+#include <sstream>
#include <vespa/log/log.h>
LOG_SETUP(".proton.server.proton");
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchcontext.cpp b/searchcore/src/vespa/searchcore/proton/server/searchcontext.cpp
index ea09c60bd52..d9207ef70e1 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchcontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/searchcontext.cpp
@@ -3,10 +3,11 @@
#include "searchcontext.h"
using search::queryeval::Searchable;
+using searchcorespi::IndexSearchable;
namespace proton {
-Searchable &
+IndexSearchable &
SearchContext::getIndexes()
{
return *_indexSearchable;
@@ -23,7 +24,7 @@ uint32_t SearchContext::getDocIdLimit()
return _docIdLimit;
}
-SearchContext::SearchContext(const Searchable::SP &indexSearchable, uint32_t docIdLimit)
+SearchContext::SearchContext(const std::shared_ptr<IndexSearchable> &indexSearchable, uint32_t docIdLimit)
: _indexSearchable(indexSearchable),
_attributeBlueprintFactory(),
_docIdLimit(docIdLimit)
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchcontext.h b/searchcore/src/vespa/searchcore/proton/server/searchcontext.h
index 71475c3decd..b9ea6b334b3 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchcontext.h
+++ b/searchcore/src/vespa/searchcore/proton/server/searchcontext.h
@@ -16,16 +16,16 @@ class SearchContext : public matching::ISearchContext
{
private:
/// Snapshot of the indexes used.
- Searchable::SP _indexSearchable;
+ std::shared_ptr<IndexSearchable> _indexSearchable;
search::AttributeBlueprintFactory _attributeBlueprintFactory;
uint32_t _docIdLimit;
- Searchable &getIndexes() override;
+ IndexSearchable &getIndexes() override;
Searchable &getAttributes() override;
uint32_t getDocIdLimit() override;
public:
- SearchContext(const Searchable::SP &indexSearchable, uint32_t docIdLimit);
+ SearchContext(const std::shared_ptr<IndexSearchable> &indexSearchable, uint32_t docIdLimit);
};
} // namespace proton
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionNode.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionNode.java
index c4f3a75f2f8..2aedec2109b 100755
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionNode.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionNode.java
@@ -5,6 +5,7 @@ import com.yahoo.searchlib.rankingexpression.Reference;
import com.yahoo.searchlib.rankingexpression.evaluation.Context;
import com.yahoo.searchlib.rankingexpression.evaluation.DoubleValue;
import com.yahoo.searchlib.rankingexpression.evaluation.Value;
+import com.yahoo.searchlib.rankingexpression.transform.TensorMaxMinTransformer;
import com.yahoo.tensor.TensorType;
import com.yahoo.tensor.evaluation.TypeContext;
import com.yahoo.tensor.functions.Join;
@@ -67,6 +68,11 @@ public final class FunctionNode extends CompositeNode {
@Override
public TensorType type(TypeContext<Reference> context) {
+ // Check if this node should be interpreted as tensor reduce, as this impacts the type
+ ExpressionNode thisTransformed = TensorMaxMinTransformer.transformFunctionNode(this, context);
+ if (thisTransformed != this)
+ return thisTransformed.type(context);
+
if (arguments.expressions().size() == 0)
return TensorType.empty;
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/IfNode.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/IfNode.java
index 28dc623be72..92c6d6f8638 100755
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/IfNode.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/IfNode.java
@@ -85,7 +85,9 @@ public final class IfNode extends CompositeNode {
return trueType.dimensionwiseGeneralizationWith(falseType).orElseThrow(() ->
new IllegalArgumentException("An if expression must produce compatible types in both " +
"alternatives, but the 'true' type is " + trueType + " while the " +
- "'false' type is " + falseType)
+ "'false' type is " + falseType +
+ "\n'true' branch: " + trueExpression +
+ "\n'false' branch: " + falseExpression)
);
}
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/ReferenceNode.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/ReferenceNode.java
index eb8d2229a6d..e15ce158e83 100755
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/ReferenceNode.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/ReferenceNode.java
@@ -95,7 +95,13 @@ public final class ReferenceNode extends CompositeNode {
@Override
public TensorType type(TypeContext<Reference> context) {
- TensorType type = context.getType(reference);
+ TensorType type = null;
+ try {
+ type = context.getType(reference);
+ }
+ catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException(reference + " is invalid", e);
+ }
if (type == null)
throw new IllegalArgumentException("Unknown feature '" + toString() + "'");
return type;
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/ExpressionTransformer.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/ExpressionTransformer.java
index 22d314bcb28..31567ba120b 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/ExpressionTransformer.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/ExpressionTransformer.java
@@ -10,7 +10,7 @@ import java.util.List;
/**
* Superclass of expression transformers. The scope (lifetime) of a transformer instance is a single compilation
- * of alle the expressions in one rank profile.
+ * of all the expressions in one rank profile.
*
* @author bratseth
*/
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/TensorTransformer.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/TensorMaxMinTransformer.java
index 5d03c323803..979c5b0f88c 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/expressiontransforms/TensorTransformer.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/TensorMaxMinTransformer.java
@@ -1,54 +1,40 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.searchdefinition.expressiontransforms;
+package com.yahoo.searchlib.rankingexpression.transform;
-import com.yahoo.searchdefinition.RankProfile;
-import com.yahoo.searchdefinition.RankingConstant;
-import com.yahoo.searchdefinition.document.Attribute;
import com.yahoo.searchlib.rankingexpression.Reference;
-import com.yahoo.searchlib.rankingexpression.evaluation.Context;
-import com.yahoo.searchlib.rankingexpression.evaluation.DoubleValue;
-import com.yahoo.searchlib.rankingexpression.evaluation.MapContext;
-import com.yahoo.searchlib.rankingexpression.evaluation.StringValue;
-import com.yahoo.searchlib.rankingexpression.evaluation.TensorValue;
-import com.yahoo.searchlib.rankingexpression.evaluation.Value;
import com.yahoo.searchlib.rankingexpression.rule.CompositeNode;
import com.yahoo.searchlib.rankingexpression.rule.ExpressionNode;
import com.yahoo.searchlib.rankingexpression.rule.FunctionNode;
import com.yahoo.searchlib.rankingexpression.rule.NameNode;
import com.yahoo.searchlib.rankingexpression.rule.ReferenceNode;
import com.yahoo.searchlib.rankingexpression.rule.TensorFunctionNode;
-import com.yahoo.searchlib.rankingexpression.transform.ExpressionTransformer;
-import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
import com.yahoo.tensor.evaluation.TypeContext;
import com.yahoo.tensor.functions.Reduce;
-import java.util.List;
import java.util.Optional;
/**
- * Transforms and simplifies tensor expressions.
- *
- * Currently transforms min(tensor,dim) and max(tensor,dim) to
+ * Transforms min(tensor,dim) and max(tensor,dim) to
* reduce(tensor,min/max,dim). This is necessary as the backend does
* not recognize these forms of min and max.
*
* @author lesters
*/
-public class TensorTransformer extends ExpressionTransformer<RankProfileTransformContext> {
+public class TensorMaxMinTransformer<CONTEXT extends TransformContext> extends ExpressionTransformer<CONTEXT> {
@Override
- public ExpressionNode transform(ExpressionNode node, RankProfileTransformContext context) {
+ public ExpressionNode transform(ExpressionNode node, CONTEXT context) {
if (node instanceof CompositeNode) {
node = transformChildren((CompositeNode) node, context);
}
if (node instanceof FunctionNode) {
- node = transformFunctionNode((FunctionNode) node, context);
+ node = transformFunctionNode((FunctionNode) node, context.types());
}
return node;
}
- private ExpressionNode transformFunctionNode(FunctionNode node, RankProfileTransformContext context) {
+ public static ExpressionNode transformFunctionNode(FunctionNode node, TypeContext<Reference> context) {
switch (node.getFunction()) {
case min:
case max:
@@ -62,14 +48,14 @@ public class TensorTransformer extends ExpressionTransformer<RankProfileTransfor
* argument returns a tensor type and the second argument is a valid
* dimension in the tensor.
*/
- private ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node, RankProfileTransformContext context) {
+ private static ExpressionNode transformMaxAndMinFunctionNode(FunctionNode node, TypeContext<Reference> context) {
if (node.children().size() != 2) {
return node;
}
ExpressionNode arg1 = node.children().get(0);
Optional<String> dimension = dimensionName(node.children().get(1));
if (dimension.isPresent()) {
- TensorType type = arg1.type(context.types());
+ TensorType type = arg1.type(context);
if (type.dimension(dimension.get()).isPresent()) {
return replaceMaxAndMinFunction(node);
}
@@ -77,7 +63,7 @@ public class TensorTransformer extends ExpressionTransformer<RankProfileTransfor
return node;
}
- private Optional<String> dimensionName(ExpressionNode node) {
+ private static Optional<String> dimensionName(ExpressionNode node) {
if (node instanceof ReferenceNode) {
Reference reference = ((ReferenceNode)node).reference();
if (reference.isIdentifier())
@@ -93,7 +79,7 @@ public class TensorTransformer extends ExpressionTransformer<RankProfileTransfor
}
}
- private ExpressionNode replaceMaxAndMinFunction(FunctionNode node) {
+ private static ExpressionNode replaceMaxAndMinFunction(FunctionNode node) {
ExpressionNode arg1 = node.children().get(0);
ExpressionNode arg2 = node.children().get(1);
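The class comment above states the rewrite this transformer performs: min(tensor,dim) and max(tensor,dim) become reduce(tensor,min/max,dim) when the second argument names a dimension of the first argument's tensor type. A textual illustration of that rewrite (expression strings only, the transformer itself is not invoked; "mytensor" is a hypothetical attribute with a dimension named x):

    // Illustration of the max/min -> reduce rewrite described above.
    public class MaxMinRewriteExample {
        public static void main(String[] args) {
            String before = "max(attribute(mytensor), x)";
            String after  = "reduce(attribute(mytensor), max, x)";
            System.out.println(before + "  ->  " + after);
        }
    }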
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/TransformContext.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/TransformContext.java
index 7485ce69f98..0113a650277 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/TransformContext.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/transform/TransformContext.java
@@ -1,7 +1,9 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.searchlib.rankingexpression.transform;
+import com.yahoo.searchlib.rankingexpression.Reference;
import com.yahoo.searchlib.rankingexpression.evaluation.Value;
+import com.yahoo.tensor.evaluation.TypeContext;
import java.util.Map;
@@ -13,11 +15,19 @@ import java.util.Map;
public class TransformContext {
private final Map<String, Value> constants;
+ private final TypeContext<Reference> types;
- public TransformContext(Map<String, Value> constants) {
+ public TransformContext(Map<String, Value> constants, TypeContext<Reference> types) {
this.constants = constants;
+ this.types = types;
}
public Map<String, Value> constants() { return constants; }
+ /**
+ * Returns the types known in this context. We may have type information for references
+ * for which no value is available
+ */
+ public TypeContext<Reference> types() { return types; }
+
}
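TransformContext now carries a TypeContext alongside the constants, so transformers such as TensorMaxMinTransformer can resolve types even for references that have no value. Construction matches the updated call sites in the tests below, for example:

    import com.yahoo.searchlib.rankingexpression.evaluation.MapTypeContext;
    import com.yahoo.searchlib.rankingexpression.transform.TransformContext;

    import java.util.Collections;

    public class TransformContextExample {
        public static void main(String[] args) {
            // An empty constant map plus an (initially empty) type context.
            TransformContext context = new TransformContext(Collections.emptyMap(), new MapTypeContext());
            System.out.println(context.constants().isEmpty()); // true
        }
    }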
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/TypeResolutionTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/TypeResolutionTestCase.java
index a08d510eec4..88838b5aed0 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/TypeResolutionTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/TypeResolutionTestCase.java
@@ -53,7 +53,9 @@ public class TypeResolutionTestCase {
}
catch (IllegalArgumentException expected) {
assertEquals("An if expression must produce compatible types in both alternatives, " +
- "but the 'true' type is tensor(x[]) while the 'false' type is tensor(y[])",
+ "but the 'true' type is tensor(x[]) while the 'false' type is tensor(y[])" +
+ "\n'true' branch: query(x1)" +
+ "\n'false' branch: query(y1)",
expected.getMessage());
}
catch (ParseException e) {
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/ConstantDereferencerTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/ConstantDereferencerTestCase.java
index 1f28f0b0129..a41fb02f784 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/ConstantDereferencerTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/ConstantDereferencerTestCase.java
@@ -2,6 +2,7 @@
package com.yahoo.searchlib.rankingexpression.transform;
import com.yahoo.searchlib.rankingexpression.RankingExpression;
+import com.yahoo.searchlib.rankingexpression.evaluation.MapTypeContext;
import com.yahoo.searchlib.rankingexpression.evaluation.Value;
import com.yahoo.searchlib.rankingexpression.parser.ParseException;
import org.junit.Test;
@@ -24,7 +25,7 @@ public class ConstantDereferencerTestCase {
constants.put("a", Value.parse("1.0"));
constants.put("b", Value.parse("2"));
constants.put("c", Value.parse("3.5"));
- TransformContext context = new TransformContext(constants);
+ TransformContext context = new TransformContext(constants, new MapTypeContext());
assertEquals("1.0 + 2.0 + 3.5", c.transform(new RankingExpression("a + b + c"), context).toString());
assertEquals("myFunction(1.0,2.0)", c.transform(new RankingExpression("myFunction(a, b)"), context).toString());
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/SimplifierTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/SimplifierTestCase.java
index 8fac3395ac0..f4b1b0ceee2 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/SimplifierTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/transform/SimplifierTestCase.java
@@ -1,9 +1,10 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.searchlib.rankingexpression.transform;
import com.yahoo.searchlib.rankingexpression.RankingExpression;
import com.yahoo.searchlib.rankingexpression.evaluation.Context;
import com.yahoo.searchlib.rankingexpression.evaluation.MapContext;
+import com.yahoo.searchlib.rankingexpression.evaluation.MapTypeContext;
import com.yahoo.searchlib.rankingexpression.parser.ParseException;
import com.yahoo.searchlib.rankingexpression.rule.CompositeNode;
import org.junit.Test;
@@ -20,7 +22,7 @@ public class SimplifierTestCase {
@Test
public void testSimplify() throws ParseException {
Simplifier s = new Simplifier();
- TransformContext c = new TransformContext(Collections.emptyMap());
+ TransformContext c = new TransformContext(Collections.emptyMap(), new MapTypeContext());
assertEquals("a + b", s.transform(new RankingExpression("a + b"), c).toString());
assertEquals("6.5", s.transform(new RankingExpression("1.0 + 2.0 + 3.5"), c).toString());
assertEquals("6.5", s.transform(new RankingExpression("1.0 + ( 2.0 + 3.5 )"), c).toString());
@@ -45,7 +47,7 @@ public class SimplifierTestCase {
@Test
public void testSimplifyComplexExpression() throws ParseException {
RankingExpression initial = new RankingExpression("sqrt(if (if (INFERRED * 0.9 < INFERRED, GMP, (1 + 1.1) * INFERRED) < INFERRED * INFERRED - INFERRED, if (GMP < 85.80799542793133 * GMP, INFERRED, if (GMP < GMP, tanh(INFERRED), log(76.89956221113943))), tanh(tanh(INFERRED))) * sqrt(sqrt(GMP + INFERRED)) * GMP ) + 13.5 * (1 - GMP) * pow(GMP * 0.1, 2 + 1.1 * 0)");
- TransformContext c = new TransformContext(Collections.emptyMap());
+ TransformContext c = new TransformContext(Collections.emptyMap(), new MapTypeContext());
RankingExpression simplified = new Simplifier().transform(initial, c);
Context context = new MapContext();
@@ -70,7 +72,7 @@ public class SimplifierTestCase {
@Test
public void testParenthesisPreservation() throws ParseException {
Simplifier s = new Simplifier();
- TransformContext c = new TransformContext(Collections.emptyMap());
+ TransformContext c = new TransformContext(Collections.emptyMap(), new MapTypeContext());
CompositeNode transformed = (CompositeNode)s.transform(new RankingExpression("a + (b + c) / 100000000.0"), c).getRoot();
assertEquals("a + (b + c) / 100000000.0", transformed.toString());
}
diff --git a/searchlib/src/tests/common/bitvector/bitvector_test.cpp b/searchlib/src/tests/common/bitvector/bitvector_test.cpp
index 22a47952acb..e61c21bee1c 100644
--- a/searchlib/src/tests/common/bitvector/bitvector_test.cpp
+++ b/searchlib/src/tests/common/bitvector/bitvector_test.cpp
@@ -548,38 +548,39 @@ TEST("requireThatGrowWorks")
v.setBit(103);
EXPECT_EQUAL(200u, v.size());
+ EXPECT_EQUAL(1023u, v.capacity());
v.invalidateCachedCount();
EXPECT_TRUE(assertBV("[7,39,71,103]", v));
EXPECT_EQUAL(4u, v.countTrueBits());
- EXPECT_TRUE(v.reserve(204));
+ EXPECT_TRUE(v.reserve(1024));
EXPECT_EQUAL(200u, v.size());
- EXPECT_EQUAL(204u, v.capacity());
+ EXPECT_EQUAL(2047u, v.capacity());
EXPECT_TRUE(assertBV("[7,39,71,103]", v));
EXPECT_EQUAL(4u, v.countTrueBits());
EXPECT_FALSE(v.extend(202));
EXPECT_EQUAL(202u, v.size());
- EXPECT_EQUAL(204u, v.capacity());
+ EXPECT_EQUAL(2047u, v.capacity());
EXPECT_TRUE(assertBV("[7,39,71,103]", v));
EXPECT_EQUAL(4u, v.countTrueBits());
EXPECT_FALSE(v.shrink(200));
EXPECT_EQUAL(200u, v.size());
- EXPECT_EQUAL(204u, v.capacity());
+ EXPECT_EQUAL(2047u, v.capacity());
EXPECT_TRUE(assertBV("[7,39,71,103]", v));
EXPECT_EQUAL(4u, v.countTrueBits());
- EXPECT_FALSE(v.reserve(204));
+ EXPECT_FALSE(v.reserve(2047));
EXPECT_EQUAL(200u, v.size());
- EXPECT_EQUAL(204u, v.capacity());
+ EXPECT_EQUAL(2047u, v.capacity());
EXPECT_TRUE(assertBV("[7,39,71,103]", v));
EXPECT_EQUAL(4u, v.countTrueBits());
EXPECT_FALSE(v.shrink(202));
EXPECT_EQUAL(202u, v.size());
- EXPECT_EQUAL(204u, v.capacity());
+ EXPECT_EQUAL(2047u, v.capacity());
EXPECT_TRUE(assertBV("[7,39,71,103]", v));
EXPECT_EQUAL(4u, v.countTrueBits());
EXPECT_FALSE(v.shrink(100));
EXPECT_EQUAL(100u, v.size());
- EXPECT_EQUAL(204u, v.capacity());
+ EXPECT_EQUAL(2047u, v.capacity());
EXPECT_TRUE(assertBV("[7,39,71]", v));
EXPECT_EQUAL(3u, v.countTrueBits());
g.transferHoldLists(1);
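The new capacity expectations above (1023 and 2047) follow from the computeCapacity helper added to allocatedbitvector.cpp later in this diff: the reported capacity is derived from the actual allocation, (allocatedBytes * 8) - 1, rather than from the requested bit count. A minimal standalone check of that arithmetic; the 128- and 256-byte allocation sizes are assumptions chosen to reproduce the expected values, not something stated in this hunk.

#include <cassert>
#include <cstddef>

// Mirrors the computeCapacity helper introduced in allocatedbitvector.cpp:
// usable capacity is one bit less than the allocated buffer can hold.
size_t computeCapacity(size_t requestedBits, size_t allocatedBytes) {
    size_t possibleCapacity = (allocatedBytes * 8) - 1;
    assert(possibleCapacity >= requestedBits);
    return possibleCapacity;
}

int main() {
    assert(computeCapacity(200, 128) == 1023);   // the 1023u expectation above
    assert(computeCapacity(1024, 256) == 2047);  // the 2047u expectation after reserve(1024)
    return 0;
}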
diff --git a/searchlib/src/tests/diskindex/fusion/.gitignore b/searchlib/src/tests/diskindex/fusion/.gitignore
index 8526d6faa38..d9a33665c43 100644
--- a/searchlib/src/tests/diskindex/fusion/.gitignore
+++ b/searchlib/src/tests/diskindex/fusion/.gitignore
@@ -34,4 +34,5 @@ sdump5
/usage.out
/zwordc0coll.out
/zwordf0field.out
+/fldump[2-4]
searchlib_fusion_test_app
diff --git a/searchlib/src/tests/diskindex/fusion/CMakeLists.txt b/searchlib/src/tests/diskindex/fusion/CMakeLists.txt
index e1882dcb226..a58e47ca5f1 100644
--- a/searchlib/src/tests/diskindex/fusion/CMakeLists.txt
+++ b/searchlib/src/tests/diskindex/fusion/CMakeLists.txt
@@ -1,9 +1,11 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+find_package(GTest REQUIRED)
vespa_add_executable(searchlib_fusion_test_app TEST
SOURCES
fusion_test.cpp
DEPENDS
searchlib
+ GTest::GTest
AFTER
searchlib_vespa-index-inspect_app
)
diff --git a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
index 16d2a04df2e..4779ddcb10d 100644
--- a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
+++ b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
@@ -17,8 +17,9 @@
#include <vespa/vespalib/btree/btreenode.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
#include <vespa/vespalib/btree/btreeroot.hpp>
-#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/io/fileutil.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <gtest/gtest.h>
#include <vespa/log/log.h>
LOG_SETUP("fusion_test");
@@ -41,16 +42,27 @@ using namespace index;
namespace diskindex {
-class Test : public vespalib::TestApp
+class MyMockFieldLengthInspector : public IFieldLengthInspector {
+ FieldLengthInfo get_field_length_info(const vespalib::string& field_name) const override {
+ if (field_name == "f0") {
+ return FieldLengthInfo(3.5, 21);
+ } else {
+ return FieldLengthInfo();
+ }
+ }
+};
+
+class FusionTest : public ::testing::Test
{
-private:
+protected:
Schema _schema;
const Schema & getSchema() const { return _schema; }
void requireThatFusionIsWorking(const vespalib::string &prefix, bool directio, bool readmmap);
+ void make_empty_index(const vespalib::string &dump_dir, const IFieldLengthInspector &field_length_inspector);
+ void merge_empty_indexes(const vespalib::string &dump_dir, const std::vector<vespalib::string> &sources);
public:
- Test();
- int Main() override;
+ FusionTest();
};
namespace {
@@ -85,42 +97,6 @@ toString(FieldPositionsIterator posItr, bool hasElements = false, bool hasWeight
return ss.str();
}
-
-#if 0
-vespalib::string
-toString(DocIdAndFeatures &features)
-{
- vespalib::asciistream ss;
- ss << "{";
- std::vector<search::index::WordDocFieldElementFeatures>::const_iterator
- element = features._elements.begin();
- std::vector<search::index::WordDocFieldElementWordPosFeatures>::
- const_iterator position = features._wordPositions.begin();
- for (; field != fielde; ++field) {
- ss << "f=" << field->getFieldId() << "{";
- uint32_t numElements = field->getNumElements();
- while (numElements--) {
- ss << "e=" << element->getElementId() << ","
- << "ew=" << element->getWeight() << ","
- << "el=" << element->getElementLen() << "{";
- uint32_t numOccs = element->getNumOccs();
- while (numOccs--) {
- ss << position->getWordPos();
- if (numOccs != 0)
- ss << ",";
- }
- ss << "}";
- if (numElements != 0)
- ss << ",";
- }
- ss << "}";
- }
- ss << "}";
- return ss.str();
-}
-#endif
-
-
void
validateDiskIndex(DiskIndex &dw, bool f2HasElements, bool f3HasWeights)
{
@@ -133,112 +109,112 @@ validateDiskIndex(DiskIndex &dw, bool f2HasElements, bool f3HasWeights)
{
uint32_t id1(schema.getIndexFieldId("f0"));
LR::UP lr1(dw.lookup(id1, "c"));
- EXPECT_TRUE(lr1.get() != NULL);
+ ASSERT_TRUE(lr1);
PH::UP wh1(dw.readPostingList(*lr1));
- EXPECT_TRUE(wh1.get() != NULL);
+ ASSERT_TRUE(wh1);
TermFieldMatchData f0;
TermFieldMatchDataArray a;
a.add(&f0);
SB::UP sbap(wh1->createIterator(lr1->counts, a));
sbap->initFullRange();
- EXPECT_EQUAL("{1000000:}", toString(f0.getIterator()));
+ EXPECT_EQ(vespalib::string("{1000000:}"), toString(f0.getIterator()));
EXPECT_TRUE(sbap->seek(10));
sbap->unpack(10);
- EXPECT_EQUAL("{7:2}", toString(f0.getIterator()));
+ EXPECT_EQ(vespalib::string("{7:2}"), toString(f0.getIterator()));
}
{
uint32_t id1(schema.getIndexFieldId("f2"));
LR::UP lr1(dw.lookup(id1, "ax"));
- EXPECT_TRUE(lr1.get() != NULL);
+ ASSERT_TRUE(lr1);
PH::UP wh1(dw.readPostingList(*lr1));
- EXPECT_TRUE(wh1.get() != NULL);
+ ASSERT_TRUE(wh1);
TermFieldMatchData f2;
TermFieldMatchDataArray a;
a.add(&f2);
SB::UP sbap(wh1->createIterator(lr1->counts, a));
sbap->initFullRange();
- EXPECT_EQUAL("{1000000:}", toString(f2.getIterator()));
+ EXPECT_EQ(vespalib::string("{1000000:}"), toString(f2.getIterator()));
EXPECT_TRUE(sbap->seek(10));
sbap->unpack(10);
if (f2HasElements) {
- EXPECT_EQUAL("{3:0[e=0,l=3],0[e=1,l=1]}",
- toString(f2.getIterator(), true));
+ EXPECT_EQ(vespalib::string("{3:0[e=0,l=3],0[e=1,l=1]}"),
+ toString(f2.getIterator(), true));
} else {
- EXPECT_EQUAL("{3:0[e=0,l=3]}",
- toString(f2.getIterator(), true));
+ EXPECT_EQ(vespalib::string("{3:0[e=0,l=3]}"),
+ toString(f2.getIterator(), true));
}
}
{
- uint32_t id1(schema.getIndexFieldId("f3"));;
+ uint32_t id1(schema.getIndexFieldId("f3"));
LR::UP lr1(dw.lookup(id1, "wx"));
- EXPECT_TRUE(lr1.get() != NULL);
+ ASSERT_TRUE(lr1);
PH::UP wh1(dw.readPostingList(*lr1));
- EXPECT_TRUE(wh1.get() != NULL);
+ ASSERT_TRUE(wh1);
TermFieldMatchData f3;
TermFieldMatchDataArray a;
a.add(&f3);
SB::UP sbap(wh1->createIterator(lr1->counts, a));
sbap->initFullRange();
- EXPECT_EQUAL("{1000000:}", toString(f3.getIterator()));
+ EXPECT_EQ(vespalib::string("{1000000:}"), toString(f3.getIterator()));
EXPECT_TRUE(sbap->seek(10));
sbap->unpack(10);
if (f3HasWeights) {
- EXPECT_EQUAL("{2:0[e=0,w=4,l=2]}",
- toString(f3.getIterator(), true, true));
+ EXPECT_EQ(vespalib::string("{2:0[e=0,w=4,l=2]}"),
+ toString(f3.getIterator(), true, true));
} else {
- EXPECT_EQUAL("{2:0[e=0,w=1,l=2]}",
- toString(f3.getIterator(), true, true));
+ EXPECT_EQ(vespalib::string("{2:0[e=0,w=1,l=2]}"),
+ toString(f3.getIterator(), true, true));
}
}
{
uint32_t id1(schema.getIndexFieldId("f3"));;
LR::UP lr1(dw.lookup(id1, "zz"));
- EXPECT_TRUE(lr1.get() != NULL);
+ ASSERT_TRUE(lr1);
PH::UP wh1(dw.readPostingList(*lr1));
- EXPECT_TRUE(wh1.get() != NULL);
+ ASSERT_TRUE(wh1);
TermFieldMatchData f3;
TermFieldMatchDataArray a;
a.add(&f3);
SB::UP sbap(wh1->createIterator(lr1->counts, a));
sbap->initFullRange();
- EXPECT_EQUAL("{1000000:}", toString(f3.getIterator()));
+ EXPECT_EQ(vespalib::string("{1000000:}"), toString(f3.getIterator()));
EXPECT_TRUE(sbap->seek(11));
sbap->unpack(11);
if (f3HasWeights) {
- EXPECT_EQUAL("{1:0[e=0,w=-27,l=1]}",
- toString(f3.getIterator(), true, true));
+ EXPECT_EQ(vespalib::string("{1:0[e=0,w=-27,l=1]}"),
+ toString(f3.getIterator(), true, true));
} else {
- EXPECT_EQUAL("{1:0[e=0,w=1,l=1]}",
- toString(f3.getIterator(), true, true));
+ EXPECT_EQ(vespalib::string("{1:0[e=0,w=1,l=1]}"),
+ toString(f3.getIterator(), true, true));
}
}
{
uint32_t id1(schema.getIndexFieldId("f3"));;
LR::UP lr1(dw.lookup(id1, "zz0"));
- EXPECT_TRUE(lr1.get() != NULL);
+ ASSERT_TRUE(lr1);
PH::UP wh1(dw.readPostingList(*lr1));
- EXPECT_TRUE(wh1.get() != NULL);
+ ASSERT_TRUE(wh1);
TermFieldMatchData f3;
TermFieldMatchDataArray a;
a.add(&f3);
SB::UP sbap(wh1->createIterator(lr1->counts, a));
sbap->initFullRange();
- EXPECT_EQUAL("{1000000:}", toString(f3.getIterator()));
+ EXPECT_EQ(vespalib::string("{1000000:}"), toString(f3.getIterator()));
EXPECT_TRUE(sbap->seek(12));
sbap->unpack(12);
if (f3HasWeights) {
- EXPECT_EQUAL("{1:0[e=0,w=0,l=1]}",
+ EXPECT_EQ(vespalib::string("{1:0[e=0,w=0,l=1]}"),
toString(f3.getIterator(), true, true));
} else {
- EXPECT_EQUAL("{1:0[e=0,w=1,l=1]}",
- toString(f3.getIterator(), true, true));
+ EXPECT_EQ(vespalib::string("{1:0[e=0,w=1,l=1]}"),
+ toString(f3.getIterator(), true, true));
}
}
}
void
-Test::requireThatFusionIsWorking(const vespalib::string &prefix, bool directio, bool readmmap)
+FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool directio, bool readmmap)
{
Schema schema;
Schema schema2;
@@ -335,106 +311,124 @@ Test::requireThatFusionIsWorking(const vespalib::string &prefix, bool directio,
tuneFileIndexing._write.setWantDirectIO();
tuneFileSearch._read.setWantDirectIO();
}
- if (readmmap)
+ if (readmmap) {
tuneFileSearch._read.setWantMemoryMap();
+ }
ib.open(numDocs, numWords, mock_field_length_inspector, tuneFileIndexing, fileHeaderContext);
fic.dump(ib);
ib.close();
vespalib::string tsName = dump2dir + "/.teststamp";
typedef search::FileKit FileKit;
- EXPECT_TRUE(FileKit::createStamp(tsName));
- EXPECT_TRUE(FileKit::hasStamp(tsName));
- EXPECT_TRUE(FileKit::removeStamp(tsName));
- EXPECT_FALSE(FileKit::hasStamp(tsName));
+ ASSERT_TRUE(FileKit::createStamp(tsName));
+ ASSERT_TRUE(FileKit::hasStamp(tsName));
+ ASSERT_TRUE(FileKit::removeStamp(tsName));
+ ASSERT_FALSE(FileKit::hasStamp(tsName));
vespalib::ThreadStackExecutor executor(4, 0x10000);
do {
DiskIndex dw2(prefix + "dump2");
- if (!EXPECT_TRUE(dw2.setup(tuneFileSearch)))
- break;
- TEST_DO(validateDiskIndex(dw2, true, true));
+ ASSERT_TRUE(dw2.setup(tuneFileSearch));
+ validateDiskIndex(dw2, true, true);
} while (0);
do {
std::vector<vespalib::string> sources;
SelectorArray selector(numDocs, 0);
sources.push_back(prefix + "dump2");
- if (!EXPECT_TRUE(Fusion::merge(schema, prefix + "dump3", sources, selector,
- dynamicKPosOcc,
- tuneFileIndexing,fileHeaderContext, executor)))
- return;
+ ASSERT_TRUE(Fusion::merge(schema, prefix + "dump3", sources, selector,
+ dynamicKPosOcc,
+ tuneFileIndexing,fileHeaderContext, executor));
} while (0);
do {
DiskIndex dw3(prefix + "dump3");
- if (!EXPECT_TRUE(dw3.setup(tuneFileSearch)))
- break;
- TEST_DO(validateDiskIndex(dw3, true, true));
+ ASSERT_TRUE(dw3.setup(tuneFileSearch));
+ validateDiskIndex(dw3, true, true);
} while (0);
do {
std::vector<vespalib::string> sources;
SelectorArray selector(numDocs, 0);
sources.push_back(prefix + "dump3");
- if (!EXPECT_TRUE(Fusion::merge(schema2, prefix + "dump4", sources, selector,
- dynamicKPosOcc,
- tuneFileIndexing, fileHeaderContext, executor)))
- return;
+ ASSERT_TRUE(Fusion::merge(schema2, prefix + "dump4", sources, selector,
+ dynamicKPosOcc,
+ tuneFileIndexing, fileHeaderContext, executor));
} while (0);
do {
DiskIndex dw4(prefix + "dump4");
- if (!EXPECT_TRUE(dw4.setup(tuneFileSearch)))
- break;
- TEST_DO(validateDiskIndex(dw4, true, false));
+ ASSERT_TRUE(dw4.setup(tuneFileSearch));
+ validateDiskIndex(dw4, true, false);
} while (0);
do {
std::vector<vespalib::string> sources;
SelectorArray selector(numDocs, 0);
sources.push_back(prefix + "dump3");
- if (!EXPECT_TRUE(Fusion::merge(schema3, prefix + "dump5", sources, selector,
- dynamicKPosOcc,
- tuneFileIndexing, fileHeaderContext, executor)))
- return;
+ ASSERT_TRUE(Fusion::merge(schema3, prefix + "dump5", sources, selector,
+ dynamicKPosOcc,
+ tuneFileIndexing, fileHeaderContext, executor));
} while (0);
do {
DiskIndex dw5(prefix + "dump5");
- if (!EXPECT_TRUE(dw5.setup(tuneFileSearch)))
- break;
- TEST_DO(validateDiskIndex(dw5, false, false));
+ ASSERT_TRUE(dw5.setup(tuneFileSearch));
+ validateDiskIndex(dw5, false, false);
} while (0);
do {
std::vector<vespalib::string> sources;
SelectorArray selector(numDocs, 0);
sources.push_back(prefix + "dump3");
- if (!EXPECT_TRUE(Fusion::merge(schema, prefix + "dump6", sources, selector,
- !dynamicKPosOcc,
- tuneFileIndexing, fileHeaderContext, executor)))
- return;
+ ASSERT_TRUE(Fusion::merge(schema, prefix + "dump6", sources, selector,
+ !dynamicKPosOcc,
+ tuneFileIndexing, fileHeaderContext, executor));
} while (0);
do {
DiskIndex dw6(prefix + "dump6");
- if (!EXPECT_TRUE(dw6.setup(tuneFileSearch)))
- break;
- TEST_DO(validateDiskIndex(dw6, true, true));
+ ASSERT_TRUE(dw6.setup(tuneFileSearch));
+ validateDiskIndex(dw6, true, true);
} while (0);
do {
std::vector<vespalib::string> sources;
SelectorArray selector(numDocs, 0);
sources.push_back(prefix + "dump2");
- if (!EXPECT_TRUE(Fusion::merge(schema, prefix + "dump3", sources, selector,
- dynamicKPosOcc,
- tuneFileIndexing, fileHeaderContext, executor)))
- return;
+ ASSERT_TRUE(Fusion::merge(schema, prefix + "dump3", sources, selector,
+ dynamicKPosOcc,
+ tuneFileIndexing, fileHeaderContext, executor));
} while (0);
do {
DiskIndex dw3(prefix + "dump3");
- if (!EXPECT_TRUE(dw3.setup(tuneFileSearch)))
- break;
- TEST_DO(validateDiskIndex(dw3, true, true));
+ ASSERT_TRUE(dw3.setup(tuneFileSearch));
+ validateDiskIndex(dw3, true, true);
} while (0);
}
-Test::Test()
- : _schema()
+void
+FusionTest::make_empty_index(const vespalib::string &dump_dir, const IFieldLengthInspector &field_length_inspector)
+{
+ FieldIndexCollection fic(_schema, field_length_inspector);
+ uint32_t numDocs = 1;
+ uint32_t numWords = 1;
+ IndexBuilder ib(_schema);
+ TuneFileIndexing tuneFileIndexing;
+ DummyFileHeaderContext fileHeaderContext;
+ ib.setPrefix(dump_dir);
+ ib.open(numDocs, numWords, field_length_inspector, tuneFileIndexing, fileHeaderContext);
+ fic.dump(ib);
+ ib.close();
+}
+
+void
+FusionTest::merge_empty_indexes(const vespalib::string &dump_dir, const std::vector<vespalib::string> &sources)
+{
+ vespalib::ThreadStackExecutor executor(4, 0x10000);
+ TuneFileIndexing tuneFileIndexing;
+ DummyFileHeaderContext fileHeaderContext;
+ SelectorArray selector(1, 0);
+ ASSERT_TRUE(Fusion::merge(_schema, dump_dir, sources, selector,
+ false,
+ tuneFileIndexing, fileHeaderContext, executor));
+}
+
+FusionTest::FusionTest()
+ : ::testing::Test(),
+ _schema()
{
_schema.addIndexField(Schema::IndexField("f0", DataType::STRING));
_schema.addIndexField(Schema::IndexField("f1", DataType::STRING));
@@ -442,25 +436,59 @@ Test::Test()
_schema.addIndexField(Schema::IndexField("f3", DataType::STRING, CollectionType::WEIGHTEDSET));
}
-int
-Test::Main()
+TEST_F(FusionTest, require_that_normal_fusion_is_working)
{
- TEST_INIT("fusion_test");
+ requireThatFusionIsWorking("", false, false);
+}
- if (_argc > 0) {
- DummyFileHeaderContext::setCreator(_argv[0]);
- }
+TEST_F(FusionTest, require_that_directio_fusion_is_working)
+{
+ requireThatFusionIsWorking("d", true, false);
+}
+
+TEST_F(FusionTest, require_that_mmap_fusion_is_working)
+{
+ requireThatFusionIsWorking("m", false, true);
+}
+
+TEST_F(FusionTest, require_that_directiommap_fusion_is_working)
+{
+ requireThatFusionIsWorking("dm", true, true);
+}
- TEST_DO(requireThatFusionIsWorking("", false, false));
- TEST_DO(requireThatFusionIsWorking("d", true, false));
- TEST_DO(requireThatFusionIsWorking("m", false, true));
- TEST_DO(requireThatFusionIsWorking("dm", true, true));
+namespace {
- TEST_DONE();
+void clean_field_length_testdirs()
+{
+ vespalib::rmdir("fldump2", true);
+ vespalib::rmdir("fldump3", true);
+ vespalib::rmdir("fldump4", true);
}
}
+TEST_F(FusionTest, require_that_average_field_length_is_preserved)
+{
+ clean_field_length_testdirs();
+ make_empty_index("fldump2", MockFieldLengthInspector());
+ make_empty_index("fldump3", MyMockFieldLengthInspector());
+ merge_empty_indexes("fldump4", {"fldump2", "fldump3"});
+ DiskIndex disk_index("fldump4");
+ ASSERT_TRUE(disk_index.setup(TuneFileSearch()));
+ EXPECT_EQ(3.5, disk_index.get_field_length_info("f0").get_average_field_length());
+ clean_field_length_testdirs();
}
-TEST_APPHOOK(search::diskindex::Test);
+}
+
+}
+
+int
+main(int argc, char* argv[])
+{
+ if (argc > 0) {
+ search::index::DummyFileHeaderContext::setCreator(argv[0]);
+ }
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/searchlib/src/tests/features/bm25/bm25_test.cpp b/searchlib/src/tests/features/bm25/bm25_test.cpp
index 84bafcfa0ed..eb2f46650a6 100644
--- a/searchlib/src/tests/features/bm25/bm25_test.cpp
+++ b/searchlib/src/tests/features/bm25/bm25_test.cpp
@@ -11,6 +11,7 @@
using namespace search::features;
using namespace search::fef;
+using namespace search::fef::objectstore;
using CollectionType = FieldInfo::CollectionType;
using StringVector = std::vector<vespalib::string>;
@@ -40,12 +41,13 @@ struct Bm25BlueprintTest : public ::testing::Test {
EXPECT_FALSE(blueprint->setup(index_env, params));
}
- void expect_setup_succeed(const StringVector& params) {
+ Blueprint::SP expect_setup_succeed(const StringVector& params) {
auto blueprint = make_blueprint();
test::DummyDependencyHandler deps(*blueprint);
EXPECT_TRUE(blueprint->setup(index_env, params));
EXPECT_EQ(0, deps.input.size());
EXPECT_EQ(StringVector({"score"}), deps.output);
+ return blueprint;
}
};
@@ -63,6 +65,18 @@ TEST_F(Bm25BlueprintTest, blueprint_setup_fails_when_parameter_list_is_not_valid
expect_setup_fail({"is", "ia"}); // wrong parameter number
}
+TEST_F(Bm25BlueprintTest, blueprint_setup_fails_when_k1_param_is_malformed)
+{
+ index_env.getProperties().add("bm25(is).k1", "malformed");
+ expect_setup_fail({"is"});
+}
+
+TEST_F(Bm25BlueprintTest, blueprint_setup_fails_when_b_param_is_malformed)
+{
+ index_env.getProperties().add("bm25(is).b", "malformed");
+ expect_setup_fail({"is"});
+}
+
TEST_F(Bm25BlueprintTest, blueprint_setup_succeeds_for_index_field)
{
expect_setup_succeed({"is"});
@@ -70,11 +84,40 @@ TEST_F(Bm25BlueprintTest, blueprint_setup_succeeds_for_index_field)
expect_setup_succeed({"iws"});
}
+TEST_F(Bm25BlueprintTest, blueprint_can_prepare_shared_state_with_average_field_length)
+{
+ auto blueprint = expect_setup_succeed({"is"});
+ test::QueryEnvironment query_env;
+ query_env.get_avg_field_lengths()["is"] = 10;
+ ObjectStore store;
+ blueprint->prepareSharedState(query_env, store);
+ EXPECT_DOUBLE_EQ(10, as_value<double>(*store.get("bm25.afl.is")));
+}
+
+struct Scorer {
+
+ double avg_field_length;
+ double k1_param;
+ double b_param;
+
+ Scorer() :
+ avg_field_length(10),
+ k1_param(1.2),
+ b_param(0.75)
+ {}
+
+ feature_t score(feature_t num_occs, feature_t field_length, double inverse_doc_freq) const {
+ return inverse_doc_freq * (num_occs * (1 + k1_param)) /
+ (num_occs + (k1_param * ((1 - b_param) + b_param * field_length / avg_field_length)));
+ }
+};
struct Bm25ExecutorTest : public ::testing::Test {
BlueprintFactory factory;
FtFeatureTest test;
test::MatchDataBuilder::UP match_data;
+ Scorer scorer;
+ static constexpr uint32_t total_doc_count = 100;
Bm25ExecutorTest()
: factory(),
@@ -84,19 +127,24 @@ struct Bm25ExecutorTest : public ::testing::Test {
setup_search_features(factory);
test.getIndexEnv().getBuilder().addField(FieldType::INDEX, CollectionType::SINGLE, "foo");
test.getIndexEnv().getBuilder().addField(FieldType::INDEX, CollectionType::SINGLE, "bar");
- test.getQueryEnv().getBuilder().addIndexNode({"foo"});
- test.getQueryEnv().getBuilder().addIndexNode({"foo"});
- test.getQueryEnv().getBuilder().addIndexNode({"bar"});
-
+ add_query_term("foo", 25);
+ add_query_term("foo", 35);
+ add_query_term("bar", 45);
+ test.getQueryEnv().getBuilder().set_avg_field_length("foo", 10);
+ }
+ void add_query_term(const vespalib::string& field_name, uint32_t matching_doc_count) {
+ auto* term = test.getQueryEnv().getBuilder().addIndexNode({field_name});
+ term->field(0).setDocFreq(matching_doc_count, total_doc_count);
+ }
+ void setup() {
EXPECT_TRUE(test.setup());
-
match_data = test.createMatchDataBuilder();
clear_term(0, 0);
clear_term(1, 0);
clear_term(2, 1);
}
bool execute(feature_t exp_score) {
- return test.execute(exp_score);
+ return test.execute(exp_score, 0.000001);
}
void clear_term(uint32_t term_id, uint32_t field_id) {
auto* tfmd = match_data->getTermFieldMatchData(term_id, field_id);
@@ -111,35 +159,81 @@ struct Bm25ExecutorTest : public ::testing::Test {
tfmd->setFieldLength(field_length);
}
- feature_t get_score(feature_t num_occs, feature_t field_length) const {
- return (num_occs * 2.2) / (num_occs + (1.2 * (0.25 + 0.75 * field_length / 10.0)));
+ double idf(uint32_t matching_doc_count) const {
+ return Bm25Executor::calculate_inverse_document_frequency(matching_doc_count, total_doc_count);
+ }
+
+ feature_t score(feature_t num_occs, feature_t field_length, double inverse_doc_freq) const {
+ return scorer.score(num_occs, field_length, inverse_doc_freq);
}
};
TEST_F(Bm25ExecutorTest, score_is_calculated_for_a_single_term)
{
+ setup();
prepare_term(0, 0, 3, 20);
- EXPECT_TRUE(execute(get_score(3.0, 20)));
+ EXPECT_TRUE(execute(score(3.0, 20, idf(25))));
}
TEST_F(Bm25ExecutorTest, score_is_calculated_for_multiple_terms)
{
+ setup();
prepare_term(0, 0, 3, 20);
prepare_term(1, 0, 7, 5);
- EXPECT_TRUE(execute(get_score(3.0, 20) + get_score(7.0, 5.0)));
+ EXPECT_TRUE(execute(score(3.0, 20, idf(25)) + score(7.0, 5.0, idf(35))));
}
TEST_F(Bm25ExecutorTest, term_that_does_not_match_document_is_ignored)
{
+ setup();
prepare_term(0, 0, 3, 20);
- prepare_term(1, 0, 7, 5, 123);
- EXPECT_TRUE(execute(get_score(3.0, 20)));
+ uint32_t unmatched_doc_id = 123;
+ prepare_term(1, 0, 7, 5, unmatched_doc_id);
+ EXPECT_TRUE(execute(score(3.0, 20, idf(25))));
}
TEST_F(Bm25ExecutorTest, term_searching_another_field_is_ignored)
{
+ setup();
prepare_term(2, 1, 3, 20);
EXPECT_TRUE(execute(0.0));
}
+TEST_F(Bm25ExecutorTest, uses_average_field_length_from_shared_state_if_found)
+{
+ test.getQueryEnv().getObjectStore().add("bm25.afl.foo", std::make_unique<AnyWrapper<double>>(15));
+ setup();
+ prepare_term(0, 0, 3, 20);
+ scorer.avg_field_length = 15;
+ EXPECT_TRUE(execute(score(3.0, 20, idf(25))));
+}
+
+TEST_F(Bm25ExecutorTest, calculates_inverse_document_frequency)
+{
+ EXPECT_DOUBLE_EQ(std::log(1 + (99 + 0.5) / (1 + 0.5)),
+ Bm25Executor::calculate_inverse_document_frequency(1, 100));
+ EXPECT_DOUBLE_EQ(std::log(1 + (60 + 0.5) / (40 + 0.5)),
+ Bm25Executor::calculate_inverse_document_frequency(40, 100));
+ EXPECT_DOUBLE_EQ(std::log(1 + (0.5) / (100 + 0.5)),
+ Bm25Executor::calculate_inverse_document_frequency(100, 100));
+}
+
+TEST_F(Bm25ExecutorTest, k1_param_can_be_overriden)
+{
+ test.getIndexEnv().getProperties().add("bm25(foo).k1", "2.5");
+ setup();
+ prepare_term(0, 0, 3, 20);
+ scorer.k1_param = 2.5;
+ EXPECT_TRUE(execute(score(3.0, 20, idf(25))));
+}
+
+TEST_F(Bm25ExecutorTest, b_param_can_be_overriden)
+{
+ test.getIndexEnv().getProperties().add("bm25(foo).b", "0.9");
+ setup();
+ prepare_term(0, 0, 3, 20);
+ scorer.b_param = 0.9;
+ EXPECT_TRUE(execute(score(3.0, 20, idf(25))));
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
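For reference, the Scorer fixture above spells out the per-term score the executor is expected to produce, and idf(n) matches Bm25Executor::calculate_inverse_document_frequency. With n occurrences of the term in the field, field length |f|, average field length afl, n_q matching documents and N total documents:

$$\text{score} = \mathrm{idf}\cdot\frac{n\,(1+k_1)}{n + k_1\left((1-b) + b\,\frac{|f|}{afl}\right)},\qquad \mathrm{idf} = \ln\!\left(1 + \frac{N - n_q + 0.5}{n_q + 0.5}\right)$$

with defaults k1 = 1.2 and b = 0.75 unless overridden through the bm25(field).k1 / bm25(field).b rank properties exercised in the tests above.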
diff --git a/searchlib/src/tests/features/imported_dot_product/imported_dot_product_test.cpp b/searchlib/src/tests/features/imported_dot_product/imported_dot_product_test.cpp
index bbd6d8e61b3..6ef680e0505 100644
--- a/searchlib/src/tests/features/imported_dot_product/imported_dot_product_test.cpp
+++ b/searchlib/src/tests/features/imported_dot_product/imported_dot_product_test.cpp
@@ -108,9 +108,9 @@ struct ArrayFixture : FixtureBase {
}
template <typename ExpectedType>
- void check_prepare_state_output(const vespalib::tensor::Tensor & tensor, vespalib::tensor::SerializeFormat format, const ExpectedType & expected) {
+ void check_prepare_state_output(const vespalib::tensor::Tensor & tensor, const ExpectedType & expected) {
vespalib::nbostream os;
- vespalib::tensor::TypedBinaryFormat::serialize(os, tensor, format);
+ vespalib::tensor::TypedBinaryFormat::serialize(os, tensor);
vespalib::string input_vector(os.c_str(), os.size());
check_prepare_state_output(".tensor", input_vector, expected);
}
@@ -197,26 +197,26 @@ TEST_F("prepareSharedState emits double vector for double imported attribute", A
TEST_F("prepareSharedState handles tensor as float from tensor for double imported attribute", ArrayFixture) {
f.setup_float_mappings(BasicType::DOUBLE);
- vespalib::tensor::DenseTensor tensor(vespalib::eval::ValueType::from_spec("tensor(x[3])"), {10.1, 20.2, 30.3});
- f.template check_prepare_state_output(tensor, vespalib::tensor::SerializeFormat::FLOAT, dotproduct::ArrayParam<double>({10.1, 20.2, 30.3}));
+ vespalib::tensor::DenseTensor tensor(vespalib::eval::ValueType::from_spec("tensor<float>(x[3])"), {10.1, 20.2, 30.3});
+ f.template check_prepare_state_output(tensor, dotproduct::ArrayParam<double>({10.1, 20.2, 30.3}));
}
TEST_F("prepareSharedState handles tensor as double from tensor for double imported attribute", ArrayFixture) {
f.setup_float_mappings(BasicType::DOUBLE);
vespalib::tensor::DenseTensor tensor(vespalib::eval::ValueType::from_spec("tensor(x[3])"), {10.1, 20.2, 30.3});
- f.template check_prepare_state_output(tensor, vespalib::tensor::SerializeFormat::DOUBLE, dotproduct::ArrayParam<double>({10.1, 20.2, 30.3}));
+ f.template check_prepare_state_output(tensor, dotproduct::ArrayParam<double>({10.1, 20.2, 30.3}));
}
TEST_F("prepareSharedState handles tensor as float from tensor for float imported attribute", ArrayFixture) {
f.setup_float_mappings(BasicType::FLOAT);
- vespalib::tensor::DenseTensor tensor(vespalib::eval::ValueType::from_spec("tensor(x[3])"), {10.1, 20.2, 30.3});
- f.template check_prepare_state_output(tensor, vespalib::tensor::SerializeFormat::FLOAT, dotproduct::ArrayParam<float>({10.1, 20.2, 30.3}));
+ vespalib::tensor::DenseTensor tensor(vespalib::eval::ValueType::from_spec("tensor<float>(x[3])"), {10.1, 20.2, 30.3});
+ f.template check_prepare_state_output(tensor, dotproduct::ArrayParam<float>({10.1, 20.2, 30.3}));
}
TEST_F("prepareSharedState handles tensor as double from tensor for float imported attribute", ArrayFixture) {
f.setup_float_mappings(BasicType::FLOAT);
vespalib::tensor::DenseTensor tensor(vespalib::eval::ValueType::from_spec("tensor(x[3])"), {10.1, 20.2, 30.3});
- f.template check_prepare_state_output(tensor, vespalib::tensor::SerializeFormat::DOUBLE, dotproduct::ArrayParam<float>({10.1, 20.2, 30.3}));
+ f.template check_prepare_state_output(tensor, dotproduct::ArrayParam<float>({10.1, 20.2, 30.3}));
}
TEST_F("Dense i32/i64 array dot product can be evaluated with pre-parsed object parameter", ArrayFixture) {
diff --git a/searchlib/src/tests/features/prod_features.cpp b/searchlib/src/tests/features/prod_features.cpp
index 626a470cb5c..70250b05bf1 100644
--- a/searchlib/src/tests/features/prod_features.cpp
+++ b/searchlib/src/tests/features/prod_features.cpp
@@ -1968,8 +1968,10 @@ Test::testTerm()
.addField(FieldType::INDEX, CollectionType::SINGLE, "idx2") // field 1
.addField(FieldType::ATTRIBUTE, CollectionType::SINGLE, "attr"); // field 2
ft.getQueryEnv().getBuilder().addAllFields().setUniqueId(0);
- ft.getQueryEnv().getBuilder().addAllFields().setUniqueId(1).setWeight(search::query::Weight(200)).lookupField(0)->setDocFreq(0.5);
- ft.getQueryEnv().getBuilder().addAttributeNode("attr")->setUniqueId(2).setWeight(search::query::Weight(400)).lookupField(2)->setDocFreq(0.25);
+ ft.getQueryEnv().getBuilder().addAllFields().setUniqueId(1)
+ .setWeight(search::query::Weight(200)).lookupField(0)->setDocFreq(50, 100);
+ ft.getQueryEnv().getBuilder().addAttributeNode("attr")->setUniqueId(2)
+ .setWeight(search::query::Weight(400)).lookupField(2)->setDocFreq(25, 100);
// setup connectedness between term 1 and term 0
ft.getQueryEnv().getProperties().add("vespa.term.1.connexity", "0");
ft.getQueryEnv().getProperties().add("vespa.term.1.connexity", "0.7");
diff --git a/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp b/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
index 9ed94c02287..3a0c334fbba 100644
--- a/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
+++ b/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp
@@ -50,7 +50,7 @@ void testSetup(State &state) {
{
int i = 1;
for (SFR iter(state.term); iter.valid(); iter.next()) {
- iter.get().setDocFreq(0.25 * i++);
+ iter.get().setDocFreq(25 * i++, 100);
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp b/searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp
index 3e260e8453a..75a0e4b8c71 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp
@@ -25,24 +25,28 @@ SingleBoolAttribute::~SingleBoolAttribute()
getGenerationHolder().clearHoldLists();
}
-bool
-SingleBoolAttribute::addDoc(DocId & doc) {
- size_t needSize = getNumDocs() + 1;
- bool incGen = false;
- if (_bv.capacity() < needSize) {
+void
+SingleBoolAttribute::ensureRoom(DocId docIdLimit) {
+ if (_bv.capacity() < docIdLimit) {
const GrowStrategy & gs = this->getConfig().getGrowStrategy();
- uint32_t newSize = needSize + (needSize * gs.getDocsGrowFactor()) + gs.getDocsGrowDelta();
- incGen = _bv.reserve(newSize);
+ uint32_t newSize = docIdLimit + (docIdLimit * gs.getDocsGrowFactor()) + gs.getDocsGrowDelta();
+ bool incGen = _bv.reserve(newSize);
+ if (incGen) {
+ incGeneration();
+ }
}
- incGen = _bv.extend(needSize) || incGen;
+}
+
+bool
+SingleBoolAttribute::addDoc(DocId & doc) {
+ DocId docIdLimit = getNumDocs()+1;
+ ensureRoom(docIdLimit);
+ bool incGen = _bv.extend(docIdLimit);
+ assert( ! incGen);
incNumDocs();
doc = getNumDocs() - 1;
updateUncommittedDocIdLimit(doc);
- if (incGen) {
- incGeneration();
- } else {
- removeAllOldGenerations();
- }
+ removeAllOldGenerations();
return true;
}
@@ -85,7 +89,7 @@ SingleBoolAttribute::onCommit() {
void
SingleBoolAttribute::onAddDocs(DocId docIdLimit) {
- _bv.reserve(docIdLimit);
+ ensureRoom(docIdLimit);
}
void
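ensureRoom pre-grows the backing bitvector according to the configured GrowStrategy, which is why addDoc can now assert that the subsequent extend never triggers a generation change. A rough sketch of the size computation; the grow factor 0.2 and delta 1 are made-up illustration values, not Vespa defaults.

#include <cassert>
#include <cstdint>

// Mirrors the arithmetic in SingleBoolAttribute::ensureRoom:
// newSize = docIdLimit + docIdLimit * growFactor + growDelta.
uint32_t grown_size(uint32_t docIdLimit, double growFactor, uint32_t growDelta) {
    return docIdLimit + static_cast<uint32_t>(docIdLimit * growFactor) + growDelta;
}

int main() {
    assert(grown_size(1000, 0.2, 1) == 1201);  // 1000 + 200 headroom + 1
    return 0;
}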
diff --git a/searchlib/src/vespa/searchlib/attribute/singleboolattribute.h b/searchlib/src/vespa/searchlib/attribute/singleboolattribute.h
index 789948838cb..20ec0b6d077 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleboolattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/singleboolattribute.h
@@ -101,6 +101,7 @@ protected:
return false;
}
private:
+ void ensureRoom(DocId docIdLimit);
int8_t getFromEnum(EnumHandle) const override {
return 0;
}
diff --git a/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp b/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp
index dcee48aed1a..3de0c1f1320 100644
--- a/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp
+++ b/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp
@@ -9,14 +9,22 @@ using vespalib::GenerationHeldBase;
using vespalib::GenerationHeldAlloc;
using vespalib::GenerationHolder;
-//////////////////////////////////////////////////////////////////////
-// Parameterized Constructor
-//////////////////////////////////////////////////////////////////////
+namespace {
+
+size_t computeCapacity(size_t capacity, size_t allocatedBytes) {
+ size_t possibleCapacity = (allocatedBytes * 8) - 1;
+ assert(possibleCapacity >= capacity);
+ return possibleCapacity;
+}
+
+}
+
AllocatedBitVector::AllocatedBitVector(Index numberOfElements) :
BitVector(),
_capacityBits(numberOfElements),
_alloc(allocatePaddedAndAligned(numberOfElements))
{
+ _capacityBits = computeCapacity(_capacityBits, _alloc.size());
init(_alloc.get(), 0, numberOfElements);
clear();
}
@@ -33,6 +41,7 @@ AllocatedBitVector::AllocatedBitVector(Index numberOfElements, Index capacityBit
_capacityBits(capacityBits),
_alloc(allocatePaddedAndAligned(0, numberOfElements, capacityBits))
{
+ _capacityBits = computeCapacity(_capacityBits, _alloc.size());
init(_alloc.get(), 0, numberOfElements);
clear();
if (rhsSize > 0) {
@@ -58,6 +67,7 @@ AllocatedBitVector::AllocatedBitVector(const BitVector & rhs, Index capacity_) :
_capacityBits(capacity_),
_alloc(allocatePaddedAndAligned(0, rhs.size(), capacity_))
{
+ _capacityBits = computeCapacity(_capacityBits, _alloc.size());
memcpy(_alloc.get(), rhs.getStart(), rhs.sizeBytes());
init(_alloc.get(), 0, rhs.size());
}
@@ -65,7 +75,7 @@ AllocatedBitVector::AllocatedBitVector(const BitVector & rhs, Index capacity_) :
//////////////////////////////////////////////////////////////////////
// Destructor
//////////////////////////////////////////////////////////////////////
-AllocatedBitVector::~AllocatedBitVector() { }
+AllocatedBitVector::~AllocatedBitVector() = default;
void
AllocatedBitVector::cleanup()
@@ -78,8 +88,8 @@ AllocatedBitVector::cleanup()
void
AllocatedBitVector::resize(Index newLength)
{
- _capacityBits = newLength;
- _alloc = allocatePaddedAndAligned(_capacityBits);
+ _alloc = allocatePaddedAndAligned(newLength);
+ _capacityBits = computeCapacity(newLength, _alloc.size());
init(_alloc.get(), 0, newLength);
clear();
}
diff --git a/searchlib/src/vespa/searchlib/common/allocatedbitvector.h b/searchlib/src/vespa/searchlib/common/allocatedbitvector.h
index 6de255c48c9..c52c52354a1 100644
--- a/searchlib/src/vespa/searchlib/common/allocatedbitvector.h
+++ b/searchlib/src/vespa/searchlib/common/allocatedbitvector.h
@@ -32,7 +32,7 @@ public:
AllocatedBitVector(Index numberOfElements, Alloc buffer, size_t offset);
/**
- * Creates a new bitvector with room for numberOfElements bits.
+ * Creates a new bitvector of numberOfElements bits, with a capacity of at least capacity bits.
 * Copies what it can from the original vector. This is used for extending the vector.
*/
AllocatedBitVector(Index numberOfElements, Index capacity, const void * rhsBuf, size_t rhsSize);
diff --git a/searchlib/src/vespa/searchlib/common/bitvector.cpp b/searchlib/src/vespa/searchlib/common/bitvector.cpp
index 2c45bc8f69a..7296842f2c1 100644
--- a/searchlib/src/vespa/searchlib/common/bitvector.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvector.cpp
@@ -52,7 +52,7 @@ BitVector::allocatePaddedAndAligned(Index start, Index end, Index capacity)
assert(alloc.size()/sizeof(Word) >= words);
// Clear padding
size_t usedBytes = numBytes(end - start);
- memset(static_cast<char *>(alloc.get()) + usedBytes, 0, sz - usedBytes);
+ memset(static_cast<char *>(alloc.get()) + usedBytes, 0, alloc.size() - usedBytes);
return alloc;
}
diff --git a/searchlib/src/vespa/searchlib/common/growablebitvector.h b/searchlib/src/vespa/searchlib/common/growablebitvector.h
index ff5d878063d..e13e3b42e3d 100644
--- a/searchlib/src/vespa/searchlib/common/growablebitvector.h
+++ b/searchlib/src/vespa/searchlib/common/growablebitvector.h
@@ -9,8 +9,7 @@ namespace search {
class GrowableBitVector : public AllocatedBitVector
{
public:
- GrowableBitVector(Index newSize, Index newCapacity,
- GenerationHolder &generationHolder);
+ GrowableBitVector(Index newSize, Index newCapacity, GenerationHolder &generationHolder);
/** Will return true if a buffer is held */
bool reserve(Index newCapacity);
diff --git a/searchlib/src/vespa/searchlib/common/partialbitvector.cpp b/searchlib/src/vespa/searchlib/common/partialbitvector.cpp
index e57396c0dfa..e1dc144541e 100644
--- a/searchlib/src/vespa/searchlib/common/partialbitvector.cpp
+++ b/searchlib/src/vespa/searchlib/common/partialbitvector.cpp
@@ -29,6 +29,6 @@ PartialBitVector::PartialBitVector(const BitVector & org, Index start, Index end
setBit(size());
}
-PartialBitVector::~PartialBitVector() { }
+PartialBitVector::~PartialBitVector() = default;
} // namespace search
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
index f17f9459ff9..46fcdafc585 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
@@ -91,7 +91,7 @@ void
LogDataStore::updateSerialNum()
{
LockGuard guard(_updateLock);
- if (getPrevActive(guard) != NULL) {
+ if (getPrevActive(guard) != nullptr) {
if (getActive(guard).getSerialNum() <
getPrevActive(guard)->getLastPersistedSerialNum()) {
getActive(guard).setSerialNum(getPrevActive(guard)->getLastPersistedSerialNum());
@@ -234,7 +234,7 @@ LogDataStore::lastSyncToken() const
uint64_t lastSerial(getActive(guard).getLastPersistedSerialNum());
if (lastSerial == 0) {
const FileChunk * prev = getPrevActive(guard);
- if (prev != NULL) {
+ if (prev != nullptr) {
lastSerial = prev->getLastPersistedSerialNum();
}
}
@@ -274,7 +274,7 @@ LogDataStore::remove(uint64_t serialNum, uint32_t lid)
if (lm.valid()) {
_fileChunks[lm.getFileId()]->remove(lid, lm.size());
}
- lm = getActive(guard).append(serialNum, lid, NULL, 0);
+ lm = getActive(guard).append(serialNum, lid, nullptr, 0);
assert( lm.empty() );
_lidInfo[lid] = lm;
}
@@ -327,7 +327,7 @@ LogDataStore::getMaxCompactGain() const
void
LogDataStore::flush(uint64_t syncToken)
{
- WriteableFileChunk * active = NULL;
+ WriteableFileChunk * active = nullptr;
std::unique_ptr<FileChunkHolder> activeHolder;
assert(syncToken == _initFlushSyncToken);
{
@@ -604,7 +604,7 @@ LogDataStore::getDiskBloat() const
/// Do not count the holes in the last file as bloat
if (i != _active) {
const FileChunk * chunk = _fileChunks[i.getId()].get();
- if (chunk != NULL) {
+ if (chunk != nullptr) {
sz += chunk->getDiskBloat();
}
}
@@ -916,7 +916,7 @@ LogDataStore::scanDir(const vespalib::string &dir, const vespalib::string &suffi
if (file.size() > suffix.size() &&
file.find(suffix.c_str()) == file.size() - suffix.size()) {
vespalib::string base(file.substr(0, file.find(suffix.c_str())));
- char *err(NULL);
+ char *err(nullptr);
errno = 0;
NameId baseId(strtoul(base.c_str(), &err, 10));
if ((errno == 0) && (err[0] == '\0')) {
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.h b/searchlib/src/vespa/searchlib/docstore/logdatastore.h
index c4d1e8bbdb4..4ab747d115d 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.h
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.h
@@ -89,7 +89,7 @@ public:
const search::common::FileHeaderContext &fileHeaderContext,
transactionlog::SyncProxy &tlSyncer, const IBucketizer::SP & bucketizer, bool readOnly = false);
- ~LogDataStore();
+ ~LogDataStore() override;
// Implements IDataStore API
ssize_t read(uint32_t lid, vespalib::DataBuffer & buffer) const override;
@@ -220,7 +220,7 @@ private:
const FileChunk * getPrevActive(const LockGuard & guard) const {
assert(guard.locks(_updateLock));
(void) guard;
- return ( !_prevActive.isActive() ) ? _fileChunks[_prevActive.getId()].get() : NULL;
+ return ( !_prevActive.isActive() ) ? _fileChunks[_prevActive.getId()].get() : nullptr;
}
void setActive(const LockGuard & guard, FileId fileId) {
assert(guard.locks(_updateLock));
diff --git a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp
index 91f5c37b817..50517cf09e2 100644
--- a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp
@@ -3,7 +3,7 @@
#include "writeablefilechunk.h"
#include "data_store_file_chunk_stats.h"
#include "summaryexceptions.h"
-#include <vespa/vespalib/util/closuretask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/util/array.hpp>
#include <vespa/vespalib/data/fileheader.h>
#include <vespa/vespalib/data/databuffer.h>
@@ -14,8 +14,7 @@
#include <vespa/log/log.h>
LOG_SETUP(".search.writeablefilechunk");
-using vespalib::makeTask;
-using vespalib::makeClosure;
+using vespalib::makeLambdaTask;
using vespalib::FileHeader;
using vespalib::make_string;
using vespalib::LockGuard;
@@ -45,7 +44,6 @@ class PendingChunk
uint64_t _dataOffset;
uint32_t _dataLen;
public:
- typedef std::shared_ptr<PendingChunk> SP;
PendingChunk(uint64_t lastSerial, uint64_t dataOffset, uint32_t dataLen);
~PendingChunk();
vespalib::nbostream & getSerializedIdx() { return _idx; }
@@ -59,7 +57,6 @@ public:
class ProcessedChunk
{
public:
- typedef std::unique_ptr<ProcessedChunk> UP;
ProcessedChunk(uint32_t chunkId, uint32_t alignment)
: _chunkId(chunkId),
_payLoad(0),
@@ -77,7 +74,7 @@ private:
};
WriteableFileChunk::
-WriteableFileChunk(vespalib::ThreadExecutor &executor,
+WriteableFileChunk(vespalib::Executor &executor,
FileId fileId, NameId nameId,
const vespalib::string &baseName,
SerialNum initialSerialNum,
@@ -155,6 +152,7 @@ WriteableFileChunk::openIdx() {
}
return file;
}
+
WriteableFileChunk::~WriteableFileChunk()
{
if (!frozen()) {
@@ -177,7 +175,7 @@ WriteableFileChunk::updateLidMap(const LockGuard &guard, ISetLid &ds, uint64_t s
{
size_t sz = FileChunk::updateLidMap(guard, ds, serialNum, docIdLimit);
_nextChunkId = _chunkInfo.size();
- _active.reset( new Chunk(_nextChunkId++, Chunk::Config(_config.getMaxChunkBytes())));
+ _active = std::make_unique<Chunk>(_nextChunkId++, Chunk::Config(_config.getMaxChunkBytes()));
_serialNum = getLastPersistedSerialNum();
_firstChunkIdToBeWritten = _active->getId();
setDiskFootprint(0);
@@ -188,7 +186,7 @@ WriteableFileChunk::updateLidMap(const LockGuard &guard, ISetLid &ds, uint64_t s
void
WriteableFileChunk::restart(uint32_t nextChunkId)
{
- _executor.execute(makeTask(makeClosure(this, &WriteableFileChunk::fileWriter, nextChunkId)));
+ _executor.execute(makeLambdaTask([this, nextChunkId] {fileWriter(nextChunkId);}));
}
namespace {
@@ -219,7 +217,7 @@ WriteableFileChunk::read(LidInfoWithLidV::const_iterator begin, size_t count, IB
const LidInfoWithLid & li = *(begin + i);
uint32_t chunk = li.getChunkId();
if ((chunk >= _chunkInfo.size()) || !_chunkInfo[chunk].valid()) {
- ChunkMap::const_iterator found = _chunkMap.find(chunk);
+ auto found = _chunkMap.find(chunk);
vespalib::ConstBufferRef buffer;
if (found != _chunkMap.end()) {
buffer = found->second->getLid(li.getLid());
@@ -234,8 +232,8 @@ WriteableFileChunk::read(LidInfoWithLidV::const_iterator begin, size_t count, IB
}
}
for (auto & it : chunksOnFile) {
- LidInfoWithLidV::const_iterator first = find_first(begin, it.first);
- LidInfoWithLidV::const_iterator last = seek_past(first, begin + count, it.first);
+ auto first = find_first(begin, it.first);
+ auto last = seek_past(first, begin + count, it.first);
FileChunk::read(first, last - first, it.second, visitor);
}
} else {
@@ -250,7 +248,7 @@ WriteableFileChunk::read(uint32_t lid, SubChunkId chunkId, vespalib::DataBuffer
if (!frozen()) {
LockGuard guard(_lock);
if ((chunkId >= _chunkInfo.size()) || !_chunkInfo[chunkId].valid()) {
- ChunkMap::const_iterator found = _chunkMap.find(chunkId);
+ auto found = _chunkMap.find(chunkId);
if (found != _chunkMap.end()) {
return found->second->read(lid, buffer);
} else {
@@ -268,13 +266,13 @@ WriteableFileChunk::read(uint32_t lid, SubChunkId chunkId, vespalib::DataBuffer
void
WriteableFileChunk::internalFlush(uint32_t chunkId, uint64_t serialNum)
{
- Chunk * active(NULL);
+ Chunk * active(nullptr);
{
LockGuard guard(_lock);
active = _chunkMap[chunkId].get();
}
- ProcessedChunk::UP tmp(new ProcessedChunk(chunkId, _alignment));
+ auto tmp = std::make_unique<ProcessedChunk>(chunkId, _alignment);
if (_alignment > 1) {
tmp->getBuf().ensureFree(active->getMaxPackSize(_config.getCompression()) + _alignment - 1);
}
@@ -293,12 +291,12 @@ WriteableFileChunk::internalFlush(uint32_t chunkId, uint64_t serialNum)
}
void
-WriteableFileChunk::enque(ProcessedChunk::UP tmp)
+WriteableFileChunk::enque(ProcessedChunkUP tmp)
{
LOG(debug, "enqueing %p", tmp.get());
MonitorGuard guard(_writeMonitor);
_writeQ.push_back(std::move(tmp));
- if (_writeTaskIsRunning == false) {
+ if ( ! _writeTaskIsRunning) {
_writeTaskIsRunning = true;
uint32_t nextChunkId = _firstChunkIdToBeWritten;
guard.signal();
@@ -359,12 +357,12 @@ WriteableFileChunk::insertChunks(ProcessedChunkMap & orderedChunks, ProcessedChu
{
(void) nextChunkId;
for (auto &chunk : newChunks) {
- if (chunk.get() != 0) {
+ if (chunk) {
assert(chunk->getChunkId() >= nextChunkId);
assert(orderedChunks.find(chunk->getChunkId()) == orderedChunks.end());
orderedChunks[chunk->getChunkId()] = std::move(chunk);
} else {
- orderedChunks[std::numeric_limits<uint32_t>::max()] = ProcessedChunk::UP();
+ orderedChunks[std::numeric_limits<uint32_t>::max()] = ProcessedChunkUP();
}
}
}
@@ -375,7 +373,7 @@ WriteableFileChunk::fetchNextChain(ProcessedChunkMap & orderedChunks, const uint
ProcessedChunkQ chunks;
while (!orderedChunks.empty() &&
((orderedChunks.begin()->first == (firstChunkId+chunks.size())) ||
- (orderedChunks.begin()->second.get() == NULL)))
+ !orderedChunks.begin()->second))
{
chunks.push_back(std::move(orderedChunks.begin()->second));
orderedChunks.erase(orderedChunks.begin());
@@ -393,8 +391,7 @@ WriteableFileChunk::computeChunkMeta(const LockGuard & guard,
const ChunkMeta cmeta(offset, tmp.getPayLoad(), active.getLastSerial(), active.count());
assert((size_t(tmp.getBuf().getData())%_alignment) == 0);
assert((dataLen%_alignment) == 0);
- PendingChunk::SP pcsp;
- pcsp.reset(new PendingChunk(active.getLastSerial(), offset, dataLen));
+ auto pcsp = std::make_shared<PendingChunk>(active.getLastSerial(), offset, dataLen);
PendingChunk &pc(*pcsp.get());
nbostream &os(pc.getSerializedIdx());
cmeta.serialize(os);
@@ -424,8 +421,7 @@ WriteableFileChunk::computeChunkMeta(ProcessedChunkQ & chunks, size_t startPos,
LockGuard guard(_lock);
if (!_pendingChunks.empty()) {
- const PendingChunk::SP pcsp(_pendingChunks.back());
- const PendingChunk &pc(*pcsp.get());
+ const PendingChunk & pc = *_pendingChunks.back();
assert(pc.getLastSerial() >= lastSerial);
lastSerial = pc.getLastSerial();
}
@@ -454,7 +450,7 @@ WriteableFileChunk::writeData(const ProcessedChunkQ & chunks, size_t sz)
{
vespalib::DataBuffer buf(0ul, _alignment);
buf.ensureFree(sz);
- for (const ProcessedChunk::UP & chunk : chunks) {
+ for (const auto & chunk : chunks) {
buf.writeBytes(chunk->getBuf().getData(), chunk->getBuf().getDataLen());
}
@@ -540,15 +536,15 @@ WriteableFileChunk::freeze()
{
if (!frozen()) {
waitForAllChunksFlushedToDisk();
- enque(ProcessedChunk::UP());
- _executor.sync();
+ enque(ProcessedChunkUP());
{
MonitorGuard guard(_writeMonitor);
while (_writeTaskIsRunning) {
guard.wait(10);
}
- assert(_writeQ.empty());
}
+ assert(_writeQ.empty());
+ assert(_chunkMap.empty());
{
MonitorGuard guard(_lock);
setDiskFootprint(getDiskFootprint(guard));
@@ -632,7 +628,7 @@ int32_t WriteableFileChunk::flushLastIfNonEmpty(bool force)
chunkId = _active->getId();
_chunkMap[chunkId] = std::move(_active);
assert(_nextChunkId < LidInfo::getChunkIdLimit());
- _active.reset(new Chunk(_nextChunkId++, Chunk::Config(_config.getMaxChunkBytes())));
+ _active = std::make_unique<Chunk>(_nextChunkId++, Chunk::Config(_config.getMaxChunkBytes()));
}
return chunkId;
}
@@ -643,10 +639,7 @@ WriteableFileChunk::flush(bool block, uint64_t syncToken)
int32_t chunkId = flushLastIfNonEmpty(syncToken > _serialNum);
if (chunkId >= 0) {
setSerialNum(syncToken);
- _executor.execute(makeTask(makeClosure(this,
- &WriteableFileChunk::internalFlush,
- static_cast<uint32_t>(chunkId),
- _serialNum)));
+ _executor.execute(makeLambdaTask([this, chunkId, serialNum=_serialNum] { internalFlush(chunkId, serialNum); }));
} else {
if (block) {
MonitorGuard guard(_lock);
@@ -656,7 +649,6 @@ WriteableFileChunk::flush(bool block, uint64_t syncToken)
}
}
if (block) {
- _executor.sync();
waitForChunkFlushedToDisk(chunkId);
}
}
@@ -693,10 +685,7 @@ WriteableFileChunk::waitForAllChunksFlushedToDisk() const
}
LidInfo
-WriteableFileChunk::append(uint64_t serialNum,
- uint32_t lid,
- const void * buffer,
- size_t len)
+WriteableFileChunk::append(uint64_t serialNum, uint32_t lid, const void * buffer, size_t len)
{
assert( !frozen() );
if ( ! _active->hasRoom(len)) {
@@ -818,8 +807,7 @@ WriteableFileChunk::needFlushPendingChunks(const MonitorGuard & guard, uint64_t
assert(guard.monitors(_lock));
if (_pendingChunks.empty())
return false;
- const PendingChunk::SP pcsp(_pendingChunks.front());
- const PendingChunk &pc(*pcsp.get());
+ const PendingChunk & pc = *_pendingChunks.front();
if (pc.getLastSerial() > serialNum)
return false;
bool datWritten = datFileLen >= pc.getDataOffset() + pc.getDataLen();
@@ -868,8 +856,7 @@ WriteableFileChunk::unconditionallyFlushPendingChunks(const vespalib::LockGuard
for (;;) {
if (!needFlushPendingChunks(guard, serialNum, datFileLen))
break;
- PendingChunk::SP pcsp;
- pcsp.swap(_pendingChunks.front());
+ std::shared_ptr<PendingChunk> pcsp = std::move(_pendingChunks.front());
_pendingChunks.pop_front();
const PendingChunk &pc(*pcsp.get());
assert(_pendingIdx >= pc.getIdxLen());
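The makeTask(makeClosure(...)) calls in this file are replaced by makeLambdaTask, which wraps a capturing lambda in an executor task. A simplified, self-contained illustration of that shape; the Task interface and makeLambdaTask below are stand-ins, not the vespalib definitions.

#include <cstdint>
#include <iostream>
#include <memory>
#include <utility>

struct Task {
    virtual ~Task() = default;
    virtual void run() = 0;
};

// Stand-in for vespalib::makeLambdaTask: adapt any callable into a Task.
template <typename F>
std::unique_ptr<Task> makeLambdaTask(F&& f) {
    struct LambdaTask : Task {
        F fn;
        explicit LambdaTask(F&& fn_in) : fn(std::forward<F>(fn_in)) {}
        void run() override { fn(); }
    };
    return std::make_unique<LambdaTask>(std::forward<F>(f));
}

int main() {
    uint32_t nextChunkId = 42;
    // The commit captures the needed state directly instead of binding
    // a member function pointer through makeClosure.
    auto task = makeLambdaTask([nextChunkId] {
        std::cout << "fileWriter(" << nextChunkId << ")\n";
    });
    task->run();
    return 0;
}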
diff --git a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
index 4a2ebfc42df..2c300bc9035 100644
--- a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
+++ b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
@@ -3,7 +3,7 @@
#pragma once
#include "filechunk.h"
-#include <vespa/vespalib/util/threadexecutor.h>
+#include <vespa/vespalib/util/executor.h>
#include <vespa/searchlib/transactionlog/syncproxy.h>
#include <vespa/fastos/file.h>
#include <map>
@@ -42,7 +42,7 @@ public:
public:
typedef std::unique_ptr<WriteableFileChunk> UP;
- WriteableFileChunk(vespalib::ThreadExecutor & executor, FileId fileId, NameId nameId,
+ WriteableFileChunk(vespalib::Executor & executor, FileId fileId, NameId nameId,
const vespalib::string & baseName, uint64_t initialSerialNum,
uint32_t docIdLimit, const Config & config,
const TuneFileSummary &tune, const common::FileHeaderContext &fileHeaderContext,
@@ -128,7 +128,7 @@ private:
bool _writeTaskIsRunning;
vespalib::Monitor _writeMonitor;
ProcessedChunkQ _writeQ;
- vespalib::ThreadExecutor & _executor;
+ vespalib::Executor & _executor;
ProcessedChunkMap _orderedChunks;
BucketDensityComputer _bucketMap;
};
diff --git a/searchlib/src/vespa/searchlib/features/attributefeature.cpp b/searchlib/src/vespa/searchlib/features/attributefeature.cpp
index 1e18a2d3af8..4fff5ae5b3f 100644
--- a/searchlib/src/vespa/searchlib/features/attributefeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/attributefeature.cpp
@@ -324,7 +324,7 @@ AttributeBlueprint::createInstance() const
}
#define CREATE_AND_RETURN_IF_SINGLE_NUMERIC(a, T) \
- if (dynamic_cast<const SingleValueNumericAttribute<T> *>(a) != NULL) { \
+ if (dynamic_cast<const SingleValueNumericAttribute<T> *>(a) != nullptr) { \
return stash.create<SingleAttributeExecutor<SingleValueNumericAttribute<T>>>(*static_cast<const SingleValueNumericAttribute<T> *>(a)); \
}
@@ -333,7 +333,7 @@ namespace {
fef::FeatureExecutor &
createAttributeExecutor(const IAttributeVector *attribute, const vespalib::string &attrName, const vespalib::string &extraParam, vespalib::Stash &stash)
{
- if (attribute == NULL) {
+ if (attribute == nullptr) {
LOG(warning, "The attribute vector '%s' was not found in the attribute manager, returning default values.",
attrName.c_str());
std::vector<feature_t> values(4, 0.0f);
@@ -382,7 +382,7 @@ createTensorAttributeExecutor(const IAttributeVector *attribute, const vespalib:
const ValueType &tensorType,
vespalib::Stash &stash)
{
- if (attribute == NULL) {
+ if (attribute == nullptr) {
LOG(warning, "The attribute vector '%s' was not found in the attribute manager."
" Returning empty tensor.", attrName.c_str());
return ConstantTensorExecutor::createEmpty(tensorType, stash);
diff --git a/searchlib/src/vespa/searchlib/features/attributefeature.h b/searchlib/src/vespa/searchlib/features/attributefeature.h
index 47597823f08..e1e3ddf7300 100644
--- a/searchlib/src/vespa/searchlib/features/attributefeature.h
+++ b/searchlib/src/vespa/searchlib/features/attributefeature.h
@@ -22,7 +22,7 @@ private:
public:
AttributeBlueprint();
- ~AttributeBlueprint();
+ ~AttributeBlueprint() override;
void visitDumpFeatures(const fef::IIndexEnvironment & env, fef::IDumpFeatureVisitor & visitor) const override;
fef::Blueprint::UP createInstance() const override;
diff --git a/searchlib/src/vespa/searchlib/features/bm25_feature.cpp b/searchlib/src/vespa/searchlib/features/bm25_feature.cpp
index a9430db09c3..e89655a75bb 100644
--- a/searchlib/src/vespa/searchlib/features/bm25_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/bm25_feature.cpp
@@ -3,39 +3,58 @@
#include "bm25_feature.h"
#include <vespa/searchlib/fef/itermdata.h>
#include <vespa/searchlib/fef/itermfielddata.h>
+#include <vespa/searchlib/fef/objectstore.h>
+#include <vespa/searchlib/fef/properties.h>
+#include <cmath>
#include <memory>
+#include <stdexcept>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".features.bm25_feature");
namespace search::features {
+using fef::AnyWrapper;
using fef::Blueprint;
using fef::FeatureExecutor;
using fef::FieldInfo;
using fef::ITermData;
using fef::ITermFieldData;
using fef::MatchDataDetails;
+using fef::objectstore::as_value;
Bm25Executor::Bm25Executor(const fef::FieldInfo& field,
- const fef::IQueryEnvironment& env)
+ const fef::IQueryEnvironment& env,
+ double avg_field_length,
+ double k1_param,
+ double b_param)
: FeatureExecutor(),
_terms(),
- _avg_field_length(10),
- _k1_param(1.2),
- _b_param(0.75)
+ _avg_field_length(avg_field_length),
+ _k1_param(k1_param),
+ _b_param(b_param)
{
- // TODO: Don't use hard coded avg_field_length
- // TODO: Add support for setting k1 and b
for (size_t i = 0; i < env.getNumTerms(); ++i) {
const ITermData* term = env.getTerm(i);
for (size_t j = 0; j < term->numFields(); ++j) {
const ITermFieldData& term_field = term->field(j);
if (field.id() == term_field.getFieldId()) {
- // TODO: Add proper calculation of IDF
- _terms.emplace_back(term_field.getHandle(MatchDataDetails::Cheap), 1.0);
+ // TODO: Add support for using significance instead of default idf if specified in the query
+ _terms.emplace_back(term_field.getHandle(MatchDataDetails::Cheap),
+ calculate_inverse_document_frequency(term_field.get_matching_doc_count(),
+ term_field.get_total_doc_count()));
}
}
}
}
+double
+Bm25Executor::calculate_inverse_document_frequency(uint32_t matching_doc_count, uint32_t total_doc_count)
+{
+ return std::log(1 + (static_cast<double>(total_doc_count - matching_doc_count + 0.5) /
+ static_cast<double>(matching_doc_count + 0.5)));
+}
+
void
Bm25Executor::handle_bind_match_data(const fef::MatchData& match_data)
{
@@ -62,10 +81,31 @@ Bm25Executor::execute(uint32_t doc_id)
outputs().set_number(0, score);
}
+bool
+Bm25Blueprint::lookup_param(const fef::Properties& props, const vespalib::string& param, double& result) const
+{
+ vespalib::string key = getBaseName() + "(" + _field->name() + ")." + param;
+ auto value = props.lookup(key);
+ if (value.found()) {
+ try {
+ result = std::stod(value.get());
+ } catch (const std::invalid_argument& ex) {
+ LOG(warning, "Not able to convert rank property '%s': '%s' to a double value",
+ key.c_str(), value.get().c_str());
+ return false;
+ }
+ }
+ return true;
+}
+
+double constexpr default_k1_param = 1.2;
+double constexpr default_b_param = 0.75;
Bm25Blueprint::Bm25Blueprint()
: Blueprint("bm25"),
- _field(nullptr)
+ _field(nullptr),
+ _k1_param(default_k1_param),
+ _b_param(default_b_param)
{
}
@@ -89,14 +129,44 @@ Bm25Blueprint::setup(const fef::IIndexEnvironment& env, const fef::ParameterList
const auto& field_name = params[0].getValue();
_field = env.getFieldByName(field_name);
+ if (!lookup_param(env.getProperties(), "k1", _k1_param)) {
+ return false;
+ }
+ if (!lookup_param(env.getProperties(), "b", _b_param)) {
+ return false;
+ }
+
describeOutput("score", "The bm25 score for all terms searching in the given index field");
return (_field != nullptr);
}
+namespace {
+
+vespalib::string
+make_avg_field_length_key(const vespalib::string& base_name, const vespalib::string& field_name)
+{
+ return base_name + ".afl." + field_name;
+}
+
+}
+
+void
+Bm25Blueprint::prepareSharedState(const fef::IQueryEnvironment& env, fef::IObjectStore& store) const
+{
+ vespalib::string key = make_avg_field_length_key(getBaseName(), _field->name());
+ if (store.get(key) == nullptr) {
+ store.add(key, std::make_unique<AnyWrapper<double>>(env.get_average_field_length(_field->name())));
+ }
+}
+
fef::FeatureExecutor&
Bm25Blueprint::createExecutor(const fef::IQueryEnvironment& env, vespalib::Stash& stash) const
{
- return stash.create<Bm25Executor>(*_field, env);
+ const auto* lookup_result = env.getObjectStore().get(make_avg_field_length_key(getBaseName(), _field->name()));
+ double avg_field_length = lookup_result != nullptr ?
+ as_value<double>(*lookup_result) :
+ env.get_average_field_length(_field->name());
+ return stash.create<Bm25Executor>(*_field, env, avg_field_length, _k1_param, _b_param);
}
}
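Together, prepareSharedState and createExecutor form a compute-once pattern: the average field length is fetched once, before query evaluation fans out to worker threads, and cached in the object store under "<feature>.afl.<field>"; each per-thread executor then reads the cached value and only falls back to the query environment if the key is missing. A minimal sketch of the pattern; the map-based store below is a stand-in, not the real fef::IObjectStore API:

#include <cstdio>
#include <map>
#include <memory>
#include <string>

struct Anything { virtual ~Anything() = default; };

template <typename T>
struct AnyWrapper : Anything {
    explicit AnyWrapper(T v) : value(std::move(v)) {}
    T value;
};

using Store = std::map<std::string, std::unique_ptr<Anything>>;

// Called once, single-threaded: cache the (possibly expensive) value under a stable key.
void prepare_shared_state(Store& store, const std::string& key, double avg_field_length) {
    if (store.find(key) == store.end()) {
        store.emplace(key, std::make_unique<AnyWrapper<double>>(avg_field_length));
    }
}

// Called per thread: read the cached value, fall back to a default if it is missing.
double lookup_or(const Store& store, const std::string& key, double fallback) {
    auto itr = store.find(key);
    if (itr == store.end()) {
        return fallback;
    }
    return dynamic_cast<const AnyWrapper<double>&>(*itr->second).value;
}

int main() {
    Store store;
    prepare_shared_state(store, "bm25.afl.content", 12.5);  // "content" is a hypothetical field
    std::printf("avg field length = %.1f\n", lookup_or(store, "bm25.afl.content", 1.0));
    return 0;
}
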
diff --git a/searchlib/src/vespa/searchlib/features/bm25_feature.h b/searchlib/src/vespa/searchlib/features/bm25_feature.h
index 457cfea4c87..533c7487a2f 100644
--- a/searchlib/src/vespa/searchlib/features/bm25_feature.h
+++ b/searchlib/src/vespa/searchlib/features/bm25_feature.h
@@ -30,7 +30,12 @@ private:
public:
Bm25Executor(const fef::FieldInfo& field,
- const fef::IQueryEnvironment& env);
+ const fef::IQueryEnvironment& env,
+ double avg_field_length,
+ double k1_param,
+ double b_param);
+
+ double static calculate_inverse_document_frequency(uint32_t matching_doc_count, uint32_t total_doc_count);
void handle_bind_match_data(const fef::MatchData& match_data) override;
void execute(uint32_t docId) override;
@@ -43,6 +48,10 @@ public:
class Bm25Blueprint : public fef::Blueprint {
private:
const fef::FieldInfo* _field;
+ double _k1_param;
+ double _b_param;
+
+ bool lookup_param(const fef::Properties& props, const vespalib::string& param, double& result) const;
public:
Bm25Blueprint();
@@ -53,6 +62,7 @@ public:
return fef::ParameterDescriptions().desc().indexField(fef::ParameterCollection::ANY);
}
bool setup(const fef::IIndexEnvironment& env, const fef::ParameterList& params) override;
+ void prepareSharedState(const fef::IQueryEnvironment& env, fef::IObjectStore& store) const override;
fef::FeatureExecutor& createExecutor(const fef::IQueryEnvironment& env, vespalib::Stash& stash) const override;
};
diff --git a/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp b/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp
index 1f51ee5cef6..1560d043be2 100644
--- a/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp
@@ -43,16 +43,32 @@ VectorBase<DimensionVType, DimensionHType, ComponentType, HashMapComparator>::sy
return *this;
}
-template VectorBase<int64_t, int64_t, double> & VectorBase<int64_t, int64_t, double>::syncMap();
+template class VectorBase<int64_t, int64_t, double>;
+template class VectorBase<uint32_t, uint32_t, double>;
+
+template class IntegerVectorT<int64_t>;
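The template class lines above are explicit instantiation definitions. They pair with the extern template declarations added to dotproductfeature.h later in this patch, so the vector templates are compiled once in this translation unit instead of in every file that includes the header. A minimal two-file sketch of the idiom, with illustrative names:

// widget.h
#pragma once
template <typename T>
struct Widget {
    T value;
    T twice() const { return value + value; }
};
extern template struct Widget<int>;   // declaration: instantiated elsewhere

// widget.cpp
#include "widget.h"
template struct Widget<int>;          // the one explicit instantiation definition

// main.cpp
#include "widget.h"
int main() { return Widget<int>{21}.twice() == 42 ? 0 : 1; }
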
template <typename Vector, typename Buffer>
-DotProductExecutorByCopy<Vector, Buffer>::DotProductExecutorByCopy(const IAttributeVector * attribute, Vector queryVector) :
+DotProductExecutorByCopy<Vector, Buffer>::DotProductExecutorByCopy(const IAttributeVector * attribute, const Vector & queryVector) :
FeatureExecutor(),
_attribute(attribute),
- _queryVector(std::move(queryVector)),
- _end(_queryVector.syncMap().getDimMap().end()),
- _buffer()
+ _queryVector(queryVector),
+ _end(_queryVector.getDimMap().end()),
+ _buffer(),
+ _backing()
+{
+ _buffer.allocate(_attribute->getMaxValueCount());
+}
+
+template <typename Vector, typename Buffer>
+DotProductExecutorByCopy<Vector, Buffer>::DotProductExecutorByCopy(const IAttributeVector * attribute, std::unique_ptr<Vector> queryVector) :
+ FeatureExecutor(),
+ _attribute(attribute),
+ _queryVector(*queryVector),
+ _end(_queryVector.getDimMap().end()),
+ _buffer(),
+ _backing(std::move(queryVector))
{
_buffer.allocate(_attribute->getMaxValueCount());
}
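The two constructors above introduce the ownership pattern used throughout this change: the executor always reads the query vector through a const reference, and the _backing unique_ptr is populated only when the executor must keep the vector alive itself, i.e. when the vector was parsed on the fly rather than fetched from the object store. A stripped-down sketch with illustrative types:

#include <memory>
#include <vector>

class Holder {
private:
    const std::vector<int>& _query;              // always valid for the holder's lifetime
    std::unique_ptr<std::vector<int>> _backing;  // set only when the holder owns the vector
public:
    // Borrowing constructor: the vector outlives the holder (e.g. it lives in a shared store).
    explicit Holder(const std::vector<int>& shared)
        : _query(shared), _backing() {}
    // Owning constructor: bind the reference first, then take ownership.
    explicit Holder(std::unique_ptr<std::vector<int>> owned)
        : _query(*owned), _backing(std::move(owned)) {}
    int first_or_zero() const { return _query.empty() ? 0 : _query.front(); }
};

int main() {
    auto owned = std::make_unique<std::vector<int>>(std::vector<int>{1, 2, 3});
    Holder holder(std::move(owned));
    return holder.first_or_zero() == 1 ? 0 : 1;
}
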
@@ -79,10 +95,10 @@ StringVector::StringVector() = default;
StringVector::~StringVector() = default;
template <typename BaseType>
-DotProductExecutorBase<BaseType>::DotProductExecutorBase(V queryVector)
+DotProductExecutorBase<BaseType>::DotProductExecutorBase(const V & queryVector)
: FeatureExecutor(),
- _queryVector(std::move(queryVector)),
- _end(_queryVector.syncMap().getDimMap().end())
+ _queryVector(queryVector),
+ _end(_queryVector.getDimMap().end())
{
}
@@ -104,9 +120,18 @@ void DotProductExecutorBase<BaseType>::execute(uint32_t docId) {
}
template <typename A>
-DotProductExecutor<A>::DotProductExecutor(const A * attribute, V queryVector) :
- DotProductExecutorBase<typename A::BaseType>(std::move(queryVector)),
- _attribute(attribute)
+DotProductExecutor<A>::DotProductExecutor(const A * attribute, const V & queryVector) :
+ DotProductExecutorBase<typename A::BaseType>(queryVector),
+ _attribute(attribute),
+ _backing()
+{
+}
+
+template <typename A>
+DotProductExecutor<A>::DotProductExecutor(const A * attribute, std::unique_ptr<V> queryVector) :
+ DotProductExecutorBase<typename A::BaseType>(*queryVector),
+ _attribute(attribute),
+ _backing(std::move(queryVector))
{
}
@@ -127,19 +152,32 @@ public:
using V = VectorBase<EnumHandle, EnumHandle, feature_t>;
private:
const IWeightedIndexVector * _attribute;
- V _queryVector;
+ const V & _queryVector;
const typename V::HashMap::const_iterator _end;
+ std::unique_ptr<V> _backing;
public:
- DotProductExecutorByEnum(const IWeightedIndexVector * attribute, V queryVector);
+ DotProductExecutorByEnum(const IWeightedIndexVector * attribute, const V & queryVector);
+ DotProductExecutorByEnum(const IWeightedIndexVector * attribute, std::unique_ptr<V> queryVector);
~DotProductExecutorByEnum() override;
void execute(uint32_t docId) override;
};
-DotProductExecutorByEnum::DotProductExecutorByEnum(const IWeightedIndexVector * attribute, V queryVector)
+DotProductExecutorByEnum::DotProductExecutorByEnum(const IWeightedIndexVector * attribute, const V & queryVector)
+ : FeatureExecutor(),
+ _attribute(attribute),
+ _queryVector(queryVector),
+ _end(_queryVector.getDimMap().end()),
+ _backing()
+{
+}
+
+
+DotProductExecutorByEnum::DotProductExecutorByEnum(const IWeightedIndexVector * attribute, std::unique_ptr<V> queryVector)
: FeatureExecutor(),
_attribute(attribute),
- _queryVector(std::move(queryVector)),
- _end(_queryVector.syncMap().getDimMap().end())
+ _queryVector(*queryVector),
+ _end(_queryVector.getDimMap().end()),
+ _backing(std::move(queryVector))
{
}
@@ -351,51 +389,6 @@ size_t SparseDotProductByContentFillExecutor<BaseType>::getAttributeValues(uint3
}
-DotProductBlueprint::DotProductBlueprint() :
- Blueprint("dotProduct"),
- _defaultAttribute(),
- _queryVector()
-{ }
-
-DotProductBlueprint::~DotProductBlueprint() = default;
-
-vespalib::string
-DotProductBlueprint::getAttribute(const IQueryEnvironment & env) const
-{
- Property prop = env.getProperties().lookup(getBaseName(), _defaultAttribute + ".override.name");
- if (prop.found() && !prop.get().empty()) {
- return prop.get();
- }
- return _defaultAttribute;
-}
-
-void
-DotProductBlueprint::visitDumpFeatures(const IIndexEnvironment &, IDumpFeatureVisitor &) const
-{
-}
-
-bool
-DotProductBlueprint::setup(const IIndexEnvironment & env, const ParameterList & params)
-{
- _defaultAttribute = params[0].getValue();
- _queryVector = params[1].getValue();
- describeOutput("scalar", "The result after calculating the dot product of the vector represented by the weighted set "
- "and the vector sent down with the query");
- env.hintAttributeAccess(_defaultAttribute);
- return true;
-}
-
-ParameterDescriptions
-DotProductBlueprint::getDescriptions() const {
- return ParameterDescriptions().desc().attribute(ParameterDataTypeSet::normalTypeSet(), ParameterCollection::ANY).string();
-}
-
-Blueprint::UP
-DotProductBlueprint::createInstance() const
-{
- return std::make_unique<DotProductBlueprint>();
-}
-
namespace {
template <typename T, typename AsT = T>
@@ -503,7 +496,7 @@ createForDirectArrayImpl(const IAttributeVector * attribute,
if (supportsGetRawValues<A,VT>(*iattr)) {
using ExactA = MultiValueNumericAttribute<A, VT>;
- const ExactA * exactA = dynamic_cast<const ExactA *>(iattr);
+ auto * exactA = dynamic_cast<const ExactA *>(iattr);
if (exactA != nullptr) {
return stash.create<dotproduct::array::DotProductExecutor<ExactA>>(exactA, values);
}
@@ -577,6 +570,35 @@ createForDirectArray(const IAttributeVector * attribute,
return createForDirectArrayImpl<A>(attribute, arguments.values, arguments.indexes, stash);
}
+template <typename A, typename V>
+FeatureExecutor &
+createForDirectWSetImpl(const IAttributeVector * attribute, V && vector, vespalib::Stash & stash)
+{
+ using namespace dotproduct::wset;
+ using T = typename A::BaseType;
+ const A * iattr = dynamic_cast<const A *>(attribute);
+ using VT = multivalue::WeightedValue<T>;
+ using ExactA = MultiValueNumericAttribute<A, VT>;
+ if (!attribute->isImported() && (iattr != nullptr) && supportsGetRawValues<A, VT>(*iattr)) {
+ auto * exactA = dynamic_cast<const ExactA *>(iattr);
+ if (exactA != nullptr) {
+ return stash.create<DotProductExecutor<ExactA>>(exactA, std::forward<V>(vector));
+ }
+ return stash.create<DotProductExecutor<A>>(iattr, std::forward<V>(vector));
+ }
+ return stash.create<DotProductExecutorByCopy<IntegerVectorT<T>, WeightedIntegerContent>>(attribute, std::forward<V>(vector));
+}
+
+template <typename T>
+FeatureExecutor &
+createForDirectIntegerWSet(const IAttributeVector * attribute, const dotproduct::wset::IntegerVectorT<T> & vector, vespalib::Stash & stash)
+{
+ using namespace dotproduct::wset;
+ return vector.empty()
+ ? stash.create<SingleZeroValueExecutor>()
+ : createForDirectWSetImpl<IntegerAttributeTemplate<T>>(attribute, vector, stash);
+}
+
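createForDirectWSetImpl now takes a forwarding reference (V&&), so the same helper accepts either a const reference to a vector cached in the object store or a unique_ptr to a freshly parsed one, and std::forward hands it unchanged to the matching executor constructor. A small standalone sketch of that dispatch, with illustrative types:

#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Executor {
    // Chosen when the caller only borrows a cached vector.
    explicit Executor(const std::vector<int>&) { std::cout << "borrowing cached vector\n"; }
    // Chosen when the caller hands over ownership of a freshly parsed vector.
    explicit Executor(std::unique_ptr<std::vector<int>>) { std::cout << "taking ownership\n"; }
};

template <typename V>
Executor make_executor(V&& vector) {
    // std::forward preserves the argument's type and value category,
    // so overload resolution picks the right Executor constructor.
    return Executor(std::forward<V>(vector));
}

int main() {
    std::vector<int> cached{1, 2, 3};
    make_executor(cached);                                      // const& overload
    make_executor(std::make_unique<std::vector<int>>(cached));  // unique_ptr overload
    return 0;
}
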
FeatureExecutor &
createFromObject(const IAttributeVector * attribute, const fef::Anything & object, vespalib::Stash &stash)
{
@@ -609,6 +631,35 @@ createFromObject(const IAttributeVector * attribute, const fef::Anything & objec
break;
}
}
+ } else if (attribute->getCollectionType() == attribute::CollectionType::WSET) {
+ using namespace dotproduct::wset;
+ if (attribute->hasEnum()) {
+ const auto & vector = dynamic_cast<const EnumVector &>(object);
+ if (vector.empty()) {
+ return stash.create<SingleZeroValueExecutor>();
+ }
+ const auto * getEnumHandles = dynamic_cast<const IWeightedIndexVector *>(attribute);
+ if (supportsGetEnumHandles(getEnumHandles)) {
+ return stash.create<DotProductExecutorByEnum>(getEnumHandles, vector);
+ }
+ return stash.create<DotProductExecutorByCopy<EnumVector, WeightedEnumContent>>(attribute, vector);
+ } else {
+ if (attribute->isStringType()) {
+ const auto & vector = dynamic_cast<const StringVector &>(object);
+ if (vector.empty()) {
+ return stash.create<SingleZeroValueExecutor>();
+ }
+ return stash.create<DotProductExecutorByCopy<StringVector, WeightedConstCharContent>>(attribute, vector);
+ } else if (attribute->isIntegerType()) {
+ if (attribute->getBasicType() == BasicType::INT32) {
+ return createForDirectIntegerWSet<int32_t>(attribute, dynamic_cast<const IntegerVectorT<int32_t> &>(object), stash);
+ } else if (attribute->getBasicType() == BasicType::INT64) {
+ return createForDirectIntegerWSet<int64_t>(attribute, dynamic_cast<const IntegerVectorT<int64_t> &>(object), stash);
+ } else if (attribute->getBasicType() == BasicType::INT8) {
+ return createForDirectIntegerWSet<int8_t>(attribute, dynamic_cast<const IntegerVectorT<int8_t> &>(object), stash);
+ }
+ }
+ }
}
// TODO: Add support for creating executor for weighted set string / integer attribute
// where the query vector is represented as an object instead of a string.
@@ -654,60 +705,43 @@ createTypedArrayExecutor(const IAttributeVector * attribute, const Property & pr
return nullptr;
}
-template <typename A, typename V>
-FeatureExecutor *
-createForDirectWSetImpl(const IAttributeVector * attribute, V vector, vespalib::Stash & stash)
-{
- using namespace dotproduct::wset;
- using T = typename A::BaseType;
- const A * iattr = dynamic_cast<const A *>(attribute);
- using VT = multivalue::WeightedValue<T>;
- using ExactA = MultiValueNumericAttribute<A, VT>;
- if (!attribute->isImported() && (iattr != nullptr) && supportsGetRawValues<A, VT>(*iattr)) {
- const ExactA * exactA = dynamic_cast<const ExactA *>(iattr);
- if (exactA != nullptr) {
- return &stash.create<DotProductExecutor<ExactA>>(exactA, std::move(vector));
- }
- return &stash.create<DotProductExecutor<A>>(iattr, std::move(vector));
- }
- return &stash.create<DotProductExecutorByCopy<IntegerVectorT<T>, WeightedIntegerContent>>(attribute, std::move(vector));
-}
-
template <typename T>
-FeatureExecutor *
+FeatureExecutor &
createForDirectIntegerWSet(const IAttributeVector * attribute, const Property & prop, vespalib::Stash & stash)
{
using namespace dotproduct::wset;
- IntegerVectorT<T> vector;
- WeightedSetParser::parse(prop.get(), vector);
- return vector.empty()
- ? &stash.create<SingleZeroValueExecutor>()
+ auto vector = std::make_unique<IntegerVectorT<T>>();
+ WeightedSetParser::parse(prop.get(), *vector);
+ vector->syncMap();
+ return vector->empty()
+ ? stash.create<SingleZeroValueExecutor>()
: createForDirectWSetImpl<IntegerAttributeTemplate<T>>(attribute, std::move(vector), stash);
}
-
-FeatureExecutor *
+FeatureExecutor &
createTypedWsetExecutor(const IAttributeVector * attribute, const Property & prop, vespalib::Stash & stash) {
using namespace dotproduct::wset;
if (attribute->hasEnum()) {
- EnumVector vector(attribute);
- WeightedSetParser::parse(prop.get(), vector);
- if (vector.empty()) {
- return &stash.create<SingleZeroValueExecutor>();
+ auto vector = std::make_unique<EnumVector>(attribute);
+ WeightedSetParser::parse(prop.get(), *vector);
+ if (vector->empty()) {
+ return stash.create<SingleZeroValueExecutor>();
}
- const IWeightedIndexVector * getEnumHandles = dynamic_cast<const IWeightedIndexVector *>(attribute);
+ vector->syncMap();
+ auto * getEnumHandles = dynamic_cast<const IWeightedIndexVector *>(attribute);
if (supportsGetEnumHandles(getEnumHandles)) {
- return &stash.create<DotProductExecutorByEnum>(getEnumHandles, std::move(vector));
+ return stash.create<DotProductExecutorByEnum>(getEnumHandles, std::move(vector));
}
- return &stash.create<DotProductExecutorByCopy<EnumVector, WeightedEnumContent>>(attribute, std::move(vector));
+ return stash.create<DotProductExecutorByCopy<EnumVector, WeightedEnumContent>>(attribute, std::move(vector));
} else {
if (attribute->isStringType()) {
- StringVector vector;
- WeightedSetParser::parse(prop.get(), vector);
- if (vector.empty()) {
- return &stash.create<SingleZeroValueExecutor>();
+ auto vector = std::make_unique<StringVector>();
+ WeightedSetParser::parse(prop.get(), *vector);
+ if (vector->empty()) {
+ return stash.create<SingleZeroValueExecutor>();
}
- return &stash.create<DotProductExecutorByCopy<StringVector, WeightedConstCharContent>>(attribute, std::move(vector));
+ vector->syncMap();
+ return stash.create<DotProductExecutorByCopy<StringVector, WeightedConstCharContent>>(attribute, std::move(vector));
} else if (attribute->isIntegerType()) {
if (attribute->getBasicType() == BasicType::INT32) {
return createForDirectIntegerWSet<int32_t>(attribute, prop, stash);
@@ -718,7 +752,7 @@ createTypedWsetExecutor(const IAttributeVector * attribute, const Property & pro
}
}
}
- return nullptr;
+ return stash.create<SingleZeroValueExecutor>();
}
FeatureExecutor &
@@ -726,7 +760,7 @@ createFromString(const IAttributeVector * attribute, const Property & prop, vesp
{
FeatureExecutor * executor = nullptr;
if (attribute->getCollectionType() == attribute::CollectionType::WSET) {
- executor = createTypedWsetExecutor(attribute, prop, stash);
+ executor = &createTypedWsetExecutor(attribute, prop, stash);
} else if (attribute->getCollectionType() == attribute::CollectionType::ARRAY) {
executor = createTypedArrayExecutor(attribute, prop, stash);
}
@@ -830,18 +864,35 @@ createQueryVector(const IQueryEnvironment & env, const IAttributeVector * attrib
Property prop = env.getProperties().lookup(baseName, queryVector);
if (prop.found() && !prop.get().empty()) {
if (attribute->isStringType() && attribute->hasEnum()) {
- dotproduct::wset::EnumVector vector(attribute);
- WeightedSetParser::parse(prop.get(), vector);
+ auto vector = std::make_unique<dotproduct::wset::EnumVector>(attribute);
+ WeightedSetParser::parse(prop.get(), *vector);
+ vector->syncMap();
+ arguments = std::move(vector);
} else if (attribute->isIntegerType()) {
if (attribute->hasEnum()) {
- dotproduct::wset::EnumVector vector(attribute);
- WeightedSetParser::parse(prop.get(), vector);
+ auto vector = std::make_unique<dotproduct::wset::EnumVector>(attribute);
+ WeightedSetParser::parse(prop.get(), *vector);
+ vector->syncMap();
+ arguments = std::move(vector);
} else {
- dotproduct::wset::IntegerVector vector;
- WeightedSetParser::parse(prop.get(), vector);
+ if (attribute->getBasicType() == BasicType::INT32) {
+ auto vector = std::make_unique<dotproduct::wset::IntegerVectorT<int32_t>>();
+ WeightedSetParser::parse(prop.get(), *vector);
+ vector->syncMap();
+ arguments = std::move(vector);
+ } else if (attribute->getBasicType() == BasicType::INT64) {
+ auto vector = std::make_unique<dotproduct::wset::IntegerVectorT<int64_t>>();
+ WeightedSetParser::parse(prop.get(), *vector);
+ vector->syncMap();
+ arguments = std::move(vector);
+ } else if (attribute->getBasicType() == BasicType::INT8) {
+ auto vector = std::make_unique<dotproduct::wset::IntegerVectorT<int8_t>>();
+ WeightedSetParser::parse(prop.get(), *vector);
+ vector->syncMap();
+ arguments = std::move(vector);
+ }
}
}
- // TODO actually use the parsed output for wset operations!
}
}
return arguments;
@@ -849,19 +900,66 @@ createQueryVector(const IQueryEnvironment & env, const IAttributeVector * attrib
}
+DotProductBlueprint::DotProductBlueprint() :
+ Blueprint("dotProduct"),
+ _defaultAttribute(),
+ _queryVector(),
+ _attrKey(),
+ _queryVectorKey()
+{ }
+
+DotProductBlueprint::~DotProductBlueprint() = default;
+
+vespalib::string
+DotProductBlueprint::getAttribute(const IQueryEnvironment & env) const
+{
+ Property prop = env.getProperties().lookup(getBaseName(), _defaultAttribute + ".override.name");
+ if (prop.found() && !prop.get().empty()) {
+ return prop.get();
+ }
+ return _defaultAttribute;
+}
+
+void
+DotProductBlueprint::visitDumpFeatures(const IIndexEnvironment &, IDumpFeatureVisitor &) const
+{
+}
+
+bool
+DotProductBlueprint::setup(const IIndexEnvironment & env, const ParameterList & params)
+{
+ _defaultAttribute = params[0].getValue();
+ _queryVector = params[1].getValue();
+ _attrKey = make_attribute_key(getBaseName(), _defaultAttribute);
+ _queryVectorKey = make_queryvector_key(getBaseName(), _queryVector);
+ describeOutput("scalar", "The result after calculating the dot product of the vector represented by the weighted set "
+ "and the vector sent down with the query");
+ env.hintAttributeAccess(_defaultAttribute);
+ return true;
+}
+
+ParameterDescriptions
+DotProductBlueprint::getDescriptions() const {
+ return ParameterDescriptions().desc().attribute(ParameterDataTypeSet::normalTypeSet(), ParameterCollection::ANY).string();
+}
+
+Blueprint::UP
+DotProductBlueprint::createInstance() const
+{
+ return std::make_unique<DotProductBlueprint>();
+}
+
void
DotProductBlueprint::prepareSharedState(const IQueryEnvironment & env, IObjectStore & store) const
{
- vespalib::string attributeKey = make_attribute_key(getBaseName(), _defaultAttribute);
- const IAttributeVector * attribute = lookupAndStoreAttribute(attributeKey, getAttribute(env), env, store);
+ const IAttributeVector * attribute = lookupAndStoreAttribute(_attrKey, getAttribute(env), env, store);
if (attribute == nullptr) return;
- vespalib::string queryVectorKey = make_queryvector_key(getBaseName(), _queryVector);
- const fef::Anything * queryVector = env.getObjectStore().get(queryVectorKey);
+ const fef::Anything * queryVector = env.getObjectStore().get(_queryVectorKey);
if (queryVector == nullptr) {
fef::Anything::UP arguments = createQueryVector(env, attribute, getBaseName(), _queryVector);
if (arguments) {
- store.add(queryVectorKey, std::move(arguments));
+ store.add(_queryVectorKey, std::move(arguments));
}
}
@@ -872,7 +970,7 @@ FeatureExecutor &
DotProductBlueprint::createExecutor(const IQueryEnvironment & env, vespalib::Stash &stash) const
{
// Doing it "manually" here to avoid looking up attribute override unless needed.
- const fef::Anything * attributeArg = env.getObjectStore().get(make_attribute_key(getBaseName(), _defaultAttribute));
+ const fef::Anything * attributeArg = env.getObjectStore().get(_attrKey);
const IAttributeVector * attribute = (attributeArg != nullptr)
? static_cast<const fef::AnyWrapper<const IAttributeVector *> *>(attributeArg)->getValue()
: env.getAttributeContext().getAttribute(getAttribute(env));
@@ -882,7 +980,7 @@ DotProductBlueprint::createExecutor(const IQueryEnvironment & env, vespalib::Sta
return stash.create<SingleZeroValueExecutor>();
}
attribute = upgradeIfNecessary(attribute, env);
- const fef::Anything * queryVectorArg = env.getObjectStore().get(make_queryvector_key(getBaseName(), _queryVector));
+ const fef::Anything * queryVectorArg = env.getObjectStore().get(_queryVectorKey);
if (queryVectorArg != nullptr) {
return createFromObject(attribute, *queryVectorArg, stash);
} else {
diff --git a/searchlib/src/vespa/searchlib/features/dotproductfeature.h b/searchlib/src/vespa/searchlib/features/dotproductfeature.h
index 94c72233c4b..d315a24ecb3 100644
--- a/searchlib/src/vespa/searchlib/features/dotproductfeature.h
+++ b/searchlib/src/vespa/searchlib/features/dotproductfeature.h
@@ -45,7 +45,7 @@ struct ArrayParam : public fef::Anything {
namespace wset {
template <typename DimensionVType, typename DimensionHType, typename ComponentType, typename HashMapComparator = std::equal_to<DimensionHType> >
-class VectorBase {
+class VectorBase : public fef::Anything {
public:
typedef std::pair<DimensionVType, ComponentType> Element; // <dimension, component>
typedef std::vector<Element> Vector;
@@ -75,6 +75,10 @@ public:
}
};
+extern template class VectorBase<int64_t, int64_t, double>;
+extern template class VectorBase<uint32_t, uint32_t, double>;
+extern template class IntegerVectorT<int64_t>;
+
using IntegerVector = IntegerVectorT<int64_t>;
/**
@@ -118,11 +122,11 @@ public:
using AT = multivalue::WeightedValue<BaseType>;
using V = VectorBase<BaseType, BaseType, feature_t>;
private:
- V _queryVector;
- const typename V::HashMap::const_iterator _end;
+ const V & _queryVector;
+ const typename V::HashMap::const_iterator _end;
virtual size_t getAttributeValues(uint32_t docid, const AT * & count) = 0;
public:
- DotProductExecutorBase(V queryVector);
+ DotProductExecutorBase(const V & queryVector);
~DotProductExecutorBase() override;
void execute(uint32_t docId) override;
};
@@ -135,9 +139,11 @@ public:
protected:
const A * _attribute;
private:
+ std::unique_ptr<V> _backing;
size_t getAttributeValues(uint32_t docid, const AT * & count) override;
public:
- DotProductExecutor(const A * attribute, V queryVector);
+ DotProductExecutor(const A * attribute, const V & queryVector);
+ DotProductExecutor(const A * attribute, std::unique_ptr<V> queryVector);
~DotProductExecutor();
};
@@ -149,12 +155,13 @@ template <typename Vector, typename Buffer>
class DotProductExecutorByCopy final : public fef::FeatureExecutor {
private:
const attribute::IAttributeVector * _attribute;
- Vector _queryVector;
+ const Vector & _queryVector;
const typename Vector::HashMap::const_iterator _end;
Buffer _buffer;
-
+ std::unique_ptr<Vector> _backing;
public:
- DotProductExecutorByCopy(const attribute::IAttributeVector * attribute, Vector queryVector);
+ DotProductExecutorByCopy(const attribute::IAttributeVector * attribute, const Vector & queryVector);
+ DotProductExecutorByCopy(const attribute::IAttributeVector * attribute, std::unique_ptr<Vector> queryVector);
~DotProductExecutorByCopy() override;
void execute(uint32_t docId) override;
};
@@ -303,6 +310,8 @@ private:
using IAttributeVector = attribute::IAttributeVector;
vespalib::string _defaultAttribute;
vespalib::string _queryVector;
+ vespalib::string _attrKey;
+ vespalib::string _queryVectorKey;
vespalib::string getAttribute(const fef::IQueryEnvironment & env) const;
const IAttributeVector * upgradeIfNecessary(const IAttributeVector * attribute, const fef::IQueryEnvironment & env) const;
diff --git a/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp b/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp
index eb73cef1f4c..fd1faeae5ea 100644
--- a/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp
@@ -22,32 +22,46 @@ using search::features::dotproduct::wset::IntegerVector;
namespace search::features {
+namespace {
+
/**
* Executor used when array can be accessed directly
*/
-template <typename BaseType>
+template<typename BaseType>
class RawExecutor : public FeatureExecutor {
+private:
+ std::unique_ptr<IntegerVector> _backing;
protected:
const IAttributeVector *_attribute;
- IntegerVector _queryVector;
+ const IntegerVector &_queryVector;
public:
- RawExecutor(const IAttributeVector *attribute, IntegerVector queryVector);
+ RawExecutor(const IAttributeVector *attribute, const IntegerVector & queryVector);
+ RawExecutor(const IAttributeVector *attribute, std::unique_ptr<IntegerVector> queryVector);
+
void execute(uint32_t docId) override;
};
-template <typename BaseType>
-RawExecutor<BaseType>::RawExecutor(const IAttributeVector *attribute, IntegerVector queryVector) :
- FeatureExecutor(),
- _attribute(attribute),
- _queryVector(std::move(queryVector))
+template<typename BaseType>
+RawExecutor<BaseType>::RawExecutor(const IAttributeVector *attribute, std::unique_ptr<IntegerVector> queryVector)
+ : FeatureExecutor(),
+ _backing(std::move(queryVector)),
+ _attribute(attribute),
+ _queryVector(*_backing)
{
- _queryVector.syncMap();
}
-template <typename A, typename V>
-feature_t maxProduct(const A &array, size_t count, const V &query)
+template<typename BaseType>
+RawExecutor<BaseType>::RawExecutor(const IAttributeVector *attribute, const IntegerVector & queryVector)
+ : FeatureExecutor(),
+ _backing(),
+ _attribute(attribute),
+ _queryVector(queryVector)
{
+}
+
+template<typename A, typename V>
+feature_t maxProduct(const A &array, size_t count, const V &query) {
feature_t val = -std::numeric_limits<double>::max();
for (size_t i = 0; i < count; ++i) {
auto itr = query.getDimMap().find(array[i].value());
@@ -61,10 +75,9 @@ feature_t maxProduct(const A &array, size_t count, const V &query)
return val == -std::numeric_limits<double>::max() ? 0.0 : val;
}
-template <typename BaseType>
+template<typename BaseType>
void
-RawExecutor<BaseType>::execute(uint32_t docId)
-{
+RawExecutor<BaseType>::execute(uint32_t docId) {
using A = IntegerAttributeTemplate<BaseType>;
const multivalue::Value<BaseType> *values(nullptr);
const A *iattr = static_cast<const A *>(_attribute);
@@ -75,68 +88,40 @@ RawExecutor<BaseType>::execute(uint32_t docId)
/**
* Executor when array can't be accessed directly
*/
-template <typename BaseType>
+template<typename BaseType>
class BufferedExecutor : public RawExecutor<BaseType> {
private:
WeightedIntegerContent _buffer;
public:
- BufferedExecutor(const IAttributeVector *attribute, IntegerVector queryVector);
+ BufferedExecutor(const IAttributeVector *attribute, const IntegerVector & queryVector);
+ BufferedExecutor(const IAttributeVector *attribute, std::unique_ptr<IntegerVector> queryVector);
+
void execute(uint32_t docId) override;
};
-template <typename BaseType>
-BufferedExecutor<BaseType>::BufferedExecutor(const IAttributeVector *attribute, IntegerVector queryVector) :
- RawExecutor<BaseType>(attribute, std::move(queryVector)),
- _buffer()
-{
-}
-
-
-template <typename BaseType>
-void
-BufferedExecutor<BaseType>::execute(uint32_t docId)
+template<typename BaseType>
+BufferedExecutor<BaseType>::BufferedExecutor(const IAttributeVector *attribute, const IntegerVector & queryVector)
+ : RawExecutor<BaseType>(attribute, queryVector),
+ _buffer()
{
- _buffer.fill(*(this->_attribute), docId);
- this->outputs().set_number(0, maxProduct(_buffer, _buffer.size(), this->_queryVector));
}
-/**
- * Blueprint
- */
-InternalMaxReduceProdJoinBlueprint::InternalMaxReduceProdJoinBlueprint() :
- Blueprint("internalMaxReduceProdJoin")
+template<typename BaseType>
+BufferedExecutor<BaseType>::BufferedExecutor(const IAttributeVector *attribute, std::unique_ptr<IntegerVector> queryVector)
+ : RawExecutor<BaseType>(attribute, std::move(queryVector)),
+ _buffer()
{
}
-InternalMaxReduceProdJoinBlueprint::~InternalMaxReduceProdJoinBlueprint() = default;
+template<typename BaseType>
void
-InternalMaxReduceProdJoinBlueprint::visitDumpFeatures(const IIndexEnvironment &, IDumpFeatureVisitor &) const
-{
-}
-
-Blueprint::UP
-InternalMaxReduceProdJoinBlueprint::createInstance() const
-{
- return std::make_unique<InternalMaxReduceProdJoinBlueprint>();
+BufferedExecutor<BaseType>::execute(uint32_t docId) {
+ _buffer.fill(*(this->_attribute), docId);
+ this->outputs().set_number(0, maxProduct(_buffer, _buffer.size(), this->_queryVector));
}
-ParameterDescriptions
-InternalMaxReduceProdJoinBlueprint::getDescriptions() const
-{
- return ParameterDescriptions().desc().attribute(ParameterDataTypeSet::int32OrInt64TypeSet(), ParameterCollection::ARRAY).string();
-}
-
-bool
-InternalMaxReduceProdJoinBlueprint::setup(const IIndexEnvironment &env, const ParameterList &params)
-{
- _attribute = params[0].getValue();
- _query = params[1].getValue();
- describeOutput("scalar", "Internal executor for optimized execution of reduce(join(A,Q,f(x,y)(x*y)),max)");
- env.hintAttributeAccess(_attribute);
- return true;
-}
template<typename A>
bool supportsGetRawValues(const A &attr) noexcept {
@@ -150,10 +135,9 @@ bool supportsGetRawValues(const A &attr) noexcept {
}
}
-template <typename BaseType>
+template<typename BaseType, typename V>
FeatureExecutor &
-selectTypedExecutor(const IAttributeVector *attribute, IntegerVector vector, vespalib::Stash &stash)
-{
+selectTypedExecutor(const IAttributeVector *attribute, V && vector, vespalib::Stash &stash) {
if (!attribute->isImported()) {
using A = IntegerAttributeTemplate<BaseType>;
using VT = multivalue::Value<BaseType>;
@@ -163,50 +147,127 @@ selectTypedExecutor(const IAttributeVector *attribute, IntegerVector vector, ves
if (supportsGetRawValues(*iattr)) {
const ExactA *exactA = dynamic_cast<const ExactA *>(iattr);
if (exactA != nullptr) {
- return stash.create<RawExecutor<BaseType>>(attribute, std::move(vector));
+ return stash.create<RawExecutor<BaseType>>(attribute, std::forward<V>(vector));
}
}
}
- return stash.create<BufferedExecutor<BaseType>>(attribute, std::move(vector));
+ return stash.create<BufferedExecutor<BaseType>>(attribute, std::forward<V>(vector));
}
+template<typename V>
FeatureExecutor &
-selectExecutor(const IAttributeVector *attribute, IntegerVector vector, vespalib::Stash &stash)
-{
+selectExecutor(const IAttributeVector *attribute, V && vector, vespalib::Stash &stash) {
if (attribute->getCollectionType() == CollectionType::ARRAY) {
switch (attribute->getBasicType()) {
case BasicType::INT32:
- return selectTypedExecutor<int32_t>(attribute, std::move(vector), stash);
+ return selectTypedExecutor<int32_t, V>(attribute, std::forward<V>(vector), stash);
case BasicType::INT64:
- return selectTypedExecutor<int64_t>(attribute, std::move(vector), stash);
+ return selectTypedExecutor<int64_t, V>(attribute, std::forward<V>(vector), stash);
default:
break;
}
}
LOG(warning, "The attribute vector '%s' is not of type "
- "array<int/long>, returning executor with default value.", attribute->getName().c_str());
+ "array<int/long>, returning executor with default value.", attribute->getName().c_str());
return stash.create<SingleZeroValueExecutor>();
}
+vespalib::string
+make_queryvector_key(const vespalib::string & base, const vespalib::string & subKey) {
+ vespalib::string key(base);
+ key.append(".vector.");
+ key.append(subKey);
+ return key;
+}
+
+std::unique_ptr<IntegerVector>
+createQueryVector(const Property & prop) {
+ if (prop.found() && !prop.get().empty()) {
+ auto vector = std::make_unique<IntegerVector>();
+ WeightedSetParser::parse(prop.get(), *vector);
+ if (!vector->getVector().empty()) {
+ vector->syncMap();
+ return vector;
+ }
+ }
+ return std::unique_ptr<IntegerVector>();
+}
+
+}
+
+InternalMaxReduceProdJoinBlueprint::InternalMaxReduceProdJoinBlueprint()
+ : Blueprint("internalMaxReduceProdJoin"),
+ _attribute(),
+ _queryVector(),
+ _attrKey(),
+ _queryVectorKey()
+{
+}
+
+InternalMaxReduceProdJoinBlueprint::~InternalMaxReduceProdJoinBlueprint() = default;
+
+void
+InternalMaxReduceProdJoinBlueprint::visitDumpFeatures(const IIndexEnvironment &, IDumpFeatureVisitor &) const {
+}
+
+Blueprint::UP
+InternalMaxReduceProdJoinBlueprint::createInstance() const {
+ return std::make_unique<InternalMaxReduceProdJoinBlueprint>();
+}
+
+ParameterDescriptions
+InternalMaxReduceProdJoinBlueprint::getDescriptions() const {
+ return ParameterDescriptions().desc().attribute(ParameterDataTypeSet::int32OrInt64TypeSet(),
+ ParameterCollection::ARRAY).string();
+}
+
+bool
+InternalMaxReduceProdJoinBlueprint::setup(const IIndexEnvironment &env, const ParameterList &params) {
+ _attribute = params[0].getValue();
+ _attrKey = createAttributeKey(_attribute);
+ _queryVector = params[1].getValue();
+ _queryVectorKey = make_queryvector_key(getBaseName(), _queryVector);
+ describeOutput("scalar", "Internal executor for optimized execution of reduce(join(A,Q,f(x,y)(x*y)),max)");
+ env.hintAttributeAccess(_attribute);
+ return true;
+}
+
+void
+InternalMaxReduceProdJoinBlueprint::prepareSharedState(const fef::IQueryEnvironment & env, fef::IObjectStore & store) const
+{
+ const IAttributeVector * attribute = lookupAndStoreAttribute(_attrKey, _attribute, env, store);
+ if (attribute == nullptr) return;
+
+ const fef::Anything * queryVector = env.getObjectStore().get(_queryVectorKey);
+ if (queryVector == nullptr) {
+ std::unique_ptr<IntegerVector> vector = createQueryVector(env.getProperties().lookup(_queryVector));
+ if (vector) {
+ store.add(_queryVectorKey, std::move(vector));
+ }
+ }
+}
FeatureExecutor &
InternalMaxReduceProdJoinBlueprint::createExecutor(const IQueryEnvironment &env, vespalib::Stash &stash) const
{
- const IAttributeVector *attribute = env.getAttributeContext().getAttribute(_attribute);
+ const IAttributeVector * attribute = lookupAttribute(_attrKey, _attribute, env);
if (attribute == nullptr) {
LOG(warning, "The attribute vector '%s' was not found in the attribute manager, "
- "returning executor with default value.",
- _attribute.c_str());
+ "returning executor with default value.", _attribute.c_str());
return stash.create<SingleZeroValueExecutor>();
}
- Property prop = env.getProperties().lookup(_query);
- if (prop.found() && !prop.get().empty()) {
- IntegerVector vector;
- WeightedSetParser::parse(prop.get(), vector);
- if (!vector.getVector().empty()) {
+ const fef::Anything * queryVectorArg = env.getObjectStore().get(_queryVectorKey);
+ if (queryVectorArg != nullptr) {
+        // The vector is not copied; the ObjectStore keeps it alive for the duration of the query.
+ return selectExecutor<const IntegerVector &>(attribute, *dynamic_cast<const IntegerVector *>(queryVectorArg), stash);
+ } else {
+ std::unique_ptr<IntegerVector> vector = createQueryVector(env.getProperties().lookup(_queryVector));
+ if (vector) {
+ // Vector is moved and handed over to the executor.
return selectExecutor(attribute, std::move(vector), stash);
}
}
+
return stash.create<SingleZeroValueExecutor>();
}
diff --git a/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.h b/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.h
index 65dd0ac2082..5314687c98d 100644
--- a/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.h
+++ b/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.h
@@ -25,15 +25,18 @@ namespace search::features {
class InternalMaxReduceProdJoinBlueprint : public fef::Blueprint {
private:
vespalib::string _attribute;
- vespalib::string _query;
+ vespalib::string _queryVector;
+ vespalib::string _attrKey;
+ vespalib::string _queryVectorKey;
public:
InternalMaxReduceProdJoinBlueprint();
- ~InternalMaxReduceProdJoinBlueprint();
+ ~InternalMaxReduceProdJoinBlueprint() override;
fef::ParameterDescriptions getDescriptions() const override;
fef::Blueprint::UP createInstance() const override;
bool setup(const fef::IIndexEnvironment &env, const fef::ParameterList &params) override;
+ void prepareSharedState(const fef::IQueryEnvironment & queryEnv, fef::IObjectStore & objectStore) const override;
fef::FeatureExecutor &createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const override;
void visitDumpFeatures(const fef::IIndexEnvironment &env, fef::IDumpFeatureVisitor &visitor) const override;
diff --git a/searchlib/src/vespa/searchlib/features/queryfeature.cpp b/searchlib/src/vespa/searchlib/features/queryfeature.cpp
index b9041901ced..b927188c1aa 100644
--- a/searchlib/src/vespa/searchlib/features/queryfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/queryfeature.cpp
@@ -15,6 +15,7 @@
#include <vespa/eval/tensor/tensor.h>
#include <vespa/eval/eval/value_type.h>
#include <vespa/vespalib/locale/c.h>
+#include <cerrno>
#include <vespa/log/log.h>
LOG_SETUP(".features.queryfeature");
diff --git a/searchlib/src/vespa/searchlib/fef/blueprint.cpp b/searchlib/src/vespa/searchlib/fef/blueprint.cpp
index d7c5cb665ed..7073d0c0ccd 100644
--- a/searchlib/src/vespa/searchlib/fef/blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/fef/blueprint.cpp
@@ -10,8 +10,7 @@ LOG_SETUP(".fef.blueprint");
namespace search::fef {
const FeatureType &
-Blueprint::defineInput(vespalib::stringref inName,
- AcceptInput accept)
+Blueprint::defineInput(vespalib::stringref inName, AcceptInput accept)
{
assert(_dependency_handler != nullptr);
return _dependency_handler->resolve_input(inName, accept);
@@ -60,8 +59,7 @@ Blueprint::setup(const IIndexEnvironment &indexEnv,
}
bool
-Blueprint::setup(const IIndexEnvironment &indexEnv,
- const ParameterList &params)
+Blueprint::setup(const IIndexEnvironment &indexEnv, const ParameterList &params)
{
(void) indexEnv; (void) params;
LOG(error, "The setup function using a typed parameter list does not have a default implementation. "
@@ -69,6 +67,11 @@ Blueprint::setup(const IIndexEnvironment &indexEnv,
return false;
}
+void
+Blueprint::prepareSharedState(const IQueryEnvironment & queryEnv, IObjectStore & objectStore) const {
+ (void) queryEnv; (void) objectStore;
+}
+
const attribute::IAttributeVector *
Blueprint::lookupAndStoreAttribute(const vespalib::string & key, vespalib::stringref attrName,
const IQueryEnvironment & env, IObjectStore & store)
diff --git a/searchlib/src/vespa/searchlib/fef/blueprint.h b/searchlib/src/vespa/searchlib/fef/blueprint.h
index dd622ea36d9..5d7eb6eb2c0 100644
--- a/searchlib/src/vespa/searchlib/fef/blueprint.h
+++ b/searchlib/src/vespa/searchlib/fef/blueprint.h
@@ -45,7 +45,7 @@ public:
struct DependencyHandler {
virtual const FeatureType &resolve_input(const vespalib::string &feature_name, AcceptInput accept_type) = 0;
virtual void define_output(const vespalib::string &output_name, const FeatureType &type) = 0;
- virtual ~DependencyHandler() {}
+ virtual ~DependencyHandler() = default;
};
/**
@@ -62,14 +62,19 @@ public:
typedef std::vector<string> StringVector;
private:
- Blueprint(const Blueprint &);
- Blueprint &operator=(const Blueprint &);
-
string _baseName;
string _name;
DependencyHandler *_dependency_handler;
protected:
+ /**
+ * Create an empty blueprint. Blueprints in their initial state
+ * are used as prototypes to create other instances of the same
+ * class. The @ref setup method is used to tailor a blueprint
+ * object for a specific set of parameters.
+ **/
+ Blueprint(vespalib::stringref baseName);
+
using IAttributeVector = attribute::IAttributeVector;
/**
* Define an input feature for this blueprint. This method should
@@ -115,13 +120,8 @@ protected:
lookupAttribute(const vespalib::string & key, vespalib::stringref attrName, const IQueryEnvironment & env);
static vespalib::string createAttributeKey(vespalib::stringref attrName);
public:
- /**
- * Create an empty blueprint. Blueprints in their initial state
- * are used as prototypes to create other instances of the same
- * class. The @ref setup method is used to tailor a blueprint
- * object for a specific set of parameters.
- **/
- Blueprint(vespalib::stringref baseName);
+ Blueprint(const Blueprint &) = delete;
+ Blueprint &operator=(const Blueprint &) = delete;
/**
* Obtain the base name of this blueprint. This method will
@@ -239,10 +239,7 @@ public:
* This is called before creating multiple execution threads.
* @param queryEnv The query environment.
*/
- virtual void prepareSharedState(const IQueryEnvironment & queryEnv, IObjectStore & objectStore) const {
- (void) queryEnv;
- (void) objectStore;
- }
+ virtual void prepareSharedState(const IQueryEnvironment & queryEnv, IObjectStore & objectStore) const;
/**
* Create a feature executor based on this blueprint. Failure to
diff --git a/searchlib/src/vespa/searchlib/fef/iqueryenvironment.h b/searchlib/src/vespa/searchlib/fef/iqueryenvironment.h
index a7f268e5c6b..041e9ec67bc 100644
--- a/searchlib/src/vespa/searchlib/fef/iqueryenvironment.h
+++ b/searchlib/src/vespa/searchlib/fef/iqueryenvironment.h
@@ -70,6 +70,15 @@ public:
virtual const search::attribute::IAttributeContext & getAttributeContext() const = 0;
/**
+ * Returns the average field length for the given field.
+ *
+ * @param field_name field name
+ *
+ * @return average field length
+ **/
+ virtual double get_average_field_length(const vespalib::string &field_name) const = 0;
+
+ /**
* Returns a const view of the index environment.
*
* @return index environment
diff --git a/searchlib/src/vespa/searchlib/fef/itermfielddata.h b/searchlib/src/vespa/searchlib/fef/itermfielddata.h
index 80343db2250..6fb467ce25c 100644
--- a/searchlib/src/vespa/searchlib/fef/itermfielddata.h
+++ b/searchlib/src/vespa/searchlib/fef/itermfielddata.h
@@ -27,13 +27,26 @@ public:
**/
virtual uint32_t getFieldId() const = 0;
+
+ /**
+ * Returns the number of documents matching this term.
+ */
+ virtual uint32_t get_matching_doc_count() const = 0;
+
+ /**
+ * Returns the total number of documents in the corpus.
+ */
+ virtual uint32_t get_total_doc_count() const = 0;
+
/**
* Obtain the document frequency. This is a value between 0 and 1
* indicating the ratio of the matching documents to the corpus.
*
* @return document frequency
- **/
- virtual double getDocFreq() const = 0;
+ **/
+ double getDocFreq() const {
+ return (double)get_matching_doc_count() / (double)get_total_doc_count();
+ }
/**
* Obtain the match handle for this field,
diff --git a/searchlib/src/vespa/searchlib/fef/objectstore.h b/searchlib/src/vespa/searchlib/fef/objectstore.h
index 49176afa3c9..2debcd277e9 100644
--- a/searchlib/src/vespa/searchlib/fef/objectstore.h
+++ b/searchlib/src/vespa/searchlib/fef/objectstore.h
@@ -2,9 +2,13 @@
#pragma once
#include <vespa/vespalib/stllike/hash_map.h>
+#include <cassert>
namespace search::fef {
+/**
+ * Top-level interface for objects stored in an IObjectStore.
+ */
class Anything
{
public:
@@ -12,6 +16,9 @@ public:
virtual ~Anything() { }
};
+/**
+ * Implementation of the Anything interface that wraps a value of the given type.
+ */
template<typename T>
class AnyWrapper : public Anything
{
@@ -22,6 +29,9 @@ private:
T _value;
};
+/**
+ * Interface for a key/value store of Anything instances.
+ */
class IObjectStore
{
public:
@@ -30,6 +40,9 @@ public:
virtual const Anything * get(const vespalib::string & key) const = 0;
};
+/**
+ * Object store implementation on top of a hash map.
+ */
class ObjectStore : public IObjectStore
{
public:
@@ -42,4 +55,20 @@ private:
ObjectMap _objectMap;
};
+namespace objectstore {
+
+/**
+ * Utility function that gets the value stored in an Anything instance (via AnyWrapper).
+ */
+template<typename T>
+const T &
+as_value(const Anything &val) {
+ using WrapperType = AnyWrapper<T>;
+ const auto *wrapper = dynamic_cast<const WrapperType *>(&val);
+ assert(wrapper != nullptr);
+ return wrapper->getValue();
+}
+
+}
+
}
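The as_value helper pairs with AnyWrapper for typed retrieval from the object store and asserts that the stored entry wraps the requested type. A usage sketch mirroring what Bm25Blueprint does with the average field length; the key name is just an example:

#include <vespa/searchlib/fef/objectstore.h>
#include <memory>

using search::fef::AnyWrapper;
using search::fef::ObjectStore;
using search::fef::objectstore::as_value;

double cached_avg_field_length() {
    ObjectStore store;
    // Store the value once under a string key...
    store.add("bm25.afl.content", std::make_unique<AnyWrapper<double>>(12.5));
    // ...and read it back, falling back to 1.0 if the key is missing.
    const auto* entry = store.get("bm25.afl.content");
    return (entry != nullptr) ? as_value<double>(*entry) : 1.0;
}
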
diff --git a/searchlib/src/vespa/searchlib/fef/phrasesplitter.h b/searchlib/src/vespa/searchlib/fef/phrasesplitter.h
index 4c7ca4b67d7..4e46c9eaa7c 100644
--- a/searchlib/src/vespa/searchlib/fef/phrasesplitter.h
+++ b/searchlib/src/vespa/searchlib/fef/phrasesplitter.h
@@ -113,6 +113,7 @@ public:
const Properties & getProperties() const override { return _queryEnv.getProperties(); }
const Location & getLocation() const override { return _queryEnv.getLocation(); }
const attribute::IAttributeContext & getAttributeContext() const override { return _queryEnv.getAttributeContext(); }
+ double get_average_field_length(const vespalib::string &field_name) const override { return _queryEnv.get_average_field_length(field_name); }
const IIndexEnvironment & getIndexEnvironment() const override { return _queryEnv.getIndexEnvironment(); }
void bind_match_data(const fef::MatchData &md) { _matchData = &md; }
};
diff --git a/searchlib/src/vespa/searchlib/fef/simpletermfielddata.cpp b/searchlib/src/vespa/searchlib/fef/simpletermfielddata.cpp
index d1edee7fd07..64906eed22e 100644
--- a/searchlib/src/vespa/searchlib/fef/simpletermfielddata.cpp
+++ b/searchlib/src/vespa/searchlib/fef/simpletermfielddata.cpp
@@ -2,22 +2,22 @@
#include "simpletermfielddata.h"
-namespace search {
-namespace fef {
+namespace search::fef {
SimpleTermFieldData::SimpleTermFieldData(uint32_t fieldId)
: _fieldId(fieldId),
- _docFreq(0),
+ _matching_doc_count(0),
+ _total_doc_count(1),
_handle(IllegalHandle)
{
}
SimpleTermFieldData::SimpleTermFieldData(const ITermFieldData &rhs)
: _fieldId(rhs.getFieldId()),
- _docFreq(rhs.getDocFreq()),
+ _matching_doc_count(rhs.get_matching_doc_count()),
+ _total_doc_count(rhs.get_total_doc_count()),
_handle(rhs.getHandle())
{
}
-} // namespace fef
-} // namespace search
+}
diff --git a/searchlib/src/vespa/searchlib/fef/simpletermfielddata.h b/searchlib/src/vespa/searchlib/fef/simpletermfielddata.h
index 6f0fbc9af64..d92d3a48f03 100644
--- a/searchlib/src/vespa/searchlib/fef/simpletermfielddata.h
+++ b/searchlib/src/vespa/searchlib/fef/simpletermfielddata.h
@@ -4,8 +4,7 @@
#include "itermfielddata.h"
-namespace search {
-namespace fef {
+namespace search::fef {
/**
* Information about a single field that is being searched for a term
@@ -17,7 +16,8 @@ class SimpleTermFieldData : public ITermFieldData
{
private:
uint32_t _fieldId;
- double _docFreq;
+ uint32_t _matching_doc_count;
+ uint32_t _total_doc_count;
TermFieldHandle _handle;
public:
@@ -33,28 +33,14 @@ public:
**/
SimpleTermFieldData(uint32_t fieldId);
- /**
- * Obtain the field id.
- *
- * @return field id
- **/
uint32_t getFieldId() const override final { return _fieldId; }
- /**
- * Obtain the document frequency.
- *
- * @return document frequency
- **/
- double getDocFreq() const override final { return _docFreq; }
+ uint32_t get_matching_doc_count() const override { return _matching_doc_count; }
+
+ uint32_t get_total_doc_count() const override { return _total_doc_count; }
using ITermFieldData::getHandle;
- /**
- * Obtain the match handle for this field,
- * requesting match data with the given details in the corresponding TermFieldMatchData.
- *
- * @return match handle (or IllegalHandle)
- **/
TermFieldHandle getHandle(MatchDataDetails requestedDetails) const override {
(void) requestedDetails;
return _handle;
@@ -62,20 +48,15 @@ public:
/**
* Sets the document frequency.
- *
- * @return this object (for chaining)
- * @param docFreq document frequency
**/
- SimpleTermFieldData &setDocFreq(double docFreq) {
- _docFreq = docFreq;
+ SimpleTermFieldData &setDocFreq(uint32_t matching_doc_count, uint32_t total_doc_count) {
+ _matching_doc_count = matching_doc_count;
+ _total_doc_count = total_doc_count;
return *this;
}
/**
* Sets the match handle for this field.
- *
- * @return this object (for chaining)
- * @param handle match handle
**/
SimpleTermFieldData &setHandle(TermFieldHandle handle) {
_handle = handle;
@@ -83,6 +64,5 @@ public:
}
};
-} // namespace fef
-} // namespace search
+}
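With this change a term field's document frequency is derived from the two counts instead of being set directly, and getDocFreq() is now computed in ITermFieldData. A small usage sketch with hypothetical counts:

#include <vespa/searchlib/fef/simpletermfielddata.h>
#include <cstdio>

int main() {
    search::fef::SimpleTermFieldData tfd(0);            // field id 0
    tfd.setDocFreq(10, 1000);                           // 10 matching documents out of 1000
    std::printf("docFreq = %.3f\n", tfd.getDocFreq());  // 0.010
    return 0;
}
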
diff --git a/searchlib/src/vespa/searchlib/fef/test/queryenvironment.cpp b/searchlib/src/vespa/searchlib/fef/test/queryenvironment.cpp
index ee305dcff55..4697675c071 100644
--- a/searchlib/src/vespa/searchlib/fef/test/queryenvironment.cpp
+++ b/searchlib/src/vespa/searchlib/fef/test/queryenvironment.cpp
@@ -2,21 +2,17 @@
#include "queryenvironment.h"
-namespace search {
-namespace fef {
-namespace test {
+namespace search::fef::test {
QueryEnvironment::QueryEnvironment(IndexEnvironment *env)
: _indexEnv(env),
_terms(),
_properties(),
_location(),
- _attrCtx((env == NULL) ? attribute::IAttributeContext::UP() : env->getAttributeMap().createContext())
+ _attrCtx((env == nullptr) ? attribute::IAttributeContext::UP() : env->getAttributeMap().createContext())
{
}
QueryEnvironment::~QueryEnvironment() { }
-} // namespace test
-} // namespace fef
-} // namespace search
+}
diff --git a/searchlib/src/vespa/searchlib/fef/test/queryenvironment.h b/searchlib/src/vespa/searchlib/fef/test/queryenvironment.h
index 0179b5020e6..40898281794 100644
--- a/searchlib/src/vespa/searchlib/fef/test/queryenvironment.h
+++ b/searchlib/src/vespa/searchlib/fef/test/queryenvironment.h
@@ -6,10 +6,9 @@
#include <vespa/searchlib/fef/iqueryenvironment.h>
#include <vespa/searchlib/fef/location.h>
#include <vespa/searchlib/fef/simpletermdata.h>
+#include <unordered_map>
-namespace search {
-namespace fef {
-namespace test {
+namespace search::fef::test {
/**
* Implementation of the IQueryEnvironment interface used for testing.
@@ -25,6 +24,7 @@ private:
Properties _properties;
Location _location;
search::attribute::IAttributeContext::UP _attrCtx;
+ std::unordered_map<std::string, double> _avg_field_lengths;
public:
/**
@@ -40,6 +40,13 @@ public:
const ITermData *getTerm(uint32_t idx) const override { return idx < _terms.size() ? &_terms[idx] : NULL; }
const Location & getLocation() const override { return _location; }
const search::attribute::IAttributeContext &getAttributeContext() const override { return *_attrCtx; }
+ double get_average_field_length(const vespalib::string& field_name) const override {
+ auto itr = _avg_field_lengths.find(field_name);
+ if (itr != _avg_field_lengths.end()) {
+ return itr->second;
+ }
+ return 1.0;
+ }
const IIndexEnvironment &getIndexEnvironment() const override { assert(_indexEnv != NULL); return *_indexEnv; }
/** Returns a reference to the index environment of this. */
@@ -76,9 +83,9 @@ public:
/** Returns a reference to the location of this. */
Location & getLocation() { return _location; }
+
+ std::unordered_map<std::string, double>& get_avg_field_lengths() { return _avg_field_lengths; }
};
-} // namespace test
-} // namespace fef
-} // namespace search
+}
diff --git a/searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.cpp b/searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.cpp
index 2d9fb998869..67a2eaf5677 100644
--- a/searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.cpp
+++ b/searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.cpp
@@ -2,16 +2,13 @@
#include "queryenvironmentbuilder.h"
-namespace search {
-namespace fef {
-namespace test {
+namespace search::fef::test {
QueryEnvironmentBuilder::QueryEnvironmentBuilder(QueryEnvironment &env,
MatchDataLayout &layout) :
_queryEnv(env),
_layout(layout)
{
- // empty
}
QueryEnvironmentBuilder::~QueryEnvironmentBuilder() { }
@@ -39,8 +36,8 @@ QueryEnvironmentBuilder::addIndexNode(const std::vector<vespalib::string> &field
td.setWeight(search::query::Weight(100));
for (uint32_t i = 0; i < fieldNames.size(); ++i) {
const FieldInfo *info = _queryEnv.getIndexEnv()->getFieldByName(fieldNames[i]);
- if (info == NULL || info->type() != FieldType::INDEX) {
- return NULL;
+ if (info == nullptr || info->type() != FieldType::INDEX) {
+ return nullptr;
}
SimpleTermFieldData &tfd = td.addField(info->id());
tfd.setHandle(_layout.allocTermField(tfd.getFieldId()));
@@ -52,8 +49,8 @@ SimpleTermData *
QueryEnvironmentBuilder::addAttributeNode(const vespalib::string &attrName)
{
const FieldInfo *info = _queryEnv.getIndexEnv()->getFieldByName(attrName);
- if (info == NULL || info->type() != FieldType::ATTRIBUTE) {
- return NULL;
+ if (info == nullptr || info->type() != FieldType::ATTRIBUTE) {
+ return nullptr;
}
_queryEnv.getTerms().push_back(SimpleTermData());
SimpleTermData &td = _queryEnv.getTerms().back();
@@ -63,6 +60,11 @@ QueryEnvironmentBuilder::addAttributeNode(const vespalib::string &attrName)
return &td;
}
-} // namespace test
-} // namespace fef
-} // namespace search
+QueryEnvironmentBuilder&
+QueryEnvironmentBuilder::set_avg_field_length(const vespalib::string& field_name, double avg_field_length)
+{
+ _queryEnv.get_avg_field_lengths()[field_name] = avg_field_length;
+ return *this;
+}
+
+}
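The builder now exposes the average field length map, so feature tests (for example for bm25) can control what get_average_field_length() returns. A sketch of how a test might use it; the header paths and field name are assumptions based on the repository layout:

#include <vespa/searchlib/fef/matchdatalayout.h>
#include <vespa/searchlib/fef/test/indexenvironment.h>
#include <vespa/searchlib/fef/test/queryenvironment.h>
#include <vespa/searchlib/fef/test/queryenvironmentbuilder.h>

using namespace search::fef;
using namespace search::fef::test;

void build_test_environment() {
    IndexEnvironment index_env;
    QueryEnvironment query_env(&index_env);
    MatchDataLayout layout;
    QueryEnvironmentBuilder builder(query_env, layout);
    builder.set_avg_field_length("content", 12.5);  // hypothetical field name
    // query_env.get_average_field_length("content") now returns 12.5; unknown fields return 1.0.
}
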
diff --git a/searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.h b/searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.h
index 98aed323f9a..36a63b2a9a2 100644
--- a/searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.h
+++ b/searchlib/src/vespa/searchlib/fef/test/queryenvironmentbuilder.h
@@ -57,6 +57,8 @@ public:
/** Returns a const reference to the match data layout of this. */
const MatchDataLayout &getLayout() const { return _layout; }
+ QueryEnvironmentBuilder& set_avg_field_length(const vespalib::string& field_name, double avg_field_length);
+
private:
QueryEnvironmentBuilder(const QueryEnvironmentBuilder &); // hide
QueryEnvironmentBuilder & operator=(const QueryEnvironmentBuilder &); // hide
diff --git a/searchlib/src/vespa/searchlib/util/url.cpp b/searchlib/src/vespa/searchlib/util/url.cpp
index 638f22fc8b7..496a19d153f 100644
--- a/searchlib/src/vespa/searchlib/util/url.cpp
+++ b/searchlib/src/vespa/searchlib/util/url.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "url.h"
+#include <algorithm>
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.util.url");
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java
index eb90b2f56d7..2b72a775b24 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/DuperModelManager.java
@@ -9,10 +9,7 @@ import com.yahoo.config.model.api.SuperModelListener;
import com.yahoo.config.model.api.SuperModelProvider;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.HostName;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.service.monitor.DuperModelInfraApi;
import com.yahoo.vespa.service.monitor.InfraApplicationApi;
@@ -40,14 +37,12 @@ public class DuperModelManager implements DuperModelInfraApi {
static final TenantHostApplication tenantHostApplication = new TenantHostApplication();
private final Map<ApplicationId, InfraApplication> supportedInfraApplications;
- private final Map<ApplicationId, InfraApplication> supportedMinusTenantHostInfraApplications;
private final Object monitor = new Object();
private final DuperModel duperModel;
// The set of active infrastructure ApplicationInfo. Not all are necessarily in the DuperModel for historical reasons.
private final Set<ApplicationId> activeInfraInfos = new HashSet<>(10);
- private final BooleanFlag tenantHostApplicationEnabled;
@Inject
public DuperModelManager(ConfigserverConfig configServerConfig, FlagSource flagSource, SuperModelProvider superModelProvider) {
@@ -59,7 +54,6 @@ public class DuperModelManager implements DuperModelInfraApi {
/** For testing */
DuperModelManager(boolean multitenant, boolean isController, SuperModelProvider superModelProvider, DuperModel duperModel, FlagSource flagSource) {
this.duperModel = duperModel;
- this.tenantHostApplicationEnabled = Flags.ENABLE_TENANT_HOST_APP.bindTo(flagSource);
if (multitenant) {
supportedInfraApplications =
@@ -70,9 +64,6 @@ public class DuperModelManager implements DuperModelInfraApi {
} else {
supportedInfraApplications = Map.of();
}
- supportedMinusTenantHostInfraApplications = supportedInfraApplications.entrySet().stream()
- .filter(app -> app.getValue().getCapacity().type() != NodeType.host)
- .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
superModelProvider.registerListener(new SuperModelListener() {
@Override
@@ -103,23 +94,16 @@ public class DuperModelManager implements DuperModelInfraApi {
@Override
public List<InfraApplicationApi> getSupportedInfraApplications() {
- return new ArrayList<>(getSupportedApps().values());
+ return new ArrayList<>(supportedInfraApplications.values());
}
@Override
public Optional<InfraApplicationApi> getInfraApplication(ApplicationId applicationId) {
- return Optional.ofNullable(getSupportedApps().get(applicationId));
- }
-
- private Map<ApplicationId, InfraApplication> getSupportedApps() {
- return tenantHostApplicationEnabled.value() ? supportedInfraApplications : supportedMinusTenantHostInfraApplications;
+ return Optional.ofNullable(supportedInfraApplications.get(applicationId));
}
/**
* Returns true if application is considered an infrastructure application by the DuperModel.
- *
- * <p>Note: Unless enable-tenant-host-app flag is enabled, the tenant host "application" is NOT considered an
- * infrastructure application: It is just a cluster in the {@link ZoneApplication zone application}.
*/
public boolean isSupportedInfraApplication(ApplicationId applicationId) {
return supportedInfraApplications.containsKey(applicationId);
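The DuperModelManager change above drops the ENABLE_TENANT_HOST_APP feature flag together with the derived map it selected, so every lookup now reads the single supportedInfraApplications map. A minimal sketch of the flag-gated pattern that was removed, using a plain BooleanSupplier in place of Vespa's BooleanFlag and String keys in place of ApplicationId (both stand-ins for illustration, not the real API):

    import java.util.Map;
    import java.util.function.BooleanSupplier;
    import java.util.stream.Collectors;

    class FlagGatedLookup {
        private final Map<String, String> allApps =
                Map.of("tenant-host", "host", "proxy-host", "proxyhost");
        // Derived view excluding the tenant-host entry while the flag is off.
        private final Map<String, String> withoutTenantHost = allApps.entrySet().stream()
                .filter(e -> !"host".equals(e.getValue()))
                .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
        private final BooleanSupplier tenantHostEnabled;

        FlagGatedLookup(BooleanSupplier tenantHostEnabled) {
            this.tenantHostEnabled = tenantHostEnabled;
        }

        // Before the cleanup, every lookup had to choose a map based on the flag value.
        Map<String, String> supportedApps() {
            return tenantHostEnabled.getAsBoolean() ? allApps : withoutTenantHost;
        }
    }

Once the flag is permanently on, supportedApps() reduces to returning allApps, which is exactly the simplification this diff applies.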
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/ZoneApplication.java b/service-monitor/src/main/java/com/yahoo/vespa/service/duper/ZoneApplication.java
deleted file mode 100644
index bcf5f096e7f..00000000000
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/duper/ZoneApplication.java
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.service.duper;
-
-import com.yahoo.config.model.api.ServiceInfo;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ApplicationName;
-import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.TenantName;
-import com.yahoo.vespa.applicationmodel.ClusterId;
-import com.yahoo.vespa.applicationmodel.ServiceType;
-import com.yahoo.vespa.service.model.ApplicationInstanceGenerator;
-
-import java.util.Objects;
-
-/**
- * @author hakon
- *
- * TODO: This does not extend InfraApplication because
- * 1) It is not deployed same as the other HostedVespaApplications
- * 2) ZoneApplication has multiple clusters
- */
-public class ZoneApplication {
-
- private ZoneApplication() {}
-
- private static final ApplicationId ZONE_APPLICATION_ID = InfraApplication
- .createHostedVespaApplicationId("routing");
- private static final ClusterId NODE_ADMIN_CLUSTER_ID = new ClusterId("node-admin");
- private static final ClusterId ROUTING_CLUSTER_ID = new ClusterId("routing");
-
- public static ApplicationId getApplicationId() {
- return ZONE_APPLICATION_ID;
- }
-
- public static TenantName getTenantName() {
- return ZONE_APPLICATION_ID.tenant();
- }
-
- public static ApplicationName getApplicationName() {
- return ZONE_APPLICATION_ID.application();
- }
-
- public static NodeType getNodeAdminNodeType() {
- return NodeType.host;
- }
-
- public static ClusterId getNodeAdminClusterId() {
- return NODE_ADMIN_CLUSTER_ID;
- }
-
- public static ClusterSpec.Type getNodeAdminClusterSpecType() {
- return ClusterSpec.Type.container;
- }
-
- public static ClusterSpec.Id getNodeAdminClusterSpecId() {
- return new ClusterSpec.Id(getNodeAdminClusterId().s());
- }
-
- public static ServiceType getNodeAdminServiceType() {
- return ServiceType.CONTAINER;
- }
-
- public static int getNodeAdminHealthPort() {
- return HostAdminApplication.HOST_ADMIN_HEALT_PORT;
- }
-
- public static NodeType getRoutingNodeType() {
- return NodeType.proxy;
- }
-
- public static ClusterId getRoutingClusterId() {
- return ROUTING_CLUSTER_ID;
- }
-
- public static ClusterSpec.Type getRoutingClusterSpecType() {
- return ClusterSpec.Type.container;
- }
-
- public static ClusterSpec.Id getRoutingClusterSpecId() {
- return new ClusterSpec.Id(getRoutingClusterId().s());
- }
-
- public static ServiceType getRoutingServiceType() {
- return ServiceType.CONTAINER;
- }
-
- public static int getRoutingHealthPort() {
- return 4088;
- }
-
- public static boolean isNodeAdminService(ApplicationId applicationId,
- ClusterId clusterId,
- ServiceType serviceType) {
- return Objects.equals(applicationId, getApplicationId()) &&
- Objects.equals(serviceType, getNodeAdminServiceType()) &&
- Objects.equals(clusterId, getNodeAdminClusterId());
- }
-
- /** Whether a {@link ServiceInfo} belongs to the zone application's node-admin cluster. */
- public static boolean isNodeAdminServiceInfo(ApplicationId applicationId, ServiceInfo serviceInfo) {
- return isNodeAdminService(
- applicationId,
- ApplicationInstanceGenerator.getClusterId(serviceInfo),
- ApplicationInstanceGenerator.toServiceType(serviceInfo));
- }
-
-}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/health/HealthMonitorManager.java b/service-monitor/src/main/java/com/yahoo/vespa/service/health/HealthMonitorManager.java
index 7601cfd2e95..3cc7010e209 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/health/HealthMonitorManager.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/health/HealthMonitorManager.java
@@ -10,7 +10,6 @@ import com.yahoo.vespa.applicationmodel.ServiceStatus;
import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
import com.yahoo.vespa.applicationmodel.ServiceType;
import com.yahoo.vespa.service.duper.DuperModelManager;
-import com.yahoo.vespa.service.duper.ZoneApplication;
import com.yahoo.vespa.service.executor.RunletExecutorImpl;
import com.yahoo.vespa.service.manager.HealthMonitorApi;
import com.yahoo.vespa.service.manager.MonitorManager;
@@ -77,7 +76,7 @@ public class HealthMonitorManager implements MonitorManager, HealthMonitorApi {
@Override
public void applicationActivated(ApplicationInfo application) {
- if (wouldMonitor(application.getApplicationId())) {
+ if (duperModel.isSupportedInfraApplication(application.getApplicationId())) {
healthMonitors
.computeIfAbsent(application.getApplicationId(), applicationHealthMonitorFactory::create)
.monitor(application);
@@ -103,24 +102,9 @@ public class HealthMonitorManager implements MonitorManager, HealthMonitorApi {
return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED);
}
- if (applicationId.equals(ZoneApplication.getApplicationId())) {
- // New: The zone app is health monitored (monitor != null), possibly even the routing cluster
- // which is a normal jdisc container (unnecessary but harmless), but the node-admin cluster
- // are tenant Docker hosts running host admin that are monitored via /state/v1/health.
- if (ZoneApplication.isNodeAdminService(applicationId, clusterId, serviceType)) {
- return monitor.getStatus(applicationId, clusterId, serviceType, configId);
- } else {
- return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED);
- }
- }
-
return monitor.getStatus(applicationId, clusterId, serviceType, configId);
}
- private boolean wouldMonitor(ApplicationId id) {
- return duperModel.isSupportedInfraApplication(id) || id.equals(ZoneApplication.getApplicationId());
- }
-
@Override
public List<ApplicationId> getMonitoredApplicationIds() {
return Collections.list(healthMonitors.keys());
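In the HealthMonitorManager change above, applicationActivated now decides purely on duperModel.isSupportedInfraApplication and then uses computeIfAbsent to create one health monitor per application on first activation and reuse it afterwards. A minimal sketch of that idiom, with String keys standing in for ApplicationId and a hypothetical Monitor class (not the Vespa types):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class MonitorRegistry {
        static final class Monitor {
            final String applicationId;
            Monitor(String applicationId) { this.applicationId = applicationId; }
        }

        private final Map<String, Monitor> monitors = new ConcurrentHashMap<>();

        // First activation creates the monitor; repeated activations return the same instance.
        Monitor activate(String applicationId) {
            return monitors.computeIfAbsent(applicationId, Monitor::new);
        }
    }

The mapping function only runs when the key is absent, so repeated activations of the same application never create a second monitor.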
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthModel.java b/service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthModel.java
index 8e3780744f6..0408e0134ea 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthModel.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/health/StateV1HealthModel.java
@@ -6,14 +6,11 @@ import com.yahoo.config.model.api.HostInfo;
import com.yahoo.config.model.api.PortInfo;
import com.yahoo.config.model.api.ServiceInfo;
import com.yahoo.config.provision.HostName;
-import com.yahoo.vespa.service.duper.HostAdminApplication;
-import com.yahoo.vespa.service.duper.ZoneApplication;
import com.yahoo.vespa.service.executor.RunletExecutor;
import com.yahoo.vespa.service.model.ApplicationInstanceGenerator;
import com.yahoo.vespa.service.monitor.ServiceId;
import java.time.Duration;
-import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
@@ -28,7 +25,7 @@ public class StateV1HealthModel implements AutoCloseable {
private static final String PORT_TAG_HTTP = "HTTP";
/** Port tags implying /state/v1/health is served on HTTP. */
- public static final List<String> HTTP_HEALTH_PORT_TAGS = Arrays.asList(PORT_TAG_HTTP, PORT_TAG_STATE);
+ public static final List<String> HTTP_HEALTH_PORT_TAGS = List.of(PORT_TAG_HTTP, PORT_TAG_STATE);
private final Duration targetHealthStaleness;
private final Duration requestTimeout;
private final Duration connectionKeepAlive;
@@ -47,32 +44,16 @@ public class StateV1HealthModel implements AutoCloseable {
Map<ServiceId, HealthEndpoint> extractHealthEndpoints(ApplicationInfo application) {
Map<ServiceId, HealthEndpoint> endpoints = new HashMap<>();
- boolean isZoneApplication = application.getApplicationId().equals(ZoneApplication.getApplicationId());
-
for (HostInfo hostInfo : application.getModel().getHosts()) {
HostName hostname = HostName.from(hostInfo.getHostname());
for (ServiceInfo serviceInfo : hostInfo.getServices()) {
-
- boolean isNodeAdmin = false;
- if (isZoneApplication) {
- if (ZoneApplication.isNodeAdminServiceInfo(application.getApplicationId(), serviceInfo)) {
- isNodeAdmin = true;
- } else {
- // Only the node admin/host admin cluster of the zone application should be monitored
- // TODO: Move the node admin cluster out to a separate infrastructure application
- continue;
- }
- }
-
ServiceId serviceId = ApplicationInstanceGenerator.getServiceId(application, serviceInfo);
for (PortInfo portInfo : serviceInfo.getPorts()) {
if (portTaggedWith(portInfo, HTTP_HEALTH_PORT_TAGS)) {
- // The host-admin-in-zone-application is one big hack.
- int port = isNodeAdmin ? HostAdminApplication.HOST_ADMIN_HEALT_PORT : portInfo.getPort();
StateV1HealthEndpoint endpoint = new StateV1HealthEndpoint(
serviceId,
hostname,
- port,
+ portInfo.getPort(),
targetHealthStaleness,
requestTimeout,
connectionKeepAlive,
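Earlier in this file, HTTP_HEALTH_PORT_TAGS switches from Arrays.asList to List.of. Both return a fixed-size list, but List.of is additionally immutable and null-hostile, which is a better fit for a public constant. A small standalone illustration (standard library only):

    import java.util.Arrays;
    import java.util.List;

    class ListFactoryDemo {
        public static void main(String[] args) {
            List<String> viaArrays = Arrays.asList("HTTP", "STATE");
            viaArrays.set(0, "http");       // allowed: Arrays.asList is a mutable view over an array

            List<String> viaOf = List.of("HTTP", "STATE");
            try {
                viaOf.set(0, "http");       // List.of is immutable and throws here
            } catch (UnsupportedOperationException expected) {
                System.out.println("List.of rejects mutation");
            }
        }
    }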
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/model/ApplicationInstanceGenerator.java b/service-monitor/src/main/java/com/yahoo/vespa/service/model/ApplicationInstanceGenerator.java
index e535aff8b46..5cc2d538c24 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/model/ApplicationInstanceGenerator.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/model/ApplicationInstanceGenerator.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.service.model;
import com.yahoo.config.model.api.ApplicationInfo;
import com.yahoo.config.model.api.HostInfo;
import com.yahoo.config.model.api.ServiceInfo;
-import com.yahoo.config.model.api.container.ContainerServiceType;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.applicationmodel.ApplicationInstance;
@@ -19,7 +18,6 @@ import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
import com.yahoo.vespa.applicationmodel.ServiceType;
import com.yahoo.vespa.applicationmodel.TenantId;
import com.yahoo.vespa.service.duper.ConfigServerApplication;
-import com.yahoo.vespa.service.duper.ZoneApplication;
import com.yahoo.vespa.service.monitor.ServiceId;
import com.yahoo.vespa.service.monitor.ServiceStatusProvider;
@@ -56,20 +54,9 @@ public class ApplicationInstanceGenerator {
for (HostInfo host : applicationInfo.getModel().getHosts()) {
HostName hostName = new HostName(host.getHostname());
- boolean isTenantHost =
- applicationInfo.getApplicationId().equals(ZoneApplication.getApplicationId()) &&
- host.getServices().stream().anyMatch(serviceInfo ->
- ZoneApplication.isNodeAdminServiceInfo(applicationInfo.getApplicationId(), serviceInfo));
-
for (ServiceInfo serviceInfo : host.getServices()) {
ServiceClusterKey serviceClusterKey = toServiceClusterKey(serviceInfo);
- if (isTenantHost && !ZoneApplication.isNodeAdminServiceInfo(applicationInfo.getApplicationId(), serviceInfo)) {
- // A tenant host only runs the host-admin service, even though the model contains a bunch of
- // standard services like config-sentinel and metrics proxy.
- continue;
- }
-
ServiceInstance serviceInstance =
toServiceInstance(
applicationInfo.getApplicationId(),
@@ -78,9 +65,7 @@ public class ApplicationInstanceGenerator {
hostName,
serviceStatusProvider);
- if (!groupedServiceInstances.containsKey(serviceClusterKey)) {
- groupedServiceInstances.put(serviceClusterKey, new HashSet<>());
- }
+ groupedServiceInstances.putIfAbsent(serviceClusterKey, new HashSet<>());
groupedServiceInstances.get(serviceClusterKey).add(serviceInstance);
}
}
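The ApplicationInstanceGenerator change above replaces the containsKey/put pair with putIfAbsent when grouping service instances per cluster. A minimal sketch of the same grouping idiom with String placeholders (computeIfAbsent is shown as the equally idiomatic single-call alternative):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class GroupingDemo {
        public static void main(String[] args) {
            String[] services = {"clusterA:svc1", "clusterA:svc2", "clusterB:svc1"};

            // Same shape as the loop in the diff: ensure the set exists, then add to it.
            Map<String, Set<String>> grouped = new HashMap<>();
            for (String service : services) {
                String cluster = service.split(":")[0];
                grouped.putIfAbsent(cluster, new HashSet<>());
                grouped.get(cluster).add(service);
            }

            // Equivalent, but avoids the second map lookup.
            Map<String, Set<String>> grouped2 = new HashMap<>();
            for (String service : services) {
                grouped2.computeIfAbsent(service.split(":")[0], k -> new HashSet<>()).add(service);
            }

            System.out.println(grouped.equals(grouped2));  // prints true
        }
    }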
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/duper/TestZoneApplication.java b/service-monitor/src/test/java/com/yahoo/vespa/service/duper/TestZoneApplication.java
deleted file mode 100644
index 773643c1d09..00000000000
--- a/service-monitor/src/test/java/com/yahoo/vespa/service/duper/TestZoneApplication.java
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.service.duper;
-
-import com.yahoo.config.model.api.ApplicationInfo;
-import com.yahoo.config.model.api.HostInfo;
-import com.yahoo.config.provision.HostName;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-/**
- * @author hakonhall
- */
-public class TestZoneApplication {
-
- private final List<HostName> nodeAdminHostnames;
- private final List<HostName> routingHostnames;
-
- private TestZoneApplication(List<HostName> nodeAdminHostnames, List<HostName> routingHostnames) {
- this.nodeAdminHostnames = nodeAdminHostnames;
- this.routingHostnames = routingHostnames;
- }
-
- public ApplicationInfo makeApplicationInfo() {
- // Make a test ApplicationInfo by:
- // 1. Make an ApplicationInfo as-if the node-admin cluster of the zone application were the only cluster.
- // Make sure to get the correct tenant name, application name, cluster id, service type, hostnames,
- // services, and ports. This should be easy with the help of InfraApplication.
- ApplicationInfo nodeAdminPart = new NodeAdminPartOfZoneApplication().makeApplicationInfo(nodeAdminHostnames);
-
- // 2. Make an ApplicationInfo as-if the routing cluster of the zone application were the only cluster.
- // Don't care if the application is not perfect.
- ApplicationInfo routingPart = new RoutingPartOfZoneApplication().makeApplicationInfo(routingHostnames);
-
- // 3. Take HostInfo from (1) and (2) to make a single ApplicationInfo.
- List<HostInfo> allHostInfos = new ArrayList<>();
- allHostInfos.addAll(nodeAdminPart.getModel().getHosts());
- allHostInfos.addAll(routingPart.getModel().getHosts());
-
- return new ApplicationInfo(nodeAdminPart.getApplicationId(), 0, new HostsModel(allHostInfos));
- }
-
- public static class Builder {
- private List<HostName> nodeAdminHostnames = null;
- private List<HostName> routingHostnames = null;
-
- public Builder addNodeAdminCluster(String... hostnames) {
- this.nodeAdminHostnames = Stream.of(hostnames).map(HostName::from).collect(Collectors.toList());
- return this;
- }
-
- public Builder addRoutingCluster(String... hostnames) {
- this.routingHostnames = Stream.of(hostnames).map(HostName::from).collect(Collectors.toList());
- return this;
- }
-
- public TestZoneApplication build() {
- return new TestZoneApplication(Objects.requireNonNull(nodeAdminHostnames), Objects.requireNonNull(routingHostnames));
- }
- }
-
- private static class NodeAdminPartOfZoneApplication extends InfraApplication {
- public NodeAdminPartOfZoneApplication() {
- super(ZoneApplication.getApplicationName().value(),
- ZoneApplication.getNodeAdminNodeType(),
- ZoneApplication.getNodeAdminClusterSpecType(),
- ZoneApplication.getNodeAdminClusterSpecId(),
- ZoneApplication.getNodeAdminServiceType(),
- ZoneApplication.getNodeAdminHealthPort());
- }
- }
-
- /**
- * This InfraApplication is bogus (containing host admin instead of jdisc container), but the tests are
- * not supposed to explore this cluster.
- */
- private static class RoutingPartOfZoneApplication extends InfraApplication {
- public RoutingPartOfZoneApplication() {
- super(ZoneApplication.getApplicationName().value(),
- ZoneApplication.getRoutingNodeType(),
- ZoneApplication.getRoutingClusterSpecType(),
- ZoneApplication.getRoutingClusterSpecId(),
- ZoneApplication.getRoutingServiceType(),
- ZoneApplication.getRoutingHealthPort());
- }
- }
-}
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/health/HealthMonitorManagerTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/health/HealthMonitorManagerTest.java
index 89bcda05074..008a271f905 100644
--- a/service-monitor/src/test/java/com/yahoo/vespa/service/health/HealthMonitorManagerTest.java
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/health/HealthMonitorManagerTest.java
@@ -3,15 +3,12 @@ package com.yahoo.vespa.service.health;
import com.yahoo.config.model.api.ApplicationInfo;
import com.yahoo.config.provision.HostName;
-import com.yahoo.vespa.applicationmodel.ConfigId;
import com.yahoo.vespa.applicationmodel.ServiceStatus;
import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
import com.yahoo.vespa.service.duper.ControllerHostApplication;
import com.yahoo.vespa.service.duper.DuperModelManager;
import com.yahoo.vespa.service.duper.InfraApplication;
import com.yahoo.vespa.service.duper.ProxyHostApplication;
-import com.yahoo.vespa.service.duper.TestZoneApplication;
-import com.yahoo.vespa.service.duper.ZoneApplication;
import com.yahoo.vespa.service.monitor.ConfigserverUtil;
import org.junit.Before;
import org.junit.Test;
@@ -22,7 +19,6 @@ import java.util.stream.Stream;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@@ -49,62 +45,6 @@ public class HealthMonitorManagerTest {
}
@Test
- public void verifyZoneApplicationIsMonitored() {
- ApplicationInfo zoneApplicationInfo = new TestZoneApplication.Builder()
- .addNodeAdminCluster("h1", "h2")
- .addRoutingCluster("r1")
- .build()
- .makeApplicationInfo();
-
- verify(monitorFactory, times(0)).create(zoneApplicationInfo.getApplicationId());
- verify(monitor, times(0)).monitor(any());
- manager.applicationActivated(zoneApplicationInfo);
- verify(monitorFactory).create(zoneApplicationInfo.getApplicationId());
- verify(monitor).monitor(any());
-
- when(monitor.getStatus(any(), any(), any(), any())).thenReturn(new ServiceStatusInfo(ServiceStatus.DOWN));
- verifyNodeAdminGetStatus(0);
- assertEquals(ServiceStatus.DOWN, getNodeAdminStatus());
- verifyNodeAdminGetStatus(1);
-
- verifyRoutingGetStatus(0);
- assertEquals(ServiceStatus.NOT_CHECKED, getRoutingStatus());
- verifyRoutingGetStatus(0);
- }
-
- private void verifyNodeAdminGetStatus(int invocations) {
- verify(monitor, times(invocations)).getStatus(
- eq(ZoneApplication.getApplicationId()),
- eq(ZoneApplication.getNodeAdminClusterId()),
- any(),
- any());
- }
-
- private void verifyRoutingGetStatus(int invocations) {
- verify(monitor, times(invocations)).getStatus(
- eq(ZoneApplication.getApplicationId()),
- eq(ZoneApplication.getRoutingClusterId()),
- any(),
- any());
- }
-
- private ServiceStatus getNodeAdminStatus() {
- return manager.getStatus(
- ZoneApplication.getApplicationId(),
- ZoneApplication.getNodeAdminClusterId(),
- ZoneApplication.getNodeAdminServiceType(),
- new ConfigId("foo")).serviceStatus();
- }
-
- private ServiceStatus getRoutingStatus() {
- return manager.getStatus(
- ZoneApplication.getApplicationId(),
- ZoneApplication.getRoutingClusterId(),
- ZoneApplication.getRoutingServiceType(),
- new ConfigId("bar")).serviceStatus();
- }
-
- @Test
public void infrastructureApplication() {
ProxyHostApplication proxyHostApplication = new ProxyHostApplication();
when(duperModel.isSupportedInfraApplication(proxyHostApplication.getApplicationId())).thenReturn(true);
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/health/StateV1HealthModelTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/health/StateV1HealthModelTest.java
index 3fce1cca899..a7f632a2084 100644
--- a/service-monitor/src/test/java/com/yahoo/vespa/service/health/StateV1HealthModelTest.java
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/health/StateV1HealthModelTest.java
@@ -10,8 +10,6 @@ import com.yahoo.vespa.applicationmodel.ConfigId;
import com.yahoo.vespa.applicationmodel.ServiceStatus;
import com.yahoo.vespa.applicationmodel.ServiceType;
import com.yahoo.vespa.service.duper.ProxyHostApplication;
-import com.yahoo.vespa.service.duper.TestZoneApplication;
-import com.yahoo.vespa.service.duper.ZoneApplication;
import com.yahoo.vespa.service.executor.Cancellable;
import com.yahoo.vespa.service.executor.RunletExecutor;
import com.yahoo.vespa.service.monitor.ServiceId;
@@ -71,24 +69,6 @@ public class StateV1HealthModelTest {
}
@Test
- public void testMonitoringTenantHostHealth() {
- ApplicationInfo zoneApplicationInfo = new TestZoneApplication.Builder()
- .addNodeAdminCluster("h1")
- .addRoutingCluster("r1")
- .build()
- .makeApplicationInfo();
-
- Map<ServiceId, HealthEndpoint> endpoints = model.extractHealthEndpoints(zoneApplicationInfo);
- assertEquals(1, endpoints.size());
- HealthEndpoint endpoint = endpoints.values().iterator().next();
- assertEquals("http://h1:8080/state/v1/health", endpoint.description());
- ServiceId serviceId = endpoint.getServiceId();
- assertEquals(ZoneApplication.getApplicationId(), serviceId.getApplicationId());
- assertEquals(ZoneApplication.getNodeAdminClusterId(), serviceId.getClusterId());
- assertEquals(ZoneApplication.getNodeAdminServiceType(), serviceId.getServiceType());
- }
-
- @Test
public void caseInsensitiveTagMatching() {
PortInfo portInfo = mock(PortInfo.class);
when(portInfo.getTags()).thenReturn(List.of("http", "STATE", "foo"));
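The caseInsensitiveTagMatching test above stubs a port with the tags "http", "STATE" and "foo", so the tag check in StateV1HealthModel has to compare tags against HTTP_HEALTH_PORT_TAGS case-insensitively. The hunk does not show whether the real portTaggedWith requires all of the tags or just one of them; the sketch below assumes all, and the method is only a stand-in for the Vespa implementation:

    import java.util.List;

    class TagMatchDemo {
        static final List<String> HTTP_HEALTH_PORT_TAGS = List.of("HTTP", "STATE");

        // Assumed semantics: true if the port carries every required tag, ignoring case.
        static boolean portTaggedWith(List<String> portTags, List<String> requiredTags) {
            return requiredTags.stream().allMatch(
                    required -> portTags.stream().anyMatch(required::equalsIgnoreCase));
        }

        public static void main(String[] args) {
            System.out.println(portTaggedWith(List.of("http", "STATE", "foo"),
                                              HTTP_HEALTH_PORT_TAGS));  // prints true
        }
    }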
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/manager/UnionMonitorManagerTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/manager/UnionMonitorManagerTest.java
index 5cfe70fae5f..f6ef3977a56 100644
--- a/service-monitor/src/test/java/com/yahoo/vespa/service/manager/UnionMonitorManagerTest.java
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/manager/UnionMonitorManagerTest.java
@@ -4,7 +4,7 @@ package com.yahoo.vespa.service.manager;
import com.yahoo.vespa.applicationmodel.ConfigId;
import com.yahoo.vespa.applicationmodel.ServiceStatus;
import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
-import com.yahoo.vespa.service.duper.ZoneApplication;
+import com.yahoo.vespa.service.duper.ConfigServerHostApplication;
import com.yahoo.vespa.service.health.HealthMonitorManager;
import com.yahoo.vespa.service.slobrok.SlobrokMonitorManagerImpl;
import org.junit.Test;
@@ -18,6 +18,7 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class UnionMonitorManagerTest {
+ private final ConfigServerHostApplication application = new ConfigServerHostApplication();
private final SlobrokMonitorManagerImpl slobrokMonitorManager = mock(SlobrokMonitorManagerImpl.class);
private final HealthMonitorManager healthMonitorManager = mock(HealthMonitorManager.class);
@@ -38,9 +39,9 @@ public class UnionMonitorManagerTest {
when(healthMonitorManager.getStatus(any(), any(), any(), any())).thenReturn(new ServiceStatusInfo(healthStatus));
when(slobrokMonitorManager.getStatus(any(), any(), any(), any())).thenReturn(new ServiceStatusInfo(slobrokStatus));
ServiceStatus status = manager.getStatus(
- ZoneApplication.getApplicationId(),
- ZoneApplication.getNodeAdminClusterId(),
- ZoneApplication.getNodeAdminServiceType(), new ConfigId("config-id")).serviceStatus();
+ application.getApplicationId(),
+ application.getClusterId(),
+ application.getServiceType(), new ConfigId("config-id")).serviceStatus();
assertSame(expectedStatus, status);
}
}
\ No newline at end of file
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/model/ApplicationInstanceGeneratorTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/model/ApplicationInstanceGeneratorTest.java
index e182c9d6468..4810f29b28f 100644
--- a/service-monitor/src/test/java/com/yahoo/vespa/service/model/ApplicationInstanceGeneratorTest.java
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/model/ApplicationInstanceGeneratorTest.java
@@ -2,32 +2,19 @@
package com.yahoo.vespa.service.model;
import com.yahoo.config.model.api.ApplicationInfo;
-import com.yahoo.config.model.api.HostInfo;
-import com.yahoo.config.model.api.Model;
-import com.yahoo.config.model.api.ServiceInfo;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
-import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.applicationmodel.ApplicationInstance;
-import com.yahoo.vespa.applicationmodel.ClusterId;
-import com.yahoo.vespa.applicationmodel.ServiceCluster;
import com.yahoo.vespa.applicationmodel.ServiceStatus;
import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
import com.yahoo.vespa.service.duper.ConfigServerApplication;
-import com.yahoo.vespa.service.duper.ZoneApplication;
import com.yahoo.vespa.service.monitor.ServiceStatusProvider;
import org.junit.Test;
import java.util.List;
-import java.util.Map;
-import java.util.Set;
import java.util.stream.Collectors;
-import java.util.stream.Stream;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
@@ -37,10 +24,7 @@ public class ApplicationInstanceGeneratorTest {
private static final String configServer1 = "cfg1.yahoo.com";
private static final String configServer2 = "cfg2.yahoo.com";
private static final String configServer3 = "cfg3.yahoo.com";
- private static final List<String> configServerList = Stream.of(
- configServer1,
- configServer2,
- configServer3).collect(Collectors.toList());
+ private static final List<String> configServerList = List.of(configServer1, configServer2, configServer3);
private static final ConfigServerApplication configServerApplication = new ConfigServerApplication();
private final ServiceStatusProvider statusProvider = mock(ServiceStatusProvider.class);
@@ -84,63 +68,4 @@ public class ApplicationInstanceGeneratorTest {
.hostName()
.toString()));
}
-
- @Test
- public void verifyOnlyNodeAdminServiceIsLeft() {
- when(statusProvider.getStatus(any(), any(), any(), any())).thenReturn(new ServiceStatusInfo(ServiceStatus.NOT_CHECKED));
-
- String host1 = "host1";
- String host2 = "host2";
-
- List<ServiceInfo> serviceInfos1 = List.of(
- makeServiceInfo("metrics", "metricsproxy-container", host1)
- );
-
- List<ServiceInfo> serviceInfos2 = List.of(
- makeServiceInfo("metrics", "metricsproxy-container", host2),
- makeServiceInfo(ZoneApplication.getNodeAdminClusterId().s(),
- ZoneApplication.getNodeAdminServiceType().s(), host2)
- );
-
- List<HostInfo> hostInfos = List.of(
- new HostInfo(host1, serviceInfos1),
- new HostInfo(host2, serviceInfos2)
- );
-
- Model model = mock(Model.class);
- when(model.getHosts()).thenReturn(hostInfos);
-
- ApplicationInfo applicationInfo = new ApplicationInfo(ZoneApplication.getApplicationId(), 0, model);
-
- Zone zone = mock(Zone.class);
- when(zone.environment()).thenReturn(Environment.prod);
- when(zone.region()).thenReturn(RegionName.from("us-east-1"));
-
- ApplicationInstanceGenerator generator = new ApplicationInstanceGenerator(applicationInfo, zone);
- ApplicationInstance applicationInstance = generator.makeApplicationInstance(statusProvider);
-
- Map<ClusterId, List<ServiceCluster>> serviceClusters =
- applicationInstance.serviceClusters().stream().collect(Collectors.groupingBy(ServiceCluster::clusterId));
- assertEquals(2, serviceClusters.size());
- List<ServiceCluster> nodeAdminClusters = serviceClusters.get(ZoneApplication.getNodeAdminClusterId());
- assertNotNull(nodeAdminClusters);
- assertEquals(1, nodeAdminClusters.size());
- ServiceCluster nodeAdminCluster = nodeAdminClusters.iterator().next();
- assertEquals(1, nodeAdminCluster.serviceInstances().size());
- assertEquals(host2, nodeAdminCluster.serviceInstances().iterator().next().hostName().s());
-
- List<ServiceCluster> metricsClusters = serviceClusters.get(new ClusterId("metrics"));
- assertNotNull(metricsClusters);
- assertEquals(1, metricsClusters.size());
- ServiceCluster metricsCluster = metricsClusters.iterator().next();
-
- // The metrics service on the node admin host is ignored
- assertEquals(1, metricsCluster.serviceInstances().size());
- assertEquals(host1, metricsCluster.serviceInstances().iterator().next().hostName().s());
- }
-
- private ServiceInfo makeServiceInfo(String clusterId, String serviceType, String hostname) {
- var properties = Map.of(ApplicationInstanceGenerator.CLUSTER_ID_PROPERTY_NAME, clusterId);
- return new ServiceInfo("servicename", serviceType, List.of(), properties, "configid", hostname);
- }
}
\ No newline at end of file
diff --git a/staging_vespalib/src/vespa/vespalib/util/rusage.cpp b/staging_vespalib/src/vespa/vespalib/util/rusage.cpp
index 645be2937d6..62d0158f784 100644
--- a/staging_vespalib/src/vespa/vespalib/util/rusage.cpp
+++ b/staging_vespalib/src/vespa/vespalib/util/rusage.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "rusage.h"
#include <stdexcept>
+#include <cerrno>
#include <vespa/vespalib/util/stringfmt.h>
namespace vespalib {
diff --git a/storage/src/tests/CMakeLists.txt b/storage/src/tests/CMakeLists.txt
index 68ed987599a..53113ea0eb1 100644
--- a/storage/src/tests/CMakeLists.txt
+++ b/storage/src/tests/CMakeLists.txt
@@ -8,13 +8,9 @@ vespa_add_executable(storage_testrunner_app TEST
DEPENDS
storage_teststorageserver
storage_testvisiting
- storage_testbucketdb
storage_testcommon
storage_testhostreporter
storage_testdistributor
- storage_testpersistence
- storage_testfilestorage
- storage_teststatus
)
vespa_add_test(
diff --git a/storage/src/tests/bucketdb/CMakeLists.txt b/storage/src/tests/bucketdb/CMakeLists.txt
index 714faf34de5..2468e587aff 100644
--- a/storage/src/tests/bucketdb/CMakeLists.txt
+++ b/storage/src/tests/bucketdb/CMakeLists.txt
@@ -1,10 +1,10 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-# TODO: Remove test library when all tests have been migrated to gtest.
-vespa_add_library(storage_testbucketdb TEST
+vespa_add_executable(storage_bucketdb_gtest_runner_app TEST
SOURCES
bucketinfotest.cpp
bucketmanagertest.cpp
+ gtest_runner.cpp
initializertest.cpp
judyarraytest.cpp
judymultimaptest.cpp
@@ -12,14 +12,6 @@ vespa_add_library(storage_testbucketdb TEST
DEPENDS
storage
storage_testcommon
-)
-
-vespa_add_executable(storage_bucketdb_gtest_runner_app TEST
- SOURCES
- gtest_runner.cpp
- DEPENDS
- storage
- storage_testcommon
gtest
)
diff --git a/storage/src/tests/bucketdb/bucketinfotest.cpp b/storage/src/tests/bucketdb/bucketinfotest.cpp
index 0298c50866c..fe922b5d6bd 100644
--- a/storage/src/tests/bucketdb/bucketinfotest.cpp
+++ b/storage/src/tests/bucketdb/bucketinfotest.cpp
@@ -1,47 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <boost/assign.hpp>
-#include <boost/random.hpp>
-#include <cppunit/extensions/HelperMacros.h>
-#include <map>
-#include <vector>
#include <vespa/vespalib/text/stringtokenizer.h>
#include <vespa/storage/bucketdb/bucketinfo.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <vector>
+
+using namespace ::testing;
-namespace storage {
-
-namespace distributor {
-
-struct BucketInfoTest : public CppUnit::TestFixture {
- void testBucketInfoEntriesWithNewestTimestampsAreKept();
- void testOrder();
- void testHasInvalidCopy();
- void testAddNodeSetsTrustedWhenConsistent();
- void testTrustedResetWhenCopiesBecomeInconsistent();
- void testTrustedResetWhenTrustedCopiesGoOutOfSync();
- void testTrustedNotResetWhenNonTrustedCopiesStillOutOfSync();
- void add_nodes_can_immediately_update_trusted_flag();
- void add_nodes_can_defer_update_of_trusted_flag();
- void remove_node_can_immediately_update_trusted_flag();
- void remove_node_can_defer_update_of_trusted_flag();
-
- CPPUNIT_TEST_SUITE(BucketInfoTest);
- CPPUNIT_TEST(testBucketInfoEntriesWithNewestTimestampsAreKept);
- CPPUNIT_TEST(testOrder);
- CPPUNIT_TEST(testHasInvalidCopy);
- CPPUNIT_TEST(testAddNodeSetsTrustedWhenConsistent);
- CPPUNIT_TEST_IGNORED(testTrustedResetWhenCopiesBecomeInconsistent);
- CPPUNIT_TEST(testTrustedResetWhenTrustedCopiesGoOutOfSync);
- CPPUNIT_TEST(testTrustedNotResetWhenNonTrustedCopiesStillOutOfSync);
- CPPUNIT_TEST(add_nodes_can_immediately_update_trusted_flag);
- CPPUNIT_TEST(add_nodes_can_defer_update_of_trusted_flag);
- CPPUNIT_TEST(remove_node_can_immediately_update_trusted_flag);
- CPPUNIT_TEST(remove_node_can_defer_update_of_trusted_flag);
- CPPUNIT_TEST_SUITE_END();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(BucketInfoTest);
+namespace storage::distributor {
BucketInfo
getBucketInfo(std::string nodeList, std::string order) {
@@ -83,68 +49,55 @@ nodeList(const BucketInfo& info) {
// in the meantime from having their updates lost when we perform a batch
// insert. This also applies for when we postpone db updates in persistence
// message tracker until we've received a reply from all copies.
-void
-BucketInfoTest::testBucketInfoEntriesWithNewestTimestampsAreKept()
-{
+TEST(BucketInfoTest, bucket_info_entries_with_newest_timestamps_are_kept) {
BucketInfo bi;
std::vector<uint16_t> idealState;
idealState.push_back(0);
bi.addNode(BucketCopy(5, 0, api::BucketInfo(1,1,1)), idealState);
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(1,1,1),
- bi.getNode(0)->getBucketInfo());
+ EXPECT_EQ(api::BucketInfo(1,1,1), bi.getNode(0)->getBucketInfo());
bi.addNode(BucketCopy(5, 0, api::BucketInfo(2,2,2)), idealState);
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(1,1,1),
- bi.getNode(0)->getBucketInfo());
+ EXPECT_EQ(api::BucketInfo(1,1,1), bi.getNode(0)->getBucketInfo());
bi.addNode(BucketCopy(4, 0, api::BucketInfo(3,3,3)), idealState);
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(1,1,1),
- bi.getNode(0)->getBucketInfo());
+ EXPECT_EQ(api::BucketInfo(1,1,1), bi.getNode(0)->getBucketInfo());
bi.addNode(BucketCopy(7, 0, api::BucketInfo(4,4,4)), idealState);
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(4,4,4),
- bi.getNode(0)->getBucketInfo());
+ EXPECT_EQ(api::BucketInfo(4,4,4), bi.getNode(0)->getBucketInfo());
bi.addNode(BucketCopy(2, 1, api::BucketInfo(4,4,4)), idealState);
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(4,4,4),
- bi.getNode(1)->getBucketInfo());
+ EXPECT_EQ(api::BucketInfo(4,4,4), bi.getNode(1)->getBucketInfo());
}
-void
-BucketInfoTest::testOrder() {
-
- CPPUNIT_ASSERT_EQUAL(std::string("2,0,1"), nodeList(getBucketInfo("0,1,2", "2,0,1")));
- CPPUNIT_ASSERT_EQUAL(std::string("2,0,1"), nodeList(getBucketInfo("1,0,2", "2,0,1")));
- CPPUNIT_ASSERT_EQUAL(std::string("1,0,2"), nodeList(getBucketInfo("1,2,0", "1")));
- CPPUNIT_ASSERT_EQUAL(std::string("2,1,0,3,4"), nodeList(getBucketInfo("0,1,2,3,4", "2,1")));
+TEST(BucketInfoTest, node_ordering_is_preserved) {
+ EXPECT_EQ("2,0,1", nodeList(getBucketInfo("0,1,2", "2,0,1")));
+ EXPECT_EQ("2,0,1", nodeList(getBucketInfo("1,0,2", "2,0,1")));
+ EXPECT_EQ("1,0,2", nodeList(getBucketInfo("1,2,0", "1")));
+ EXPECT_EQ("2,1,0,3,4", nodeList(getBucketInfo("0,1,2,3,4", "2,1")));
}
-void
-BucketInfoTest::testHasInvalidCopy()
-{
+TEST(BucketInfoTest, can_query_for_replica_with_invalid_info) {
std::vector<uint16_t> order;
BucketInfo info;
info.addNode(BucketCopy(0, 0, api::BucketInfo(10, 100, 1000)), order);
info.addNode(BucketCopy(0, 1, api::BucketInfo(10, 100, 1000)), order);
- CPPUNIT_ASSERT(!info.hasInvalidCopy());
+ EXPECT_FALSE(info.hasInvalidCopy());
info.addNode(BucketCopy(0, 2, api::BucketInfo()), order);
- CPPUNIT_ASSERT(info.hasInvalidCopy());
+ EXPECT_TRUE(info.hasInvalidCopy());
}
-void
-BucketInfoTest::testAddNodeSetsTrustedWhenConsistent()
-{
+TEST(BucketInfoTest, add_node_sets_trusted_when_consistent) {
std::vector<uint16_t> order;
{
BucketInfo info;
info.addNode(BucketCopy(0, 0, api::BucketInfo(0x1, 2, 144)).setTrusted(), order);
info.addNode(BucketCopy(0, 1, api::BucketInfo(0x1, 2, 144)), order);
- CPPUNIT_ASSERT(info.getNode(1)->trusted());
+ EXPECT_TRUE(info.getNode(1)->trusted());
}
{
@@ -155,91 +108,78 @@ BucketInfoTest::testAddNodeSetsTrustedWhenConsistent()
BucketCopy copy(1, 1, api::BucketInfo(0x1, 1, 2));
info.updateNode(copy);
- CPPUNIT_ASSERT(info.getNode(1)->trusted());
- CPPUNIT_ASSERT(!info.getNode(2)->trusted());
+ EXPECT_TRUE(info.getNode(1)->trusted());
+ EXPECT_FALSE(info.getNode(2)->trusted());
}
}
-void
-BucketInfoTest::testTrustedResetWhenCopiesBecomeInconsistent()
-{
- CPPUNIT_FAIL("TODO: test this!");
-}
-
-void
-BucketInfoTest::testTrustedResetWhenTrustedCopiesGoOutOfSync()
-{
+TEST(BucketInfoTest, trusted_reset_when_trusted_copies_go_out_of_sync) {
std::vector<uint16_t> order;
BucketInfo info;
info.addNode(BucketCopy(0, 0, api::BucketInfo(10, 100, 1000)).setTrusted(), order);
info.addNode(BucketCopy(0, 1, api::BucketInfo(10, 100, 1000)), order);
- CPPUNIT_ASSERT(info.getNode(0)->trusted());
- CPPUNIT_ASSERT(info.getNode(1)->trusted());
+ EXPECT_TRUE(info.getNode(0)->trusted());
+ EXPECT_TRUE(info.getNode(1)->trusted());
info.updateNode(BucketCopy(0, 1, api::BucketInfo(20, 200, 2000)).setTrusted());
- CPPUNIT_ASSERT(!info.getNode(0)->trusted());
- CPPUNIT_ASSERT(!info.getNode(1)->trusted());
+ EXPECT_FALSE(info.getNode(0)->trusted());
+ EXPECT_FALSE(info.getNode(1)->trusted());
}
-void
-BucketInfoTest::testTrustedNotResetWhenNonTrustedCopiesStillOutOfSync()
-{
+TEST(BucketInfoTest, trusted_not_reset_when_non_trusted_copies_still_out_of_sync) {
std::vector<uint16_t> order;
BucketInfo info;
info.addNode(BucketCopy(0, 0, api::BucketInfo(10, 100, 1000)).setTrusted(), order);
info.addNode(BucketCopy(0, 1, api::BucketInfo(20, 200, 2000)), order);
info.addNode(BucketCopy(0, 2, api::BucketInfo(30, 300, 3000)), order);
- CPPUNIT_ASSERT(info.getNode(0)->trusted());
- CPPUNIT_ASSERT(!info.getNode(1)->trusted());
- CPPUNIT_ASSERT(!info.getNode(2)->trusted());
+ EXPECT_TRUE(info.getNode(0)->trusted());
+ EXPECT_FALSE(info.getNode(1)->trusted());
+ EXPECT_FALSE(info.getNode(2)->trusted());
info.updateNode(BucketCopy(0, 1, api::BucketInfo(21, 201, 2001)));
- CPPUNIT_ASSERT(info.getNode(0)->trusted());
- CPPUNIT_ASSERT(!info.getNode(1)->trusted());
- CPPUNIT_ASSERT(!info.getNode(2)->trusted());
+ EXPECT_TRUE(info.getNode(0)->trusted());
+ EXPECT_FALSE(info.getNode(1)->trusted());
+ EXPECT_FALSE(info.getNode(2)->trusted());
}
-void BucketInfoTest::add_nodes_can_immediately_update_trusted_flag() {
+TEST(BucketInfoTest, add_nodes_can_immediately_update_trusted_flag) {
BucketInfo info;
std::vector<uint16_t> order;
info.addNodes({BucketCopy(0, 0, api::BucketInfo(10, 100, 1000))}, order, TrustedUpdate::UPDATE);
// Only one replica, so implicitly trusted iff trusted flag update is invoked.
- CPPUNIT_ASSERT(info.getNode(0)->trusted());
+ EXPECT_TRUE(info.getNode(0)->trusted());
}
-void BucketInfoTest::add_nodes_can_defer_update_of_trusted_flag() {
+TEST(BucketInfoTest, add_nodes_can_defer_update_of_trusted_flag) {
BucketInfo info;
std::vector<uint16_t> order;
info.addNodes({BucketCopy(0, 0, api::BucketInfo(10, 100, 1000))}, order, TrustedUpdate::DEFER);
- CPPUNIT_ASSERT(!info.getNode(0)->trusted());
+ EXPECT_FALSE(info.getNode(0)->trusted());
}
-void BucketInfoTest::remove_node_can_immediately_update_trusted_flag() {
+TEST(BucketInfoTest, remove_node_can_immediately_update_trusted_flag) {
BucketInfo info;
std::vector<uint16_t> order;
info.addNodes({BucketCopy(0, 0, api::BucketInfo(10, 100, 1000)),
BucketCopy(0, 1, api::BucketInfo(20, 200, 2000))},
order, TrustedUpdate::UPDATE);
- CPPUNIT_ASSERT(!info.getNode(0)->trusted());
+ EXPECT_FALSE(info.getNode(0)->trusted());
info.removeNode(1, TrustedUpdate::UPDATE);
// Only one replica remaining after remove, so implicitly trusted iff trusted flag update is invoked.
- CPPUNIT_ASSERT(info.getNode(0)->trusted());
+ EXPECT_TRUE(info.getNode(0)->trusted());
}
-void BucketInfoTest::remove_node_can_defer_update_of_trusted_flag() {
+TEST(BucketInfoTest, remove_node_can_defer_update_of_trusted_flag) {
BucketInfo info;
std::vector<uint16_t> order;
info.addNodes({BucketCopy(0, 0, api::BucketInfo(10, 100, 1000)),
BucketCopy(0, 1, api::BucketInfo(20, 200, 2000))},
order, TrustedUpdate::UPDATE);
info.removeNode(1, TrustedUpdate::DEFER);
- CPPUNIT_ASSERT(!info.getNode(0)->trusted());
-}
-
+ EXPECT_FALSE(info.getNode(0)->trusted());
}
-} // storage
-
+} // storage::distributor
diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp
index 09fe310e97e..1f72347b7ed 100644
--- a/storage/src/tests/bucketdb/bucketmanagertest.cpp
+++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp
@@ -1,12 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/config/helper/configgetter.h>
-#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/config/helper/configgetter.hpp>
#include <vespa/document/config/config-documenttypes.h>
#include <vespa/document/datatype/documenttype.h>
#include <vespa/document/fieldvalue/document.h>
#include <vespa/document/update/documentupdate.h>
#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/document/test/make_bucket_space.h>
#include <vespa/storage/bucketdb/bucketmanager.h>
#include <vespa/storage/common/global_bucket_space_distribution_converter.h>
#include <vespa/storage/persistence/filestorage/filestormanager.h>
@@ -16,13 +17,10 @@
#include <tests/common/teststorageapp.h>
#include <tests/common/dummystoragelink.h>
#include <tests/common/testhelper.h>
-#include <vespa/document/test/make_document_bucket.h>
-#include <vespa/document/test/make_bucket_space.h>
#include <vespa/vdslib/state/random.h>
#include <vespa/vespalib/io/fileutil.h>
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/stringfmt.h>
-#include <vespa/config/helper/configgetter.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <future>
#include <vespa/log/log.h>
@@ -35,6 +33,7 @@ using document::DocumentType;
using document::DocumentTypeRepo;
using document::test::makeDocumentBucket;
using document::test::makeBucketSpace;
+using namespace ::testing;
namespace storage {
@@ -57,41 +56,8 @@ std::ostream& operator<<(std::ostream& out, const TestBucketInfo& info) {
class ConcurrentOperationFixture;
struct TestParams;
-struct BucketManagerTest : public CppUnit::TestFixture {
+struct BucketManagerTest : public Test {
public:
- CPPUNIT_TEST_SUITE(BucketManagerTest);
- CPPUNIT_TEST(testRequestBucketInfoWithList);
- CPPUNIT_TEST(testDistributionBitGenerationEmpty);
- CPPUNIT_TEST(testDistributionBitChangeOnCreateBucket);
- CPPUNIT_TEST(testMinUsedBitsFromComponentIsHonored);
- CPPUNIT_TEST(testRemoveLastModifiedOK);
- CPPUNIT_TEST(testRemoveLastModifiedFailed);
- CPPUNIT_TEST(testSwallowNotifyBucketChangeReply);
- CPPUNIT_TEST(testMetricsGeneration);
- CPPUNIT_TEST(metrics_are_tracked_per_bucket_space);
- CPPUNIT_TEST(testSplitReplyOrderedAfterBucketReply);
- CPPUNIT_TEST(testJoinReplyOrderedAfterBucketReply);
- CPPUNIT_TEST(testDeleteReplyOrderedAfterBucketReply);
- CPPUNIT_TEST(testOnlyEnqueueWhenProcessingRequest);
- CPPUNIT_TEST(testOrderRepliesAfterBucketSpecificRequest);
- CPPUNIT_TEST(testQueuedRepliesOnlyDispatchedWhenAllProcessingDone);
- CPPUNIT_TEST(testMutationRepliesForSplitBucketAreEnqueued);
- CPPUNIT_TEST(testMutationRepliesForDeletedBucketAreEnqueued);
- CPPUNIT_TEST(testMutationRepliesForJoinedBucketAreEnqueued);
- CPPUNIT_TEST(testConflictingPutRepliesAreEnqueued);
- CPPUNIT_TEST(testConflictingUpdateRepliesAreEnqueued);
- CPPUNIT_TEST(testRemappedMutationIsCheckedAgainstOriginalBucket);
- CPPUNIT_TEST(testBucketConflictSetIsClearedBetweenBlockingRequests);
- CPPUNIT_TEST(testConflictSetOnlyClearedAfterAllBucketRequestsDone);
- CPPUNIT_TEST(testRejectRequestWithMismatchingDistributionHash);
- CPPUNIT_TEST(testDbNotIteratedWhenAllRequestsRejected);
- CPPUNIT_TEST(fall_back_to_legacy_global_distribution_hash_on_mismatch);
-
- // FIXME(vekterli): test is not deterministic and enjoys failing
- // sporadically when running under Valgrind. See bug 5932891.
- CPPUNIT_TEST_IGNORED(testRequestBucketInfoWithState);
- CPPUNIT_TEST_SUITE_END();
-
std::unique_ptr<TestServiceLayerApp> _node;
std::unique_ptr<DummyStorageLink> _top;
BucketManager *_manager;
@@ -101,12 +67,13 @@ public:
uint32_t _emptyBuckets;
document::Document::SP _document;
+ ~BucketManagerTest();
+
void setupTestEnvironment(bool fakePersistenceLayer = true,
bool noDelete = false);
void addBucketsToDB(uint32_t count);
bool wasBlockedDueToLastModified(api::StorageMessage* msg,
uint64_t lastModified);
- bool wasBlockedDueToLastModified(api::StorageMessage::SP msg);
void insertSingleBucket(const document::BucketId& bucket,
const api::BucketInfo& info);
void waitUntilRequestsAreProcessing(size_t nRequests = 1);
@@ -127,53 +94,30 @@ public:
void assertRequestWithBadHashIsRejected(
ConcurrentOperationFixture& fixture);
+protected:
+ void update_min_used_bits() {
+ _manager->updateMinUsedBits();
+ }
+ void trigger_metric_manager_update() {
+ vespalib::Monitor l;
+ _manager->updateMetrics(BucketManager::MetricLockGuard(l));
+ }
- void testRequestBucketInfoWithState();
- void testRequestBucketInfoWithList();
- void testDistributionBitGenerationEmpty();
- void testDistributionBitChangeOnCreateBucket();
- void testMinUsedBitsFromComponentIsHonored();
-
- void testRemoveLastModifiedOK();
- void testRemoveLastModifiedFailed();
-
- void testSwallowNotifyBucketChangeReply();
- void testMetricsGeneration();
- void metrics_are_tracked_per_bucket_space();
- void testSplitReplyOrderedAfterBucketReply();
- void testJoinReplyOrderedAfterBucketReply();
- void testDeleteReplyOrderedAfterBucketReply();
- void testOnlyEnqueueWhenProcessingRequest();
- void testOrderRepliesAfterBucketSpecificRequest();
- void testQueuedRepliesOnlyDispatchedWhenAllProcessingDone();
- void testMutationRepliesForSplitBucketAreEnqueued();
- void testMutationRepliesForDeletedBucketAreEnqueued();
- void testMutationRepliesForJoinedBucketAreEnqueued();
- void testConflictingPutRepliesAreEnqueued();
- void testConflictingUpdateRepliesAreEnqueued();
- void testRemappedMutationIsCheckedAgainstOriginalBucket();
- void testBucketConflictSetIsClearedBetweenBlockingRequests();
- void testConflictSetOnlyClearedAfterAllBucketRequestsDone();
- void testRejectRequestWithMismatchingDistributionHash();
- void testDbNotIteratedWhenAllRequestsRejected();
- void fall_back_to_legacy_global_distribution_hash_on_mismatch();
+ const BucketManagerMetrics& bucket_manager_metrics() const {
+ return *_manager->_metrics;
+ }
public:
- static constexpr uint32_t DIR_SPREAD = 3;
static constexpr uint32_t MESSAGE_WAIT_TIME = 60*2;
-
- void setUp() override {
+ void SetUp() override {
_emptyBuckets = 0;
}
- void tearDown() override {
- }
-
friend class ConcurrentOperationFixture;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(BucketManagerTest);
+BucketManagerTest::~BucketManagerTest() = default;
#define ASSERT_DUMMYLINK_REPLY_COUNT(link, count) \
if (link->getNumReplies() != count) { \
@@ -183,7 +127,7 @@ CPPUNIT_TEST_SUITE_REGISTRATION(BucketManagerTest);
for (uint32_t i=0; i<link->getNumReplies(); ++i) { \
ost << link->getReply(i)->getType() << "\n"; \
} \
- CPPUNIT_FAIL(ost.str()); \
+ FAIL() << ost.str(); \
}
std::string getMkDirDisk(const std::string & rootFolder, int disk) {
@@ -203,34 +147,34 @@ void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
assert(system(getMkDirDisk(rootFolder, 0).c_str()) == 0);
assert(system(getMkDirDisk(rootFolder, 1).c_str()) == 0);
- std::shared_ptr<const DocumentTypeRepo> repo(new DocumentTypeRepo(
+ auto repo = std::make_shared<const DocumentTypeRepo>(
*ConfigGetter<DocumenttypesConfig>::getConfig(
- "config-doctypes", FileSpec(TEST_PATH("config-doctypes.cfg")))));
- _top.reset(new DummyStorageLink);
- _node.reset(new TestServiceLayerApp(
- DiskCount(2), NodeIndex(0), config.getConfigId()));
+ "config-doctypes", FileSpec("../config-doctypes.cfg")));
+ _top = std::make_unique<DummyStorageLink>();
+ _node = std::make_unique<TestServiceLayerApp>(
+ DiskCount(2), NodeIndex(0), config.getConfigId());
_node->setTypeRepo(repo);
_node->setupDummyPersistence();
- // Set up the 3 links
- StorageLink::UP manager(new BucketManager("", _node->getComponentRegister()));
- _manager = (BucketManager*) manager.get();
+ // Set up the 3 links
+ auto manager = std::make_unique<BucketManager>("", _node->getComponentRegister());
+ _manager = manager.get();
_top->push_back(std::move(manager));
if (fakePersistenceLayer) {
- StorageLink::UP bottom(new DummyStorageLink);
- _bottom = (DummyStorageLink*) bottom.get();
+ auto bottom = std::make_unique<DummyStorageLink>();
+ _bottom = bottom.get();
_top->push_back(std::move(bottom));
} else {
- StorageLink::UP bottom(new FileStorManager(
+ auto bottom = std::make_unique<FileStorManager>(
config.getConfigId(), _node->getPartitions(),
- _node->getPersistenceProvider(), _node->getComponentRegister()));
- _filestorManager = (FileStorManager*) bottom.get();
+ _node->getPersistenceProvider(), _node->getComponentRegister());
+ _filestorManager = bottom.get();
_top->push_back(std::move(bottom));
}
- // Generate a doc to use for testing..
+    // Generate a doc to use for testing.
const DocumentType &type(*_node->getTypeRepo()
->getDocumentType("text/html"));
- _document.reset(new document::Document(type, document::DocumentId(
- document::DocIdString("test", "ntnu"))));
+ _document = std::make_shared<document::Document>(
+ type, document::DocumentId(document::DocIdString("test", "ntnu")));
}
void BucketManagerTest::addBucketsToDB(uint32_t count)
@@ -241,7 +185,7 @@ void BucketManagerTest::addBucketsToDB(uint32_t count)
while (_bucketInfo.size() < count) {
document::BucketId id(16, randomizer.nextUint32());
id = id.stripUnused();
- if (_bucketInfo.size() == 0) {
+ if (_bucketInfo.empty()) {
id = _node->getBucketIdFactory().getBucketId(
_document->getId()).stripUnused();
}
@@ -261,15 +205,13 @@ void BucketManagerTest::addBucketsToDB(uint32_t count)
info.count = 0;
info.crc = 0;
++_emptyBuckets;
- for (std::map<document::BucketId, TestBucketInfo>::iterator it
- = _bucketInfo.begin(); it != _bucketInfo.end(); ++it)
- {
+ for (const auto& bi : _bucketInfo) {
bucketdb::StorageBucketInfo entry;
- entry.disk = it->second.partition;
- entry.setBucketInfo(api::BucketInfo(it->second.crc,
- it->second.count,
- it->second.size));
- _node->getStorageBucketDatabase().insert(it->first, entry, "foo");
+ entry.disk = bi.second.partition;
+ entry.setBucketInfo(api::BucketInfo(bi.second.crc,
+ bi.second.count,
+ bi.second.size));
+ _node->getStorageBucketDatabase().insert(bi.first, entry, "foo");
}
}
@@ -293,27 +235,25 @@ BucketManagerTest::wasBlockedDueToLastModified(api::StorageMessage* msg,
_top->sendDown(api::StorageMessage::SP(msg));
if (_top->getNumReplies() == 1) {
- CPPUNIT_ASSERT_EQUAL(0, (int)_bottom->getNumCommands());
- CPPUNIT_ASSERT(!static_cast<api::StorageReply&>(
- *_top->getReply(0)).getResult().success());
+ assert(_bottom->getNumCommands() == 0);
+ assert(!dynamic_cast<api::StorageReply&>(*_top->getReply(0)).getResult().success());
return true;
} else {
- CPPUNIT_ASSERT_EQUAL(0, (int)_top->getNumReplies());
+ assert(_top->getNumReplies() == 0);
// Check that bucket database now has the operation's timestamp as last modified.
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(id, "foo"));
- CPPUNIT_ASSERT_EQUAL(lastModified, entry->info.getLastModified());
+ assert(entry->info.getLastModified() == lastModified);
}
return false;
}
}
-void BucketManagerTest::testRemoveLastModifiedOK()
-{
- CPPUNIT_ASSERT(!wasBlockedDueToLastModified(
+TEST_F(BucketManagerTest, remove_last_modified_ok) {
+ EXPECT_FALSE(wasBlockedDueToLastModified(
new api::RemoveCommand(makeDocumentBucket(document::BucketId(16, 1)),
document::DocumentId("userdoc:m:1:foo"),
api::Timestamp(1235)),
@@ -321,45 +261,37 @@ void BucketManagerTest::testRemoveLastModifiedOK()
}
-void BucketManagerTest::testRemoveLastModifiedFailed()
-{
- CPPUNIT_ASSERT(wasBlockedDueToLastModified(
+TEST_F(BucketManagerTest, remove_last_modified_failed) {
+ EXPECT_TRUE(wasBlockedDueToLastModified(
new api::RemoveCommand(makeDocumentBucket(document::BucketId(16, 1)),
document::DocumentId("userdoc:m:1:foo"),
api::Timestamp(1233)),
1233));
}
-void BucketManagerTest::testDistributionBitGenerationEmpty()
-{
- TestName("BucketManagerTest::testDistributionBitGenerationEmpty()");
+TEST_F(BucketManagerTest, distribution_bit_generation_empty) {
setupTestEnvironment();
_manager->doneInit();
- vespalib::Monitor l;
- _manager->updateMetrics(BucketManager::MetricLockGuard(l));
- CPPUNIT_ASSERT_EQUAL(58u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+ trigger_metric_manager_update();
+ EXPECT_EQ(58u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
}
-void BucketManagerTest::testDistributionBitChangeOnCreateBucket()
-{
- TestName("BucketManagerTest::testDistributionBitChangeOnCreateBucket()");
+TEST_F(BucketManagerTest, distribution_bit_change_on_create_bucket) {
setupTestEnvironment();
addBucketsToDB(30);
_top->open();
_node->getDoneInitializeHandler().notifyDoneInitializing();
_manager->doneInit();
- _manager->updateMinUsedBits();
- CPPUNIT_ASSERT_EQUAL(16u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+ update_min_used_bits();
+ EXPECT_EQ(16u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
std::shared_ptr<api::CreateBucketCommand> cmd(
new api::CreateBucketCommand(makeDocumentBucket(document::BucketId(4, 5678))));
_top->sendDown(cmd);
- CPPUNIT_ASSERT_EQUAL(4u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+ EXPECT_EQ(4u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
}
-void BucketManagerTest::testMinUsedBitsFromComponentIsHonored()
-{
- TestName("BucketManagerTest::testMinUsedBitsFromComponentIsHonored()");
+TEST_F(BucketManagerTest, min_used_bits_from_component_is_honored) {
setupTestEnvironment();
// Let these differ in order to test state update behavior.
_node->getComponentRegister().getMinUsedBitsTracker().setMinUsedBits(10);
@@ -377,40 +309,21 @@ void BucketManagerTest::testMinUsedBitsFromComponentIsHonored()
std::shared_ptr<api::CreateBucketCommand> cmd(
new api::CreateBucketCommand(makeDocumentBucket(document::BucketId(12, 5678))));
_top->sendDown(cmd);
- CPPUNIT_ASSERT_EQUAL(13u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
+ EXPECT_EQ(13u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
}
-void BucketManagerTest::testRequestBucketInfoWithState()
-{
- TestName("BucketManagerTest::testRequestBucketInfoWithState()");
- // Test prior to building bucket cache
+// FIXME: non-deterministic test
+TEST_F(BucketManagerTest, DISABLED_request_bucket_info_with_state) {
+ // Test prior to building bucket cache
setupTestEnvironment();
addBucketsToDB(30);
- /* Currently this is just queued up
- {
- std::shared_ptr<api::RequestBucketInfoCommand> cmd(
- new api::RequestBucketInfoCommand(
- 0, lib::ClusterState("distributor:3 .2.s:d storage:1")));
- _top->sendDown(cmd);
- _top->waitForMessages(1, 5);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, _top->getNumReplies());
- std::shared_ptr<api::RequestBucketInfoReply> reply(
- std::dynamic_pointer_cast<api::RequestBucketInfoReply>(
- _top->getReply(0)));
- _top->reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::NOT_READY),
- reply->getResult());
- } */
+
std::vector<lib::ClusterState> states;
- states.push_back(lib::ClusterState("version:0"));
- states.push_back(lib::ClusterState("version:1 distributor:1 storage:1"));
- states.push_back(lib::ClusterState(
- "version:2 distributor:3 .1.s:i .2.s:d storage:4"));
- states.push_back(lib::ClusterState(
- "version:3 distributor:3 .1.s:i .2.s:d storage:4 .3.s:d"));
- states.push_back(lib::ClusterState(
- "version:4 distributor:3 .1.s:i .2.s:d storage:4"));
+ states.emplace_back("version:0");
+ states.emplace_back("version:1 distributor:1 storage:1");
+ states.emplace_back("version:2 distributor:3 .1.s:i .2.s:d storage:4");
+ states.emplace_back("version:3 distributor:3 .1.s:i .2.s:d storage:4 .3.s:d");
+ states.emplace_back("version:4 distributor:3 .1.s:i .2.s:d storage:4");
_node->setClusterState(states.back());
for (uint32_t i=0; i<states.size(); ++i) {
@@ -419,11 +332,11 @@ void BucketManagerTest::testRequestBucketInfoWithState()
_manager->onDown(cmd);
}
- // Send a request bucket info command that will be outdated and failed.
+ // Send a request bucket info command that will be outdated and failed.
std::shared_ptr<api::RequestBucketInfoCommand> cmd1(
new api::RequestBucketInfoCommand(makeBucketSpace(), 0, states[1]));
- // Send two request bucket info commands that will be processed together
- // when the bucket manager is idle, as states are equivalent
+ // Send two request bucket info commands that will be processed together
+ // when the bucket manager is idle, as states are equivalent
std::shared_ptr<api::RequestBucketInfoCommand> cmd2(
new api::RequestBucketInfoCommand(makeBucketSpace(), 0, states[2]));
std::shared_ptr<api::RequestBucketInfoCommand> cmd3(
@@ -457,104 +370,29 @@ void BucketManagerTest::testRequestBucketInfoWithState()
std::shared_ptr<api::RequestBucketInfoReply> reply3(
replies[cmd3->getMsgId()]);
_top->reset();
- CPPUNIT_ASSERT(reply1.get());
- CPPUNIT_ASSERT(reply2.get());
- CPPUNIT_ASSERT(reply3.get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::REJECTED,
+ ASSERT_TRUE(reply1.get());
+ ASSERT_TRUE(reply2.get());
+ ASSERT_TRUE(reply3.get());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::REJECTED,
"Ignoring bucket info request for cluster state version 1 as "
"versions from version 2 differs from this state."),
reply1->getResult());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::REJECTED,
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::REJECTED,
"There is already a newer bucket info request for "
"this node from distributor 0"),
reply2->getResult());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK),
reply3->getResult());
api::RequestBucketInfoReply::Entry entry;
- CPPUNIT_ASSERT_EQUAL((size_t) 18, reply3->getBucketInfo().size());
+ ASSERT_EQ(18u, reply3->getBucketInfo().size());
entry = api::RequestBucketInfoReply::Entry(
document::BucketId(16, 0xe8c8), api::BucketInfo(0x79d04f78, 11153, 1851385240u));
- CPPUNIT_ASSERT_EQUAL(entry, reply3->getBucketInfo()[0]);
+ EXPECT_EQ(entry, reply3->getBucketInfo()[0]);
}
}
-namespace {
- struct PopenWrapper {
- FILE* _file;
- std::vector<char> _buffer;
- uint32_t _index;
- uint32_t _size;
- bool _eof;
-
- PopenWrapper(const std::string& cmd)
- : _buffer(65536, '\0'), _index(0), _size(0), _eof(false)
- {
- _file = popen(cmd.c_str(), "r");
- if (_file == 0) {
- throw vespalib::Exception("Failed to run '" + cmd
- + "' in popen: " + strerror(errno), VESPA_STRLOC);
- }
- }
-
- const char* getNextLine() {
- if (_eof && _size == 0) return 0;
- // Check if we have a newline waiting
- char* newline = strchr(&_buffer[_index], '\n');
- // If not try to get one
- if (_eof) {
- newline = &_buffer[_index + _size];
- } else if (newline == 0) {
- // If we index is passed half the buffer, reposition
- if (_index > _buffer.size() / 2) {
- memcpy(&_buffer[0], &_buffer[_index], _size);
- _index = 0;
- }
- // Verify we have space to write to
- if (_index + _size >= _buffer.size()) {
- throw vespalib::Exception("No newline could be find in "
- "half the buffer size. Wrapper not designed to "
- "handle that long lines (1)", VESPA_STRLOC);
- }
- // Fill up buffer
- size_t bytesRead = fread(&_buffer[_index + _size],
- 1, _buffer.size() - _index - _size - 1,
- _file);
- if (bytesRead == 0) {
- if (!feof(_file)) {
- throw vespalib::Exception("Failed to run fgets: "
- + std::string(strerror(errno)), VESPA_STRLOC);
- } else {
- _eof = true;
- }
- } else {
- _size += bytesRead;
- }
- newline = strchr(&_buffer[_index], '\n');
- if (newline == 0) {
- if (_eof) {
- if (_size == 0) return 0;
- } else {
- throw vespalib::Exception("No newline could be find in "
- "half the buffer size. Wrapper not designed to "
- "handle that long lines (2)", VESPA_STRLOC);
- }
- }
- }
- *newline = '\0';
- ++newline;
- const char* line = &_buffer[_index];
- uint32_t strlen = (newline - line);
- _index += strlen;
- _size -= strlen;
- return line;
- }
- };
-}
-
-void BucketManagerTest::testRequestBucketInfoWithList()
-{
- TestName("BucketManagerTest::testRequestBucketInfoWithList()");
+TEST_F(BucketManagerTest, request_bucket_info_with_list) {
setupTestEnvironment();
addBucketsToDB(30);
_top->open();
@@ -562,39 +400,26 @@ void BucketManagerTest::testRequestBucketInfoWithList()
_top->doneInit();
{
std::vector<document::BucketId> bids;
- bids.push_back(document::BucketId(16, 0xe8c8));
+ bids.emplace_back(16, 0xe8c8);
- std::shared_ptr<api::RequestBucketInfoCommand> cmd(
- new api::RequestBucketInfoCommand(makeBucketSpace(), bids));
+ auto cmd = std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), bids);
_top->sendDown(cmd);
_top->waitForMessages(1, 5);
ASSERT_DUMMYLINK_REPLY_COUNT(_top, 1);
- std::shared_ptr<api::RequestBucketInfoReply> reply(
- std::dynamic_pointer_cast<api::RequestBucketInfoReply>(
- _top->getReply(0)));
+ auto reply = std::dynamic_pointer_cast<api::RequestBucketInfoReply>(_top->getReply(0));
_top->reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
- reply->getResult());
- if (reply->getBucketInfo().size() > 1) {
- std::cerr << "Too many replies found\n";
- for (uint32_t i=0; i<reply->getBucketInfo().size(); ++i) {
- std::cerr << reply->getBucketInfo()[i] << "\n";
- }
- }
- CPPUNIT_ASSERT_EQUAL((size_t) 1, reply->getBucketInfo().size());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
+ ASSERT_EQ(1u, reply->getBucketInfo().size());
api::RequestBucketInfoReply::Entry entry(
document::BucketId(16, 0xe8c8),
api::BucketInfo(0x79d04f78, 11153, 1851385240u));
- CPPUNIT_ASSERT_EQUAL(entry, reply->getBucketInfo()[0]);
+ EXPECT_EQ(entry, reply->getBucketInfo()[0]);
}
}
-void
-BucketManagerTest::testSwallowNotifyBucketChangeReply()
-{
- TestName("BucketManagerTest::testSwallowNotifyBucketChangeReply()");
+TEST_F(BucketManagerTest, swallow_notify_bucket_change_reply) {
setupTestEnvironment();
addBucketsToDB(30);
_top->open();
@@ -603,17 +428,14 @@ BucketManagerTest::testSwallowNotifyBucketChangeReply()
api::NotifyBucketChangeCommand cmd(makeDocumentBucket(document::BucketId(1, 16)),
api::BucketInfo());
- std::shared_ptr<api::NotifyBucketChangeReply> reply(
- new api::NotifyBucketChangeReply(cmd));
+ auto reply = std::make_shared<api::NotifyBucketChangeReply>(cmd);
_top->sendDown(reply);
// Should not leave the bucket manager.
- CPPUNIT_ASSERT_EQUAL(0, (int)_bottom->getNumCommands());
+ EXPECT_EQ(0u, _bottom->getNumCommands());
}
-void
-BucketManagerTest::testMetricsGeneration()
-{
+TEST_F(BucketManagerTest, metrics_generation) {
setupTestEnvironment();
_top->open();
// Add 3 buckets; 2 ready, 1 active. 300 docs total, 600 bytes total.
@@ -633,19 +455,18 @@ BucketManagerTest::testMetricsGeneration()
}
_node->getDoneInitializeHandler().notifyDoneInitializing();
_top->doneInit();
- vespalib::Monitor l;
- _manager->updateMetrics(BucketManager::MetricLockGuard(l));
-
- CPPUNIT_ASSERT_EQUAL(size_t(2), _manager->_metrics->disks.size());
- const DataStoredMetrics& m(*_manager->_metrics->disks[0]);
- CPPUNIT_ASSERT_EQUAL(int64_t(3), m.buckets.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(300), m.docs.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(600), m.bytes.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), m.active.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(2), m.ready.getLast());
+ trigger_metric_manager_update();
+
+ ASSERT_EQ(2u, bucket_manager_metrics().disks.size());
+ const DataStoredMetrics& m(*bucket_manager_metrics().disks[0]);
+ EXPECT_EQ(3, m.buckets.getLast());
+ EXPECT_EQ(300, m.docs.getLast());
+ EXPECT_EQ(600, m.bytes.getLast());
+ EXPECT_EQ(1, m.active.getLast());
+ EXPECT_EQ(2, m.ready.getLast());
}
-void BucketManagerTest::metrics_are_tracked_per_bucket_space() {
+TEST_F(BucketManagerTest, metrics_are_tracked_per_bucket_space) {
setupTestEnvironment();
_top->open();
auto& repo = _node->getComponentRegister().getBucketSpaceRepo();
@@ -669,25 +490,24 @@ void BucketManagerTest::metrics_are_tracked_per_bucket_space() {
}
_node->getDoneInitializeHandler().notifyDoneInitializing();
_top->doneInit();
- vespalib::Monitor l;
- _manager->updateMetrics(BucketManager::MetricLockGuard(l));
+ trigger_metric_manager_update();
- auto& spaces = _manager->_metrics->bucket_spaces;
+ auto& spaces = bucket_manager_metrics().bucket_spaces;
auto default_m = spaces.find(document::FixedBucketSpaces::default_space());
- CPPUNIT_ASSERT(default_m != spaces.end());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), default_m->second->buckets_total.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(100), default_m->second->docs.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(200), default_m->second->bytes.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), default_m->second->active_buckets.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), default_m->second->ready_buckets.getLast());
+ ASSERT_TRUE(default_m != spaces.end());
+ EXPECT_EQ(1, default_m->second->buckets_total.getLast());
+ EXPECT_EQ(100, default_m->second->docs.getLast());
+ EXPECT_EQ(200, default_m->second->bytes.getLast());
+ EXPECT_EQ(0, default_m->second->active_buckets.getLast());
+ EXPECT_EQ(1, default_m->second->ready_buckets.getLast());
auto global_m = spaces.find(document::FixedBucketSpaces::global_space());
- CPPUNIT_ASSERT(global_m != spaces.end());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), global_m->second->buckets_total.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(150), global_m->second->docs.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(300), global_m->second->bytes.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(1), global_m->second->active_buckets.getLast());
- CPPUNIT_ASSERT_EQUAL(int64_t(0), global_m->second->ready_buckets.getLast());
+ ASSERT_TRUE(global_m != spaces.end());
+ EXPECT_EQ(1, global_m->second->buckets_total.getLast());
+ EXPECT_EQ(150, global_m->second->docs.getLast());
+ EXPECT_EQ(300, global_m->second->bytes.getLast());
+ EXPECT_EQ(1, global_m->second->active_buckets.getLast());
+ EXPECT_EQ(0, global_m->second->ready_buckets.getLast());
}
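The test above checks that bucket counts, document counts, byte counts and active/ready totals are tracked separately for the default and global bucket spaces. As a rough illustration of that kind of per-space aggregation (a sketch only; SpaceMetrics and BucketEntry are invented names and this is not the BucketManager's actual metrics update), one space's totals can be folded together like this:

#include <cstdint>
#include <map>

// Invented types for illustration; not part of the storage code base.
struct BucketEntry {
    uint32_t docs = 0;
    uint32_t bytes = 0;
    bool active = false;
    bool ready = false;
};

struct SpaceMetrics {
    int64_t buckets_total = 0;
    int64_t docs = 0;
    int64_t bytes = 0;
    int64_t active_buckets = 0;
    int64_t ready_buckets = 0;
};

// Fold every bucket in one space into a single metrics record, mirroring the
// totals the test asserts (1 bucket / 100 docs / 200 bytes for the default
// space, 1 / 150 / 300 for the global space).
inline SpaceMetrics aggregate_space(const std::map<uint64_t, BucketEntry>& buckets_in_space) {
    SpaceMetrics m;
    for (const auto& kv : buckets_in_space) {
        const BucketEntry& entry = kv.second;
        ++m.buckets_total;
        m.docs += entry.docs;
        m.bytes += entry.bytes;
        if (entry.active) ++m.active_buckets;
        if (entry.ready)  ++m.ready_buckets;
    }
    return m;
}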
void
@@ -725,7 +545,7 @@ struct WithBuckets {
class ConcurrentOperationFixture {
public:
- ConcurrentOperationFixture(BucketManagerTest& self)
+ explicit ConcurrentOperationFixture(BucketManagerTest& self)
: _self(self),
_state("distributor:1 storage:1")
{
@@ -835,21 +655,20 @@ public:
{
const size_t nTotal = nBucketReplies + 1;
auto replies = awaitAndGetReplies(nTotal);
- CPPUNIT_ASSERT_EQUAL(nTotal, replies.size());
+ ASSERT_EQ(nTotal, replies.size());
for (size_t i = 0; i < nBucketReplies; ++i) {
- CPPUNIT_ASSERT_EQUAL(api::MessageType::REQUESTBUCKETINFO_REPLY,
- replies[i]->getType());
+ ASSERT_EQ(api::MessageType::REQUESTBUCKETINFO_REPLY, replies[i]->getType());
}
- CPPUNIT_ASSERT_EQUAL(msgType, replies[nBucketReplies]->getType());
+ ASSERT_EQ(msgType, replies[nBucketReplies]->getType());
}
void assertReplyOrdering(
const std::vector<const api::MessageType*>& replyTypes)
{
auto replies = awaitAndGetReplies(replyTypes.size());
- CPPUNIT_ASSERT_EQUAL(replyTypes.size(), replies.size());
+ ASSERT_EQ(replyTypes.size(), replies.size());
for (size_t i = 0; i < replyTypes.size(); ++i) {
- CPPUNIT_ASSERT_EQUAL(*replyTypes[i], replies[i]->getType());
+ ASSERT_EQ(*replyTypes[i], replies[i]->getType());
}
}
@@ -901,9 +720,7 @@ private:
lib::ClusterState _state;
};
-void
-BucketManagerTest::testSplitReplyOrderedAfterBucketReply()
-{
+TEST_F(BucketManagerTest, split_reply_ordered_after_bucket_reply) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
document::BucketId bucketB(17, 1);
@@ -924,9 +741,7 @@ BucketManagerTest::testSplitReplyOrderedAfterBucketReply()
1, api::MessageType::SPLITBUCKET_REPLY);
}
-void
-BucketManagerTest::testJoinReplyOrderedAfterBucketReply()
-{
+TEST_F(BucketManagerTest, join_reply_ordered_after_bucket_reply) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
document::BucketId bucketB(17, 1 << 16);
@@ -949,9 +764,7 @@ BucketManagerTest::testJoinReplyOrderedAfterBucketReply()
// Technically, deletes being ordered after bucket info replies won't help
// correctness since buckets are removed from the distributor DB upon _sending_
// the delete and not receiving it.
-void
-BucketManagerTest::testDeleteReplyOrderedAfterBucketReply()
-{
+TEST_F(BucketManagerTest, delete_reply_ordered_after_bucket_reply) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
document::BucketId bucketB(17, 1);
@@ -970,9 +783,7 @@ BucketManagerTest::testDeleteReplyOrderedAfterBucketReply()
1, api::MessageType::DELETEBUCKET_REPLY);
}
-void
-BucketManagerTest::testOnlyEnqueueWhenProcessingRequest()
-{
+TEST_F(BucketManagerTest, only_enqueue_when_processing_request) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
fixture.setUp(WithBuckets()
@@ -990,9 +801,7 @@ BucketManagerTest::testOnlyEnqueueWhenProcessingRequest()
// differently than full bucket info fetches and are not delegated to the
// worker thread. We still require that any split/joins etc are ordered after
// this reply if their reply is sent up concurrently.
-void
-BucketManagerTest::testOrderRepliesAfterBucketSpecificRequest()
-{
+TEST_F(BucketManagerTest, order_replies_after_bucket_specific_request) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
fixture.setUp(WithBuckets()
@@ -1025,14 +834,12 @@ BucketManagerTest::testOrderRepliesAfterBucketSpecificRequest()
1, api::MessageType::SPLITBUCKET_REPLY);
}
-// Test is similar to testOrderRepliesAfterBucketSpecificRequest, but has
+// Test is similar to order_replies_after_bucket_specific_request, but has
// two concurrent bucket info request processing instances going on; one in
// the worker thread and one in the message chain itself. Since we only have
// one queue, we must wait with dispatching replies until _all_ processing
// has ceased.
-void
-BucketManagerTest::testQueuedRepliesOnlyDispatchedWhenAllProcessingDone()
-{
+TEST_F(BucketManagerTest, queued_replies_only_dispatched_when_all_processing_done) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
fixture.setUp(WithBuckets()
@@ -1085,9 +892,9 @@ struct TestParams {
BUILDER_PARAM(std::vector<const api::MessageType*>, expectedOrdering);
};
-TestParams::TestParams() { }
+TestParams::TestParams() = default;
TestParams::TestParams(const TestParams &) = default;
-TestParams::~TestParams() {}
+TestParams::~TestParams() = default;
void
BucketManagerTest::doTestMutationOrdering(
@@ -1140,9 +947,7 @@ BucketManagerTest::doTestConflictingReplyIsEnqueued(
doTestMutationOrdering(fixture, params);
}
-void
-BucketManagerTest::testMutationRepliesForSplitBucketAreEnqueued()
-{
+TEST_F(BucketManagerTest, mutation_replies_for_split_bucket_are_enqueued) {
document::BucketId bucket(17, 0);
doTestConflictingReplyIsEnqueued(
bucket,
@@ -1150,9 +955,7 @@ BucketManagerTest::testMutationRepliesForSplitBucketAreEnqueued()
api::MessageType::SPLITBUCKET_REPLY);
}
-void
-BucketManagerTest::testMutationRepliesForDeletedBucketAreEnqueued()
-{
+TEST_F(BucketManagerTest, mutation_replies_for_deleted_bucket_are_enqueued) {
document::BucketId bucket(17, 0);
doTestConflictingReplyIsEnqueued(
bucket,
@@ -1160,9 +963,7 @@ BucketManagerTest::testMutationRepliesForDeletedBucketAreEnqueued()
api::MessageType::DELETEBUCKET_REPLY);
}
-void
-BucketManagerTest::testMutationRepliesForJoinedBucketAreEnqueued()
-{
+TEST_F(BucketManagerTest, mutation_replies_for_joined_bucket_are_enqueued) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(17, 0);
document::BucketId bucketB(17, 1 << 16);
@@ -1183,9 +984,7 @@ BucketManagerTest::testMutationRepliesForJoinedBucketAreEnqueued()
doTestMutationOrdering(fixture, params);
}
-void
-BucketManagerTest::testConflictingPutRepliesAreEnqueued()
-{
+TEST_F(BucketManagerTest, conflicting_put_replies_are_enqueued) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
@@ -1200,9 +999,7 @@ BucketManagerTest::testConflictingPutRepliesAreEnqueued()
doTestMutationOrdering(fixture, params);
}
-void
-BucketManagerTest::testConflictingUpdateRepliesAreEnqueued()
-{
+TEST_F(BucketManagerTest, conflicting_update_replies_are_enqueued) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
@@ -1223,9 +1020,7 @@ BucketManagerTest::testConflictingUpdateRepliesAreEnqueued()
* resulting from the operation. We have to make sure remapped operations are
* enqueued as well.
*/
-void
-BucketManagerTest::testRemappedMutationIsCheckedAgainstOriginalBucket()
-{
+TEST_F(BucketManagerTest, remapped_mutation_is_checked_against_original_bucket) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
document::BucketId remappedToBucket(18, 0);
@@ -1263,9 +1058,7 @@ BucketManagerTest::scheduleBucketInfoRequestWithConcurrentOps(
guard.unlock();
}
-void
-BucketManagerTest::testBucketConflictSetIsClearedBetweenBlockingRequests()
-{
+TEST_F(BucketManagerTest, bucket_conflict_set_is_cleared_between_blocking_requests) {
ConcurrentOperationFixture fixture(*this);
document::BucketId firstConflictBucket(17, 0);
document::BucketId secondConflictBucket(18, 0);
@@ -1308,9 +1101,7 @@ BucketManagerTest::sendSingleBucketInfoRequest(const document::BucketId& id)
_top->sendDown(infoCmd);
}
-void
-BucketManagerTest::testConflictSetOnlyClearedAfterAllBucketRequestsDone()
-{
+TEST_F(BucketManagerTest, conflict_set_only_cleared_after_all_bucket_requests_done) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucketA(16, 0);
document::BucketId bucketB(16, 1);
@@ -1371,22 +1162,17 @@ BucketManagerTest::assertRequestWithBadHashIsRejected(
_top->sendDown(infoCmd);
auto replies = fixture.awaitAndGetReplies(1);
auto& reply = dynamic_cast<api::RequestBucketInfoReply&>(*replies[0]);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::REJECTED,
- reply.getResult().getResult());
+ ASSERT_EQ(api::ReturnCode::REJECTED, reply.getResult().getResult());
}
-void
-BucketManagerTest::testRejectRequestWithMismatchingDistributionHash()
-{
+TEST_F(BucketManagerTest, reject_request_with_mismatching_distribution_hash) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
fixture.setUp(WithBuckets().add(bucket, api::BucketInfo(50, 100, 200)));
assertRequestWithBadHashIsRejected(fixture);
}
-void
-BucketManagerTest::testDbNotIteratedWhenAllRequestsRejected()
-{
+TEST_F(BucketManagerTest, db_not_iterated_when_all_requests_rejected) {
ConcurrentOperationFixture fixture(*this);
document::BucketId bucket(17, 0);
fixture.setUp(WithBuckets().add(bucket, api::BucketInfo(50, 100, 200)));
@@ -1405,7 +1191,7 @@ BucketManagerTest::testDbNotIteratedWhenAllRequestsRejected()
}
// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
-void BucketManagerTest::fall_back_to_legacy_global_distribution_hash_on_mismatch() {
+TEST_F(BucketManagerTest, fall_back_to_legacy_global_distribution_hash_on_mismatch) {
ConcurrentOperationFixture f(*this);
f.set_grouped_distribution_configs();
@@ -1416,7 +1202,7 @@ void BucketManagerTest::fall_back_to_legacy_global_distribution_hash_on_mismatch
_top->sendDown(infoCmd);
auto replies = f.awaitAndGetReplies(1);
auto& reply = dynamic_cast<api::RequestBucketInfoReply&>(*replies[0]);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult()); // _not_ REJECTED
+ EXPECT_EQ(api::ReturnCode::OK, reply.getResult().getResult()); // _not_ REJECTED
}
} // storage
diff --git a/storage/src/tests/bucketdb/initializertest.cpp b/storage/src/tests/bucketdb/initializertest.cpp
index 2141dbf4b53..57bb3a865d5 100644
--- a/storage/src/tests/bucketdb/initializertest.cpp
+++ b/storage/src/tests/bucketdb/initializertest.cpp
@@ -2,33 +2,31 @@
/**
* Tests storage initialization without depending on persistence layer.
*/
-#include <vespa/storage/bucketdb/storagebucketdbinitializer.h>
-
#include <vespa/document/base/testdocman.h>
-
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/storage/bucketdb/lockablemap.hpp>
+#include <vespa/storage/bucketdb/storagebucketdbinitializer.h>
#include <vespa/storage/persistence/filestorage/filestormanager.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/state.h>
#include <tests/common/teststorageapp.h>
#include <tests/common/dummystoragelink.h>
-#include <tests/common/testhelper.h>
-#include <vespa/vdstestlib/cppunit/dirconfig.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <vespa/storage/bucketdb/lockablemap.hpp>
-#include <vespa/vdstestlib/cppunit/dirconfig.hpp>
-#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <tests/common/testhelper.h> // TODO decouple from CppUnit
+#include <vespa/vdstestlib/cppunit/dirconfig.hpp> // TODO decouple from CppUnit
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/log/log.h>
LOG_SETUP(".test.bucketdb.initializing");
using document::FixedBucketSpaces;
+using namespace ::testing;
namespace storage {
typedef uint16_t PartitionId;
-struct InitializerTest : public CppUnit::TestFixture {
+struct InitializerTest : public Test {
class InitParams {
vdstestlib::DirConfig config;
@@ -59,14 +57,8 @@ struct InitializerTest : public CppUnit::TestFixture {
bucketWrongDisk(false),
bucketMultipleDisks(false),
failingListRequest(false),
- failingInfoRequest(false) {}
-
- void setAllFailures() {
- bucketWrongDisk = true;
- bucketMultipleDisks = true;
- failingListRequest = true;
- failingInfoRequest = true;
- }
+ failingInfoRequest(false)
+ {}
vdstestlib::DirConfig& getConfig() {
if (!configFinalized) {
@@ -83,104 +75,44 @@ struct InitializerTest : public CppUnit::TestFixture {
document::TestDocMan _docMan;
- void testInitialization(InitParams& params);
+ void do_test_initialization(InitParams& params);
+};
- /**
- * Test that the status page can be shown during init without a deadlock
- * or crash or anything. Don't validate much output, it might change.
- */
- void testStatusPage();
+TEST_F(InitializerTest, init_with_empty_node) {
+ InitParams params;
+ params.docsPerDisk = 0;
+ do_test_initialization(params);
+}
- /** Test initializing with an empty node. */
- void testInitEmptyNode() {
- InitParams params;
- params.docsPerDisk = 0;
- testInitialization(params);
- }
- /** Test initializing with some data on single disk. */
- void testInitSingleDisk() {
- InitParams params;
- params.diskCount = DiskCount(1);
- testInitialization(params);
- }
- /** Test initializing with multiple disks. */
- void testInitMultiDisk() {
- InitParams params;
- testInitialization(params);
- }
- /** Test initializing with one of the disks being bad. */
- void testInitFailingMiddleDisk() {
- InitParams params;
- params.disksDown.insert(1);
- testInitialization(params);
- }
- /** Test initializing with last disk being bad. */
- void testInitFailingLastDisk() {
- InitParams params;
- params.disksDown.insert(params.diskCount - 1);
- testInitialization(params);
- }
- /** Test initializing with bucket on wrong disk. */
- void testInitBucketOnWrongDisk() {
- InitParams params;
- params.bucketWrongDisk = true;
- params.bucketBitsUsed = 58;
- testInitialization(params);
- }
- /** Test initializing with bucket on multiple disks. */
- void testInitBucketOnMultipleDisks() {
- InitParams params;
- params.bucketMultipleDisks = true;
- params.bucketBitsUsed = 58;
- testInitialization(params);
- }
- /** Test initializing with failing list request. */
- void testInitFailingListRequest() {
- InitParams params;
- params.failingListRequest = true;
- testInitialization(params);
- }
- void testInitFailingInfoRequest() {
- InitParams params;
- params.failingInfoRequest = true;
- testInitialization(params);
- }
- /** Test initializing with everything being wrong at once. */
- void testAllFailures() {
- InitParams params;
- params.docsPerDisk = 100;
- params.diskCount = DiskCount(10);
- params.disksDown.insert(0);
- params.disksDown.insert(2);
- params.disksDown.insert(3);
- params.disksDown.insert(9);
- params.setAllFailures();
- testInitialization(params);
- }
- void testCommandBlockingDuringInit();
-
- void testBucketProgressCalculator();
-
- void testBucketsInitializedByLoad();
-
- CPPUNIT_TEST_SUITE(InitializerTest);
- CPPUNIT_TEST(testInitEmptyNode);
- CPPUNIT_TEST(testInitSingleDisk);
- CPPUNIT_TEST(testInitMultiDisk);
- CPPUNIT_TEST(testInitFailingMiddleDisk);
- CPPUNIT_TEST(testInitFailingLastDisk);
- CPPUNIT_TEST(testInitBucketOnWrongDisk);
- //CPPUNIT_TEST(testInitBucketOnMultipleDisks);
- //CPPUNIT_TEST(testStatusPage);
- //CPPUNIT_TEST(testCommandBlockingDuringInit);
- //CPPUNIT_TEST(testAllFailures);
- CPPUNIT_TEST(testBucketProgressCalculator);
- CPPUNIT_TEST(testBucketsInitializedByLoad);
- CPPUNIT_TEST_SUITE_END();
+TEST_F(InitializerTest, init_with_data_on_single_disk) {
+ InitParams params;
+ params.diskCount = DiskCount(1);
+ do_test_initialization(params);
+}
-};
+TEST_F(InitializerTest, init_with_multiple_disks) {
+ InitParams params;
+ do_test_initialization(params);
+}
+
+TEST_F(InitializerTest, init_with_bad_non_last_disk) {
+ InitParams params;
+ params.disksDown.insert(1);
+ do_test_initialization(params);
+}
+
+TEST_F(InitializerTest, init_with_bad_last_disk) {
+ InitParams params;
+ params.disksDown.insert(params.diskCount - 1);
+ do_test_initialization(params);
+}
-CPPUNIT_TEST_SUITE_REGISTRATION(InitializerTest);
+TEST_F(InitializerTest, init_with_bucket_on_wrong_disk) {
+ InitParams params;
+ params.bucketWrongDisk = true;
+ params.bucketBitsUsed = 58;
+ do_test_initialization(params);
+}
namespace {
// Data kept on buckets we're using in test.
@@ -202,7 +134,7 @@ struct BucketData {
return copy;
}
};
-// Data reciding on one disk
+// Data residing on one disk
typedef std::map<document::BucketId, BucketData> DiskData;
struct BucketInfoLogger {
std::map<PartitionId, DiskData>& map;
@@ -215,11 +147,8 @@ struct BucketInfoLogger {
{
document::BucketId bucket(
document::BucketId::keyToBucketId(revBucket));
- CPPUNIT_ASSERT(bucket.getRawId() != 0);
- CPPUNIT_ASSERT_MSG(
- "Found invalid bucket in database: " + bucket.toString()
- + " " + entry.getBucketInfo().toString(),
- entry.getBucketInfo().valid());
+ assert(bucket.getRawId() != 0);
+ assert(entry.getBucketInfo().valid());
DiskData& ddata(map[entry.disk]);
BucketData& bdata(ddata[bucket]);
bdata.info = entry.getBucketInfo();
@@ -277,10 +206,10 @@ buildBucketInfo(const document::TestDocMan& docMan,
while (params.disksDown.find(partition) != params.disksDown.end()) {
partition = (partition + 1) % params.diskCount;;
}
- LOG(info, "Putting bucket %s on wrong disk %u instead of %u",
+ LOG(debug, "Putting bucket %s on wrong disk %u instead of %u",
bid.toString().c_str(), partition, correctPart);
}
- LOG(info, "Putting bucket %s on disk %u",
+ LOG(debug, "Putting bucket %s on disk %u",
bid.toString().c_str(), partition);
BucketData& data(result[partition][bid]);
data.info.setDocumentCount(data.info.getDocumentCount() + 1);
@@ -299,84 +228,65 @@ void verifyEqual(std::map<PartitionId, DiskData>& org,
while (part1 != org.end() && part2 != existing.end()) {
if (part1->first < part2->first) {
if (!part1->second.empty()) {
- std::ostringstream ost;
- ost << "No data in partition " << part1->first << " found.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "No data in partition " << part1->first << " found.";
}
++part1;
} else if (part1->first > part2->first) {
if (!part2->second.empty()) {
- std::ostringstream ost;
- ost << "Found data in partition " << part2->first
- << " which should not exist.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Found data in partition " << part2->first
+ << " which should not exist.";
}
++part2;
} else {
- DiskData::const_iterator bucket1(part1->second.begin());
- DiskData::const_iterator bucket2(part2->second.begin());
+ auto bucket1 = part1->second.begin();
+ auto bucket2 = part2->second.begin();
while (bucket1 != part1->second.end()
&& bucket2 != part2->second.end())
{
if (bucket1->first < bucket2->first) {
- std::ostringstream ost;
- ost << "No data in partition " << part1->first
- << " for bucket " << bucket1->first << " found.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "No data in partition " << part1->first
+ << " for bucket " << bucket1->first << " found.";
} else if (bucket1->first.getId() > bucket2->first.getId())
{
- std::ostringstream ost;
- ost << "Found data in partition " << part2->first
- << " for bucket " << bucket2->first
- << " which should not exist.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Found data in partition " << part2->first
+ << " for bucket " << bucket2->first
+ << " which should not exist.";
} else if (!(bucket1->second.info == bucket2->second.info)) {
- std::ostringstream ost;
- ost << "Bucket " << bucket1->first << " on partition "
- << part1->first << " has bucket info "
- << bucket2->second.info << " and not "
- << bucket1->second.info << " as expected.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Bucket " << bucket1->first << " on partition "
+ << part1->first << " has bucket info "
+ << bucket2->second.info << " and not "
+ << bucket1->second.info << " as expected.";
}
++bucket1;
++bucket2;
++equalCount;
}
if (bucket1 != part1->second.end()) {
- std::ostringstream ost;
- ost << "No data in partition " << part1->first
- << " for bucket " << bucket1->first << " found.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "No data in partition " << part1->first
+ << " for bucket " << bucket1->first << " found.";
}
if (bucket2 != part2->second.end()) {
- std::ostringstream ost;
- ost << "Found data in partition " << part2->first
- << " for bucket " << bucket2->first
- << " which should not exist.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Found data in partition " << part2->first
+ << " for bucket " << bucket2->first
+ << " which should not exist.";
}
++part1;
++part2;
}
}
if (part1 != org.end() && !part1->second.empty()) {
- std::ostringstream ost;
- ost << "No data in partition " << part1->first << " found.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "No data in partition " << part1->first << " found.";
}
if (part2 != existing.end() && !part2->second.empty()) {
- std::ostringstream ost;
- ost << "Found data in partition " << part2->first
- << " which should not exist.";
- CPPUNIT_FAIL(ost.str());
+ FAIL() << "Found data in partition " << part2->first
+ << " which should not exist.";
}
- //std::cerr << "\n " << equalCount << " buckets were matched. ";
}
struct MessageCallback
{
public:
- virtual ~MessageCallback() {}
+ virtual ~MessageCallback() = default;
virtual void onMessage(const api::StorageMessage&) = 0;
};
@@ -413,7 +323,7 @@ struct FakePersistenceLayer : public StorageLink {
<< "it there.";
fatal(ost.str());
} else {
- DiskData::const_iterator it2(it->second.find(bucket));
+ auto it2 = it->second.find(bucket);
if (it2 == it->second.end()) {
std::ostringstream ost;
ost << "Have no data for " << bucket << " on disk " << partition
@@ -433,10 +343,9 @@ struct FakePersistenceLayer : public StorageLink {
messageCallback->onMessage(*msg);
}
if (msg->getType() == api::MessageType::INTERNAL) {
- api::InternalCommand& cmd(
- dynamic_cast<api::InternalCommand&>(*msg));
+ auto& cmd = dynamic_cast<api::InternalCommand&>(*msg);
if (cmd.getType() == ReadBucketList::ID) {
- ReadBucketList& rbl(dynamic_cast<ReadBucketList&>(cmd));
+ auto& rbl = dynamic_cast<ReadBucketList&>(cmd);
ReadBucketListReply::SP reply(new ReadBucketListReply(rbl));
std::map<PartitionId, DiskData>::const_iterator it(
data.find(rbl.getPartition()));
@@ -448,10 +357,8 @@ struct FakePersistenceLayer : public StorageLink {
fatal(ost.str());
} else {
if (cmd.getBucket().getBucketSpace() == FixedBucketSpaces::default_space()) {
- for (DiskData::const_iterator it2 = it->second.begin();
- it2 != it->second.end(); ++it2)
- {
- reply->getBuckets().push_back(it2->first);
+ for (const auto& bd : it->second) {
+ reply->getBuckets().push_back(bd.first);
}
}
}
@@ -461,7 +368,7 @@ struct FakePersistenceLayer : public StorageLink {
}
sendUp(reply);
} else if (cmd.getType() == ReadBucketInfo::ID) {
- ReadBucketInfo& rbi(dynamic_cast<ReadBucketInfo&>(cmd));
+ auto& rbi = dynamic_cast<ReadBucketInfo&>(cmd);
ReadBucketInfoReply::SP reply(new ReadBucketInfoReply(rbi));
StorBucketDatabase::WrappedEntry entry(
bucketDatabase.get(rbi.getBucketId(), "fakelayer"));
@@ -483,8 +390,7 @@ struct FakePersistenceLayer : public StorageLink {
}
sendUp(reply);
} else if (cmd.getType() == InternalBucketJoinCommand::ID) {
- InternalBucketJoinCommand& ibj(
- dynamic_cast<InternalBucketJoinCommand&>(cmd));
+ auto& ibj = dynamic_cast<InternalBucketJoinCommand&>(cmd);
InternalBucketJoinReply::SP reply(
new InternalBucketJoinReply(ibj));
StorBucketDatabase::WrappedEntry entry(
@@ -521,20 +427,14 @@ struct FakePersistenceLayer : public StorageLink {
} // end of anonymous namespace
-#define CPPUNIT_ASSERT_METRIC_SET(x) \
- CPPUNIT_ASSERT(initializer->getMetrics().x.getValue() > 0);
-
void
-InitializerTest::testInitialization(InitParams& params)
+InitializerTest::do_test_initialization(InitParams& params)
{
std::map<PartitionId, DiskData> data(buildBucketInfo(_docMan, params));
spi::PartitionStateList partitions(params.diskCount);
- for (std::set<uint32_t>::const_iterator it = params.disksDown.begin();
- it != params.disksDown.end(); ++it)
- {
- partitions[*it] = spi::PartitionState(
- spi::PartitionState::DOWN, "Set down in test");
+ for (const auto& p : params.disksDown) {
+ partitions[p] = spi::PartitionState(spi::PartitionState::DOWN, "Set down in test");
}
TestServiceLayerApp node(params.diskCount, params.nodeIndex,
params.getConfig().getConfigId());
@@ -549,233 +449,32 @@ InitializerTest::testInitialization(InitParams& params)
top.push_back(StorageLink::UP(bottom = new FakePersistenceLayer(
data, node.getStorageBucketDatabase())));
- LOG(info, "STARTING INITIALIZATION");
+ LOG(debug, "STARTING INITIALIZATION");
top.open();
- /*
- FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
- if (params.bucketWrongDisk) updater.moveBucketWrongDisk();
- if (params.bucketMultipleDisks) updater.copyBucketWrongDisk();
- if (params.failingListRequest) {
- updater.removeDirPermission(6, 'r');
- updater.removeBucketsFromDBAtPath(6);
- }
- if (params.failingInfoRequest) {
- updater.removeFilePermission();
- orgBucketDatabase.erase(updater.getBucket(8));
- }
- */
-
node.waitUntilInitialized(initializer);
std::map<PartitionId, DiskData> initedBucketDatabase(
createMapFromBucketDatabase(node.getStorageBucketDatabase()));
verifyEqual(data, initedBucketDatabase);
- /*
- if (params.bucketWrongDisk) {
- CPPUNIT_ASSERT_METRIC_SET(_wrongDisk);
- }
- if (params.bucketMultipleDisks) {
- CPPUNIT_ASSERT_METRIC_SET(_joinedCount);
- }
- */
-}
-
-/*
-namespace {
- enum State { LISTING, INFO, DONE };
- void verifyStatusContent(StorageBucketDBInitializer& initializer,
- State state)
- {
- std::ostringstream ost;
- initializer.reportStatus(ost, framework::HttpUrlPath(""));
- std::string status = ost.str();
-
- if (state == LISTING) {
- CPPUNIT_ASSERT_CONTAIN("List phase completed: false", status);
- CPPUNIT_ASSERT_CONTAIN("Initialization completed: false", status);
- } else if (state == INFO) {
- CPPUNIT_ASSERT_CONTAIN("List phase completed: true", status);
- CPPUNIT_ASSERT_CONTAIN("Initialization completed: false", status);
- } else if (state == DONE) {
- CPPUNIT_ASSERT_CONTAIN("List phase completed: true", status);
- CPPUNIT_ASSERT_CONTAIN("Initialization completed: true", status);
- }
- }
-}
-
-void
-InitializerTest::testStatusPage()
-{
- // Set up surrounding system to create a single bucket for us to
- // do init on.
- vdstestlib::DirConfig config(getStandardConfig(true));
- uint16_t nodeIndex(
- config.getConfig("stor-server").getValue("node_index", 0));
- InitParams params;
- params.docsPerDisk = 1;
- params.diskCount = 1;
- std::map<document::BucketId, api::BucketInfo> orgBucketDatabase(
- buildBucketInfo(_docMan, config, nodeIndex, 1, 1, params.disksDown));
- FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
-
- // Set up the initializer.
- DummyStorageServer server(config.getConfigId());
- DummyStorageLink top;
- DummyStorageLink *bottom;
- StorageBucketDBInitializer* initializer;
- top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
- config.getConfigId(), server)));
- top.push_back(StorageLink::UP(bottom = new DummyStorageLink));
-
- // Grab bucket database lock for bucket to init to lock the initializer
- // in the init stage
- StorBucketDatabase::WrappedEntry entry(
- server.getStorageBucketDatabase().get(
- updater.getBucket(0), "testCommandBlocking",
- StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
- // Start the initializer
- top.open();
- bottom->waitForMessages(1, 30);
- verifyStatusContent(*initializer, LISTING);
- // Attempt to send put. Should be blocked
- // Attempt to send request bucket info. Should be blocked.
- // Attempt to send getNodeState. Should not be blocked.
-
- // Unlock bucket in bucket database so listing step can complete.
- // Await read info request being sent down.
- entry.unlock();
- bottom->waitForMessages(1, 30);
- verifyStatusContent(*initializer, INFO);
-
- ReadBucketInfo& cmd(dynamic_cast<ReadBucketInfo&>(*bottom->getCommand(0)));
- ReadBucketInfoReply::SP reply(new ReadBucketInfoReply(cmd));
- bottom->sendUp(reply);
-
- node.waitUntilInitialized(initializer);
- verifyStatusContent(*initializer, DONE);
-
}
-#define ASSERT_BLOCKED(top, bottom, blocks) \
- if (blocks) { \
- top.waitForMessages(1, 30); \
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getReplies().size()); \
- CPPUNIT_ASSERT_EQUAL(size_t(0), bottom.getCommands().size()); \
- api::StorageReply& reply(dynamic_cast<api::StorageReply&>( \
- *top.getReply(0))); \
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED, \
- reply.getResult().getResult()); \
- top.reset(); \
- } else { \
- bottom.waitForMessages(1, 30); \
- CPPUNIT_ASSERT_EQUAL(size_t(0), top.getReplies().size()); \
- CPPUNIT_ASSERT_EQUAL(size_t(1), bottom.getCommands().size()); \
- api::StorageCommand& command(dynamic_cast<api::StorageCommand&>( \
- *bottom.getCommand(0))); \
- (void) command; \
- bottom.reset(); \
- }
-
-namespace {
- void verifyBlockingOn(DummyStorageLink& top,
- DummyStorageLink& bottom,
- bool blockEnabled)
- {
- // Attempt to send get. Should be blocked if block enabled
- {
- api::GetCommand::SP cmd(new api::GetCommand(
- document::BucketId(16, 4),
- document::DocumentId("userdoc:ns:4:test"), true));
- top.sendDown(cmd);
- ASSERT_BLOCKED(top, bottom, blockEnabled);
- }
- // Attempt to send request bucket info. Should be blocked if enabled.
- {
- api::RequestBucketInfoCommand::SP cmd(
- new api::RequestBucketInfoCommand(
- 0, lib::ClusterState("")));
- top.sendDown(cmd);
- ASSERT_BLOCKED(top, bottom, blockEnabled);
- }
- // Attempt to send getNodeState. Should not be blocked.
- {
- api::GetNodeStateCommand::SP cmd(new api::GetNodeStateCommand(
- lib::NodeState::UP(0)));
- top.sendDown(cmd);
- ASSERT_BLOCKED(top, bottom, false);
- }
- }
-}
-
-void
-InitializerTest::testCommandBlockingDuringInit()
-{
- // Set up surrounding system to create a single bucket for us to
- // do init on.
- vdstestlib::DirConfig config(getStandardConfig(true));
- uint16_t nodeIndex(
- config.getConfig("stor-server").getValue("node_index", 0));
- InitParams params;
- params.docsPerDisk = 1;
- params.diskCount = 1;
- std::map<document::BucketId, api::BucketInfo> orgBucketDatabase(
- buildBucketInfo(_docMan, config, nodeIndex, 1, 1, params.disksDown));
- FileChanger updater(config, nodeIndex, params, orgBucketDatabase);
-
- // Set up the initializer.
- DummyStorageServer server(config.getConfigId());
- DummyStorageLink top;
- DummyStorageLink *bottom;
- StorageBucketDBInitializer* initializer;
- top.push_back(StorageLink::UP(initializer = new StorageBucketDBInitializer(
- config.getConfigId(), server)));
- top.push_back(StorageLink::UP(bottom = new DummyStorageLink));
-
- // Grab bucket database lock for bucket to init to lock the initializer
- // in the init stage
- StorBucketDatabase::WrappedEntry entry(
- server.getStorageBucketDatabase().get(
- updater.getBucket(0), "testCommandBlocking",
- StorBucketDatabase::LOCK_IF_NONEXISTING_AND_NOT_CREATING));
- // Start the initializer
- top.open();
- verifyBlockingOn(top, *bottom, true);
- // Attempt to send put. Should be blocked
- // Attempt to send request bucket info. Should be blocked.
- // Attempt to send getNodeState. Should not be blocked.
-
- // Unlock bucket in bucket database so listing step can complete.
- // Await read info request being sent down.
- entry.unlock();
- bottom->waitForMessages(1, 30);
- dynamic_cast<ReadBucketInfo&>(*bottom->getCommand(0));
- CPPUNIT_ASSERT(!server.isInitialized());
- bottom->reset();
-
- // Retry - Should now not block
- verifyBlockingOn(top, *bottom, false);
-}
-*/
-
-void
-InitializerTest::testBucketProgressCalculator()
-{
+TEST_F(InitializerTest, bucket_progress_calculator) {
using document::BucketId;
StorageBucketDBInitializer::BucketProgressCalculator calc;
// We consider the given bucket as not being completed, so progress
// will be _up to_, not _including_ the bucket. This means we can never
// reach 1.0, so progress completion must be handled by other logic!
- CPPUNIT_ASSERT_EQUAL(0.0, calc.calculateProgress(BucketId(1, 0)));
- CPPUNIT_ASSERT_EQUAL(0.0, calc.calculateProgress(BucketId(32, 0)));
+ EXPECT_DOUBLE_EQ(0.0, calc.calculateProgress(BucketId(1, 0)));
+ EXPECT_DOUBLE_EQ(0.0, calc.calculateProgress(BucketId(32, 0)));
- CPPUNIT_ASSERT_EQUAL(0.5, calc.calculateProgress(BucketId(1, 1)));
+ EXPECT_DOUBLE_EQ(0.5, calc.calculateProgress(BucketId(1, 1)));
- CPPUNIT_ASSERT_EQUAL(0.25, calc.calculateProgress(BucketId(2, 2)));
- CPPUNIT_ASSERT_EQUAL(0.5, calc.calculateProgress(BucketId(2, 1)));
- CPPUNIT_ASSERT_EQUAL(0.75, calc.calculateProgress(BucketId(2, 3)));
+ EXPECT_DOUBLE_EQ(0.25, calc.calculateProgress(BucketId(2, 2)));
+ EXPECT_DOUBLE_EQ(0.5, calc.calculateProgress(BucketId(2, 1)));
+ EXPECT_DOUBLE_EQ(0.75, calc.calculateProgress(BucketId(2, 3)));
- CPPUNIT_ASSERT_EQUAL(0.875, calc.calculateProgress(BucketId(3, 7)));
+ EXPECT_DOUBLE_EQ(0.875, calc.calculateProgress(BucketId(3, 7)));
}
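The comment above spells out the progress semantics: progress is measured up to, but not including, the bucket currently being processed, so it can approach but never reach 1.0. A minimal stand-in that reproduces the expected values in this test is to reverse the used bits of the bucket id and divide by 2^usedBits; this is an illustration only, not the actual StorageBucketDBInitializer::BucketProgressCalculator implementation.

#include <cstdint>

// Sketch only: interpret the first `usedBits` bits of the raw bucket id in
// reversed order and divide by 2^usedBits. Reproduces the values asserted
// above, e.g. BucketId(2, 3) -> bits 11 reversed -> 3/4 = 0.75, and
// BucketId(3, 7) -> 111 reversed -> 7/8 = 0.875.
inline double progress_sketch(uint32_t usedBits, uint64_t rawId) {
    uint64_t reversed = 0;
    for (uint32_t bit = 0; bit < usedBits; ++bit) {
        reversed = (reversed << 1) | ((rawId >> bit) & 1);
    }
    return static_cast<double>(reversed) / static_cast<double>(uint64_t(1) << usedBits);
}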
struct DatabaseInsertCallback : MessageCallback
@@ -809,7 +508,6 @@ struct DatabaseInsertCallback : MessageCallback
_app.getStateUpdater().getReportedNodeState());
double progress(reportedState->getInitProgress().getValue());
LOG(debug, "reported progress is now %g", progress);
- // CppUnit exceptions are swallowed...
if (progress >= 1.0) {
_errors << "progress exceeded 1.0: " << progress << "\n";
}
@@ -835,8 +533,7 @@ struct DatabaseInsertCallback : MessageCallback
}
if (msg.getType() == api::MessageType::INTERNAL) {
- const api::InternalCommand& cmd(
- dynamic_cast<const api::InternalCommand&>(msg));
+ auto& cmd = dynamic_cast<const api::InternalCommand&>(msg);
if (cmd.getType() == ReadBucketInfo::ID) {
if (cmd.getPriority() != _expectedReadBucketPriority) {
_errors << "expected ReadBucketInfo priority of "
@@ -871,9 +568,7 @@ struct DatabaseInsertCallback : MessageCallback
}
};
-void
-InitializerTest::testBucketsInitializedByLoad()
-{
+TEST_F(InitializerTest, buckets_initialized_by_load) {
InitParams params;
params.docsPerDisk = 100;
params.diskCount = DiskCount(1);
@@ -911,8 +606,8 @@ InitializerTest::testBucketsInitializedByLoad()
// has been set.
top.close();
- CPPUNIT_ASSERT(callback._invoked);
- CPPUNIT_ASSERT_EQUAL(std::string(), callback._errors.str());
+ ASSERT_TRUE(callback._invoked);
+ EXPECT_EQ(std::string(), callback._errors.str());
std::map<PartitionId, DiskData> initedBucketDatabase(
createMapFromBucketDatabase(node.getStorageBucketDatabase()));
@@ -922,11 +617,10 @@ InitializerTest::testBucketsInitializedByLoad()
node.getStateUpdater().getReportedNodeState());
double progress(reportedState->getInitProgress().getValue());
- CPPUNIT_ASSERT(progress >= 1.0);
- CPPUNIT_ASSERT(progress < 1.0001);
+ EXPECT_GE(progress, 1.0);
+ EXPECT_LT(progress, 1.0001);
- CPPUNIT_ASSERT_EQUAL(params.bucketBitsUsed,
- reportedState->getMinUsedBits());
+ EXPECT_EQ(params.bucketBitsUsed, reportedState->getMinUsedBits());
}
} // storage
diff --git a/storage/src/tests/bucketdb/judyarraytest.cpp b/storage/src/tests/bucketdb/judyarraytest.cpp
index 07992450fbf..94d61107fcf 100644
--- a/storage/src/tests/bucketdb/judyarraytest.cpp
+++ b/storage/src/tests/bucketdb/judyarraytest.cpp
@@ -1,36 +1,20 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/storage/bucketdb/judyarray.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <boost/assign.hpp>
#include <boost/random.hpp>
-#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
#include <map>
#include <vector>
-namespace storage {
-
-struct JudyArrayTest : public CppUnit::TestFixture {
- void testIterating();
- void testDualArrayFunctions();
- void testComparing();
- void testSize();
- void testStress();
+using namespace ::testing;
- CPPUNIT_TEST_SUITE(JudyArrayTest);
- CPPUNIT_TEST(testIterating);
- CPPUNIT_TEST(testDualArrayFunctions);
- CPPUNIT_TEST(testSize);
- CPPUNIT_TEST(testStress);
- CPPUNIT_TEST_SUITE_END();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(JudyArrayTest);
+namespace storage {
namespace {
- std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> >
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type>>
getJudyArrayContents(const JudyArray& array) {
- std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> > vals;
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type>> vals;
for (JudyArray::const_iterator it = array.begin();
it != array.end(); ++it)
{
@@ -40,168 +24,129 @@ namespace {
}
}
-void
-JudyArrayTest::testIterating()
-{
+TEST(JudyArrayTest, iterating) {
JudyArray array;
- // Test that things are sane for empty document
- CPPUNIT_ASSERT_EQUAL(array.begin(), array.end());
- // Add some values
- using namespace boost::assign;
- std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> > values
- = map_list_of(3,2)(5,12)(15,8)(13,10)(7,6)(9,4);
+    // Test that things are sane for an empty array
+ ASSERT_EQ(array.begin(), array.end());
+ // Add some values
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type>> values({
+ {3, 2}, {5, 12}, {15, 8}, {13, 10}, {7, 6}, {9, 4}
+ });
for (uint32_t i=0; i<values.size(); ++i) {
array.insert(values[i].first, values[i].second);
}
- // Create expected result
+ // Create expected result
std::sort(values.begin(), values.end());
- // Test that we can iterate through const iterator
- std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> >
- foundVals = getJudyArrayContents(array);
- CPPUNIT_ASSERT_EQUAL(values, foundVals);
+ // Test that we can iterate through const iterator
+ auto foundVals = getJudyArrayContents(array);
+ ASSERT_EQ(values, foundVals);
{ // Test that we can alter through non-const iterator
JudyArray::iterator it = array.begin();
++it;
++it;
it.setValue(20);
- CPPUNIT_ASSERT_EQUAL((JudyArray::key_type) 7, it.key());
- CPPUNIT_ASSERT_EQUAL((JudyArray::data_type) 20, array[7]);
+ ASSERT_EQ((JudyArray::key_type) 7, it.key());
+ ASSERT_EQ((JudyArray::data_type) 20, array[7]);
it.remove();
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 5,
- getJudyArrayContents(array).size());
- CPPUNIT_ASSERT_EQUAL(array.end(), array.find(7));
+ ASSERT_EQ((JudyArray::size_type) 5, getJudyArrayContents(array).size());
+ ASSERT_EQ(array.end(), array.find(7));
values.erase(values.begin() + 2);
- CPPUNIT_ASSERT_EQUAL(values, getJudyArrayContents(array));
- // And that we can continue iterating after removing.
+ ASSERT_EQ(values, getJudyArrayContents(array));
+ // And that we can continue iterating after removing.
++it;
- CPPUNIT_ASSERT_EQUAL((JudyArray::key_type) 9, it.key());
- CPPUNIT_ASSERT_EQUAL((JudyArray::data_type) 4, array[9]);
+ ASSERT_EQ((JudyArray::key_type) 9, it.key());
+ ASSERT_EQ((JudyArray::data_type) 4, array[9]);
}
{ // Test printing of iterators
JudyArray::ConstIterator cit = array.begin();
- CPPUNIT_ASSERT_MATCH_REGEX(
- "^ConstIterator\\(Key: 3, Valp: 0x[0-9a-f]{1,16}, Val: 2\\)$",
- cit.toString());
+ EXPECT_THAT(cit.toString(), MatchesRegex("^ConstIterator\\(Key: 3, Valp: 0x[0-9a-f]{1,16}, Val: 2\\)$"));
JudyArray::Iterator it = array.end();
- CPPUNIT_ASSERT_MATCH_REGEX(
- "^Iterator\\(Key: 0, Valp: 0\\)$",
- it.toString());
+ EXPECT_THAT(it.toString(), MatchesRegex("^Iterator\\(Key: 0, Valp: 0\\)$"));
}
}
-void
-JudyArrayTest::testDualArrayFunctions()
-{
+TEST(JudyArrayTest, dual_array_functions) {
JudyArray array1;
JudyArray array2;
- // Add values to array1
- using namespace boost::assign;
- std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> > values1
- = map_list_of(3,2)(5,12)(15,8)(13,10)(7,6)(9,4);
+ // Add values to array1
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type>> values1({
+ {3, 2}, {5, 12}, {15, 8}, {13, 10}, {7, 6}, {9, 4}
+ });
for (uint32_t i=0; i<values1.size(); ++i) {
array1.insert(values1[i].first, values1[i].second);
}
- // Add values to array2
- std::vector<std::pair<JudyArray::key_type, JudyArray::data_type> > values2
- = map_list_of(4,5)(9,40);
+ // Add values to array2
+ std::vector<std::pair<JudyArray::key_type, JudyArray::data_type>> values2({
+ {4, 5}, {9, 40}
+ });
for (uint32_t i=0; i<values2.size(); ++i) {
array2.insert(values2[i].first, values2[i].second);
}
- // Create expected result
+ // Create expected result
std::sort(values1.begin(), values1.end());
std::sort(values2.begin(), values2.end());
- CPPUNIT_ASSERT_EQUAL(values1, getJudyArrayContents(array1));
- CPPUNIT_ASSERT_EQUAL(values2, getJudyArrayContents(array2));
- CPPUNIT_ASSERT(array2 < array1);
- CPPUNIT_ASSERT(array1 != array2);
+ EXPECT_EQ(values1, getJudyArrayContents(array1));
+ EXPECT_EQ(values2, getJudyArrayContents(array2));
+ EXPECT_LT(array2, array1);
+ EXPECT_NE(array1, array2);
array1.swap(array2);
- CPPUNIT_ASSERT_EQUAL(values1, getJudyArrayContents(array2));
- CPPUNIT_ASSERT_EQUAL(values2, getJudyArrayContents(array1));
- CPPUNIT_ASSERT(array1 < array2);
- CPPUNIT_ASSERT(array1 != array2);
+ EXPECT_EQ(values1, getJudyArrayContents(array2));
+ EXPECT_EQ(values2, getJudyArrayContents(array1));
+ EXPECT_LT(array1, array2);
+ EXPECT_NE(array1, array2);
- // Test some operators
+ // Test some operators
JudyArray array3;
for (uint32_t i=0; i<values1.size(); ++i) {
array3.insert(values1[i].first, values1[i].second);
}
- CPPUNIT_ASSERT(array1 != array3);
- CPPUNIT_ASSERT_EQUAL(array2, array3);
- CPPUNIT_ASSERT(!(array2 < array3));
+ EXPECT_NE(array1, array3);
+ EXPECT_EQ(array2, array3);
+ EXPECT_FALSE(array2 < array3);
}
-void
-JudyArrayTest::testSize()
-{
+TEST(JudyArrayTest, size) {
JudyArray array;
- CPPUNIT_ASSERT_EQUAL(array.begin(), array.end());
- CPPUNIT_ASSERT(array.empty());
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 0, array.size());
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 0, array.getMemoryUsage());
+ EXPECT_EQ(array.begin(), array.end());
+ EXPECT_TRUE(array.empty());
+ EXPECT_EQ((JudyArray::size_type) 0, array.size());
+ EXPECT_EQ((JudyArray::size_type) 0, array.getMemoryUsage());
- // Test each method one can insert stuff into array
+ // Test each method one can insert stuff into array
array.insert(4, 3);
- CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ EXPECT_EQ(getJudyArrayContents(array).size(), array.size());
array.insert(4, 7);
- CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
- if (sizeof(JudyArray::size_type) == 4) {
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 12, array.getMemoryUsage());
- } else if (sizeof(JudyArray::size_type) == 8) {
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 24, array.getMemoryUsage());
- } else CPPUNIT_FAIL("Unknown size of type");
+ EXPECT_EQ(getJudyArrayContents(array).size(), array.size());
+ EXPECT_EQ((JudyArray::size_type) 24, array.getMemoryUsage());
array[6] = 8;
- CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ EXPECT_EQ(getJudyArrayContents(array).size(), array.size());
array[6] = 10;
- CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
- if (sizeof(JudyArray::size_type) == 4) {
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 20, array.getMemoryUsage());
- } else if (sizeof(JudyArray::size_type) == 8) {
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 40, array.getMemoryUsage());
- } else CPPUNIT_FAIL("Unknown size of type");
+ EXPECT_EQ(getJudyArrayContents(array).size(), array.size());
+ EXPECT_EQ((JudyArray::size_type) 40, array.getMemoryUsage());
bool preExisted;
array.find(8, true, preExisted);
- CPPUNIT_ASSERT_EQUAL(false, preExisted);
- CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ EXPECT_EQ(false, preExisted);
+ EXPECT_EQ(getJudyArrayContents(array).size(), array.size());
array.find(8, true, preExisted);
- CPPUNIT_ASSERT_EQUAL(true, preExisted);
- CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 3, array.size());
- if (sizeof(JudyArray::size_type) == 4) {
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 28, array.getMemoryUsage());
- } else if (sizeof(JudyArray::size_type) == 8) {
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 56, array.getMemoryUsage());
- } else CPPUNIT_FAIL("Unknown size of type");
+ EXPECT_EQ(true, preExisted);
+ EXPECT_EQ(getJudyArrayContents(array).size(), array.size());
+ EXPECT_EQ((JudyArray::size_type) 3, array.size());
+ EXPECT_EQ((JudyArray::size_type) 56, array.getMemoryUsage());
- // Test each method one can remove stuff in array with
+ // Test each method one can remove stuff in array with
array.erase(8);
- CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
+ EXPECT_EQ(getJudyArrayContents(array).size(), array.size());
array.erase(8);
- CPPUNIT_ASSERT_EQUAL(getJudyArrayContents(array).size(), array.size());
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 2, array.size());
- if (sizeof(JudyArray::size_type) == 4) {
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 20, array.getMemoryUsage());
- } else if (sizeof(JudyArray::size_type) == 8) {
- CPPUNIT_ASSERT_EQUAL((JudyArray::size_type) 40, array.getMemoryUsage());
- } else CPPUNIT_FAIL("Unknown size of type");
-}
-
-namespace {
- template<typename T>
- std::string toString(const T& m) {
- std::cerr << "#";
- std::ostringstream ost;
- ost << m;
- return ost.str();
- }
+ EXPECT_EQ(getJudyArrayContents(array).size(), array.size());
+ EXPECT_EQ((JudyArray::size_type) 2, array.size());
+ EXPECT_EQ((JudyArray::size_type) 40, array.getMemoryUsage());
}
-void
-JudyArrayTest::testStress()
-{
+TEST(JudyArrayTest, stress) {
// Do a lot of random stuff to both judy array and std::map. Ensure equal
// behaviour
@@ -219,9 +164,6 @@ JudyArrayTest::testStress()
JudyArray::key_type value(rnd());
judyArray.insert(key, value);
stdMap[key] = value;
- //std::pair<StdMap::iterator, bool> result
- // = stdMap.insert(std::make_pair(key, value));
- //if (!result.second) result.first->second = value;
} else if (optype < 50) { // operator[]
JudyArray::key_type key(rnd() % 500);
JudyArray::key_type value(rnd());
@@ -229,42 +171,30 @@ JudyArrayTest::testStress()
stdMap[key] = value;
} else if (optype < 70) { // erase()
JudyArray::key_type key(rnd() % 500);
- CPPUNIT_ASSERT_EQUAL_MSG(
- toString(judyArray) + toString(stdMap),
- stdMap.erase(key), judyArray.erase(key));
+ EXPECT_EQ(stdMap.erase(key), judyArray.erase(key));
} else if (optype < 75) { // size()
- CPPUNIT_ASSERT_EQUAL_MSG(
- toString(judyArray) + toString(stdMap),
- stdMap.size(), judyArray.size());
+ EXPECT_EQ(stdMap.size(), judyArray.size());
} else if (optype < 78) { // empty()
- CPPUNIT_ASSERT_EQUAL_MSG(
- toString(judyArray) + toString(stdMap),
- stdMap.empty(), judyArray.empty());
+ EXPECT_EQ(stdMap.empty(), judyArray.empty());
} else { // find()
JudyArray::key_type key(rnd() % 500);
- JudyArray::iterator it = judyArray.find(key);
- StdMap::iterator it2 = stdMap.find(key);
- CPPUNIT_ASSERT_EQUAL_MSG(
- toString(judyArray) + toString(stdMap),
- it2 == stdMap.end(), it == judyArray.end());
+ auto it = judyArray.find(key);
+ auto it2 = stdMap.find(key);
+ EXPECT_EQ(it2 == stdMap.end(), it == judyArray.end());
if (it != judyArray.end()) {
- CPPUNIT_ASSERT_EQUAL_MSG(
- toString(judyArray) + toString(stdMap),
- it.key(), it2->first);
- CPPUNIT_ASSERT_EQUAL_MSG(
- toString(judyArray) + toString(stdMap),
- it.value(), it2->second);
+ EXPECT_EQ(it.key(), it2->first);
+ EXPECT_EQ(it.value(), it2->second);
}
}
}
- // Ensure judy array contents is equal to std::map's at this point
+    // Ensure the judy array contents are equal to the std::map's at this point
StdMap tmpMap;
for (JudyArray::const_iterator it = judyArray.begin();
it != judyArray.end(); ++it)
{
tmpMap[it.key()] = it.value();
}
- CPPUNIT_ASSERT_EQUAL(stdMap, tmpMap);
+ EXPECT_EQ(stdMap, tmpMap);
}
}
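The conversion above follows a mechanical mapping from CppUnit assertions to their GTest counterparts, with the extra message argument of the *_MSG variants becoming a streamed message. Below is a minimal, self-contained sketch of that mapping; the standard <gtest/gtest.h> header and all names in it are illustrative, not taken from the patch.

#include <gtest/gtest.h>
#include <vector>

// Illustrative only: each assertion notes the CppUnit macro it stands in for.
TEST(ConversionExample, assertion_mapping) {
    std::vector<int> v{1, 2, 3};
    EXPECT_TRUE(!v.empty());           // was CPPUNIT_ASSERT(!v.empty())
    EXPECT_EQ(size_t(3), v.size());    // was CPPUNIT_ASSERT_EQUAL((size_t)3, v.size())
    // was CPPUNIT_ASSERT_EQUAL_MSG(msg, expected, actual): the message is now streamed.
    EXPECT_EQ(3, v.back()) << "v.size()=" << v.size();
}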
diff --git a/storage/src/tests/bucketdb/judymultimaptest.cpp b/storage/src/tests/bucketdb/judymultimaptest.cpp
index 43b83b16dec..254dbb78b18 100644
--- a/storage/src/tests/bucketdb/judymultimaptest.cpp
+++ b/storage/src/tests/bucketdb/judymultimaptest.cpp
@@ -2,31 +2,15 @@
#include <vespa/storage/bucketdb/judymultimap.h>
#include <vespa/storage/bucketdb/judymultimap.hpp>
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <boost/assign.hpp>
-#include <boost/random.hpp>
-#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <map>
#include <ostream>
#include <vector>
-#include <vespa/log/log.h>
-LOG_SETUP(".judy_multi_map_test");
+using namespace ::testing;
namespace storage {
-struct JudyMultiMapTest : public CppUnit::TestFixture {
- void testSimpleUsage();
- void testIterator();
-
- CPPUNIT_TEST_SUITE(JudyMultiMapTest);
- CPPUNIT_TEST(testSimpleUsage);
- CPPUNIT_TEST(testIterator);
- CPPUNIT_TEST_SUITE_END();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(JudyMultiMapTest);
-
namespace {
struct B;
struct C;
@@ -84,48 +68,44 @@ namespace {
}
}
-void
-JudyMultiMapTest::testSimpleUsage() {
+TEST(JudyMultiMapTest, simple_usage) {
typedef JudyMultiMap<C, B, A> MultiMap;
MultiMap multiMap;
- // Do some insertions
+ // Do some insertions
bool preExisted;
- CPPUNIT_ASSERT(multiMap.empty());
+ EXPECT_TRUE(multiMap.empty());
multiMap.insert(16, A(1, 2, 3), preExisted);
- CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ EXPECT_EQ(false, preExisted);
multiMap.insert(11, A(4, 6, 0), preExisted);
- CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ EXPECT_EQ(false, preExisted);
multiMap.insert(14, A(42, 0, 0), preExisted);
- CPPUNIT_ASSERT_EQUAL(false, preExisted);
- CPPUNIT_ASSERT_EQUAL_MSG(multiMap.toString(),
- (MultiMap::size_type) 3, multiMap.size());
+ EXPECT_EQ(false, preExisted);
+ EXPECT_EQ((MultiMap::size_type) 3, multiMap.size()) << multiMap.toString();
multiMap.insert(11, A(4, 7, 0), preExisted);
- CPPUNIT_ASSERT_EQUAL(true, preExisted);
- CPPUNIT_ASSERT_EQUAL((MultiMap::size_type) 3, multiMap.size());
- CPPUNIT_ASSERT(!multiMap.empty());
-
- // Access some elements
- CPPUNIT_ASSERT_EQUAL(A(4, 7, 0), multiMap[11]);
- CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), multiMap[16]);
- CPPUNIT_ASSERT_EQUAL(A(42,0, 0), multiMap[14]);
-
- // Do removes
- CPPUNIT_ASSERT(multiMap.erase(12) == 0);
- CPPUNIT_ASSERT_EQUAL((MultiMap::size_type) 3, multiMap.size());
-
- CPPUNIT_ASSERT(multiMap.erase(14) == 1);
- CPPUNIT_ASSERT_EQUAL((MultiMap::size_type) 2, multiMap.size());
-
- CPPUNIT_ASSERT(multiMap.erase(11) == 1);
- CPPUNIT_ASSERT(multiMap.erase(16) == 1);
- CPPUNIT_ASSERT_EQUAL((MultiMap::size_type) 0, multiMap.size());
- CPPUNIT_ASSERT(multiMap.empty());
+ EXPECT_EQ(true, preExisted);
+ EXPECT_EQ((MultiMap::size_type) 3, multiMap.size());
+ EXPECT_FALSE(multiMap.empty());
+
+ // Access some elements
+ EXPECT_EQ(A(4, 7, 0), multiMap[11]);
+ EXPECT_EQ(A(1, 2, 3), multiMap[16]);
+    EXPECT_EQ(A(42, 0, 0), multiMap[14]);
+
+ // Do removes
+ EXPECT_EQ(multiMap.erase(12), 0);
+ EXPECT_EQ((MultiMap::size_type) 3, multiMap.size());
+
+ EXPECT_EQ(multiMap.erase(14), 1);
+ EXPECT_EQ((MultiMap::size_type) 2, multiMap.size());
+
+ EXPECT_EQ(multiMap.erase(11), 1);
+ EXPECT_EQ(multiMap.erase(16), 1);
+ EXPECT_EQ((MultiMap::size_type) 0, multiMap.size());
+ EXPECT_TRUE(multiMap.empty());
}
-void
-JudyMultiMapTest::testIterator()
-{
+TEST(JudyMultiMapTest, iterator) {
typedef JudyMultiMap<C, B, A> MultiMap;
MultiMap multiMap;
bool preExisted;
@@ -135,37 +115,37 @@ JudyMultiMapTest::testIterator()
multiMap.insert(14, A(42, 0, 0), preExisted);
MultiMap::Iterator iter = multiMap.begin();
- CPPUNIT_ASSERT_EQUAL((uint64_t)11, (uint64_t)iter.key());
- CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value());
+ EXPECT_EQ((uint64_t)11, (uint64_t)iter.key());
+ EXPECT_EQ(A(4, 6, 0), iter.value());
++iter;
- CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key());
- CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value());
+ EXPECT_EQ((uint64_t)14, (uint64_t)iter.key());
+ EXPECT_EQ(A(42, 0, 0), iter.value());
++iter;
- CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key());
- CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value());
+ EXPECT_EQ((uint64_t)16, (uint64_t)iter.key());
+ EXPECT_EQ(A(1, 2, 3), iter.value());
--iter;
- CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key());
- CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value());
+ EXPECT_EQ((uint64_t)14, (uint64_t)iter.key());
+ EXPECT_EQ(A(42, 0, 0), iter.value());
++iter;
- CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key());
- CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value());
+ EXPECT_EQ((uint64_t)16, (uint64_t)iter.key());
+ EXPECT_EQ(A(1, 2, 3), iter.value());
--iter;
--iter;
- CPPUNIT_ASSERT_EQUAL((uint64_t)11,(uint64_t) iter.key());
- CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value());
+    EXPECT_EQ((uint64_t)11, (uint64_t)iter.key());
+ EXPECT_EQ(A(4, 6, 0), iter.value());
++iter;
++iter;
++iter;
- CPPUNIT_ASSERT_EQUAL(multiMap.end(), iter);
+ EXPECT_EQ(multiMap.end(), iter);
--iter;
- CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key());
- CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value());
+ EXPECT_EQ((uint64_t)16, (uint64_t)iter.key());
+ EXPECT_EQ(A(1, 2, 3), iter.value());
--iter;
- CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key());
- CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value());
+ EXPECT_EQ((uint64_t)14, (uint64_t)iter.key());
+ EXPECT_EQ(A(42, 0, 0), iter.value());
--iter;
- CPPUNIT_ASSERT_EQUAL((uint64_t)11,(uint64_t) iter.key());
- CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value());
+    EXPECT_EQ((uint64_t)11, (uint64_t)iter.key());
+ EXPECT_EQ(A(4, 6, 0), iter.value());
}
} // storage
diff --git a/storage/src/tests/bucketdb/lockablemaptest.cpp b/storage/src/tests/bucketdb/lockablemaptest.cpp
index 10f806f2e97..a55e258129c 100644
--- a/storage/src/tests/bucketdb/lockablemaptest.cpp
+++ b/storage/src/tests/bucketdb/lockablemaptest.cpp
@@ -4,84 +4,17 @@
#include <vespa/storage/bucketdb/judymultimap.h>
#include <vespa/storage/bucketdb/judymultimap.hpp>
#include <vespa/storage/bucketdb/lockablemap.hpp>
-#include <vespa/vdstestlib/cppunit/macros.h>
-#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <boost/operators.hpp>
#include <vespa/log/log.h>
LOG_SETUP(".lockable_map_test");
-namespace storage {
+// FIXME these old tests may have the least obvious semantics and worst naming in the entire storage module
+
+using namespace ::testing;
-struct LockableMapTest : public CppUnit::TestFixture {
- void testSimpleUsage();
- void testComparison();
- void testIterating();
- void testChunkedIterationIsTransparentAcrossChunkSizes();
- void testCanAbortDuringChunkedIteration();
- void testThreadSafetyStress();
- void testFindBuckets();
- void testFindBuckets2();
- void testFindBuckets3();
- void testFindBuckets4();
- void testFindBuckets5();
- void testFindBucketsSimple();
- void testFindNoBuckets();
- void testFindAll();
- void testFindAll2();
- void testFindAllUnusedBitIsSet();
- void testFindAllInconsistentlySplit();
- void testFindAllInconsistentlySplit2();
- void testFindAllInconsistentlySplit3();
- void testFindAllInconsistentlySplit4();
- void testFindAllInconsistentlySplit5();
- void testFindAllInconsistentlySplit6();
- void testFindAllInconsistentBelow16Bits();
- void testCreate();
- void testCreate2();
- void testCreate3();
- void testCreate4();
- void testCreate5();
- void testCreate6();
- void testCreateEmpty();
- void testIsConsistent();
-
- CPPUNIT_TEST_SUITE(LockableMapTest);
- CPPUNIT_TEST(testSimpleUsage);
- CPPUNIT_TEST(testComparison);
- CPPUNIT_TEST(testIterating);
- CPPUNIT_TEST(testChunkedIterationIsTransparentAcrossChunkSizes);
- CPPUNIT_TEST(testCanAbortDuringChunkedIteration);
- CPPUNIT_TEST(testThreadSafetyStress);
- CPPUNIT_TEST(testFindBuckets);
- CPPUNIT_TEST(testFindBuckets2);
- CPPUNIT_TEST(testFindBuckets3);
- CPPUNIT_TEST(testFindBuckets4);
- CPPUNIT_TEST(testFindBuckets5);
- CPPUNIT_TEST(testFindBucketsSimple);
- CPPUNIT_TEST(testFindNoBuckets);
- CPPUNIT_TEST(testFindAll);
- CPPUNIT_TEST(testFindAll2);
- CPPUNIT_TEST(testFindAllUnusedBitIsSet);
- CPPUNIT_TEST(testFindAllInconsistentlySplit);
- CPPUNIT_TEST(testFindAllInconsistentlySplit2);
- CPPUNIT_TEST(testFindAllInconsistentlySplit3);
- CPPUNIT_TEST(testFindAllInconsistentlySplit4);
- CPPUNIT_TEST(testFindAllInconsistentlySplit5);
- CPPUNIT_TEST(testFindAllInconsistentlySplit6);
- CPPUNIT_TEST(testFindAllInconsistentBelow16Bits);
- CPPUNIT_TEST(testCreate);
- CPPUNIT_TEST(testCreate2);
- CPPUNIT_TEST(testCreate3);
- CPPUNIT_TEST(testCreate4);
- CPPUNIT_TEST(testCreate5);
- CPPUNIT_TEST(testCreate6);
- CPPUNIT_TEST(testCreateEmpty);
- CPPUNIT_TEST(testIsConsistent);
- CPPUNIT_TEST_SUITE_END();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(LockableMapTest);
+namespace storage {
namespace {
struct A : public boost::operators<A> {
@@ -112,84 +45,81 @@ namespace {
typedef LockableMap<JudyMultiMap<A> > Map;
}
-void
-LockableMapTest::testSimpleUsage() {
- // Tests insert, erase, size, empty, operator[]
+TEST(LockableMapTest, simple_usage) {
+ // Tests insert, erase, size, empty, operator[]
Map map;
- // Do some insertions
- CPPUNIT_ASSERT(map.empty());
+ // Do some insertions
+ EXPECT_TRUE(map.empty());
bool preExisted;
map.insert(16, A(1, 2, 3), "foo", preExisted);
- CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ EXPECT_EQ(false, preExisted);
map.insert(11, A(4, 6, 0), "foo", preExisted);
- CPPUNIT_ASSERT_EQUAL(false, preExisted);
+ EXPECT_EQ(false, preExisted);
map.insert(14, A(42, 0, 0), "foo", preExisted);
- CPPUNIT_ASSERT_EQUAL(false, preExisted);
- CPPUNIT_ASSERT_EQUAL_MSG(map.toString(),
- (Map::size_type) 3, map.size());
+ EXPECT_EQ(false, preExisted);
+ EXPECT_EQ((Map::size_type) 3, map.size()) << map.toString();
map.insert(11, A(4, 7, 0), "foo", preExisted);
- CPPUNIT_ASSERT_EQUAL(true, preExisted);
- CPPUNIT_ASSERT_EQUAL((Map::size_type) 3, map.size());
- CPPUNIT_ASSERT(!map.empty());
-
- // Access some elements
- CPPUNIT_ASSERT_EQUAL(A(4, 7, 0), *map.get(11, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), *map.get(16, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(42,0, 0), *map.get(14, "foo"));
-
- // Do removes
- CPPUNIT_ASSERT(map.erase(12, "foo") == 0);
- CPPUNIT_ASSERT_EQUAL((Map::size_type) 3, map.size());
-
- CPPUNIT_ASSERT(map.erase(14, "foo") == 1);
- CPPUNIT_ASSERT_EQUAL((Map::size_type) 2, map.size());
-
- CPPUNIT_ASSERT(map.erase(11, "foo") == 1);
- CPPUNIT_ASSERT(map.erase(16, "foo") == 1);
- CPPUNIT_ASSERT_EQUAL((Map::size_type) 0, map.size());
- CPPUNIT_ASSERT(map.empty());
+ EXPECT_EQ(true, preExisted);
+ EXPECT_EQ((Map::size_type) 3, map.size());
+ EXPECT_FALSE(map.empty());
+
+ // Access some elements
+ EXPECT_EQ(A(4, 7, 0), *map.get(11, "foo"));
+ EXPECT_EQ(A(1, 2, 3), *map.get(16, "foo"));
+    EXPECT_EQ(A(42, 0, 0), *map.get(14, "foo"));
+
+ // Do removes
+ EXPECT_EQ(map.erase(12, "foo"), 0);
+ EXPECT_EQ((Map::size_type) 3, map.size());
+
+ EXPECT_EQ(map.erase(14, "foo"), 1);
+ EXPECT_EQ((Map::size_type) 2, map.size());
+
+ EXPECT_EQ(map.erase(11, "foo"), 1);
+ EXPECT_EQ(map.erase(16, "foo"), 1);
+ EXPECT_EQ((Map::size_type) 0, map.size());
+ EXPECT_TRUE(map.empty());
}
-void
-LockableMapTest::testComparison() {
+TEST(LockableMapTest, comparison) {
Map map1;
Map map2;
bool preExisted;
- // Check empty state is correct
- CPPUNIT_ASSERT_EQUAL(map1, map2);
- CPPUNIT_ASSERT(!(map1 < map2));
- CPPUNIT_ASSERT(!(map1 != map2));
+ // Check empty state is correct
+ EXPECT_EQ(map1, map2);
+ EXPECT_FALSE(map1 < map2);
+ EXPECT_FALSE(map1 != map2);
- // Check that different lengths are oki
+ // Check that different lengths are ok
map1.insert(4, A(1, 2, 3), "foo", preExisted);
- CPPUNIT_ASSERT(!(map1 == map2));
- CPPUNIT_ASSERT(!(map1 < map2));
- CPPUNIT_ASSERT(map2 < map1);
- CPPUNIT_ASSERT(map1 != map2);
+ EXPECT_FALSE(map1 == map2);
+ EXPECT_FALSE(map1 < map2);
+ EXPECT_LT(map2, map1);
+ EXPECT_NE(map1, map2);
- // Check that equal elements are oki
+ // Check that equal elements are ok
map2.insert(4, A(1, 2, 3), "foo", preExisted);
- CPPUNIT_ASSERT_EQUAL(map1, map2);
- CPPUNIT_ASSERT(!(map1 < map2));
- CPPUNIT_ASSERT(!(map1 != map2));
+ EXPECT_EQ(map1, map2);
+ EXPECT_FALSE(map1 < map2);
+ EXPECT_FALSE(map1 != map2);
- // Check that non-equal values are oki
+ // Check that non-equal values are ok
map1.insert(6, A(1, 2, 6), "foo", preExisted);
map2.insert(6, A(1, 2, 3), "foo", preExisted);
- CPPUNIT_ASSERT(!(map1 == map2));
- CPPUNIT_ASSERT(!(map1 < map2));
- CPPUNIT_ASSERT(map2 < map1);
- CPPUNIT_ASSERT(map1 != map2);
+ EXPECT_FALSE(map1 == map2);
+ EXPECT_FALSE(map1 < map2);
+ EXPECT_LT(map2, map1);
+ EXPECT_NE(map1, map2);
- // Check that non-equal keys are oki
+ // Check that non-equal keys are ok
map1.erase(6, "foo");
map1.insert(7, A(1, 2, 3), "foo", preExisted);
- CPPUNIT_ASSERT(!(map1 == map2));
- CPPUNIT_ASSERT(!(map1 < map2));
- CPPUNIT_ASSERT(map2 < map1);
- CPPUNIT_ASSERT(map1 != map2);
+ EXPECT_FALSE(map1 == map2);
+ EXPECT_FALSE(map1 < map2);
+ EXPECT_LT(map2, map1);
+ EXPECT_NE(map1, map2);
}
namespace {
@@ -225,7 +155,9 @@ namespace {
std::string toString() {
std::ostringstream ost;
- for (uint32_t i=0; i<log.size(); ++i) ost << log[i] << "\n";
+ for (uint32_t i=0; i<log.size(); ++i) {
+ ost << log[i] << "\n";
+ }
return ost.str();
}
};
@@ -234,10 +166,9 @@ namespace {
EntryProcessor::EntryProcessor() : count(0), log(), behaviour() {}
EntryProcessor::EntryProcessor(const std::vector<Map::Decision>& decisions)
: count(0), log(), behaviour(decisions) {}
-EntryProcessor::~EntryProcessor() {}
+EntryProcessor::~EntryProcessor() = default;
-void
-LockableMapTest::testIterating() {
+TEST(LockableMapTest, iterating) {
Map map;
bool preExisted;
map.insert(16, A(1, 2, 3), "foo", preExisted);
@@ -247,13 +178,13 @@ LockableMapTest::testIterating() {
{
NonConstProcessor ncproc;
map.each(ncproc, "foo"); // Locking both for each element
- CPPUNIT_ASSERT_EQUAL(A(4, 7, 0), *map.get(11, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(42,1, 0), *map.get(14, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(1, 3, 3), *map.get(16, "foo"));
+ EXPECT_EQ(A(4, 7, 0), *map.get(11, "foo"));
+ EXPECT_EQ(A(42,1, 0), *map.get(14, "foo"));
+ EXPECT_EQ(A(1, 3, 3), *map.get(16, "foo"));
map.all(ncproc, "foo"); // And for all
- CPPUNIT_ASSERT_EQUAL(A(4, 8, 0), *map.get(11, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(42,2, 0), *map.get(14, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(1, 4, 3), *map.get(16, "foo"));
+ EXPECT_EQ(A(4, 8, 0), *map.get(11, "foo"));
+ EXPECT_EQ(A(42,2, 0), *map.get(14, "foo"));
+ EXPECT_EQ(A(1, 4, 3), *map.get(16, "foo"));
}
// Test that we can use const functors directly..
map.each(EntryProcessor(), "foo");
@@ -265,12 +196,12 @@ LockableMapTest::testIterating() {
std::string expected("11 - A(4, 8, 0)\n"
"14 - A(42, 2, 0)\n"
"16 - A(1, 4, 3)\n");
- CPPUNIT_ASSERT_EQUAL(expected, proc.toString());
+ EXPECT_EQ(expected, proc.toString());
EntryProcessor proc2;
map.each(proc2, "foo", 12, 15);
expected = "14 - A(42, 2, 0)\n";
- CPPUNIT_ASSERT_EQUAL(expected, proc2.toString());
+ EXPECT_EQ(expected, proc2.toString());
}
// Test that we can abort iterating
{
@@ -281,7 +212,7 @@ LockableMapTest::testIterating() {
map.each(proc, "foo");
std::string expected("11 - A(4, 8, 0)\n"
"14 - A(42, 2, 0)\n");
- CPPUNIT_ASSERT_EQUAL(expected, proc.toString());
+ EXPECT_EQ(expected, proc.toString());
}
// Test that we can remove during iteration
{
@@ -293,19 +224,16 @@ LockableMapTest::testIterating() {
std::string expected("11 - A(4, 8, 0)\n"
"14 - A(42, 2, 0)\n"
"16 - A(1, 4, 3)\n");
- CPPUNIT_ASSERT_EQUAL(expected, proc.toString());
- CPPUNIT_ASSERT_EQUAL_MSG(map.toString(),
- (Map::size_type) 2, map.size());
- CPPUNIT_ASSERT_EQUAL(A(4, 8, 0), *map.get(11, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(1, 4, 3), *map.get(16, "foo"));
+ EXPECT_EQ(expected, proc.toString());
+ EXPECT_EQ((Map::size_type) 2, map.size()) << map.toString();
+ EXPECT_EQ(A(4, 8, 0), *map.get(11, "foo"));
+ EXPECT_EQ(A(1, 4, 3), *map.get(16, "foo"));
Map::WrappedEntry entry = map.get(14, "foo");
- CPPUNIT_ASSERT(!entry.exist());
+ EXPECT_FALSE(entry.exist());
}
}
-void
-LockableMapTest::testChunkedIterationIsTransparentAcrossChunkSizes()
-{
+TEST(LockableMapTest, chunked_iteration_is_transparent_across_chunk_sizes) {
Map map;
bool preExisted;
map.insert(16, A(1, 2, 3), "foo", preExisted);
@@ -314,19 +242,17 @@ LockableMapTest::testChunkedIterationIsTransparentAcrossChunkSizes()
NonConstProcessor ncproc; // Increments 2nd value in all entries.
// chunkedAll with chunk size of 1
map.chunkedAll(ncproc, "foo", 1);
- CPPUNIT_ASSERT_EQUAL(A(4, 7, 0), *map.get(11, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(42, 1, 0), *map.get(14, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(1, 3, 3), *map.get(16, "foo"));
+ EXPECT_EQ(A(4, 7, 0), *map.get(11, "foo"));
+ EXPECT_EQ(A(42, 1, 0), *map.get(14, "foo"));
+ EXPECT_EQ(A(1, 3, 3), *map.get(16, "foo"));
// chunkedAll with chunk size larger than db size
map.chunkedAll(ncproc, "foo", 100);
- CPPUNIT_ASSERT_EQUAL(A(4, 8, 0), *map.get(11, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(42, 2, 0), *map.get(14, "foo"));
- CPPUNIT_ASSERT_EQUAL(A(1, 4, 3), *map.get(16, "foo"));
+ EXPECT_EQ(A(4, 8, 0), *map.get(11, "foo"));
+ EXPECT_EQ(A(42, 2, 0), *map.get(14, "foo"));
+ EXPECT_EQ(A(1, 4, 3), *map.get(16, "foo"));
}
-void
-LockableMapTest::testCanAbortDuringChunkedIteration()
-{
+TEST(LockableMapTest, can_abort_during_chunked_iteration) {
Map map;
bool preExisted;
map.insert(16, A(1, 2, 3), "foo", preExisted);
@@ -340,243 +266,10 @@ LockableMapTest::testCanAbortDuringChunkedIteration()
map.chunkedAll(proc, "foo", 100);
std::string expected("11 - A(4, 6, 0)\n"
"14 - A(42, 0, 0)\n");
- CPPUNIT_ASSERT_EQUAL(expected, proc.toString());
+ EXPECT_EQ(expected, proc.toString());
}
-namespace {
- struct LoadGiver : public document::Runnable {
- typedef std::shared_ptr<LoadGiver> SP;
- Map& _map;
- uint32_t _counter;
-
- LoadGiver(Map& map) : _map(map), _counter(0) {}
- ~LoadGiver() __attribute__((noinline));
- };
-
- LoadGiver::~LoadGiver() { }
-
- struct InsertEraseLoadGiver : public LoadGiver {
- InsertEraseLoadGiver(Map& map) : LoadGiver(map) {}
-
- void run() override {
- // Screws up order of buckets by xor'ing with 12345.
- // Only operate on last 32k super buckets.
- while (running()) {
- uint32_t bucket = ((_counter ^ 12345) % 0x8000) + 0x8000;
- if (bucket % 7 < 3) {
- bool preExisted;
- _map.insert(bucket, A(bucket, 0, _counter), "foo",
- preExisted);
- }
- if (bucket % 5 < 2) {
- _map.erase(bucket, "foo");
- }
- ++_counter;
- }
- }
- };
-
- struct GetLoadGiver : public LoadGiver {
- GetLoadGiver(Map& map) : LoadGiver(map) {}
-
- void run() override {
- // It's legal to keep entries as long as you only request higher
- // buckets. So, to test this, keep entries until you request one
- // that is smaller than those stored.
- std::vector<std::pair<uint32_t, Map::WrappedEntry> > stored;
- while (running()) {
- uint32_t bucket = (_counter ^ 52721) % 0x10000;
- if (!stored.empty() && stored.back().first > bucket) {
- stored.clear();
- }
- stored.push_back(std::pair<uint32_t, Map::WrappedEntry>(
- bucket, _map.get(bucket, "foo", _counter % 3 == 0)));
- ++_counter;
- }
- }
- };
-
- struct AllLoadGiver : public LoadGiver {
- AllLoadGiver(Map& map) : LoadGiver(map) {}
-
- void run() override {
- while (running()) {
- _map.all(*this, "foo");
- ++_counter;
- }
- }
-
- Map::Decision operator()(int key, A& a) {
- //std::cerr << (void*) this << " - " << key << "\n";
- (void) key;
- ++a._val2;
- return Map::CONTINUE;
- }
- };
-
- struct EachLoadGiver : public LoadGiver {
- EachLoadGiver(Map& map) : LoadGiver(map) {}
-
- void run() override {
- while (running()) {
- _map.each(*this, "foo");
- ++_counter;
- }
- }
-
- Map::Decision operator()(int key, A& a) {
- //std::cerr << (void*) this << " - " << key << "\n";
- (void) key;
- ++a._val2;
- return Map::CONTINUE;
- }
- };
-
- struct RandomRangeLoadGiver : public LoadGiver {
- RandomRangeLoadGiver(Map& map) : LoadGiver(map) {}
-
- void run() override {
- while (running()) {
- uint32_t min = (_counter ^ 23426) % 0x10000;
- uint32_t max = (_counter ^ 40612) % 0x10000;
- if (min > max) {
- uint32_t tmp = min;
- min = max;
- max = tmp;
- }
- if (_counter % 7 < 5) {
- _map.each(*this, "foo", min, max);
- } else {
- _map.all(*this, "foo", min, max);
- }
- ++_counter;
- }
- }
-
- Map::Decision operator()(int key, A& a) {
- //std::cerr << ".";
- (void) key;
- ++a._val2;
- return Map::CONTINUE;
- }
- };
-
- struct GetNextLoadGiver : public LoadGiver {
- GetNextLoadGiver(Map& map) : LoadGiver(map) {}
-
- void run() override {
- while (running()) {
- uint32_t bucket = (_counter ^ 60417) % 0xffff;
- if (_counter % 7 < 5) {
- _map.each(*this, "foo", bucket + 1, 0xffff);
- } else {
- _map.all(*this, "foo", bucket + 1, 0xffff);
- }
- ++_counter;
- }
- }
-
- Map::Decision operator()(int key, A& a) {
- //std::cerr << ".";
- (void) key;
- ++a._val2;
- return Map::ABORT;
- }
- };
-}
-
-void
-LockableMapTest::testThreadSafetyStress() {
- uint32_t duration = 2 * 1000;
- std::cerr << "\nRunning LockableMap threadsafety test for "
- << (duration / 1000) << " seconds.\n";
- // Set up multiple threads going through the bucket database at the same
- // time. Ensuring all works and there are no deadlocks.
-
- // Initial database of 32k elements which should always be present.
- // Next 32k elements may exist (loadgivers may erase and create them, "foo")
- Map map;
- for (uint32_t i=0; i<65536; ++i) {
- bool preExisted;
- map.insert(i, A(i, 0, i ^ 12345), "foo", preExisted);
- }
- std::vector<LoadGiver::SP> loadgivers;
- for (uint32_t i=0; i<8; ++i) {
- loadgivers.push_back(LoadGiver::SP(new InsertEraseLoadGiver(map)));
- }
- for (uint32_t i=0; i<2; ++i) {
- loadgivers.push_back(LoadGiver::SP(new GetLoadGiver(map)));
- }
- for (uint32_t i=0; i<2; ++i) {
- loadgivers.push_back(LoadGiver::SP(new AllLoadGiver(map)));
- }
- for (uint32_t i=0; i<2; ++i) {
- loadgivers.push_back(LoadGiver::SP(new EachLoadGiver(map)));
- }
- for (uint32_t i=0; i<2; ++i) {
- loadgivers.push_back(LoadGiver::SP(new RandomRangeLoadGiver(map)));
- }
- for (uint32_t i=0; i<2; ++i) {
- loadgivers.push_back(LoadGiver::SP(new GetNextLoadGiver(map)));
- }
-
- FastOS_ThreadPool pool(128 * 1024);
- for (uint32_t i=0; i<loadgivers.size(); ++i) {
- CPPUNIT_ASSERT(loadgivers[i]->start(pool));
- }
- FastOS_Thread::Sleep(duration);
- std::cerr << "Closing down test\n";
- for (uint32_t i=0; i<loadgivers.size(); ++i) {
- CPPUNIT_ASSERT(loadgivers[i]->stop());
- }
-// FastOS_Thread::Sleep(duration);
-// std::cerr << "Didn't manage to shut down\n";
-// map._lockedKeys.print(std::cerr, true, "");
-
- for (uint32_t i=0; i<loadgivers.size(); ++i) {
- CPPUNIT_ASSERT(loadgivers[i]->join());
- }
- std::cerr << "Loadgiver counts:";
- for (uint32_t i=0; i<loadgivers.size(); ++i) {
- std::cerr << " " << loadgivers[i]->_counter;
- }
- std::cerr << "\nTest completed\n";
-}
-
-#if 0
-namespace {
-struct Hex {
- document::BucketId::Type val;
-
- Hex(document::BucketId::Type v) : val(v) {}
- bool operator==(const Hex& h) const { return val == h.val; }
-};
-
-std::ostream& operator<<(std::ostream& out, const Hex& h) {
- out << std::hex << h.val << std::dec;
- return out;
-}
-
-void
-printBucket(const std::string s, const document::BucketId& b) {
- std::cerr << s << "bucket=" << b << ", reversed=" << b.stripUnused().toKey() << ", hex=" << Hex(b.stripUnused().toKey()) << "\n";
-}
-
-void
-printBuckets(const std::map<document::BucketId, Map::WrappedEntry>& results) {
- for (std::map<document::BucketId, Map::WrappedEntry>::const_iterator iter = results.begin();
- iter != results.end();
- iter++) {
- printBucket("Returned ", iter->first);
- }
-}
-
-}
-#endif
-
-void
-LockableMapTest::testFindBucketsSimple() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_buckets_simple) {
Map map;
document::BucketId id1(17, 0x0ffff);
@@ -594,17 +287,13 @@ LockableMapTest::testFindBucketsSimple() {
map.insert(id3.toKey(), A(3,4,5), "foo", preExisted);
document::BucketId id(22, 0xfffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getContained(id, "foo");
+ auto results = map.getContained(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3]);
-#endif
+ EXPECT_EQ(1, results.size());
+ EXPECT_EQ(A(3,4,5), *results[id3]);
}
-void
-LockableMapTest::testFindBuckets() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_buckets) {
Map map;
document::BucketId id1(16, 0x0ffff);
@@ -619,20 +308,16 @@ LockableMapTest::testFindBuckets() {
map.insert(id4.stripUnused().toKey(), A(4,5,6), "foo", preExisted);
document::BucketId id(22, 0xfffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getContained(id, "foo");
+ auto results = map.getContained(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)3, results.size());
+ EXPECT_EQ(3, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
- CPPUNIT_ASSERT_EQUAL(A(4,5,6), *results[id4.stripUnused()]);
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]);
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]);
+ EXPECT_EQ(A(4,5,6), *results[id4.stripUnused()]);
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]);
}
-void
-LockableMapTest::testFindBuckets2() { // ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_buckets_2) { // ticket 3121525
Map map;
document::BucketId id1(16, 0x0ffff);
@@ -647,20 +332,16 @@ LockableMapTest::testFindBuckets2() { // ticket 3121525
map.insert(id4.stripUnused().toKey(), A(4,5,6), "foo", preExisted);
document::BucketId id(22, 0x1ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getContained(id, "foo");
+ auto results = map.getContained(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)3, results.size());
+ EXPECT_EQ(3, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
- CPPUNIT_ASSERT_EQUAL(A(4,5,6), *results[id4.stripUnused()]);
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]);
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]);
+ EXPECT_EQ(A(4,5,6), *results[id4.stripUnused()]);
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]);
}
-void
-LockableMapTest::testFindBuckets3() { // ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_buckets_3) { // ticket 3121525
Map map;
document::BucketId id1(16, 0x0ffff);
@@ -671,18 +352,14 @@ LockableMapTest::testFindBuckets3() { // ticket 3121525
map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
document::BucketId id(22, 0x1ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getContained(id, "foo");
+ auto results = map.getContained(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+ EXPECT_EQ(1, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]);
}
-void
-LockableMapTest::testFindBuckets4() { // ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_buckets_4) { // ticket 3121525
Map map;
document::BucketId id1(16, 0x0ffff);
@@ -695,18 +372,14 @@ LockableMapTest::testFindBuckets4() { // ticket 3121525
map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
document::BucketId id(18, 0x1ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getContained(id, "foo");
+ auto results = map.getContained(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+ EXPECT_EQ(1, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]);
}
-void
-LockableMapTest::testFindBuckets5() { // ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_buckets_5) { // ticket 3121525
Map map;
document::BucketId id1(16, 0x0ffff);
@@ -719,31 +392,23 @@ LockableMapTest::testFindBuckets5() { // ticket 3121525
map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
document::BucketId id(18, 0x1ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getContained(id, "foo");
+ auto results = map.getContained(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+ EXPECT_EQ(1, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]);
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]);
}
-void
-LockableMapTest::testFindNoBuckets() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_no_buckets) {
Map map;
document::BucketId id(16, 0x0ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)0, results.size());
-#endif
+ EXPECT_EQ(0, results.size());
}
-void
-LockableMapTest::testFindAll() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_all) {
Map map;
document::BucketId id1(16, 0x0aaaa); // contains id2-id7
@@ -766,45 +431,26 @@ LockableMapTest::testFindAll() {
map.insert(id7.stripUnused().toKey(), A(7,8,9), "foo", preExisted);
map.insert(id8.stripUnused().toKey(), A(8,9,10), "foo", preExisted);
map.insert(id9.stripUnused().toKey(), A(9,10,11), "foo", preExisted);
- //printBucket("Inserted ", id1);
- //printBucket("Inserted ", id2);
- //printBucket("Inserted ", id3);
- //printBucket("Inserted ", id4);
- //printBucket("Inserted ", id5);
- //printBucket("Inserted ", id6);
- //printBucket("Inserted ", id7);
- //printBucket("Inserted ", id8);
- //printBucket("Inserted ", id9);
document::BucketId id(17, 0x1aaaa);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
-
- //std::cerr << "Done: getAll() for bucket " << id << "\n";
- //printBuckets(results);
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)4, results.size());
+ EXPECT_EQ(4, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
- CPPUNIT_ASSERT_EQUAL(A(5,6,7), *results[id5.stripUnused()]); // most specific match (exact match)
- CPPUNIT_ASSERT_EQUAL(A(6,7,8), *results[id6.stripUnused()]); // sub bucket
- CPPUNIT_ASSERT_EQUAL(A(7,8,9), *results[id7.stripUnused()]); // sub bucket
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ EXPECT_EQ(A(5,6,7), *results[id5.stripUnused()]); // most specific match (exact match)
+ EXPECT_EQ(A(6,7,8), *results[id6.stripUnused()]); // sub bucket
+ EXPECT_EQ(A(7,8,9), *results[id7.stripUnused()]); // sub bucket
id = document::BucketId(16, 0xffff);
results = map.getAll(id, "foo");
- //std::cerr << "Done: getAll() for bucket " << id << "\n";
- //printBuckets(results);
+ EXPECT_EQ(1, results.size());
- CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
-
- CPPUNIT_ASSERT_EQUAL(A(9,10,11), *results[id9.stripUnused()]); // sub bucket
-#endif
+ EXPECT_EQ(A(9,10,11), *results[id9.stripUnused()]); // sub bucket
}
-void
-LockableMapTest::testFindAll2() { // Ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_all_2) { // Ticket 3121525
Map map;
document::BucketId id1(17, 0x00001);
@@ -815,19 +461,15 @@ LockableMapTest::testFindAll2() { // Ticket 3121525
map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
document::BucketId id(16, 0x00001);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+ EXPECT_EQ(2, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // sub bucket
- CPPUNIT_ASSERT_EQUAL(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]); // sub bucket
+ EXPECT_EQ(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
}
-void
-LockableMapTest::testFindAllUnusedBitIsSet() { // ticket 2938896
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_all_unused_bit_is_set) { // ticket 2938896
Map map;
document::BucketId id1(24, 0x000dc7089);
@@ -843,19 +485,15 @@ LockableMapTest::testFindAllUnusedBitIsSet() { // ticket 2938896
document::BucketId id(33, 0x1053c7089);
id.setUsedBits(32); // Bit 33 is set, but unused
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+ EXPECT_EQ(2, results.size());
- CPPUNIT_ASSERT_EQUAL(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
-#endif
+ EXPECT_EQ(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
}
-void
-LockableMapTest::testFindAllInconsistentlySplit() { // Ticket 2938896
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_all_inconsistently_split) { // Ticket 2938896
Map map;
document::BucketId id1(16, 0x00001); // contains id2-id3
@@ -868,20 +506,16 @@ LockableMapTest::testFindAllInconsistentlySplit() { // Ticket 2938896
map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
document::BucketId id(16, 0x00001);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)3, results.size());
+ EXPECT_EQ(3, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // most specific match (exact match)
- CPPUNIT_ASSERT_EQUAL(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]); // most specific match (exact match)
+ EXPECT_EQ(A(2,3,4), *results[id2.stripUnused()]); // sub bucket
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
}
-void
-LockableMapTest::testFindAllInconsistentlySplit2() { // ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_all_inconsistently_split_2) { // ticket 3121525
Map map;
document::BucketId id1(17, 0x10000);
@@ -896,19 +530,15 @@ LockableMapTest::testFindAllInconsistentlySplit2() { // ticket 3121525
map.insert(id4.stripUnused().toKey(), A(4,5,6), "foo", preExisted);
document::BucketId id(32, 0x027228034);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+ EXPECT_EQ(2, results.size());
- CPPUNIT_ASSERT_EQUAL(A(2,3,4), *results[id2.stripUnused()]); // super bucket
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // most specific match (super bucket)
-#endif
+ EXPECT_EQ(A(2,3,4), *results[id2.stripUnused()]); // super bucket
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]); // most specific match (super bucket)
}
-void
-LockableMapTest::testFindAllInconsistentlySplit3() { // ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_all_inconsistently_split_3) { // ticket 3121525
Map map;
document::BucketId id1(16, 0x0ffff); // contains id2
@@ -919,18 +549,14 @@ LockableMapTest::testFindAllInconsistentlySplit3() { // ticket 3121525
map.insert(id2.stripUnused().toKey(), A(2,3,4), "foo", preExisted);
document::BucketId id(22, 0x1ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)1, results.size());
+ EXPECT_EQ(1, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]); // super bucket
}
-void
-LockableMapTest::testFindAllInconsistentlySplit4() { // ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_all_inconsistently_split_4) { // ticket 3121525
Map map;
document::BucketId id1(16, 0x0ffff); // contains id2-id3
@@ -943,19 +569,15 @@ LockableMapTest::testFindAllInconsistentlySplit4() { // ticket 3121525
map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
document::BucketId id(18, 0x1ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+ EXPECT_EQ(2, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
}
-void
-LockableMapTest::testFindAllInconsistentlySplit5() { // ticket 3121525
-#if __WORDSIZE == 64
+TEST(LockableMapTest, find_all_inconsistently_split_5) { // ticket 3121525
Map map;
document::BucketId id1(16, 0x0ffff); // contains id2-id3
@@ -968,18 +590,15 @@ LockableMapTest::testFindAllInconsistentlySplit5() { // ticket 3121525
map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
document::BucketId id(18, 0x1ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+ EXPECT_EQ(2, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
-#endif
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
}
-void
-LockableMapTest::testFindAllInconsistentlySplit6() {
+TEST(LockableMapTest, find_all_inconsistently_split_6) {
Map map;
document::BucketId id1(16, 0x0ffff); // contains id2-id3
@@ -992,18 +611,15 @@ LockableMapTest::testFindAllInconsistentlySplit6() {
map.insert(id3.stripUnused().toKey(), A(3,4,5), "foo", preExisted);
document::BucketId id(18, 0x3ffff);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)2, results.size());
+ EXPECT_EQ(2, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
}
-void
-LockableMapTest::testFindAllInconsistentBelow16Bits()
-{
+TEST(LockableMapTest, find_all_inconsistent_below_16_bits) {
Map map;
document::BucketId id1(1, 0x1); // contains id2-id3
@@ -1017,50 +633,40 @@ LockableMapTest::testFindAllInconsistentBelow16Bits()
document::BucketId id(3, 0x5);
- std::map<document::BucketId, Map::WrappedEntry> results =
- map.getAll(id, "foo");
+ auto results = map.getAll(id, "foo");
- CPPUNIT_ASSERT_EQUAL(size_t(2), results.size());
+ EXPECT_EQ(2, results.size());
- CPPUNIT_ASSERT_EQUAL(A(1,2,3), *results[id1.stripUnused()]); // super bucket
- CPPUNIT_ASSERT_EQUAL(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
+ EXPECT_EQ(A(1,2,3), *results[id1.stripUnused()]); // super bucket
+ EXPECT_EQ(A(3,4,5), *results[id3.stripUnused()]); // sub bucket
}
-void
-LockableMapTest::testCreate() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, create) {
Map map;
{
document::BucketId id1(58, 0x43d6c878000004d2ull);
- std::map<document::BucketId, Map::WrappedEntry> entries(
- map.getContained(id1, "foo"));
+ auto entries = map.getContained(id1, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)0, entries.size());
+ EXPECT_EQ(0, entries.size());
Map::WrappedEntry entry = map.createAppropriateBucket(36, "", id1);
- CPPUNIT_ASSERT_EQUAL(document::BucketId(36,0x8000004d2ull),
- entry.getBucketId());
+ EXPECT_EQ(document::BucketId(36,0x8000004d2ull), entry.getBucketId());
}
{
document::BucketId id1(58, 0x423bf1e0000004d2ull);
- std::map<document::BucketId, Map::WrappedEntry> entries(
- map.getContained(id1, "foo"));
- CPPUNIT_ASSERT_EQUAL((size_t)0, entries.size());
+ auto entries = map.getContained(id1, "foo");
+ EXPECT_EQ(0, entries.size());
Map::WrappedEntry entry = map.createAppropriateBucket(36, "", id1);
- CPPUNIT_ASSERT_EQUAL(document::BucketId(36,0x0000004d2ull),
- entry.getBucketId());
+ EXPECT_EQ(document::BucketId(36,0x0000004d2ull), entry.getBucketId());
}
- CPPUNIT_ASSERT_EQUAL((size_t)2, map.size());
-#endif
+ EXPECT_EQ(2, map.size());
}
-void
-LockableMapTest::testCreate2() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, create_2) {
Map map;
{
document::BucketId id1(58, 0xeaf77782000004d2);
@@ -1069,24 +675,19 @@ LockableMapTest::testCreate2() {
}
{
document::BucketId id1(58, 0x00000000000004d2);
- std::map<document::BucketId, Map::WrappedEntry> entries(
- map.getContained(id1, "foo"));
+ auto entries = map.getContained(id1, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)0, entries.size());
+ EXPECT_EQ(0, entries.size());
Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
- CPPUNIT_ASSERT_EQUAL(document::BucketId(34, 0x0000004d2ull),
- entry.getBucketId());
+ EXPECT_EQ(document::BucketId(34, 0x0000004d2ull), entry.getBucketId());
}
- CPPUNIT_ASSERT_EQUAL((size_t)2, map.size());
-#endif
+ EXPECT_EQ(2, map.size());
}
-void
-LockableMapTest::testCreate3() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, create_3) {
Map map;
{
document::BucketId id1(58, 0xeaf77780000004d2);
@@ -1100,21 +701,16 @@ LockableMapTest::testCreate3() {
}
{
document::BucketId id1(58, 0x00000000000004d2);
- std::map<document::BucketId, Map::WrappedEntry> entries(
- map.getContained(id1, "foo"));
+ auto entries = map.getContained(id1, "foo");
- CPPUNIT_ASSERT_EQUAL((size_t)0, entries.size());
+ EXPECT_EQ(0, entries.size());
Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
- CPPUNIT_ASSERT_EQUAL(document::BucketId(40, 0x0000004d2ull),
- entry.getBucketId());
+ EXPECT_EQ(document::BucketId(40, 0x0000004d2ull), entry.getBucketId());
}
-#endif
}
-void
-LockableMapTest::testCreate4() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, create_4) {
Map map;
{
document::BucketId id1(16, 0x00000000000004d1);
@@ -1130,15 +726,11 @@ LockableMapTest::testCreate4() {
document::BucketId id1(58, 0x00000000010004d2);
Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
- CPPUNIT_ASSERT_EQUAL(document::BucketId(25, 0x0010004d2ull),
- entry.getBucketId());
+ EXPECT_EQ(document::BucketId(25, 0x0010004d2ull), entry.getBucketId());
}
-#endif
}
-void
-LockableMapTest::testCreate6() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, create_5) {
Map map;
{
document::BucketId id1(0x8c000000000004d2);
@@ -1165,16 +757,11 @@ LockableMapTest::testCreate6() {
{
document::BucketId id1(0xe9944a44000004d2);
Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
- CPPUNIT_ASSERT_EQUAL(document::BucketId(0x90000004000004d2),
- entry.getBucketId());
+ EXPECT_EQ(document::BucketId(0x90000004000004d2), entry.getBucketId());
}
-#endif
}
-
-void
-LockableMapTest::testCreate5() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, create_6) {
Map map;
{
document::BucketId id1(58, 0xeaf77780000004d2);
@@ -1190,28 +777,20 @@ LockableMapTest::testCreate5() {
{
document::BucketId id1(58, 0x00000000010004d2);
Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
- CPPUNIT_ASSERT_EQUAL(document::BucketId(25, 0x0010004d2ull),
- entry.getBucketId());
+ EXPECT_EQ(document::BucketId(25, 0x0010004d2ull), entry.getBucketId());
}
-#endif
}
-void
-LockableMapTest::testCreateEmpty() {
-#if __WORDSIZE == 64
+TEST(LockableMapTest, create_empty) {
Map map;
{
document::BucketId id1(58, 0x00000000010004d2);
Map::WrappedEntry entry = map.createAppropriateBucket(16, "", id1);
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 0x0000004d2ull),
- entry.getBucketId());
+ EXPECT_EQ(document::BucketId(16, 0x0000004d2ull), entry.getBucketId());
}
-#endif
}
-void
-LockableMapTest::testIsConsistent()
-{
+TEST(LockableMapTest, is_consistent) {
Map map;
document::BucketId id1(16, 0x00001); // contains id2-id3
document::BucketId id2(17, 0x00001);
@@ -1221,13 +800,13 @@ LockableMapTest::testIsConsistent()
{
Map::WrappedEntry entry(
map.get(id1.stripUnused().toKey(), "foo", true));
- CPPUNIT_ASSERT(map.isConsistent(entry));
+ EXPECT_TRUE(map.isConsistent(entry));
}
map.insert(id2.stripUnused().toKey(), A(1,2,3), "foo", preExisted);
{
Map::WrappedEntry entry(
map.get(id1.stripUnused().toKey(), "foo", true));
- CPPUNIT_ASSERT(!map.isConsistent(entry));
+ EXPECT_FALSE(map.isConsistent(entry));
}
}
diff --git a/storage/src/tests/common/CMakeLists.txt b/storage/src/tests/common/CMakeLists.txt
index ce5376209dc..075dc263be9 100644
--- a/storage/src/tests/common/CMakeLists.txt
+++ b/storage/src/tests/common/CMakeLists.txt
@@ -2,9 +2,6 @@
vespa_add_library(storage_testcommon TEST
SOURCES
dummystoragelink.cpp
- global_bucket_space_distribution_converter_test.cpp
- metricstest.cpp
- storagelinktest.cpp
testhelper.cpp
testnodestateupdater.cpp
teststorageapp.cpp
@@ -14,8 +11,12 @@ vespa_add_library(storage_testcommon TEST
vespa_add_executable(storage_common_gtest_runner_app TEST
SOURCES
+ global_bucket_space_distribution_converter_test.cpp
gtest_runner.cpp
+ metricstest.cpp
+ storagelinktest.cpp
DEPENDS
+ storage_testcommon
storage
gtest
)
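The converter test that follows uses ASSERT_EQ rather than EXPECT_EQ for size checks that guard later indexing, since a fatal assertion stops the test body before it can index out of bounds. A minimal sketch of that distinction, with placeholder names:

#include <gtest/gtest.h>
#include <vector>

TEST(AssertVsExpectExample, guard_before_indexing) {
    std::vector<int> groups{7, 8, 9};
    // ASSERT_* returns from the test body on failure, so the indexing below is safe.
    ASSERT_EQ(size_t(3), groups.size());
    // EXPECT_* records a failure but keeps executing the rest of the test.
    EXPECT_EQ(7, groups[0]);
    EXPECT_EQ(9, groups[2]);
}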
diff --git a/storage/src/tests/common/global_bucket_space_distribution_converter_test.cpp b/storage/src/tests/common/global_bucket_space_distribution_converter_test.cpp
index d75f2ac6459..2103970a8f0 100644
--- a/storage/src/tests/common/global_bucket_space_distribution_converter_test.cpp
+++ b/storage/src/tests/common/global_bucket_space_distribution_converter_test.cpp
@@ -1,37 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/storage/common/global_bucket_space_distribution_converter.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/config/config.h>
#include <vespa/vdslib/state/clusterstate.h>
+#include <vespa/vespalib/gtest/gtest.h>
-namespace storage {
-
-struct GlobalBucketSpaceDistributionConverterTest : public CppUnit::TestFixture {
- CPPUNIT_TEST_SUITE(GlobalBucketSpaceDistributionConverterTest);
- CPPUNIT_TEST(can_transform_flat_cluster_config);
- CPPUNIT_TEST(can_transform_single_level_multi_group_config);
- CPPUNIT_TEST(can_transform_multi_level_multi_group_config);
- CPPUNIT_TEST(can_transform_heterogenous_multi_group_config);
- CPPUNIT_TEST(can_transform_concrete_distribution_instance);
- CPPUNIT_TEST(config_retired_state_is_propagated);
- CPPUNIT_TEST(group_capacities_are_propagated);
- CPPUNIT_TEST(global_distribution_has_same_owner_distributors_as_default);
- CPPUNIT_TEST(can_generate_config_with_legacy_partition_spec);
- CPPUNIT_TEST_SUITE_END();
+using namespace ::testing;
- void can_transform_flat_cluster_config();
- void can_transform_single_level_multi_group_config();
- void can_transform_multi_level_multi_group_config();
- void can_transform_heterogenous_multi_group_config();
- void can_transform_concrete_distribution_instance();
- void config_retired_state_is_propagated();
- void group_capacities_are_propagated();
- void global_distribution_has_same_owner_distributors_as_default();
- void can_generate_config_with_legacy_partition_spec();
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(GlobalBucketSpaceDistributionConverterTest);
+namespace storage {
using DistributionConfig = vespa::config::content::StorDistributionConfig;
@@ -77,12 +53,12 @@ disk_distribution MODULO_BID
}
-void GlobalBucketSpaceDistributionConverterTest::can_transform_flat_cluster_config() {
- CPPUNIT_ASSERT_EQUAL(expected_flat_global_config, default_to_global_config(default_flat_config));
+TEST(GlobalBucketSpaceDistributionConverterTest, can_transform_flat_cluster_config) {
+ EXPECT_EQ(expected_flat_global_config, default_to_global_config(default_flat_config));
}
-void GlobalBucketSpaceDistributionConverterTest::can_transform_single_level_multi_group_config() {
+TEST(GlobalBucketSpaceDistributionConverterTest, can_transform_single_level_multi_group_config) {
vespalib::string default_config(
R"(redundancy 2
group[3]
@@ -143,10 +119,10 @@ group[2].nodes[2].index 5
group[2].nodes[2].retired false
disk_distribution MODULO_BID
)");
- CPPUNIT_ASSERT_EQUAL(expected_global_config, default_to_global_config(default_config));
+ EXPECT_EQ(expected_global_config, default_to_global_config(default_config));
}
-void GlobalBucketSpaceDistributionConverterTest::can_transform_multi_level_multi_group_config() {
+TEST(GlobalBucketSpaceDistributionConverterTest, can_transform_multi_level_multi_group_config) {
vespalib::string default_config(
R"(redundancy 2
group[5]
@@ -226,13 +202,13 @@ group[6].nodes[0].index 3
group[6].nodes[0].retired false
disk_distribution MODULO_BID
)");
- CPPUNIT_ASSERT_EQUAL(expected_global_config, default_to_global_config(default_config));
+ EXPECT_EQ(expected_global_config, default_to_global_config(default_config));
}
// FIXME partition specs are order-invariant with regards to groups, so heterogenous
// setups will not produce the expected replica distribution.
// TODO Consider disallowing entirely when using global docs.
-void GlobalBucketSpaceDistributionConverterTest::can_transform_heterogenous_multi_group_config() {
+TEST(GlobalBucketSpaceDistributionConverterTest, can_transform_heterogenous_multi_group_config) {
vespalib::string default_config(
R"(redundancy 2
ready_copies 2
@@ -279,17 +255,17 @@ group[2].nodes[0].index 2
group[2].nodes[0].retired false
disk_distribution MODULO_BID
)");
- CPPUNIT_ASSERT_EQUAL(expected_global_config, default_to_global_config(default_config));
+ EXPECT_EQ(expected_global_config, default_to_global_config(default_config));
}
-void GlobalBucketSpaceDistributionConverterTest::can_transform_concrete_distribution_instance() {
+TEST(GlobalBucketSpaceDistributionConverterTest, can_transform_concrete_distribution_instance) {
auto default_cfg = GlobalBucketSpaceDistributionConverter::string_to_config(default_flat_config);
lib::Distribution flat_distr(*default_cfg);
auto global_distr = GlobalBucketSpaceDistributionConverter::convert_to_global(flat_distr);
- CPPUNIT_ASSERT_EQUAL(expected_flat_global_config, global_distr->serialize());
+ EXPECT_EQ(expected_flat_global_config, global_distr->serialize());
}
-void GlobalBucketSpaceDistributionConverterTest::config_retired_state_is_propagated() {
+TEST(GlobalBucketSpaceDistributionConverterTest, config_retired_state_is_propagated) {
vespalib::string default_config(
R"(redundancy 1
group[1]
@@ -308,14 +284,14 @@ group[0].nodes[2].retired true
auto default_cfg = GlobalBucketSpaceDistributionConverter::string_to_config(default_config);
auto as_global = GlobalBucketSpaceDistributionConverter::convert_to_global(*default_cfg);
- CPPUNIT_ASSERT_EQUAL(size_t(1), as_global->group.size());
- CPPUNIT_ASSERT_EQUAL(size_t(3), as_global->group[0].nodes.size());
- CPPUNIT_ASSERT_EQUAL(false, as_global->group[0].nodes[0].retired);
- CPPUNIT_ASSERT_EQUAL(true, as_global->group[0].nodes[1].retired);
- CPPUNIT_ASSERT_EQUAL(true, as_global->group[0].nodes[2].retired);
+ ASSERT_EQ(1, as_global->group.size());
+ ASSERT_EQ(3, as_global->group[0].nodes.size());
+ EXPECT_FALSE(as_global->group[0].nodes[0].retired);
+ EXPECT_TRUE(as_global->group[0].nodes[1].retired);
+ EXPECT_TRUE(as_global->group[0].nodes[2].retired);
}
-void GlobalBucketSpaceDistributionConverterTest::group_capacities_are_propagated() {
+TEST(GlobalBucketSpaceDistributionConverterTest, group_capacities_are_propagated) {
vespalib::string default_config(
R"(redundancy 2
group[3]
@@ -338,13 +314,13 @@ group[2].nodes[0].index 1
auto default_cfg = GlobalBucketSpaceDistributionConverter::string_to_config(default_config);
auto as_global = GlobalBucketSpaceDistributionConverter::convert_to_global(*default_cfg);
- CPPUNIT_ASSERT_EQUAL(size_t(3), as_global->group.size());
- CPPUNIT_ASSERT_DOUBLES_EQUAL(5.0, as_global->group[0].capacity, 0.00001);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(2.0, as_global->group[1].capacity, 0.00001);
- CPPUNIT_ASSERT_DOUBLES_EQUAL(3.0, as_global->group[2].capacity, 0.00001);
+ ASSERT_EQ(3, as_global->group.size());
+ EXPECT_DOUBLE_EQ(5.0, as_global->group[0].capacity);
+ EXPECT_DOUBLE_EQ(2.0, as_global->group[1].capacity);
+ EXPECT_DOUBLE_EQ(3.0, as_global->group[2].capacity);
}
-void GlobalBucketSpaceDistributionConverterTest::global_distribution_has_same_owner_distributors_as_default() {
+TEST(GlobalBucketSpaceDistributionConverterTest, global_distribution_has_same_owner_distributors_as_default) {
vespalib::string default_config(
R"(redundancy 2
ready_copies 2
@@ -375,13 +351,13 @@ group[2].nodes[1].index 2
document::BucketId bucket(16, i);
const auto default_index = default_distr.getIdealDistributorNode(state, bucket, "ui");
const auto global_index = global_distr.getIdealDistributorNode(state, bucket, "ui");
- CPPUNIT_ASSERT_EQUAL(default_index, global_index);
+ ASSERT_EQ(default_index, global_index);
}
}
// By "legacy" read "broken", but we need to be able to generate it to support rolling upgrades properly.
// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
-void GlobalBucketSpaceDistributionConverterTest::can_generate_config_with_legacy_partition_spec() {
+TEST(GlobalBucketSpaceDistributionConverterTest, can_generate_config_with_legacy_partition_spec) {
vespalib::string default_config(
R"(redundancy 2
group[3]
@@ -436,7 +412,7 @@ group[2].nodes[2].index 5
group[2].nodes[2].retired false
disk_distribution MODULO_BID
)");
- CPPUNIT_ASSERT_EQUAL(expected_global_config, default_to_global_config(default_config, true));
+ EXPECT_EQ(expected_global_config, default_to_global_config(default_config, true));
}
}
\ No newline at end of file
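The metricstest.cpp conversion that follows swaps CppUnit's fixture lifecycle for GTest's: setUp()/tearDown() become SetUp()/TearDown() overrides on a fixture deriving from ::testing::Test, and each member test method becomes a TEST_F case. A minimal sketch of the pattern, with placeholder names:

#include <gtest/gtest.h>

struct ExampleFixture : ::testing::Test {
    int value = 0;
    void SetUp() override { value = 42; }   // runs before each TEST_F (was setUp)
    void TearDown() override { value = 0; } // runs after each TEST_F (was tearDown)
};

TEST_F(ExampleFixture, sees_initialised_members) {
    EXPECT_EQ(42, value);
}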
diff --git a/storage/src/tests/common/metricstest.cpp b/storage/src/tests/common/metricstest.cpp
index b0c82aa3166..9a9f05d500e 100644
--- a/storage/src/tests/common/metricstest.cpp
+++ b/storage/src/tests/common/metricstest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <cppunit/extensions/HelperMacros.h>
#include <vespa/document/fieldvalue/document.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
@@ -14,15 +13,17 @@
#include <vespa/metrics/metricmanager.h>
#include <vespa/config/common/exceptions.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <thread>
#include <vespa/log/log.h>
LOG_SETUP(".test.metrics");
+using namespace ::testing;
+
namespace storage {
-struct MetricsTest : public CppUnit::TestFixture {
- FastOS_ThreadPool _threadPool;
+struct MetricsTest : public Test {
framework::defaultimplementation::FakeClock* _clock;
std::unique_ptr<TestServiceLayerApp> _node;
std::unique_ptr<DummyStorageLink> _top;
@@ -40,48 +41,33 @@ struct MetricsTest : public CppUnit::TestFixture {
uint64_t expected);
MetricsTest();
+ ~MetricsTest() override;
- void setUp() override;
- void tearDown() override;
- void runLoad(uint32_t count = 1);
+ void SetUp() override;
+ void TearDown() override;
void createFakeLoad();
-
- void testFileStorMetrics();
- void testSnapshotPresenting();
- void testHtmlMetricsReport();
- void testCurrentGaugeValuesOverrideSnapshotValues();
- void testVerboseReportIncludesNonSetMetricsEvenAfterSnapshot();
-
- CPPUNIT_TEST_SUITE(MetricsTest);
- CPPUNIT_TEST(testFileStorMetrics);
- CPPUNIT_TEST(testSnapshotPresenting);
- CPPUNIT_TEST(testHtmlMetricsReport);
- CPPUNIT_TEST(testCurrentGaugeValuesOverrideSnapshotValues);
- CPPUNIT_TEST(testVerboseReportIncludesNonSetMetricsEvenAfterSnapshot);
- CPPUNIT_TEST_SUITE_END();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(MetricsTest);
-
namespace {
struct MetricClock : public metrics::MetricManager::Timer
{
framework::Clock& _clock;
- MetricClock(framework::Clock& c) : _clock(c) {}
+ explicit MetricClock(framework::Clock& c) : _clock(c) {}
time_t getTime() const override { return _clock.getTimeInSeconds().getTime(); }
time_t getTimeInMilliSecs() const override { return _clock.getTimeInMillis().getTime(); }
};
}
MetricsTest::MetricsTest()
- : _threadPool(256*1024),
- _clock(0),
+ : _clock(nullptr),
_top(),
_metricsConsumer()
{
}
-void MetricsTest::setUp() {
+MetricsTest::~MetricsTest() = default;
+
+void MetricsTest::SetUp() {
_config = std::make_unique<vdstestlib::DirConfig>(getStandardConfig(true, "metricstest"));
assert(system(("rm -rf " + getRootFolder(*_config)).c_str()) == 0);
try {
@@ -124,7 +110,7 @@ void MetricsTest::setUp() {
_metricManager->init(_config->getConfigId(), _node->getThreadPool());
}
-void MetricsTest::tearDown() {
+void MetricsTest::TearDown() {
_metricManager->stop();
_metricsConsumer.reset();
_topSet.reset();
@@ -220,7 +206,7 @@ void MetricsTest::createFakeLoad()
}
}
-void MetricsTest::testFileStorMetrics() {
+TEST_F(MetricsTest, filestor_metrics) {
createFakeLoad();
std::ostringstream ost;
framework::HttpUrlPath path("metrics?interval=-1&format=text");
@@ -244,30 +230,28 @@ void MetricsTest::testFileStorMetrics() {
std::ostringstream ost;\
framework::HttpUrlPath path(pathost.str()); \
bool retVal = _metricsConsumer->reportStatus(ost, path); \
- CPPUNIT_ASSERT_MESSAGE("_metricsConsumer->reportStatus failed", retVal); \
+ ASSERT_TRUE(retVal) << "_metricsConsumer->reportStatus failed"; \
std::string s = ost.str(); \
if (count == -1) { \
- CPPUNIT_ASSERT_MESSAGE(std::string("Metric ") + metric + " was set", \
- s.find(metric) == std::string::npos); \
+ ASSERT_TRUE(s.find(metric) == std::string::npos) << std::string("Metric ") + metric + " was set"; \
} else { \
std::ostringstream valueost; \
valueost << metric << " count=" << count; \
- CPPUNIT_ASSERT_MESSAGE("Did not find value " + valueost.str() \
- + " in metric dump " + s, \
- s.find(valueost.str()) != std::string::npos); \
+ ASSERT_TRUE(s.find(valueost.str()) != std::string::npos) \
+ << "Did not find value " + valueost.str() + " in metric dump " + s; \
} \
}
-void MetricsTest::testSnapshotPresenting() {
+TEST_F(MetricsTest, snapshot_presenting) {
FileStorDiskMetrics& disk0(*_filestorMetrics->disks[0]);
FileStorThreadMetrics& thread0(*disk0.threads[0]);
- LOG(info, "Adding to get metric");
+ LOG(debug, "Adding to get metric");
using documentapi::LoadType;
thread0.get[LoadType::DEFAULT].count.inc(1);
- LOG(info, "Waiting for 5 minute snapshot to be taken");
+ LOG(debug, "Waiting for 5 minute snapshot to be taken");
// Wait until active metrics have been added to 5 min snapshot and reset
for (uint32_t i=0; i<6; ++i) {
_clock->addSecondsToTime(60);
@@ -279,7 +263,7 @@ void MetricsTest::testSnapshotPresenting() {
FastOS_Thread::Sleep(1);
}
}
- LOG(info, "5 minute snapshot should have been taken. Adding put count");
+ LOG(debug, "5 minute snapshot should have been taken. Adding put count");
thread0.put[LoadType::DEFAULT].count.inc(1);
@@ -300,7 +284,7 @@ void MetricsTest::testSnapshotPresenting() {
ASSERT_METRIC(-1, "vds.filestor.alldisks.allthreads.get.sum.count", 1);
}
-void MetricsTest::testHtmlMetricsReport() {
+TEST_F(MetricsTest, html_metrics_report) {
createFakeLoad();
_clock->addSecondsToTime(6 * 60);
_metricManager->timeChangedNotification();
@@ -309,16 +293,7 @@ void MetricsTest::testHtmlMetricsReport() {
std::ostringstream ost;
framework::HttpUrlPath path("metrics?interval=300&format=html");
bool retVal = _metricsConsumer->reportStatus(ost, path);
- CPPUNIT_ASSERT_MESSAGE("_metricsConsumer->reportStatus failed", retVal);
- std::string s = ost.str();
- // Not actually testing against content. Better to manually verify that
- // HTML look sane after changes.
- //std::cerr << s << "\n";
- {
- std::ofstream out("metricsreport.html");
- out << s;
- out.close();
- }
+ ASSERT_TRUE(retVal) << "_metricsConsumer->reportStatus failed";
}
void
@@ -332,13 +307,12 @@ MetricsTest::assertMetricLastValue(const std::string& name,
<< "&verbosity=2";
std::ostringstream report;
framework::HttpUrlPath uri(path.str());
- CPPUNIT_ASSERT(_metricsConsumer->reportStatus(report, uri));
+ ASSERT_TRUE(_metricsConsumer->reportStatus(report, uri));
std::ostringstream expectedSubstr;
expectedSubstr << " last=" << expected;
auto str = report.str();
- CPPUNIT_ASSERT_MESSAGE("Did not find value " + expectedSubstr.str()
- + " in metric dump " + str,
- str.find(expectedSubstr.str()) != std::string::npos);
+ ASSERT_TRUE(str.find(expectedSubstr.str()) != std::string::npos)
+ << "Did not find value " + expectedSubstr.str() + " in metric dump " + str;
}
using namespace std::chrono_literals;
@@ -355,9 +329,7 @@ MetricsTest::createSnapshotForPeriod(std::chrono::seconds secs)
}
}
-void
-MetricsTest::testCurrentGaugeValuesOverrideSnapshotValues()
-{
+TEST_F(MetricsTest, current_gauge_values_override_snapshot_values) {
auto& metrics(*_bucketManagerMetrics->disks[0]);
metrics.docs.set(1000);
// Take a 5 minute snapshot of active metrics (1000 docs).
@@ -371,9 +343,7 @@ MetricsTest::testCurrentGaugeValuesOverrideSnapshotValues()
assertMetricLastValue("vds.datastored.alldisks.docs", -1, 2000);
}
-void
-MetricsTest::testVerboseReportIncludesNonSetMetricsEvenAfterSnapshot()
-{
+TEST_F(MetricsTest, verbose_report_includes_non_set_metrics_even_after_snapshot) {
createSnapshotForPeriod(5min);
// When using verbosity=2 (which is what the system test framework invokes),
// all metrics should be included regardless of whether they've been set or
diff --git a/storage/src/tests/common/storagelinktest.cpp b/storage/src/tests/common/storagelinktest.cpp
index 95fe8ad23da..890376debbc 100644
--- a/storage/src/tests/common/storagelinktest.cpp
+++ b/storage/src/tests/common/storagelinktest.cpp
@@ -1,25 +1,35 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <tests/common/storagelinktest.h>
#include <vespa/document/test/make_document_bucket.h>
+#include <tests/common/dummystoragelink.h>
+#include <vespa/storageapi/message/stat.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <iostream>
#include <string>
-#include <vespa/storageapi/message/stat.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-CPPUNIT_TEST_SUITE_REGISTRATION(StorageLinkTest);
+struct StorageLinkTest : public Test {
+ std::unique_ptr<DummyStorageLink> _feeder;
+ DummyStorageLink* _middle;
+ DummyStorageLink* _replier;
+
+ StorageLinkTest();
+
+ void SetUp() override;
+};
StorageLinkTest::StorageLinkTest()
- : _threadPool(1024),
- _feeder(),
- _middle(0),
- _replier(0) {}
+ : _feeder(),
+ _middle(nullptr),
+ _replier(nullptr)
+{}
-void StorageLinkTest::setUp() {
- _feeder.reset(new DummyStorageLink());
+void StorageLinkTest::SetUp() {
+ _feeder = std::make_unique<DummyStorageLink>();
_middle = new DummyStorageLink();
_replier = new DummyStorageLink();
_feeder->push_back(StorageLink::UP(_middle));
@@ -27,7 +37,7 @@ void StorageLinkTest::setUp() {
_replier->setAutoreply(true);
}
-void StorageLinkTest::testPrinting() {
+TEST_F(StorageLinkTest, printing) {
std::ostringstream actual;
actual << *_feeder;
std::string expected =
@@ -36,21 +46,19 @@ void StorageLinkTest::testPrinting() {
" DummyStorageLink(autoreply = off, dispatch = off, 0 commands, 0 replies)\n"
" DummyStorageLink(autoreply = on, dispatch = off, 0 commands, 0 replies)";
- CPPUNIT_ASSERT_EQUAL(expected, actual.str());
+ EXPECT_EQ(expected, actual.str());
}
-void StorageLinkTest::testNotImplemented() {
+TEST_F(StorageLinkTest, not_implemented) {
_feeder->open();
// Test that a message that nobody handles fails with NOT_IMPLEMENTED
_replier->setIgnore(true);
- _feeder->sendDown(api::StorageCommand::SP(
- new api::StatBucketCommand(makeDocumentBucket(document::BucketId(0)), "")));
+ _feeder->sendDown(std::make_shared<api::StatBucketCommand>(makeDocumentBucket(document::BucketId(0)), ""));
_feeder->close();
_feeder->flush();
- CPPUNIT_ASSERT_EQUAL((size_t) 1, _feeder->getNumReplies());
- CPPUNIT_ASSERT_EQUAL(
- dynamic_cast<api::StatBucketReply&>(
- *_feeder->getReply(0)).getResult(),
+ ASSERT_EQ(1, _feeder->getNumReplies());
+ EXPECT_EQ(
+ dynamic_cast<api::StatBucketReply&>(*_feeder->getReply(0)).getResult(),
api::ReturnCode(api::ReturnCode::NOT_IMPLEMENTED, "Statbucket"));
_feeder->reset();
_replier->setIgnore(false);
diff --git a/storage/src/tests/common/storagelinktest.h b/storage/src/tests/common/storagelinktest.h
deleted file mode 100644
index 6f62edefe33..00000000000
--- a/storage/src/tests/common/storagelinktest.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <cppunit/extensions/HelperMacros.h>
-#include <tests/common/dummystoragelink.h>
-#include <vespa/fastos/thread.h>
-
-namespace storage {
-
-struct StorageLinkTest : public CppUnit::TestFixture {
- FastOS_ThreadPool _threadPool;
- std::unique_ptr<DummyStorageLink> _feeder;
- DummyStorageLink* _middle;
- DummyStorageLink* _replier;
-
- StorageLinkTest();
-
- void setUp() override;
-
- void testPrinting();
- void testNotImplemented();
-
- static bool callOnUp(StorageLink& link, const api::StorageMessage::SP& msg) {
- return link.onUp(msg);
- }
- static bool callOnDown(StorageLink& link, const api::StorageMessage::SP& msg) {
- return link.onDown(msg);
- }
- static void callOnFlush(StorageLink& link, bool downwards) {
- link.onFlush(downwards);
- }
-
- CPPUNIT_TEST_SUITE(StorageLinkTest);
- CPPUNIT_TEST(testPrinting);
- CPPUNIT_TEST(testNotImplemented);
- CPPUNIT_TEST_SUITE_END();
-};
-
-}
diff --git a/storage/src/tests/distributor/messagesenderstub.h b/storage/src/tests/distributor/messagesenderstub.h
index b86863890a1..1b526813ef7 100644
--- a/storage/src/tests/distributor/messagesenderstub.h
+++ b/storage/src/tests/distributor/messagesenderstub.h
@@ -4,6 +4,7 @@
#include <vespa/storage/distributor/distributormessagesender.h>
#include <cassert>
#include <vector>
+#include <string>
namespace storage {
diff --git a/storage/src/tests/frameworkimpl/status/CMakeLists.txt b/storage/src/tests/frameworkimpl/status/CMakeLists.txt
index fb550807dff..655dff1bd0c 100644
--- a/storage/src/tests/frameworkimpl/status/CMakeLists.txt
+++ b/storage/src/tests/frameworkimpl/status/CMakeLists.txt
@@ -1,17 +1,9 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-# TODO: Remove test library when test has been migrated to gtest.
-vespa_add_library(storage_teststatus TEST
- SOURCES
- statustest.cpp
- DEPENDS
- storage
- storage_testcommon
-)
-
vespa_add_executable(storage_status_gtest_runner_app TEST
SOURCES
gtest_runner.cpp
+ statustest.cpp
DEPENDS
storage
storage_testcommon
diff --git a/storage/src/tests/frameworkimpl/status/statustest.cpp b/storage/src/tests/frameworkimpl/status/statustest.cpp
index a9ffe188c69..e7d0d496cc8 100644
--- a/storage/src/tests/frameworkimpl/status/statustest.cpp
+++ b/storage/src/tests/frameworkimpl/status/statustest.cpp
@@ -6,21 +6,24 @@
#include <vespa/storageframework/generic/status/htmlstatusreporter.h>
#include <vespa/storageframework/generic/status/xmlstatusreporter.h>
#include <tests/common/teststorageapp.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/document/util/stringutil.h>
#include <vespa/vespalib/net/crypto_engine.h>
#include <vespa/vespalib/net/socket_spec.h>
#include <vespa/vespalib/net/sync_crypto_socket.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <gmock/gmock.h>
+
+using namespace ::testing;
vespalib::string fetch(int port, const vespalib::string &path) {
auto crypto = vespalib::CryptoEngine::get_default();
auto socket = vespalib::SocketSpec::from_port(port).client_address().connect();
- CPPUNIT_ASSERT(socket.valid());
+ assert(socket.valid());
auto conn = vespalib::SyncCryptoSocket::create(*crypto, std::move(socket), false);
vespalib::string http_req = vespalib::make_string("GET %s HTTP/1.1\r\n"
"Host: localhost:%d\r\n"
"\r\n", path.c_str(), port);
- CPPUNIT_ASSERT_EQUAL(conn->write(http_req.data(), http_req.size()), ssize_t(http_req.size()));
+ assert(conn->write(http_req.data(), http_req.size()) == ssize_t(http_req.size()));
char buf[1024];
vespalib::string result;
ssize_t res = conn->read(buf, sizeof(buf));
@@ -28,33 +31,18 @@ vespalib::string fetch(int port, const vespalib::string &path) {
result.append(vespalib::stringref(buf, res));
res = conn->read(buf, sizeof(buf));
}
- CPPUNIT_ASSERT_EQUAL(res, ssize_t(0));
+ assert(res == 0);
return result;
}
namespace storage {
-struct StatusTest : public CppUnit::TestFixture {
+struct StatusTest : Test {
std::unique_ptr<TestServiceLayerApp> _node;
- void setUp() override;
-
- void testIndexStatusPage();
- void testHtmlStatus();
- void testXmlStatus();
- void test404();
- void requireThatServerSpecIsConstructedCorrectly();
-
- CPPUNIT_TEST_SUITE(StatusTest);
- CPPUNIT_TEST(testIndexStatusPage);
- CPPUNIT_TEST(testHtmlStatus);
- CPPUNIT_TEST(testXmlStatus);
- CPPUNIT_TEST(test404);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(StatusTest);
-
namespace {
struct HtmlStatusReporter : public framework::HtmlStatusReporter {
std::string _headerAddition;
@@ -102,20 +90,16 @@ namespace {
{
registerStatusPage(*_reporter);
}
- ~StatusComponent() { delete _reporter; }
+ ~StatusComponent() override { delete _reporter; }
};
}
-void
-StatusTest::setUp()
-{
- _node.reset(new TestServiceLayerApp);
+void StatusTest::SetUp() {
+ _node = std::make_unique<TestServiceLayerApp>();
}
-void
-StatusTest::testIndexStatusPage()
-{
+TEST_F(StatusTest, index_status_page) {
StatusComponent rep1(_node->getComponentRegister(), "foo",
new HtmlStatusReporter(
"fooid", "Foo impl", "<p>info</p>"));
@@ -144,12 +128,10 @@ StatusTest::testIndexStatusPage()
"<\\/body>\n"
"<\\/html>\n"
);
- CPPUNIT_ASSERT_MATCH_REGEX(expected, actual);
+ EXPECT_THAT(actual, MatchesRegex(expected));
}
-void
-StatusTest::testHtmlStatus()
-{
+TEST_F(StatusTest, html_status) {
StatusComponent rep1(_node->getComponentRegister(), "foo",
new HtmlStatusReporter(
"fooid", "Foo impl", "<p>info</p>", "<!-- script -->"));
@@ -172,12 +154,10 @@ StatusTest::testHtmlStatus()
"<p>info</p></body>\n"
"</html>\n"
);
- CPPUNIT_ASSERT_EQUAL(expected, std::string(actual));
+ EXPECT_EQ(expected, std::string(actual));
}
-void
-StatusTest::testXmlStatus()
-{
+TEST_F(StatusTest, xml_status) {
StatusComponent rep1(_node->getComponentRegister(), "foo",
new XmlStatusReporter(
"fooid", "Foo impl"));
@@ -196,12 +176,10 @@ StatusTest::testXmlStatus()
"<mytag foo=\"bar\">content</mytag>\n"
"</status>"
);
- CPPUNIT_ASSERT_EQUAL(expected, std::string(actual));
+ EXPECT_EQ(expected, std::string(actual));
}
-void
-StatusTest::test404()
-{
+TEST_F(StatusTest, test404) {
StatusWebServer webServer(_node->getComponentRegister(),
_node->getComponentRegister(),
"raw:httpport 0");
@@ -211,7 +189,7 @@ StatusTest::test404()
"Connection: close\r\n"
"\r\n"
);
- CPPUNIT_ASSERT_EQUAL_ESCAPED(expected, std::string(actual));
+ EXPECT_EQ(expected, std::string(actual));
}
} // storage
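The statustest.cpp hunks above are the one place in this patch that needs gmock in addition to gtest: CPPUNIT_ASSERT_MATCH_REGEX becomes EXPECT_THAT with the MatchesRegex matcher. A short sketch of that usage with hypothetical values, not taken from this patch:

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <string>

using ::testing::MatchesRegex;

TEST(RegexMatcherSketch, response_line_has_numeric_status) {
    std::string actual = "HTTP/1.1 200 OK";  // stand-in for a fetched response line
    // MatchesRegex requires the whole string to match the pattern;
    // ContainsRegex would be the substring-matching counterpart.
    EXPECT_THAT(actual, MatchesRegex("HTTP/1\\.1 [0-9]+ .*"));
}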
diff --git a/storage/src/tests/persistence/CMakeLists.txt b/storage/src/tests/persistence/CMakeLists.txt
index 0e4c661c749..76361e1d419 100644
--- a/storage/src/tests/persistence/CMakeLists.txt
+++ b/storage/src/tests/persistence/CMakeLists.txt
@@ -1,5 +1,6 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_library(storage_testpersistence TEST
+
+vespa_add_executable(storage_persistence_gtest_runner_app TEST
SOURCES
bucketownershipnotifiertest.cpp
diskmoveoperationhandlertest.cpp
@@ -11,14 +12,6 @@ vespa_add_library(storage_testpersistence TEST
provider_error_wrapper_test.cpp
splitbitdetectortest.cpp
testandsettest.cpp
- DEPENDS
- storage
- storage_testdistributor
- storage_testpersistence_common
-)
-
-vespa_add_executable(storage_persistence_gtest_runner_app TEST
- SOURCES
gtest_runner.cpp
DEPENDS
storage
diff --git a/storage/src/tests/persistence/bucketownershipnotifiertest.cpp b/storage/src/tests/persistence/bucketownershipnotifiertest.cpp
index 9699e0907e3..f4820f3ff13 100644
--- a/storage/src/tests/persistence/bucketownershipnotifiertest.cpp
+++ b/storage/src/tests/persistence/bucketownershipnotifiertest.cpp
@@ -1,34 +1,26 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/storage/persistence/bucketownershipnotifier.h>
#include <tests/distributor/messagesenderstub.h>
#include <tests/common/teststorageapp.h>
-#include <vespa/storage/persistence/bucketownershipnotifier.h>
-#include <vespa/document/test/make_document_bucket.h>
+#include <vespa/vespalib/gtest/gtest.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class BucketOwnershipNotifierTest : public CppUnit::TestFixture
-{
+struct BucketOwnershipNotifierTest : public Test {
std::unique_ptr<TestServiceLayerApp> _app;
lib::ClusterState _clusterState;
-public:
BucketOwnershipNotifierTest()
: _app(),
_clusterState("distributor:2 storage:1")
{}
- void setUp() override;
-
- CPPUNIT_TEST_SUITE(BucketOwnershipNotifierTest);
- CPPUNIT_TEST(testSendNotifyBucketChangeIfOwningDistributorChanged);
- CPPUNIT_TEST(testDoNotSendNotifyBucketChangeIfBucketOwnedByInitialSender);
- CPPUNIT_TEST(testIgnoreIdealStateCalculationExceptions);
- CPPUNIT_TEST(testGuardNotifyAlways);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
bool ownsBucket(uint16_t distributorIndex,
const document::BucketId& bucket) const
@@ -56,23 +48,15 @@ public:
return makeDocumentBucket(document::BucketId(0));
}
-
- void testSendNotifyBucketChangeIfOwningDistributorChanged();
- void testDoNotSendNotifyBucketChangeIfBucketOwnedByInitialSender();
- void testIgnoreIdealStateCalculationExceptions();
- void testGuardNotifyAlways();
-
void doTestNotification(const document::Bucket &bucket,
const api::BucketInfo& info,
const std::string& wantedSend);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(BucketOwnershipNotifierTest);
-
void
-BucketOwnershipNotifierTest::setUp()
+BucketOwnershipNotifierTest::SetUp()
{
- _app.reset(new TestServiceLayerApp);
+ _app = std::make_unique<TestServiceLayerApp>();
_app->setDistribution(Redundancy(1), NodeCount(2));
_app->setClusterState(_clusterState);
}
@@ -89,15 +73,13 @@ BucketOwnershipNotifierTest::doTestNotification(const document::Bucket &bucket,
notifier.notifyIfOwnershipChanged(bucket, 0, info);
- CPPUNIT_ASSERT_EQUAL(wantedSend, sender.getCommands(true, true));
+ EXPECT_EQ(wantedSend, sender.getCommands(true, true));
}
-void
-BucketOwnershipNotifierTest::testSendNotifyBucketChangeIfOwningDistributorChanged()
-{
+TEST_F(BucketOwnershipNotifierTest, send_notify_bucket_change_if_owning_distributor_changed) {
api::BucketInfo info(0x1, 2, 3);
document::Bucket bucket(getFirstNonOwnedBucket());
- CPPUNIT_ASSERT(bucket.getBucketId().getRawId() != 0);
+ ASSERT_NE(bucket.getBucketId().getRawId(), 0ULL);
std::ostringstream wanted;
wanted << "NotifyBucketChangeCommand("
@@ -108,31 +90,25 @@ BucketOwnershipNotifierTest::testSendNotifyBucketChangeIfOwningDistributorChange
doTestNotification(bucket, info, wanted.str());
}
-void
-BucketOwnershipNotifierTest::testDoNotSendNotifyBucketChangeIfBucketOwnedByInitialSender()
-{
+TEST_F(BucketOwnershipNotifierTest, do_not_send_notify_bucket_change_if_bucket_owned_by_initial_sender) {
api::BucketInfo info(0x1, 2, 3);
document::Bucket bucket(getFirstOwnedBucket());
- CPPUNIT_ASSERT(bucket.getBucketId().getRawId() != 0);
+ ASSERT_NE(bucket.getBucketId().getRawId(), 0ULL);
doTestNotification(bucket, info, "");
}
-void
-BucketOwnershipNotifierTest::testIgnoreIdealStateCalculationExceptions()
-{
+TEST_F(BucketOwnershipNotifierTest, ignore_ideal_state_calculation_exceptions) {
api::BucketInfo info(0x1, 2, 3);
document::Bucket bucket(getFirstNonOwnedBucket());
- CPPUNIT_ASSERT(bucket.getBucketId().getRawId() != 0);
+ ASSERT_NE(bucket.getBucketId().getRawId(), 0ULL);
_app->setClusterState(lib::ClusterState("distributor:0 storage:1"));
doTestNotification(bucket, info, "");
}
-void
-BucketOwnershipNotifierTest::testGuardNotifyAlways()
-{
+TEST_F(BucketOwnershipNotifierTest, guard_notify_always) {
ServiceLayerComponent component(_app->getComponentRegister(), "dummy");
MessageSenderStub sender;
BucketOwnershipNotifier notifier(component, sender);
@@ -157,7 +133,7 @@ BucketOwnershipNotifierTest::testGuardNotifyAlways()
<< ") => 1";
}
- CPPUNIT_ASSERT_EQUAL(wanted.str(), sender.getCommands(true, true));
+ EXPECT_EQ(wanted.str(), sender.getCommands(true, true));
}
} // storage
diff --git a/storage/src/tests/persistence/common/CMakeLists.txt b/storage/src/tests/persistence/common/CMakeLists.txt
index 7910336c141..53ec3fd7c0c 100644
--- a/storage/src/tests/persistence/common/CMakeLists.txt
+++ b/storage/src/tests/persistence/common/CMakeLists.txt
@@ -4,6 +4,7 @@ vespa_add_library(storage_testpersistence_common TEST
filestortestfixture.cpp
persistenceproviderwrapper.cpp
DEPENDS
+ gtest
persistence
storage_testcommon
)
diff --git a/storage/src/tests/persistence/common/filestortestfixture.cpp b/storage/src/tests/persistence/common/filestortestfixture.cpp
index 835b8ef1044..63b7885fc53 100644
--- a/storage/src/tests/persistence/common/filestortestfixture.cpp
+++ b/storage/src/tests/persistence/common/filestortestfixture.cpp
@@ -34,7 +34,7 @@ FileStorTestFixture::setupPersistenceThreads(uint32_t threads)
// Default provider setup which should work out of the box for most tests.
void
-FileStorTestFixture::setUp()
+FileStorTestFixture::SetUp()
{
setupPersistenceThreads(1);
_node->setPersistenceProvider(
@@ -42,7 +42,7 @@ FileStorTestFixture::setUp()
}
void
-FileStorTestFixture::tearDown()
+FileStorTestFixture::TearDown()
{
_node.reset();
}
@@ -73,10 +73,8 @@ FileStorTestFixture::bucketExistsInDb(const document::BucketId& bucket) const
FileStorTestFixture::TestFileStorComponents::TestFileStorComponents(
FileStorTestFixture& fixture,
- const char* testName,
const StorageLinkInjector& injector)
- : _testName(testName),
- _fixture(fixture),
+ : _fixture(fixture),
manager(new FileStorManager(fixture._config->getConfigId(),
fixture._node->getPartitions(),
fixture._node->getPersistenceProvider(),
diff --git a/storage/src/tests/persistence/common/filestortestfixture.h b/storage/src/tests/persistence/common/filestortestfixture.h
index c46f9de24fc..a8c32a409ec 100644
--- a/storage/src/tests/persistence/common/filestortestfixture.h
+++ b/storage/src/tests/persistence/common/filestortestfixture.h
@@ -9,10 +9,11 @@
#include <tests/common/dummystoragelink.h>
#include <tests/common/teststorageapp.h>
#include <tests/common/testhelper.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace storage {
-class FileStorTestFixture : public CppUnit::TestFixture
+class FileStorTestFixture : public ::testing::Test
{
public:
static spi::LoadType defaultLoadType;
@@ -26,8 +27,8 @@ public:
typedef uint32_t DocumentIndex;
typedef uint64_t PutTimestamp;
- void setUp() override;
- void tearDown() override;
+ void SetUp() override;
+ void TearDown() override;
void setupPersistenceThreads(uint32_t diskCount);
void createBucket(const document::BucketId& bid);
bool bucketExistsInDb(const document::BucketId& bucket) const;
@@ -53,7 +54,7 @@ public:
void
expectNoReplies(DummyStorageLink& link) {
- CPPUNIT_ASSERT_EQUAL(size_t(0), link.getNumReplies());
+ EXPECT_EQ(0, link.getNumReplies());
}
template <typename ReplyType>
@@ -65,12 +66,10 @@ public:
api::StorageReply* reply(
dynamic_cast<ReplyType*>(link.getReply(0).get()));
if (reply == 0) {
- std::ostringstream ss;
- ss << "got unexpected reply "
- << link.getReply(0)->toString(true);
- CPPUNIT_FAIL(ss.str());
+ FAIL() << "got unexpected reply "
+ << link.getReply(0)->toString(true);
}
- CPPUNIT_ASSERT_EQUAL(result, reply->getResult().getResult());
+ EXPECT_EQ(result, reply->getResult().getResult());
}
template <typename ReplyType>
@@ -89,14 +88,12 @@ public:
struct TestFileStorComponents
{
private:
- TestName _testName;
FileStorTestFixture& _fixture;
public:
DummyStorageLink top;
FileStorManager* manager;
TestFileStorComponents(FileStorTestFixture& fixture,
- const char* testName,
const StorageLinkInjector& i = NoOpStorageLinkInjector());
void sendDummyGet(const document::BucketId& bid);
diff --git a/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp b/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp
index def9dd6ec6e..0dd3285e5f3 100644
--- a/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp
+++ b/storage/src/tests/persistence/diskmoveoperationhandlertest.cpp
@@ -1,34 +1,18 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/storage/persistence/diskmoveoperationhandler.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/messages.h>
#include <tests/persistence/persistencetestutils.h>
#include <vespa/document/test/make_document_bucket.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class DiskMoveOperationHandlerTest : public PersistenceTestUtils
-{
- CPPUNIT_TEST_SUITE(DiskMoveOperationHandlerTest);
- CPPUNIT_TEST(testSimple);
- CPPUNIT_TEST_SUITE_END();
+struct DiskMoveOperationHandlerTest : PersistenceTestUtils {};
-public:
- void testSimple();
- void testTargetExists();
- void testTargetWithOverlap();
-
- void insertDocumentInBucket(uint64_t location, uint64_t timestamp, document::BucketId bucket);
-};
-
-CPPUNIT_TEST_SUITE_REGISTRATION(DiskMoveOperationHandlerTest);
-
-void
-DiskMoveOperationHandlerTest::testSimple()
-{
+TEST_F(DiskMoveOperationHandlerTest, simple) {
setupDisks(10);
// Create bucket 16, 4 on disk 3.
@@ -51,9 +35,8 @@ DiskMoveOperationHandlerTest::testSimple()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
diskMoveHandler.handleBucketDiskMove(move, context);
- CPPUNIT_ASSERT_EQUAL(
- std::string("BucketId(0x4000000000000004): 10,4"),
- getBucketStatus(document::BucketId(16,4)));
+ EXPECT_EQ("BucketId(0x4000000000000004): 10,4",
+ getBucketStatus(document::BucketId(16,4)));
}
}
diff --git a/storage/src/tests/persistence/filestorage/CMakeLists.txt b/storage/src/tests/persistence/filestorage/CMakeLists.txt
index 3827b6ac319..5209bcce73d 100644
--- a/storage/src/tests/persistence/filestorage/CMakeLists.txt
+++ b/storage/src/tests/persistence/filestorage/CMakeLists.txt
@@ -1,7 +1,6 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-# TODO: Remove test library when all tests have been migrated to gtest.
-vespa_add_library(storage_testfilestorage TEST
+vespa_add_executable(storage_filestorage_gtest_runner_app TEST
SOURCES
deactivatebucketstest.cpp
deletebuckettest.cpp
@@ -12,14 +11,6 @@ vespa_add_library(storage_testfilestorage TEST
operationabortingtest.cpp
sanitycheckeddeletetest.cpp
singlebucketjointest.cpp
- DEPENDS
- storage
- storageapi
- storage_testpersistence_common
-)
-
-vespa_add_executable(storage_filestorage_gtest_runner_app TEST
- SOURCES
gtest_runner.cpp
DEPENDS
storage
diff --git a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
index f9375790ebb..18f8a235453 100644
--- a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
+++ b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/state.h>
#include <vespa/persistence/spi/test.h>
@@ -9,35 +8,25 @@
#include <tests/persistence/common/filestortestfixture.h>
using storage::spi::test::makeSpiBucket;
+using namespace ::testing;
namespace storage {
-class DeactivateBucketsTest : public FileStorTestFixture
-{
+struct DeactivateBucketsTest : FileStorTestFixture {
bool isActive(const document::BucketId&) const;
-public:
- void bucketsInDatabaseDeactivatedWhenNodeDownInClusterState();
-
- CPPUNIT_TEST_SUITE(DeactivateBucketsTest);
- CPPUNIT_TEST(bucketsInDatabaseDeactivatedWhenNodeDownInClusterState);
- CPPUNIT_TEST_SUITE_END();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(DeactivateBucketsTest);
-
bool
DeactivateBucketsTest::isActive(const document::BucketId& bucket) const
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bucket, "foo"));
- CPPUNIT_ASSERT(entry.exist());
+ assert(entry.exist());
return entry->info.isActive();
}
-void
-DeactivateBucketsTest::bucketsInDatabaseDeactivatedWhenNodeDownInClusterState()
-{
- TestFileStorComponents c(*this, "bucketsInDatabaseDeactivatedWhenNodeDownInClusterState");
+TEST_F(DeactivateBucketsTest, buckets_in_database_deactivated_when_node_down_in_cluster_state) {
+ TestFileStorComponents c(*this);
// Must set state to up first, or down-edge case won't trigger.
std::string upState("storage:2 distributor:2");
_node->getStateUpdater().setClusterState(
@@ -55,13 +44,13 @@ DeactivateBucketsTest::bucketsInDatabaseDeactivatedWhenNodeDownInClusterState()
entry->info = serviceLayerInfo;
entry.write();
}
- CPPUNIT_ASSERT(isActive(bucket));
+ EXPECT_TRUE(isActive(bucket));
std::string downState("storage:2 .1.s:d distributor:2");
_node->getStateUpdater().setClusterState(
lib::ClusterState::CSP(new lib::ClusterState(downState)));
// Buckets should have been deactivated in content layer
- CPPUNIT_ASSERT(!isActive(bucket));
+ EXPECT_FALSE(isActive(bucket));
}
} // namespace storage
diff --git a/storage/src/tests/persistence/filestorage/deletebuckettest.cpp b/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
index ec3e02e85b8..81c9525b78f 100644
--- a/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
+++ b/storage/src/tests/persistence/filestorage/deletebuckettest.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/log/log.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/document/test/make_document_bucket.h>
@@ -14,26 +13,15 @@ using document::test::makeDocumentBucket;
namespace storage {
-class DeleteBucketTest : public FileStorTestFixture
-{
-public:
- void testDeleteAbortsOperationsForBucket();
-
- CPPUNIT_TEST_SUITE(DeleteBucketTest);
- CPPUNIT_TEST(testDeleteAbortsOperationsForBucket);
- CPPUNIT_TEST_SUITE_END();
+struct DeleteBucketTest : FileStorTestFixture {
};
-CPPUNIT_TEST_SUITE_REGISTRATION(DeleteBucketTest);
-
-void
-DeleteBucketTest::testDeleteAbortsOperationsForBucket()
-{
- TestFileStorComponents c(*this, "testDeleteAbortsOperationsForBucket");
+TEST_F(DeleteBucketTest, delete_aborts_operations_for_bucket) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 1);
createBucket(bucket);
- LOG(info, "TEST STAGE: taking resume guard");
+ LOG(debug, "TEST STAGE: taking resume guard");
{
ResumeGuard rg(c.manager->getFileStorHandler().pause());
// First put may or may not be queued, since pausing might race with
@@ -51,7 +39,7 @@ DeleteBucketTest::testDeleteAbortsOperationsForBucket()
// with having to check that _at least_ 1 reply had BUCKET_DELETED. Joy!
c.top.waitForMessages(2, 60 * 2);
std::vector <api::StorageMessage::SP> msgs(c.top.getRepliesOnce());
- CPPUNIT_ASSERT_EQUAL(size_t(2), msgs.size());
+ ASSERT_EQ(2, msgs.size());
int numDeleted = 0;
for (uint32_t i = 0; i < 2; ++i) {
api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*msgs[i]));
@@ -59,8 +47,8 @@ DeleteBucketTest::testDeleteAbortsOperationsForBucket()
++numDeleted;
}
}
- CPPUNIT_ASSERT(numDeleted >= 1);
- LOG(info, "TEST STAGE: done, releasing resume guard");
+ ASSERT_GE(numDeleted, 1);
+ LOG(debug, "TEST STAGE: done, releasing resume guard");
}
// Ensure we don't shut down persistence threads before DeleteBucket op has completed
c.top.waitForMessages(1, 60*2);
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index c9285442fbc..f6b8fc3b3f0 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -1,7 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <tests/common/testhelper.h>
-#include <tests/common/storagelinktest.h>
+#include <tests/common/testhelper.h> // FIXME
+#include <tests/common/dummystoragelink.h>
#include <tests/common/teststorageapp.h>
#include <tests/persistence/filestorage/forwardingmessagesender.h>
#include <vespa/document/repo/documenttyperepo.h>
@@ -20,6 +20,7 @@
#include <vespa/persistence/spi/test.h>
#include <vespa/config/common/exceptions.h>
#include <vespa/fastos/file.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <atomic>
#include <vespa/log/log.h>
@@ -30,20 +31,21 @@ using document::Document;
using namespace storage::api;
using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
+using namespace ::testing;
#define ASSERT_SINGLE_REPLY(replytype, reply, link, time) \
-reply = 0; \
+reply = nullptr; \
try{ \
link.waitForMessages(1, time); \
- CPPUNIT_ASSERT_EQUAL((size_t)1, link.getNumReplies()); \
+ ASSERT_EQ(1, link.getNumReplies()); \
reply = dynamic_cast<replytype*>(link.getReply(0).get()); \
- if (reply == 0) { \
- CPPUNIT_FAIL("Got reply of unexpected type: " \
- + link.getReply(0)->getType().toString()); \
+ if (reply == nullptr) { \
+ FAIL() << "Got reply of unexpected type: " \
+ << link.getReply(0)->getType().toString(); \
} \
} catch (vespalib::Exception& e) { \
- reply = 0; \
- CPPUNIT_FAIL("Failed to find single reply in time"); \
+ reply = nullptr; \
+ FAIL() << "Failed to find single reply in time"; \
}
namespace storage {
@@ -56,7 +58,7 @@ struct TestFileStorComponents;
}
-struct FileStorManagerTest : public CppUnit::TestFixture {
+struct FileStorManagerTest : Test {
enum {LONG_WAITTIME=60};
unique_ptr<TestServiceLayerApp> _node;
std::unique_ptr<vdstestlib::DirConfig> config;
@@ -67,84 +69,8 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
FileStorManagerTest() : _node(), _waitTime(LONG_WAITTIME) {}
- void setUp() override;
- void tearDown() override;
-
- void testPut();
- void testHeaderOnlyPut();
- void testFlush();
- void testRemapSplit();
- void testHandlerPriority();
- void testHandlerMulti();
- void testHandlerTimeout();
- void testHandlerPause();
- void testHandlerPausedMultiThread();
- void testPriority();
- void testSplit1();
- void testSplitSingleGroup();
- void testSplitEmptyTargetWithRemappedOps();
- void testNotifyOnSplitSourceOwnershipChanged();
- void testJoin();
- void testVisiting();
- void testRemoveLocation();
- void testDeleteBucket();
- void testDeleteBucketRejectOutdatedBucketInfo();
- void testDeleteBucketWithInvalidBucketInfo();
- void testNoTimestamps();
- void testEqualTimestamps();
- void testGetIter();
- void testSetBucketActiveState();
- void testNotifyOwnerDistributorOnOutdatedSetBucketState();
- void testGetBucketDiffImplicitCreateBucket();
- void testMergeBucketImplicitCreateBucket();
- void testNewlyCreatedBucketIsReady();
- void testCreateBucketSetsActiveFlagInDatabaseAndReply();
- void testStateChange();
- void testRepairNotifiesDistributorOnChange();
- void testDiskMove();
- void put_command_size_is_added_to_metric();
- void update_command_size_is_added_to_metric();
- void remove_command_size_is_added_to_metric();
- void get_command_size_is_added_to_metric();
-
- CPPUNIT_TEST_SUITE(FileStorManagerTest);
- CPPUNIT_TEST(testPut);
- CPPUNIT_TEST(testHeaderOnlyPut);
- CPPUNIT_TEST(testFlush);
- CPPUNIT_TEST(testRemapSplit);
- CPPUNIT_TEST(testHandlerPriority);
- CPPUNIT_TEST(testHandlerMulti);
- CPPUNIT_TEST(testHandlerTimeout);
- CPPUNIT_TEST(testHandlerPause);
- CPPUNIT_TEST(testHandlerPausedMultiThread);
- CPPUNIT_TEST(testPriority);
- CPPUNIT_TEST(testSplit1);
- CPPUNIT_TEST(testSplitSingleGroup);
- CPPUNIT_TEST(testSplitEmptyTargetWithRemappedOps);
- CPPUNIT_TEST(testNotifyOnSplitSourceOwnershipChanged);
- CPPUNIT_TEST(testJoin);
- CPPUNIT_TEST(testVisiting);
- CPPUNIT_TEST(testRemoveLocation);
- CPPUNIT_TEST(testDeleteBucket);
- CPPUNIT_TEST(testDeleteBucketRejectOutdatedBucketInfo);
- CPPUNIT_TEST(testDeleteBucketWithInvalidBucketInfo);
- CPPUNIT_TEST(testNoTimestamps);
- CPPUNIT_TEST(testEqualTimestamps);
- CPPUNIT_TEST(testGetIter);
- CPPUNIT_TEST(testSetBucketActiveState);
- CPPUNIT_TEST(testNotifyOwnerDistributorOnOutdatedSetBucketState);
- CPPUNIT_TEST(testGetBucketDiffImplicitCreateBucket);
- CPPUNIT_TEST(testMergeBucketImplicitCreateBucket);
- CPPUNIT_TEST(testNewlyCreatedBucketIsReady);
- CPPUNIT_TEST(testCreateBucketSetsActiveFlagInDatabaseAndReply);
- CPPUNIT_TEST(testStateChange);
- CPPUNIT_TEST(testRepairNotifiesDistributorOnChange);
- CPPUNIT_TEST(testDiskMove);
- CPPUNIT_TEST(put_command_size_is_added_to_metric);
- CPPUNIT_TEST(update_command_size_is_added_to_metric);
- CPPUNIT_TEST(remove_command_size_is_added_to_metric);
- CPPUNIT_TEST(get_command_size_is_added_to_metric);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
+ void TearDown() override;
void createBucket(document::BucketId bid, uint16_t disk)
{
@@ -239,8 +165,6 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
}
};
-CPPUNIT_TEST_SUITE_REGISTRATION(FileStorManagerTest);
-
std::string findFile(const std::string& path, const std::string& file) {
FastOS_DirectoryScan dirScan(path.c_str());
while (dirScan.ReadNext()) {
@@ -285,17 +209,12 @@ std::unique_ptr<DiskThread> createThread(vdstestlib::DirConfig& config,
namespace {
-struct TestFileStorComponents
-{
-private:
- TestName _testName;
-public:
+struct TestFileStorComponents {
DummyStorageLink top;
FileStorManager* manager;
- TestFileStorComponents(FileStorManagerTest& test, const char* testName)
- : _testName(testName),
- manager(new FileStorManager(test.config->getConfigId(),
+ explicit TestFileStorComponents(FileStorManagerTest& test)
+ : manager(new FileStorManager(test.config->getConfigId(),
test._node->getPartitions(),
test._node->getPersistenceProvider(),
test._node->getComponentRegister()))
@@ -308,21 +227,18 @@ public:
}
void
-FileStorManagerTest::setUp()
+FileStorManagerTest::SetUp()
{
setupDisks(1);
}
void
-FileStorManagerTest::tearDown()
+FileStorManagerTest::TearDown()
{
_node.reset(0);
}
-void
-FileStorManagerTest::testHeaderOnlyPut()
-{
- TestName testName("testHeaderOnlyPut");
+TEST_F(FileStorManagerTest, header_only_put) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
@@ -340,69 +256,56 @@ FileStorManagerTest::testHeaderOnlyPut()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
}
doc->setValue(doc->getField("headerval"), document::IntFieldValue(42));
// Putting it again, this time with header only
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 124));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 124);
cmd->setUpdateTimestamp(105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::OK, reply->getResult().getResult());
}
// Getting it
{
- std::shared_ptr<api::GetCommand> cmd(new api::GetCommand(
- makeDocumentBucket(bid), doc->getId(), "[all]"));
+ auto cmd = std::make_shared<api::GetCommand>(makeDocumentBucket(bid), doc->getId(), "[all]");
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ ASSERT_EQ(1, top.getNumReplies());
std::shared_ptr<api::GetReply> reply2(
std::dynamic_pointer_cast<api::GetReply>(
top.getReply(0)));
top.reset();
- CPPUNIT_ASSERT(reply2.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply2->getResult());
- CPPUNIT_ASSERT_EQUAL(doc->getId().toString(),
- reply2->getDocumentId().toString());
- // Ensure partial update was done, but other things are equal
- document::FieldValue::UP value(
- reply2->getDocument()->getValue(doc->getField("headerval")));
- CPPUNIT_ASSERT(value.get());
- CPPUNIT_ASSERT_EQUAL(42, dynamic_cast<document::IntFieldValue&>(
- *value).getAsInt());
+ ASSERT_TRUE(reply2.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply2->getResult());
+ EXPECT_EQ(doc->getId().toString(), reply2->getDocumentId().toString());
+ // Ensure partial update was done, but other things are equal
+ auto value = reply2->getDocument()->getValue(doc->getField("headerval"));
+ ASSERT_TRUE(value.get());
+ EXPECT_EQ(42, dynamic_cast<document::IntFieldValue&>(*value).getAsInt());
reply2->getDocument()->remove("headerval");
doc->remove("headerval");
- CPPUNIT_ASSERT_EQUAL(*doc, *reply2->getDocument());
+ EXPECT_EQ(*doc, *reply2->getDocument());
}
}
-void
-FileStorManagerTest::testPut()
-{
- TestName testName("testPut");
+TEST_F(FileStorManagerTest, put) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
@@ -420,25 +323,20 @@ FileStorManagerTest::testPut()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
}
}
-void
-FileStorManagerTest::testDiskMove()
-{
+TEST_F(FileStorManagerTest, disk_move) {
setupDisks(2);
// Setting up manager
@@ -458,27 +356,24 @@ FileStorManagerTest::testDiskMove()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
}
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bid, "foo"));
- CPPUNIT_ASSERT_EQUAL(0, (int)entry->disk);
- CPPUNIT_ASSERT_EQUAL(
+ EXPECT_EQ(0, entry->disk);
+ EXPECT_EQ(
vespalib::string(
"BucketInfo(crc 0x28cc441f, docCount 1, totDocSize 114, "
"ready true, active false)"),
@@ -486,26 +381,24 @@ FileStorManagerTest::testDiskMove()
}
{
- std::shared_ptr<BucketDiskMoveCommand> cmd(
- new BucketDiskMoveCommand(makeDocumentBucket(bid), 0, 1));
+ auto cmd = std::make_shared<BucketDiskMoveCommand>(makeDocumentBucket(bid), 0, 1);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<BucketDiskMoveReply> reply(
- std::dynamic_pointer_cast<BucketDiskMoveReply>(top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<BucketDiskMoveReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
}
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bid, "foo"));
- CPPUNIT_ASSERT_EQUAL(1, (int)entry->disk);
- CPPUNIT_ASSERT_EQUAL(
+ EXPECT_EQ(1, entry->disk);
+ EXPECT_EQ(
vespalib::string(
"BucketInfo(crc 0x28cc441f, docCount 1, totDocSize 114, "
"ready true, active false)"),
@@ -513,11 +406,7 @@ FileStorManagerTest::testDiskMove()
}
}
-
-void
-FileStorManagerTest::testStateChange()
-{
- TestName testName("testStateChange");
+TEST_F(FileStorManagerTest, state_change) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
@@ -528,17 +417,13 @@ FileStorManagerTest::testStateChange()
top.open();
setClusterState("storage:3 distributor:3");
-
- CPPUNIT_ASSERT_EQUAL(true, getDummyPersistence().getClusterState().nodeUp());
+ EXPECT_TRUE(getDummyPersistence().getClusterState().nodeUp());
setClusterState("storage:3 .0.s:d distributor:3");
-
- CPPUNIT_ASSERT_EQUAL(false, getDummyPersistence().getClusterState().nodeUp());
+ EXPECT_FALSE(getDummyPersistence().getClusterState().nodeUp());
}
-void
-FileStorManagerTest::testRepairNotifiesDistributorOnChange()
-{
+TEST_F(FileStorManagerTest, repair_notifies_distributor_on_change) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
@@ -555,9 +440,8 @@ FileStorManagerTest::testRepairNotifiesDistributorOnChange()
for (uint32_t i = 0; i < 3; ++i) {
document::DocumentId docId(vespalib::make_string("userdoc:ns:1:%d", i));
- Document::SP doc(new Document(*_testdoctype1, docId));
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(document::BucketId(16, 1)), doc, i + 1));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(document::BucketId(16, 1)), doc, i + 1);
cmd->setAddress(address);
top.sendDown(cmd);
}
@@ -567,13 +451,12 @@ FileStorManagerTest::testRepairNotifiesDistributorOnChange()
getDummyPersistence().simulateMaintenanceFailure();
- std::shared_ptr<RepairBucketCommand> cmd(
- new RepairBucketCommand(makeDocumentBucket(document::BucketId(16, 1)), 0));
+ auto cmd = std::make_shared<RepairBucketCommand>(makeDocumentBucket(document::BucketId(16, 1)), 0);
top.sendDown(cmd);
top.waitForMessages(2, _waitTime);
- CPPUNIT_ASSERT_EQUAL(
+ EXPECT_EQ(
std::string("NotifyBucketChangeCommand(BucketId(0x4000000000000001), "
"BucketInfo(crc 0x2625a314, docCount 2, totDocSize 154, "
"ready true, active false))"), top.getReply(0)->toString());
@@ -581,22 +464,18 @@ FileStorManagerTest::testRepairNotifiesDistributorOnChange()
top.close();
}
-
-void
-FileStorManagerTest::testFlush()
-{
- TestName testName("testFlush");
- // Setting up manager
+TEST_F(FileStorManagerTest, flush) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
- // Creating a document to test with
+ // Creating a document to test with
document::DocumentId docId("doc:crawler:http://www.ntnu.no/");
- Document::SP doc(new Document(*_testdoctype1, docId));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
document::BucketId bid(4000);
static const uint32_t msgCount = 10;
@@ -604,8 +483,7 @@ FileStorManagerTest::testFlush()
// Generating many put commands
std::vector<std::shared_ptr<api::StorageCommand> > _commands;
for (uint32_t i=0; i<msgCount; ++i) {
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, i+1));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, i+1);
cmd->setAddress(address);
_commands.push_back(cmd);
}
@@ -614,13 +492,10 @@ FileStorManagerTest::testFlush()
}
top.close();
top.flush();
- CPPUNIT_ASSERT_EQUAL((size_t) msgCount, top.getNumReplies());
+ EXPECT_EQ(msgCount, top.getNumReplies());
}
-void
-FileStorManagerTest::testHandlerPriority()
-{
- TestName testName("testHandlerPriority");
+TEST_F(FileStorManagerTest, handler_priority) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -638,7 +513,7 @@ FileStorManagerTest::testHandlerPriority()
FileStorHandler filestorHandler(messageSender, metrics, _node->getPartitions(), _node->getComponentRegister());
filestorHandler.setGetNextMessageTimeout(50);
uint32_t stripeId = filestorHandler.getNextStripeId(0);
- CPPUNIT_ASSERT_EQUAL(0u, stripeId);
+ ASSERT_EQ(0u, stripeId);
std::string content("Here is some content which is in all documents");
std::ostringstream uri;
@@ -657,15 +532,14 @@ FileStorManagerTest::testHandlerPriority()
filestorHandler.schedule(cmd, 0);
}
- CPPUNIT_ASSERT_EQUAL(15, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
- CPPUNIT_ASSERT_EQUAL(30, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
- CPPUNIT_ASSERT_EQUAL(45, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
- CPPUNIT_ASSERT_EQUAL(60, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
- CPPUNIT_ASSERT_EQUAL(75, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(15, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(30, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(45, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(60, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(75, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
}
-class MessagePusherThread : public document::Runnable
-{
+class MessagePusherThread : public document::Runnable {
public:
FileStorHandler& _handler;
Document::SP _doc;
@@ -673,7 +547,7 @@ public:
std::atomic<bool> _threadDone;
MessagePusherThread(FileStorHandler& handler, Document::SP doc);
- ~MessagePusherThread();
+ ~MessagePusherThread() override;
void run() override {
while (!_done) {
@@ -690,7 +564,7 @@ public:
};
MessagePusherThread::MessagePusherThread(FileStorHandler& handler, Document::SP doc)
- : _handler(handler), _doc(doc), _done(false), _threadDone(false)
+ : _handler(handler), _doc(std::move(doc)), _done(false), _threadDone(false)
{}
MessagePusherThread::~MessagePusherThread() = default;
@@ -704,7 +578,7 @@ public:
std::atomic<bool> _failed;
std::atomic<bool> _threadDone;
- MessageFetchingThread(FileStorHandler& handler)
+ explicit MessageFetchingThread(FileStorHandler& handler)
: _threadId(handler.getNextStripeId(0)), _handler(handler), _config(0), _fetchedCount(0), _done(false),
_failed(false), _threadDone(false)
{}
@@ -729,10 +603,7 @@ public:
};
};
-void
-FileStorManagerTest::testHandlerPausedMultiThread()
-{
- TestName testName("testHandlerPausedMultiThread");
+TEST_F(FileStorManagerTest, handler_paused_multi_thread) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -767,23 +638,19 @@ FileStorManagerTest::testHandlerPausedMultiThread()
ResumeGuard guard = filestorHandler.pause();
thread._config.fetch_add(1);
uint32_t count = thread._fetchedCount;
- CPPUNIT_ASSERT_EQUAL(count, thread._fetchedCount.load());
+ ASSERT_EQ(count, thread._fetchedCount.load());
}
pushthread._done = true;
thread._done = true;
- CPPUNIT_ASSERT(!thread._failed);
+ ASSERT_FALSE(thread._failed);
while (!pushthread._threadDone || !thread._threadDone) {
FastOS_Thread::Sleep(1);
}
}
-
-void
-FileStorManagerTest::testHandlerPause()
-{
- TestName testName("testHandlerPriority");
+TEST_F(FileStorManagerTest, handler_pause) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -818,15 +685,15 @@ FileStorManagerTest::testHandlerPause()
filestorHandler.schedule(cmd, 0);
}
- CPPUNIT_ASSERT_EQUAL(15, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(15, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
{
ResumeGuard guard = filestorHandler.pause();
(void)guard;
- CPPUNIT_ASSERT(filestorHandler.getNextMessage(0, stripeId).second.get() == NULL);
+ ASSERT_EQ(filestorHandler.getNextMessage(0, stripeId).second.get(), nullptr);
}
- CPPUNIT_ASSERT_EQUAL(30, (int)filestorHandler.getNextMessage(0, stripeId).second->getPriority());
+ ASSERT_EQ(30, filestorHandler.getNextMessage(0, stripeId).second->getPriority());
}
namespace {
@@ -842,10 +709,7 @@ uint64_t getPutTime(api::StorageMessage::SP& msg)
}
-void
-FileStorManagerTest::testRemapSplit()
-{
- TestName testName("testRemapSplit");
+TEST_F(FileStorManagerTest, remap_split) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -878,36 +742,31 @@ FileStorManagerTest::testRemapSplit()
filestorHandler.schedule(std::make_shared<api::PutCommand>(makeDocumentBucket(bucket2), doc2, i + 10), 0);
}
- CPPUNIT_ASSERT_EQUAL(std::string("BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
- "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
- "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n"),
- filestorHandler.dumpQueue(0));
+ EXPECT_EQ("BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000004d2): Put(BucketId(0x40000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n",
+ filestorHandler.dumpQueue(0));
FileStorHandler::RemapInfo a(makeDocumentBucket(document::BucketId(17, 1234)), 0);
FileStorHandler::RemapInfo b(makeDocumentBucket(document::BucketId(17, 1234 | 1 << 16)), 0);
filestorHandler.remapQueueAfterSplit(FileStorHandler::RemapInfo(makeDocumentBucket(bucket1), 0), a, b);
- CPPUNIT_ASSERT(a.foundInQueue);
- CPPUNIT_ASSERT(!b.foundInQueue);
-
- CPPUNIT_ASSERT_EQUAL(std::string(
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
- "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n"
- "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
- "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
- "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n"),
- filestorHandler.dumpQueue(0));
+ ASSERT_TRUE(a.foundInQueue);
+ ASSERT_FALSE(b.foundInQueue);
+ EXPECT_EQ("BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 11, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 12, size 108) (priority: 127)\n"
+ "BucketId(0x40000000000011d7): Put(BucketId(0x40000000000011d7), userdoc:footype:4567:bar, timestamp 13, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 1, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 2, size 108) (priority: 127)\n"
+ "BucketId(0x44000000000004d2): Put(BucketId(0x44000000000004d2), userdoc:footype:1234:bar, timestamp 3, size 108) (priority: 127)\n",
+ filestorHandler.dumpQueue(0));
}
-void
-FileStorManagerTest::testHandlerMulti()
-{
- TestName testName("testHandlerMulti");
+TEST_F(FileStorManagerTest, handler_multi) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -946,29 +805,25 @@ FileStorManagerTest::testHandlerMulti()
{
FileStorHandler::LockedMessage lock = filestorHandler.getNextMessage(0, stripeId);
- CPPUNIT_ASSERT_EQUAL((uint64_t)1, getPutTime(lock.second));
+ ASSERT_EQ(1, getPutTime(lock.second));
lock = filestorHandler.getNextMessage(0, stripeId, lock);
- CPPUNIT_ASSERT_EQUAL((uint64_t)2, getPutTime(lock.second));
+ ASSERT_EQ(2, getPutTime(lock.second));
lock = filestorHandler.getNextMessage(0, stripeId, lock);
- CPPUNIT_ASSERT_EQUAL((uint64_t)3, getPutTime(lock.second));
+ ASSERT_EQ(3, getPutTime(lock.second));
}
{
FileStorHandler::LockedMessage lock = filestorHandler.getNextMessage(0, stripeId);
- CPPUNIT_ASSERT_EQUAL((uint64_t)11, getPutTime(lock.second));
+ ASSERT_EQ(11, getPutTime(lock.second));
lock = filestorHandler.getNextMessage(0, stripeId, lock);
- CPPUNIT_ASSERT_EQUAL((uint64_t)12, getPutTime(lock.second));
+ ASSERT_EQ(12, getPutTime(lock.second));
}
}
-
-void
-FileStorManagerTest::testHandlerTimeout()
-{
- TestName testName("testHandlerTimeout");
+TEST_F(FileStorManagerTest, handler_timeout) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -1018,20 +873,17 @@ FileStorManagerTest::testHandlerTimeout()
for (;;) {
auto lock = filestorHandler.getNextMessage(0, stripeId);
if (lock.first.get()) {
- CPPUNIT_ASSERT_EQUAL(uint8_t(200), lock.second->getPriority());
+ ASSERT_EQ(200, lock.second->getPriority());
break;
}
}
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::TIMEOUT,
- static_cast<api::StorageReply&>(*top.getReply(0)).getResult().getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ EXPECT_EQ(api::ReturnCode::TIMEOUT,
+ static_cast<api::StorageReply&>(*top.getReply(0)).getResult().getResult());
}
-void
-FileStorManagerTest::testPriority()
-{
- TestName testName("testPriority");
+TEST_F(FileStorManagerTest, priority) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -1080,11 +932,8 @@ FileStorManagerTest::testPriority()
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(16, factory.getBucketId(documents[i]->getId()).getRawId());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), documents[i], 100 + i));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
+ auto address = std::make_unique<api::StorageMessageAddress>("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(*address);
cmd->setPriority(i * 2);
filestorHandler.schedule(cmd, 0);
@@ -1094,35 +943,31 @@ FileStorManagerTest::testPriority()
// Wait until everything is done.
int count = 0;
- while (documents.size() != top.getNumReplies() && count < 1000) {
- FastOS_Thread::Sleep(100);
+ while (documents.size() != top.getNumReplies() && count < 10000) {
+ FastOS_Thread::Sleep(10);
count++;
}
- CPPUNIT_ASSERT(count < 1000);
+ ASSERT_LT(count, 10000);
for (uint32_t i = 0; i < documents.size(); i++) {
std::shared_ptr<api::PutReply> reply(
std::dynamic_pointer_cast<api::PutReply>(
top.getReply(i)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
// Verify that thread 1 gets documents over 50 pri
- CPPUNIT_ASSERT_EQUAL(uint64_t(documents.size()),
- metrics.disks[0]->threads[0]->operations.getValue()
- + metrics.disks[0]->threads[1]->operations.getValue());
+ EXPECT_EQ(documents.size(),
+ metrics.disks[0]->threads[0]->operations.getValue()
+ + metrics.disks[0]->threads[1]->operations.getValue());
// Closing file stor handler before threads are deleted, such that
// file stor threads getNextMessage calls returns.
filestorHandler.close();
}
-void
-FileStorManagerTest::testSplit1()
-{
- TestName testName("testSplit1");
- // Setup a filestorthread to test
+TEST_F(FileStorManagerTest, split1) {
+ // Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
top.push_back(std::unique_ptr<StorageLink>(
@@ -1137,7 +982,7 @@ FileStorManagerTest::testSplit1()
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
- // Creating documents to test with. Different gids, 2 locations.
+ // Creating documents to test with. Different gids, 2 locations.
std::vector<document::Document::SP > documents;
for (uint32_t i=0; i<20; ++i) {
std::string content("Here is some content which is in all documents");
@@ -1161,60 +1006,45 @@ FileStorManagerTest::testSplit1()
_node->getPersistenceProvider().createBucket(
makeSpiBucket(bucket), context);
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), documents[i], 100 + i));
- std::unique_ptr<api::StorageMessageAddress> address(
- new api::StorageMessageAddress(
- "storage", lib::NodeType::STORAGE, 3));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
+ auto address = std::make_unique<api::StorageMessageAddress>("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(*address);
cmd->setSourceIndex(0);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
LOG(debug, "Got %zu replies", top.getNumReplies());
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
// Delete every 5th document to have delete entries in file too
if (i % 5 == 0) {
- std::shared_ptr<api::RemoveCommand> rcmd(
- new api::RemoveCommand(
- makeDocumentBucket(bucket), documents[i]->getId(), 1000000 + 100 + i));
+ auto rcmd = std::make_shared<api::RemoveCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), 1000000 + 100 + i);
rcmd->setAddress(*address);
filestorHandler.schedule(rcmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::RemoveReply> rreply(
- std::dynamic_pointer_cast<api::RemoveReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT_MSG(top.getReply(0)->getType().toString(),
- rreply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- rreply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto rreply = std::dynamic_pointer_cast<api::RemoveReply>(top.getReply(0));
+ ASSERT_TRUE(rreply.get()) << top.getReply(0)->getType().toString();
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), rreply->getResult());
top.reset();
}
}
// Perform a split, check that locations are split
{
- std::shared_ptr<api::SplitBucketCommand> cmd(
- new api::SplitBucketCommand(makeDocumentBucket(document::BucketId(16, 1))));
+ auto cmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(document::BucketId(16, 1)));
cmd->setSourceIndex(0);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
@@ -1222,37 +1052,30 @@ FileStorManagerTest::testSplit1()
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(
17, i % 3 == 0 ? 0x10001 : 0x0100001);
- std::shared_ptr<api::GetCommand> cmd(
- new api::GetCommand(makeDocumentBucket(bucket), documents[i]->getId(), "[all]"));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::GetCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), "[all]");
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::GetReply> reply(
- std::dynamic_pointer_cast<api::GetReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::GetReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(((i % 5) != 0), reply->wasFound());
top.reset();
}
// Keep splitting location 1 until we gidsplit
for (int i=17; i<=32; ++i) {
- std::shared_ptr<api::SplitBucketCommand> cmd(
- new api::SplitBucketCommand(
- makeDocumentBucket(document::BucketId(i, 0x0100001))));
+ auto cmd = std::make_shared<api::SplitBucketCommand>(
+ makeDocumentBucket(document::BucketId(i, 0x0100001)));
cmd->setSourceIndex(0);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
@@ -1265,19 +1088,16 @@ FileStorManagerTest::testSplit1()
bucket = document::BucketId(33, factory.getBucketId(
documents[i]->getId()).getRawId());
}
- std::shared_ptr<api::GetCommand> cmd(
- new api::GetCommand(makeDocumentBucket(bucket), documents[i]->getId(), "[all]"));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::GetCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), "[all]");
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::GetReply> reply(
- std::dynamic_pointer_cast<api::GetReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::GetReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(((i % 5) != 0), reply->wasFound());
top.reset();
}
}
@@ -1286,11 +1106,8 @@ FileStorManagerTest::testSplit1()
filestorHandler.close();
}
-void
-FileStorManagerTest::testSplitSingleGroup()
-{
- TestName testName("testSplitSingleGroup");
- // Setup a filestorthread to test
+TEST_F(FileStorManagerTest, split_single_group) {
+ // Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
top.push_back(std::unique_ptr<StorageLink>(
@@ -1312,79 +1129,62 @@ FileStorManagerTest::testSplitSingleGroup()
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
- // Creating documents to test with. Different gids, 2 locations.
- std::vector<document::Document::SP > documents;
+ // Creating documents to test with. Different gids, 2 locations.
+ std::vector<document::Document::SP> documents;
for (uint32_t i=0; i<20; ++i) {
std::string content("Here is some content for all documents");
std::ostringstream uri;
uri << "userdoc:footype:" << (state ? 0x10001 : 0x0100001)
<< ":mydoc-" << i;
- Document::SP doc(createDocument(
- content, uri.str()).release());
- documents.push_back(doc);
+ documents.emplace_back(createDocument(content, uri.str()));
}
document::BucketIdFactory factory;
- // Populate bucket with the given data
+ // Populate bucket with the given data
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(16, factory.getBucketId(
documents[i]->getId()).getRawId());
- _node->getPersistenceProvider().createBucket(
- makeSpiBucket(bucket), context);
+ _node->getPersistenceProvider().createBucket(makeSpiBucket(bucket), context);
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bucket), documents[i], 100 + i));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
- // Perform a split, check that locations are split
+ // Perform a split, check that locations are split
{
- std::shared_ptr<api::SplitBucketCommand> cmd(
- new api::SplitBucketCommand(makeDocumentBucket(document::BucketId(16, 1))));
+ auto cmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(document::BucketId(16, 1)));
cmd->setSourceIndex(0);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
-
// Test that the documents are all still there
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(17, state ? 0x10001 : 0x00001);
- std::shared_ptr<api::GetCommand> cmd(
- new api::GetCommand(makeDocumentBucket(bucket), documents[i]->getId(), "[all]"));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+        auto cmd = std::make_shared<api::GetCommand>(
+                makeDocumentBucket(bucket), documents[i]->getId(), "[all]");
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::GetReply> reply(
- std::dynamic_pointer_cast<api::GetReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::GetReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
// Closing file stor handler before threads are deleted, such that
@@ -1415,21 +1215,16 @@ FileStorManagerTest::putDoc(DummyStorageLink& top,
cmd->setPriority(120);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ ASSERT_EQ(1, top.getNumReplies());
std::shared_ptr<api::PutReply> reply(
std::dynamic_pointer_cast<api::PutReply>(
top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_TRUE(reply.get());
+ ASSERT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
-void
-FileStorManagerTest::testSplitEmptyTargetWithRemappedOps()
-{
- TestName testName("testSplitEmptyTargetWithRemappedOps");
-
+TEST_F(FileStorManagerTest, split_empty_target_with_remapped_ops) {
DummyStorageLink top;
DummyStorageLink *dummyManager;
top.push_back(std::unique_ptr<StorageLink>(
@@ -1450,7 +1245,7 @@ FileStorManagerTest::testSplitEmptyTargetWithRemappedOps()
api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
for (uint32_t i=0; i<10; ++i) {
- putDoc(top, filestorHandler, source, i);
+ ASSERT_NO_FATAL_FAILURE(putDoc(top, filestorHandler, source, i));
}
// Send split followed by a put that is bound for a target bucket that
@@ -1459,54 +1254,42 @@ FileStorManagerTest::testSplitEmptyTargetWithRemappedOps()
// the persistence provider deleting it internally.
// Make sure we block the operation queue until we've scheduled all
// the operations.
- std::unique_ptr<ResumeGuard> resumeGuard(
- new ResumeGuard(filestorHandler.pause()));
+ auto resumeGuard = std::make_unique<ResumeGuard>(filestorHandler.pause());
- std::shared_ptr<api::SplitBucketCommand> splitCmd(
- new api::SplitBucketCommand(makeDocumentBucket(source)));
+ auto splitCmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(source));
splitCmd->setPriority(120);
splitCmd->setSourceIndex(0);
document::DocumentId docId(
vespalib::make_string("userdoc:ns:%d:1234", 0x100001));
- Document::SP doc(new Document(*_testdoctype1, docId));
- std::shared_ptr<api::PutCommand> putCmd(
- new api::PutCommand(makeDocumentBucket(source), doc, 1001));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
+ auto putCmd = std::make_shared<api::PutCommand>(makeDocumentBucket(source), doc, 1001);
putCmd->setAddress(address);
putCmd->setPriority(120);
filestorHandler.schedule(splitCmd, 0);
filestorHandler.schedule(putCmd, 0);
- resumeGuard.reset(0); // Unpause
+ resumeGuard.reset(); // Unpause
filestorHandler.flush(true);
top.waitForMessages(2, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 2, top.getNumReplies());
+ ASSERT_EQ(2, top.getNumReplies());
{
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
{
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(1)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(1));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
top.reset();
}
-void
-FileStorManagerTest::testNotifyOnSplitSourceOwnershipChanged()
-{
- TestName testName("testSplit1");
+TEST_F(FileStorManagerTest, notify_on_split_source_ownership_changed) {
// Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
@@ -1525,11 +1308,10 @@ FileStorManagerTest::testNotifyOnSplitSourceOwnershipChanged()
document::BucketId source(getFirstBucketNotOwnedByDistributor(0));
createBucket(source, 0);
for (uint32_t i=0; i<10; ++i) {
- putDoc(top, filestorHandler, source, i);
+ ASSERT_NO_FATAL_FAILURE(putDoc(top, filestorHandler, source, i));
}
- std::shared_ptr<api::SplitBucketCommand> splitCmd(
- new api::SplitBucketCommand(makeDocumentBucket(source)));
+ auto splitCmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(source));
splitCmd->setPriority(120);
splitCmd->setSourceIndex(0); // Source not owned by this distributor.
@@ -1537,25 +1319,18 @@ FileStorManagerTest::testNotifyOnSplitSourceOwnershipChanged()
filestorHandler.flush(true);
top.waitForMessages(4, _waitTime); // 3 notify cmds + split reply
- CPPUNIT_ASSERT_EQUAL(size_t(4), top.getNumReplies());
+ ASSERT_EQ(4, top.getNumReplies());
for (int i = 0; i < 3; ++i) {
- CPPUNIT_ASSERT_EQUAL(api::MessageType::NOTIFYBUCKETCHANGE,
- top.getReply(i)->getType());
+ ASSERT_EQ(api::MessageType::NOTIFYBUCKETCHANGE, top.getReply(i)->getType());
}
- std::shared_ptr<api::SplitBucketReply> reply(
- std::dynamic_pointer_cast<api::SplitBucketReply>(
- top.getReply(3)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ auto reply = std::dynamic_pointer_cast<api::SplitBucketReply>(top.getReply(3));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
-void
-FileStorManagerTest::testJoin()
-{
- TestName testName("testJoin");
- // Setup a filestorthread to test
+TEST_F(FileStorManagerTest, join) {
+ // Setup a filestorthread to test
DummyStorageLink top;
DummyStorageLink *dummyManager;
top.push_back(std::unique_ptr<StorageLink>(
@@ -1570,15 +1345,13 @@ FileStorManagerTest::testJoin()
std::unique_ptr<DiskThread> thread(createThread(
*config, *_node, _node->getPersistenceProvider(),
filestorHandler, *metrics.disks[0]->threads[0], 0));
- // Creating documents to test with. Different gids, 2 locations.
+ // Creating documents to test with. Different gids, 2 locations.
std::vector<document::Document::SP > documents;
for (uint32_t i=0; i<20; ++i) {
std::string content("Here is some content which is in all documents");
std::ostringstream uri;
-
uri << "userdoc:footype:" << (i % 3 == 0 ? 0x10001 : 0x0100001) << ":mydoc-" << i;
- Document::SP doc(createDocument(content, uri.str()).release());
- documents.push_back(doc);
+ documents.emplace_back(createDocument(content, uri.str()));
}
document::BucketIdFactory factory;
@@ -1586,7 +1359,7 @@ FileStorManagerTest::testJoin()
createBucket(document::BucketId(17, 0x10001), 0);
{
- // Populate bucket with the given data
+ // Populate bucket with the given data
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(17, factory.getBucketId(documents[i]->getId()).getRawId());
auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), documents[i], 100 + i);
@@ -1594,66 +1367,57 @@ FileStorManagerTest::testJoin()
cmd->setAddress(*address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ ASSERT_EQ(1, top.getNumReplies());
auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
- // Delete every 5th document to have delete entries in file too
- if (i % 5 == 0) {
- auto rcmd = std::make_shared<api::RemoveCommand>(makeDocumentBucket(bucket),
- documents[i]->getId(), 1000000 + 100 + i);
+ // Delete every 5th document to have delete entries in file too
+ if ((i % 5) == 0) {
+ auto rcmd = std::make_shared<api::RemoveCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), 1000000 + 100 + i);
rcmd->setAddress(*address);
filestorHandler.schedule(rcmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
+ ASSERT_EQ(1, top.getNumReplies());
auto rreply = std::dynamic_pointer_cast<api::RemoveReply>(top.getReply(0));
- CPPUNIT_ASSERT_MSG(top.getReply(0)->getType().toString(),
- rreply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- rreply->getResult());
+ ASSERT_TRUE(rreply.get()) << top.getReply(0)->getType().toString();
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), rreply->getResult());
top.reset();
}
}
LOG(debug, "Starting the actual join after populating data");
- // Perform a join, check that other files are gone
+ // Perform a join, check that other files are gone
{
- std::shared_ptr<api::JoinBucketsCommand> cmd(
- new api::JoinBucketsCommand(makeDocumentBucket(document::BucketId(16, 1))));
- cmd->getSourceBuckets().push_back(document::BucketId(17, 0x00001));
- cmd->getSourceBuckets().push_back(document::BucketId(17, 0x10001));
+ auto cmd = std::make_shared<api::JoinBucketsCommand>(makeDocumentBucket(document::BucketId(16, 1)));
+ cmd->getSourceBuckets().emplace_back(document::BucketId(17, 0x00001));
+ cmd->getSourceBuckets().emplace_back(document::BucketId(17, 0x10001));
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::JoinBucketsReply> reply(
- std::dynamic_pointer_cast<api::JoinBucketsReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::JoinBucketsReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
top.reset();
}
// Test that the documents have gotten into the file.
for (uint32_t i=0; i<documents.size(); ++i) {
document::BucketId bucket(16, 1);
- std::shared_ptr<api::GetCommand> cmd(
- new api::GetCommand(makeDocumentBucket(bucket), documents[i]->getId(), "[all]"));
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
+ auto cmd = std::make_shared<api::GetCommand>(
+ makeDocumentBucket(bucket), documents[i]->getId(), "[all]");
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
cmd->setAddress(address);
filestorHandler.schedule(cmd, 0);
filestorHandler.flush(true);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::GetReply> reply(
- std::dynamic_pointer_cast<api::GetReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(i % 5 != 0 ? true : false, reply->wasFound());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::GetReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(((i % 5) != 0), reply->wasFound());
top.reset();
}
}
- // Closing file stor handler before threads are deleted, such that
- // file stor threads getNextMessage calls returns.
+ // Closing file stor handler before threads are deleted, such that
+    // file stor threads' getNextMessage calls return.
filestorHandler.close();
}
@@ -1673,30 +1437,24 @@ createIterator(DummyStorageLink& link,
spi::Selection(spi::DocumentSelection(docSel));
selection.setFromTimestamp(spi::Timestamp(fromTime.getTime()));
selection.setToTimestamp(spi::Timestamp(toTime.getTime()));
- CreateIteratorCommand::SP createIterCmd(
- new CreateIteratorCommand(makeDocumentBucket(bucket),
- selection,
- headerOnly ? "[header]" : "[all]",
- spi::NEWEST_DOCUMENT_ONLY));
+ auto createIterCmd = std::make_shared<CreateIteratorCommand>(
+ makeDocumentBucket(bucket), selection,
+ headerOnly ? "[header]" : "[all]",
+ spi::NEWEST_DOCUMENT_ONLY);
link.sendDown(createIterCmd);
link.waitForMessages(1, FileStorManagerTest::LONG_WAITTIME);
- CPPUNIT_ASSERT_EQUAL(size_t(1), link.getNumReplies());
- std::shared_ptr<CreateIteratorReply> reply(
- std::dynamic_pointer_cast<CreateIteratorReply>(
- link.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
+ assert(link.getNumReplies() == 1);
+ auto reply = std::dynamic_pointer_cast<CreateIteratorReply>(link.getReply(0));
+ assert(reply.get());
link.reset();
- CPPUNIT_ASSERT(reply->getResult().success());
+ assert(reply->getResult().success());
return reply->getIteratorId();
}
}
-void
-FileStorManagerTest::testVisiting()
-{
- TestName testName("testVisiting");
- // Setting up manager
+TEST_F(FileStorManagerTest, visiting) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
@@ -1705,9 +1463,10 @@ FileStorManagerTest::testVisiting()
// Adding documents to two buckets which we are going to visit
// We want one bucket in one slotfile, and one bucket with a file split
uint32_t docCount = 50;
- std::vector<document::BucketId> ids(2);
- ids[0] = document::BucketId(16, 1);
- ids[1] = document::BucketId(16, 2);
+ std::vector<document::BucketId> ids = {
+ document::BucketId(16, 1),
+ document::BucketId(16, 2)
+ };
createBucket(ids[0], 0);
createBucket(ids[1], 0);
@@ -1719,8 +1478,7 @@ FileStorManagerTest::testVisiting()
uri << "userdoc:crawler:" << (i < 3 ? 1 : 2) << ":"
<< randomizer.nextUint32() << ".html";
- Document::SP doc(createDocument(
- content, uri.str()).release());
+ Document::SP doc(createDocument(content, uri.str()));
const document::DocumentType& type(doc->getType());
if (i < 30) {
doc->setValue(type.getField("hstringval"),
@@ -1729,71 +1487,61 @@ FileStorManagerTest::testVisiting()
doc->setValue(type.getField("hstringval"),
document::StringFieldValue("Jane Doe"));
}
- std::shared_ptr<api::PutCommand> cmd(new api::PutCommand(
- makeDocumentBucket(ids[i < 3 ? 0 : 1]), doc, i+1));
+ auto cmd = std::make_shared<api::PutCommand>(
+ makeDocumentBucket(ids[(i < 3) ? 0 : 1]), doc, i+1);
top.sendDown(cmd);
}
top.waitForMessages(docCount, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) docCount, top.getNumReplies());
- // Check nodestate with splitting
+ ASSERT_EQ(docCount, top.getNumReplies());
+ // Check nodestate with splitting
{
api::BucketInfo info;
for (uint32_t i=3; i<docCount; ++i) {
- std::shared_ptr<api::BucketInfoReply> reply(
- std::dynamic_pointer_cast<api::BucketInfoReply>(
- top.getReply(i)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_MESSAGE(reply->getResult().toString(),
- reply->getResult().success());
+ auto reply = std::dynamic_pointer_cast<api::BucketInfoReply>(top.getReply(i));
+ ASSERT_TRUE(reply.get());
+ ASSERT_TRUE(reply->getResult().success()) << reply->getResult().toString();
info = reply->getBucketInfo();
}
- CPPUNIT_ASSERT_EQUAL(docCount-3, info.getDocumentCount());
+ EXPECT_EQ(docCount - 3, info.getDocumentCount());
}
top.reset();
- // Visit bucket with no split, using no selection
+ // Visit bucket with no split, using no selection
{
spi::IteratorId iterId(createIterator(top, ids[0], "true"));
auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(ids[0]), iterId, 16*1024);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(ids[0], reply->getBucketId());
- CPPUNIT_ASSERT_EQUAL(size_t(3), reply->getEntries().size());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(ids[0], reply->getBucketId());
+ EXPECT_EQ(3, reply->getEntries().size());
top.reset();
}
- // Visit bucket with split, using selection
+ // Visit bucket with split, using selection
{
uint32_t totalDocs = 0;
- spi::IteratorId iterId(
- createIterator(top,
- ids[1],
- "testdoctype1.hstringval = \"John Doe\""));
+ spi::IteratorId iterId(createIterator(top, ids[1], "testdoctype1.hstringval = \"John Doe\""));
while (true) {
auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(ids[1]), iterId, 16*1024);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
- CPPUNIT_ASSERT_EQUAL(ids[1], reply->getBucketId());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(ids[1], reply->getBucketId());
totalDocs += reply->getEntries().size();
top.reset();
if (reply->isCompleted()) {
break;
}
}
- CPPUNIT_ASSERT_EQUAL(27u, totalDocs);
+ EXPECT_EQ(27u, totalDocs);
}
- // Visit bucket with min and max timestamps set, headers only
+ // Visit bucket with min and max timestamps set, headers only
{
document::BucketId bucket(16, 2);
spi::IteratorId iterId(
@@ -1808,37 +1556,24 @@ FileStorManagerTest::testVisiting()
auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(ids[1]), iterId, 16*1024);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- reply->getResult());
- CPPUNIT_ASSERT_EQUAL(bucket, reply->getBucketId());
-/* Header only is a VDS-specific thing.
-
- for (size_t i = 0; i < reply->getEntries().size(); ++i) {
- CPPUNIT_ASSERT(reply->getEntries()[i]->getDocument()
- ->getBody().empty());
- }
-*/
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(bucket, reply->getBucketId());
totalDocs += reply->getEntries().size();
top.reset();
if (reply->isCompleted()) {
break;
}
}
- CPPUNIT_ASSERT_EQUAL(11u, totalDocs);
+ EXPECT_EQ(11u, totalDocs);
}
}
-void
-FileStorManagerTest::testRemoveLocation()
-{
- TestName testName("testRemoveLocation");
- // Setting up manager
+TEST_F(FileStorManagerTest, remove_location) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
@@ -1853,55 +1588,44 @@ FileStorManagerTest::testRemoveLocation()
for (uint32_t i=0; i<=10; ++i) {
std::ostringstream docid;
docid << "userdoc:ns:" << (i << 8) << ":foo";
- Document::SP doc(createDocument(
- "some content", docid.str()).release());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 1000 + i));
+ Document::SP doc(createDocument("some content", docid.str()));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 1000 + i);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(i + 1u, reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(i + 1u, reply->getBucketInfo().getDocumentCount());
}
- // Issuing remove location command
+ // Issuing remove location command
{
- std::shared_ptr<api::RemoveLocationCommand> cmd(
- new api::RemoveLocationCommand("id.user % 512 == 0", makeDocumentBucket(bid)));
- //new api::RemoveLocationCommand("id.user == 1", bid));
+ auto cmd = std::make_shared<api::RemoveLocationCommand>("id.user % 512 == 0", makeDocumentBucket(bid));
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::RemoveLocationReply> reply(
- std::dynamic_pointer_cast<api::RemoveLocationReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::RemoveLocationReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(5u, reply->getBucketInfo().getDocumentCount());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(5u, reply->getBucketInfo().getDocumentCount());
}
}
-void FileStorManagerTest::testDeleteBucket()
-{
- TestName testName("testDeleteBucket");
- // Setting up manager
+TEST_F(FileStorManagerTest, delete_bucket) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 2);
- // Creating a document to test with
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
+ // Creating a document to test with
document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
- Document::SP doc(new Document(*_testdoctype1, docId));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
document::BucketId bid(16, 4000);
createBucket(bid, 0);
@@ -1909,52 +1633,42 @@ void FileStorManagerTest::testDeleteBucket()
api::BucketInfo bucketInfo;
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
bucketInfo = reply->getBucketInfo();
top.reset();
}
// Delete bucket
{
- std::shared_ptr<api::DeleteBucketCommand> cmd(
- new api::DeleteBucketCommand(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bid));
cmd->setAddress(address);
cmd->setBucketInfo(bucketInfo);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::DeleteBucketReply> reply(
- std::dynamic_pointer_cast<api::DeleteBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::DeleteBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
}
-void
-FileStorManagerTest::testDeleteBucketRejectOutdatedBucketInfo()
-{
- TestName testName("testDeleteBucketRejectOutdatedBucketInfo");
+TEST_F(FileStorManagerTest, delete_bucket_rejects_outdated_bucket_info) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 2);
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
// Creating a document to test with
document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
Document::SP doc(new Document(*_testdoctype1, docId));
@@ -1966,40 +1680,32 @@ FileStorManagerTest::testDeleteBucketRejectOutdatedBucketInfo()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
bucketInfo = reply->getBucketInfo();
top.reset();
}
// Attempt to delete bucket, but with non-matching bucketinfo
{
- std::shared_ptr<api::DeleteBucketCommand> cmd(
- new api::DeleteBucketCommand(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bid));
cmd->setBucketInfo(api::BucketInfo(0xf000baaa, 1, 123, 1, 456));
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::DeleteBucketReply> reply(
- std::dynamic_pointer_cast<api::DeleteBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(
- ReturnCode::REJECTED,
- reply->getResult().getResult());
- CPPUNIT_ASSERT_EQUAL(bucketInfo, reply->getBucketInfo());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::DeleteBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::REJECTED, reply->getResult().getResult());
+ EXPECT_EQ(bucketInfo, reply->getBucketInfo());
}
}
@@ -2007,191 +1713,51 @@ FileStorManagerTest::testDeleteBucketRejectOutdatedBucketInfo()
* Test that receiving a DeleteBucketCommand with invalid
* BucketInfo deletes the bucket and does not fail the operation.
*/
-void
-FileStorManagerTest::testDeleteBucketWithInvalidBucketInfo()
-{
- TestName testName("testDeleteBucketWithInvalidBucketInfo");
+TEST_F(FileStorManagerTest, delete_bucket_with_invalid_bucket_info) {
// Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager = new FileStorManager(
config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 2);
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 2);
// Creating a document to test with
document::DocumentId docId("userdoc:crawler:4000:http://www.ntnu.no/");
- Document::SP doc(new Document(*_testdoctype1, docId));
+ auto doc = std::make_shared<Document>(*_testdoctype1, docId);
document::BucketId bid(16, 4000);
createBucket(bid, 0);
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 105));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 105);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT_EQUAL(1, (int)reply->getBucketInfo().getDocumentCount());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_EQ(1, reply->getBucketInfo().getDocumentCount());
top.reset();
}
// Attempt to delete bucket with invalid bucketinfo
{
- std::shared_ptr<api::DeleteBucketCommand> cmd(
- new api::DeleteBucketCommand(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bid));
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::DeleteBucketReply> reply(
- std::dynamic_pointer_cast<api::DeleteBucketReply>(
- top.getReply(0)));
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(
- ReturnCode::OK,
- reply->getResult().getResult());
- CPPUNIT_ASSERT_EQUAL(api::BucketInfo(), reply->getBucketInfo());
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::DeleteBucketReply>(top.getReply(0));
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::OK, reply->getResult().getResult());
+ EXPECT_EQ(api::BucketInfo(), reply->getBucketInfo());
}
}
-namespace {
-
- /**
- * Utility storage link, sending data to the given links instead of through
- * a regular chain.
- */
- struct MidLink : public StorageLink {
- StorageLink& _up;
-
- public:
- MidLink(std::unique_ptr<StorageLink> down, StorageLink& up)
- : StorageLink("MidLink"), _up(up)
- {
- push_back(std::move(down));
- }
- ~MidLink() {
- closeNextLink();
- }
-
- void print(std::ostream& out, bool, const std::string&) const override { out << "MidLink"; }
- bool onUp(const std::shared_ptr<api::StorageMessage> & msg) override {
- if (!StorageLinkTest::callOnUp(_up, msg)) _up.sendUp(msg);
- return true;
- }
-
- };
-
- /**
- * Utility class, connecting two storage links below it, sending
- * messages coming up from one down the other (providing address is set
- * correctly.)
- */
- class BinaryStorageLink : public DummyStorageLink {
- vespalib::Lock _lock;
- std::set<api::StorageMessage::Id> _seen;
- MidLink _left;
- MidLink _right;
- uint16_t _leftAddr;
- uint16_t _rightAddr;
-
- public:
- BinaryStorageLink(uint16_t leftAddr, std::unique_ptr<StorageLink> left,
- uint16_t rightAddr, std::unique_ptr<StorageLink> right)
- : _left(std::move(left), *this),
- _right(std::move(right), *this),
- _leftAddr(leftAddr),
- _rightAddr(rightAddr) {}
-
- void print(std::ostream& out, bool, const std::string&) const override { out << "BinaryStorageLink"; }
-
- bool onDown(const std::shared_ptr<api::StorageMessage> & msg) override {
-// LOG(debug, "onDown Received msg: ->%s, %s %llu\n", msg->getAddress() ? msg->getAddress()->toString().c_str() : "(null)", msg->toString().c_str(), msg->getMsgId());
-
- vespalib::LockGuard lock(_lock);
- _seen.insert(msg->getMsgId());
- return sendOn(msg);
- }
-
- bool sendOn(const std::shared_ptr<api::StorageMessage> & msg) {
- if (msg->getAddress()) {
- uint16_t address = msg->getAddress()->getIndex();
- if ((address == _leftAddr && !msg->getType().isReply()) ||
- (address == _rightAddr && msg->getType().isReply()))
- {
- if (!StorageLinkTest::callOnDown(_left, msg)) {
- _left.sendDown(msg);
- }
- } else if ((address == _rightAddr && !msg->getType().isReply()) ||
- (address == _leftAddr && msg->getType().isReply()))
- {
- if (!StorageLinkTest::callOnDown(_right, msg)) {
- _right.sendDown(msg);
- }
- } else {
- std::ostringstream ost;
- ost << "Address " << address << " is neither " << _leftAddr
- << " or " << _rightAddr << " in message " << *msg
- << ".\n";
- CPPUNIT_FAIL(ost.str());
- }
- }
- return true;
- }
-
- bool onUp(const std::shared_ptr<api::StorageMessage> & msg) override {
- // LOG(debug, "onUp Received msg: ->%s, %s %llu\n", msg->getAddress() ? msg->getAddress()->toString().c_str() : "(null)", msg->toString().c_str(), msg->getMsgId());
-
- vespalib::LockGuard lock(_lock);
- std::set<api::StorageMessage::Id>::iterator it
- = _seen.find(msg->getMsgId());
- // If message originated from the outside
- if (it != _seen.end()) {
- LOG(debug, "Have seen this message before, storing");
-
- _seen.erase(it);
- return DummyStorageLink::onUp(msg);
- // If it originated from below, send it down again.
- } else if (msg->getType() == api::MessageType::NOTIFYBUCKETCHANGE) {
- // Just throw away notify bucket change
- return true;
- } else {
- LOG(debug, "Never seen %s, sending on!",
- msg->toString().c_str());
-
- return sendOn(msg);
- }
- }
-
- void onFlush(bool downwards) override {
- if (downwards) {
- _left.flush();
- _right.flush();
- }
- }
- void onOpen() override {
- _left.open();
- _right.open();
- }
- void onClose() override {
- _left.close();
- _right.close();
- }
- };
-}
-
-void
-FileStorManagerTest::testNoTimestamps()
-{
- TestName testName("testNoTimestamps");
- // Setting up manager
+TEST_F(FileStorManagerTest, no_timestamps) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
@@ -2199,7 +1765,7 @@ FileStorManagerTest::testNoTimestamps()
top.open();
api::StorageMessageAddress address(
"storage", lib::NodeType::STORAGE, 3);
- // Creating a document to test with
+ // Creating a document to test with
Document::SP doc(createDocument(
"some content", "doc:crawler:http://www.ntnu.no/").release());
document::BucketId bid(16, 4000);
@@ -2208,53 +1774,41 @@ FileStorManagerTest::testNoTimestamps()
// Putting it
{
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 0));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 0);
cmd->setAddress(address);
- CPPUNIT_ASSERT_EQUAL((api::Timestamp)0, cmd->getTimestamp());
+ EXPECT_EQ(api::Timestamp(0), cmd->getTimestamp());
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::REJECTED,
- reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::REJECTED, reply->getResult().getResult());
}
- // Removing it
+ // Removing it
{
- std::shared_ptr<api::RemoveCommand> cmd(
- new api::RemoveCommand(makeDocumentBucket(bid), doc->getId(), 0));
+ auto cmd = std::make_shared<api::RemoveCommand>(makeDocumentBucket(bid), doc->getId(), 0);
cmd->setAddress(address);
- CPPUNIT_ASSERT_EQUAL((api::Timestamp)0, cmd->getTimestamp());
+ EXPECT_EQ(api::Timestamp(0), cmd->getTimestamp());
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::RemoveReply> reply(
- std::dynamic_pointer_cast<api::RemoveReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::RemoveReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::REJECTED,
- reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::REJECTED, reply->getResult().getResult());
}
}
-void
-FileStorManagerTest::testEqualTimestamps()
-{
- TestName testName("testEqualTimestamps");
- // Setting up manager
+TEST_F(FileStorManagerTest, equal_timestamps) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
new FileStorManager(config->getConfigId(), _node->getPartitions(), _node->getPersistenceProvider(), _node->getComponentRegister())));
top.open();
- api::StorageMessageAddress address(
- "storage", lib::NodeType::STORAGE, 3);
- // Creating a document to test with
+ api::StorageMessageAddress address("storage", lib::NodeType::STORAGE, 3);
+ // Creating a document to test with
document::BucketId bid(16, 4000);
createBucket(bid, 0);
@@ -2262,20 +1816,16 @@ FileStorManagerTest::testEqualTimestamps()
// Putting it
{
Document::SP doc(createDocument(
- "some content", "userdoc:crawler:4000:http://www.ntnu.no/")
- .release());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 100));
+ "some content", "userdoc:crawler:4000:http://www.ntnu.no/"));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 100);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::OK, reply->getResult().getResult());
}
// Putting it on same timestamp again
@@ -2283,48 +1833,36 @@ FileStorManagerTest::testEqualTimestamps()
// have to accept this)
{
Document::SP doc(createDocument(
- "some content", "userdoc:crawler:4000:http://www.ntnu.no/")
- .release());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 100));
+ "some content", "userdoc:crawler:4000:http://www.ntnu.no/"));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 100);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::OK, reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::OK, reply->getResult().getResult());
}
// Putting the doc with other id. Now we should fail
{
Document::SP doc(createDocument(
- "some content", "userdoc:crawler:4000:http://www.ntnu.nu/")
- .release());
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), doc, 100));
+ "some content", "userdoc:crawler:4000:http://www.ntnu.nu/"));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), doc, 100);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::TIMESTAMP_EXIST,
- reply->getResult().getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::TIMESTAMP_EXIST, reply->getResult().getResult());
}
}
-void
-FileStorManagerTest::testGetIter()
-{
- TestName testName("testGetIter");
- // Setting up manager
+TEST_F(FileStorManagerTest, get_iter) {
+ // Setting up manager
DummyStorageLink top;
FileStorManager *manager;
top.push_back(unique_ptr<StorageLink>(manager =
@@ -2337,85 +1875,70 @@ FileStorManagerTest::testGetIter()
createBucket(bid, 0);
std::vector<Document::SP > docs;
- // Creating some documents to test with
+ // Creating some documents to test with
for (uint32_t i=0; i<10; ++i) {
std::ostringstream id;
id << "userdoc:crawler:4000:http://www.ntnu.no/" << i;
- docs.push_back(
+ docs.emplace_back(
Document::SP(
_node->getTestDocMan().createRandomDocumentAtLocation(
4000, i, 400, 400)));
}
api::BucketInfo bucketInfo;
- // Putting all docs to have something to visit
+ // Putting all docs to have something to visit
for (uint32_t i=0; i<docs.size(); ++i) {
- std::shared_ptr<api::PutCommand> cmd(
- new api::PutCommand(makeDocumentBucket(bid), docs[i], 100 + i));
+ auto cmd = std::make_shared<api::PutCommand>(makeDocumentBucket(bid), docs[i], 100 + i);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::PutReply> reply(
- std::dynamic_pointer_cast<api::PutReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::PutReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
bucketInfo = reply->getBucketInfo();
}
- // Sending a getiter request that will only visit some of the docs
+ // Sending a getiter request that will only visit some of the docs
spi::IteratorId iterId(createIterator(top, bid, ""));
{
- std::shared_ptr<GetIterCommand> cmd(
- new GetIterCommand(makeDocumentBucket(bid), iterId, 2048));
+ auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(bid), iterId, 2048);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
- CPPUNIT_ASSERT(reply->getEntries().size() > 0);
- CPPUNIT_ASSERT(reply->getEntries().size() < docs.size());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
+ EXPECT_GT(reply->getEntries().size(), 0);
+ EXPECT_LT(reply->getEntries().size(), docs.size());
}
- // Normal case of get iter is testing through visitor tests.
- // Testing specific situation where file is deleted while visiting here
+ // Normal case of get iter is tested through visitor tests.
+ // Testing specific situation where file is deleted while visiting here
{
- std::shared_ptr<api::DeleteBucketCommand> cmd(
- new api::DeleteBucketCommand(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bid));
cmd->setBucketInfo(bucketInfo);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::DeleteBucketReply> reply(
- std::dynamic_pointer_cast<api::DeleteBucketReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::DeleteBucketReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
{
auto cmd = std::make_shared<GetIterCommand>(makeDocumentBucket(bid), iterId, 2048);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL(size_t(1), top.getNumReplies());
- std::shared_ptr<GetIterReply> reply(
- std::dynamic_pointer_cast<GetIterReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<GetIterReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode::BUCKET_NOT_FOUND,
- reply->getResult().getResult());
- CPPUNIT_ASSERT(reply->getEntries().empty());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode::BUCKET_NOT_FOUND, reply->getResult().getResult());
+ EXPECT_TRUE(reply->getEntries().empty());
}
}
-void
-FileStorManagerTest::testSetBucketActiveState()
-{
- TestName testName("testSetBucketActiveState");
+TEST_F(FileStorManagerTest, set_bucket_active_state) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2431,83 +1954,70 @@ FileStorManagerTest::testSetBucketActiveState()
const uint16_t disk = 0;
createBucket(bid, disk);
- spi::dummy::DummyPersistence& provider(
- dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider()));
- CPPUNIT_ASSERT(!provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ auto& provider = dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider());
+ EXPECT_FALSE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
{
- std::shared_ptr<api::SetBucketStateCommand> cmd(
- new api::SetBucketStateCommand(
- makeDocumentBucket(bid), api::SetBucketStateCommand::ACTIVE));
+ auto cmd = std::make_shared<api::SetBucketStateCommand>(
+ makeDocumentBucket(bid), api::SetBucketStateCommand::ACTIVE);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SetBucketStateReply> reply(
- std::dynamic_pointer_cast<api::SetBucketStateReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SetBucketStateReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
- CPPUNIT_ASSERT(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ EXPECT_TRUE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry->info.isActive());
+ EXPECT_TRUE(entry->info.isActive());
}
// Trigger bucket info to be read back into the database
{
- std::shared_ptr<ReadBucketInfo> cmd(
- new ReadBucketInfo(makeDocumentBucket(bid)));
+ auto cmd = std::make_shared<ReadBucketInfo>(makeDocumentBucket(bid));
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<ReadBucketInfoReply> reply(
- std::dynamic_pointer_cast<ReadBucketInfoReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<ReadBucketInfoReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
+ ASSERT_TRUE(reply.get());
}
// Should not have lost active flag
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry->info.isActive());
+ EXPECT_TRUE(entry->info.isActive());
}
{
- std::shared_ptr<api::SetBucketStateCommand> cmd(
- new api::SetBucketStateCommand(
- makeDocumentBucket(bid), api::SetBucketStateCommand::INACTIVE));
+ auto cmd = std::make_shared<api::SetBucketStateCommand>(
+ makeDocumentBucket(bid), api::SetBucketStateCommand::INACTIVE);
cmd->setAddress(address);
top.sendDown(cmd);
top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL((size_t) 1, top.getNumReplies());
- std::shared_ptr<api::SetBucketStateReply> reply(
- std::dynamic_pointer_cast<api::SetBucketStateReply>(
- top.getReply(0)));
+ ASSERT_EQ(1, top.getNumReplies());
+ auto reply = std::dynamic_pointer_cast<api::SetBucketStateReply>(top.getReply(0));
top.reset();
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
+ ASSERT_TRUE(reply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), reply->getResult());
}
- CPPUNIT_ASSERT(!provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
+ EXPECT_FALSE(provider.isActive(makeSpiBucket(bid, spi::PartitionId(disk))));
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(!entry->info.isActive());
+ EXPECT_FALSE(entry->info.isActive());
}
}
-void
-FileStorManagerTest::testNotifyOwnerDistributorOnOutdatedSetBucketState()
-{
- TestName testName("testNotifyOwnerDistributorOnOutdatedSetBucketState");
+TEST_F(FileStorManagerTest, notify_owner_distributor_on_outdated_set_bucket_state) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2520,47 +2030,37 @@ FileStorManagerTest::testNotifyOwnerDistributorOnOutdatedSetBucketState()
top.open();
document::BucketId bid(getFirstBucketNotOwnedByDistributor(0));
- CPPUNIT_ASSERT(bid.getRawId() != 0);
+ ASSERT_NE(bid.getRawId(), 0);
createBucket(bid, 0);
- std::shared_ptr<api::SetBucketStateCommand> cmd(
- new api::SetBucketStateCommand(
- makeDocumentBucket(bid), api::SetBucketStateCommand::ACTIVE));
- cmd->setAddress(api::StorageMessageAddress(
- "cluster", lib::NodeType::STORAGE, 1));
+ auto cmd = std::make_shared<api::SetBucketStateCommand>(
+ makeDocumentBucket(bid), api::SetBucketStateCommand::ACTIVE);
+ cmd->setAddress(api::StorageMessageAddress("cluster", lib::NodeType::STORAGE, 1));
cmd->setSourceIndex(0);
top.sendDown(cmd);
top.waitForMessages(2, _waitTime);
- CPPUNIT_ASSERT_EQUAL(size_t(2), top.getNumReplies());
+ ASSERT_EQ(2, top.getNumReplies());
// Not necessarily deterministic order.
int idxOffset = 0;
if (top.getReply(0)->getType() != api::MessageType::NOTIFYBUCKETCHANGE) {
++idxOffset;
}
- std::shared_ptr<api::NotifyBucketChangeCommand> notifyCmd(
- std::dynamic_pointer_cast<api::NotifyBucketChangeCommand>(
- top.getReply(idxOffset)));
- std::shared_ptr<api::SetBucketStateReply> stateReply(
- std::dynamic_pointer_cast<api::SetBucketStateReply>(
- top.getReply(1 - idxOffset)));
+ auto notifyCmd = std::dynamic_pointer_cast<api::NotifyBucketChangeCommand>(top.getReply(idxOffset));
+ auto stateReply = std::dynamic_pointer_cast<api::SetBucketStateReply>(top.getReply(1 - idxOffset));
- CPPUNIT_ASSERT(stateReply.get());
- CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK),
- stateReply->getResult());
+ ASSERT_TRUE(stateReply.get());
+ EXPECT_EQ(ReturnCode(ReturnCode::OK), stateReply->getResult());
- CPPUNIT_ASSERT(notifyCmd.get());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1), notifyCmd->getAddress()->getIndex());
+ ASSERT_TRUE(notifyCmd.get());
+ EXPECT_EQ(1, notifyCmd->getAddress()->getIndex());
// Not necessary for this to be set since distributor does not insert this
// info into its db, but useful for debugging purposes.
- CPPUNIT_ASSERT(notifyCmd->getBucketInfo().isActive());
+ EXPECT_TRUE(notifyCmd->getBucketInfo().isActive());
}
-void
-FileStorManagerTest::testGetBucketDiffImplicitCreateBucket()
-{
- TestName testName("testGetBucketDiffImplicitCreateBucket");
+TEST_F(FileStorManagerTest, GetBucketDiff_implicitly_creates_bucket) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2573,34 +2073,26 @@ FileStorManagerTest::testGetBucketDiffImplicitCreateBucket()
document::BucketId bid(16, 4000);
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(1);
- nodes.push_back(0);
+ std::vector<api::MergeBucketCommand::Node> nodes = {1, 0};
- std::shared_ptr<api::GetBucketDiffCommand> cmd(
- new api::GetBucketDiffCommand(makeDocumentBucket(bid), nodes, Timestamp(1000)));
- cmd->setAddress(api::StorageMessageAddress(
- "cluster", lib::NodeType::STORAGE, 1));
+ auto cmd = std::make_shared<api::GetBucketDiffCommand>(makeDocumentBucket(bid), nodes, Timestamp(1000));
+ cmd->setAddress(api::StorageMessageAddress("cluster", lib::NodeType::STORAGE, 1));
cmd->setSourceIndex(0);
top.sendDown(cmd);
api::GetBucketDiffReply* reply;
ASSERT_SINGLE_REPLY(api::GetBucketDiffReply, reply, top, _waitTime);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
- reply->getResult());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry.exist());
- CPPUNIT_ASSERT(entry->info.isReady());
+ ASSERT_TRUE(entry.exist());
+ EXPECT_TRUE(entry->info.isReady());
}
}
-void
-FileStorManagerTest::testMergeBucketImplicitCreateBucket()
-{
- TestName testName("testMergeBucketImplicitCreateBucket");
+TEST_F(FileStorManagerTest, merge_bucket_implicitly_creates_bucket) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2613,14 +2105,10 @@ FileStorManagerTest::testMergeBucketImplicitCreateBucket()
document::BucketId bid(16, 4000);
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(1);
- nodes.push_back(2);
+ std::vector<api::MergeBucketCommand::Node> nodes = {1, 2};
- std::shared_ptr<api::MergeBucketCommand> cmd(
- new api::MergeBucketCommand(makeDocumentBucket(bid), nodes, Timestamp(1000)));
- cmd->setAddress(api::StorageMessageAddress(
- "cluster", lib::NodeType::STORAGE, 1));
+ auto cmd = std::make_shared<api::MergeBucketCommand>(makeDocumentBucket(bid), nodes, Timestamp(1000));
+ cmd->setAddress(api::StorageMessageAddress("cluster", lib::NodeType::STORAGE, 1));
cmd->setSourceIndex(0);
top.sendDown(cmd);
@@ -2630,15 +2118,12 @@ FileStorManagerTest::testMergeBucketImplicitCreateBucket()
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry.exist());
- CPPUNIT_ASSERT(entry->info.isReady());
+ ASSERT_TRUE(entry.exist());
+ EXPECT_TRUE(entry->info.isReady());
}
}
-void
-FileStorManagerTest::testNewlyCreatedBucketIsReady()
-{
- TestName testName("testNewlyCreatedBucketIsReady");
+TEST_F(FileStorManagerTest, newly_created_bucket_is_ready) {
DummyStorageLink top;
FileStorManager* manager(
new FileStorManager(config->getConfigId(),
@@ -2651,31 +2136,26 @@ FileStorManagerTest::testNewlyCreatedBucketIsReady()
document::BucketId bid(16, 4000);
- std::shared_ptr<api::CreateBucketCommand> cmd(
- new api::CreateBucketCommand(makeDocumentBucket(bid)));
- cmd->setAddress(api::StorageMessageAddress(
- "cluster", lib::NodeType::STORAGE, 1));
+ auto cmd = std::make_shared<api::CreateBucketCommand>(makeDocumentBucket(bid));
+ cmd->setAddress(api::StorageMessageAddress("cluster", lib::NodeType::STORAGE, 1));
cmd->setSourceIndex(0);
top.sendDown(cmd);
api::CreateBucketReply* reply;
ASSERT_SINGLE_REPLY(api::CreateBucketReply, reply, top, _waitTime);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
- reply->getResult());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry.exist());
- CPPUNIT_ASSERT(entry->info.isReady());
- CPPUNIT_ASSERT(!entry->info.isActive());
+ ASSERT_TRUE(entry.exist());
+ EXPECT_TRUE(entry->info.isReady());
+ EXPECT_FALSE(entry->info.isActive());
}
}
-void
-FileStorManagerTest::testCreateBucketSetsActiveFlagInDatabaseAndReply()
-{
- TestFileStorComponents c(*this, "testNotifyOnSplitSourceOwnershipChanged");
+TEST_F(FileStorManagerTest, create_bucket_sets_active_flag_in_database_and_reply) {
+ TestFileStorComponents c(*this);
setClusterState("storage:2 distributor:1");
document::BucketId bid(16, 4000);
@@ -2689,15 +2169,14 @@ FileStorManagerTest::testCreateBucketSetsActiveFlagInDatabaseAndReply()
api::CreateBucketReply* reply;
ASSERT_SINGLE_REPLY(api::CreateBucketReply, reply, c.top, _waitTime);
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode(api::ReturnCode::OK),
- reply->getResult());
+ EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
bid, "foo"));
- CPPUNIT_ASSERT(entry.exist());
- CPPUNIT_ASSERT(entry->info.isReady());
- CPPUNIT_ASSERT(entry->info.isActive());
+ ASSERT_TRUE(entry.exist());
+ EXPECT_TRUE(entry->info.isReady());
+ EXPECT_TRUE(entry->info.isActive());
}
}
@@ -2708,11 +2187,11 @@ void FileStorManagerTest::assert_request_size_set(TestFileStorComponents& c, std
cmd->setAddress(address);
c.top.sendDown(cmd);
c.top.waitForMessages(1, _waitTime);
- CPPUNIT_ASSERT_EQUAL(static_cast<int64_t>(cmd->getApproxByteSize()), metric.request_size.getLast());
+ EXPECT_EQ(static_cast<int64_t>(cmd->getApproxByteSize()), metric.request_size.getLast());
}
-void FileStorManagerTest::put_command_size_is_added_to_metric() {
- TestFileStorComponents c(*this, "put_command_size_is_added_to_metric");
+TEST_F(FileStorManagerTest, put_command_size_is_added_to_metric) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 4000);
createBucket(bucket, 0);
auto cmd = std::make_shared<api::PutCommand>(
@@ -2721,8 +2200,8 @@ void FileStorManagerTest::put_command_size_is_added_to_metric() {
assert_request_size_set(c, std::move(cmd), thread_metrics_of(*c.manager)->put[defaultLoadType]);
}
-void FileStorManagerTest::update_command_size_is_added_to_metric() {
- TestFileStorComponents c(*this, "update_command_size_is_added_to_metric");
+TEST_F(FileStorManagerTest, update_command_size_is_added_to_metric) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 4000);
createBucket(bucket, 0);
auto update = std::make_shared<document::DocumentUpdate>(
@@ -2735,8 +2214,8 @@ void FileStorManagerTest::update_command_size_is_added_to_metric() {
assert_request_size_set(c, std::move(cmd), thread_metrics_of(*c.manager)->update[defaultLoadType]);
}
-void FileStorManagerTest::remove_command_size_is_added_to_metric() {
- TestFileStorComponents c(*this, "remove_command_size_is_added_to_metric");
+TEST_F(FileStorManagerTest, remove_command_size_is_added_to_metric) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 4000);
createBucket(bucket, 0);
auto cmd = std::make_shared<api::RemoveCommand>(
@@ -2745,8 +2224,8 @@ void FileStorManagerTest::remove_command_size_is_added_to_metric() {
assert_request_size_set(c, std::move(cmd), thread_metrics_of(*c.manager)->remove[defaultLoadType]);
}
-void FileStorManagerTest::get_command_size_is_added_to_metric() {
- TestFileStorComponents c(*this, "get_command_size_is_added_to_metric");
+TEST_F(FileStorManagerTest, get_command_size_is_added_to_metric) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 4000);
createBucket(bucket, 0);
auto cmd = std::make_shared<api::GetCommand>(
diff --git a/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp b/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
index 1fab3a8bcc1..e21dde006dc 100644
--- a/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
#include <vespa/persistence/spi/test.h>
@@ -9,6 +8,7 @@
#include <tests/persistence/common/filestortestfixture.h>
using storage::spi::test::makeSpiBucket;
+using namespace ::testing;
namespace storage {
@@ -16,26 +16,14 @@ namespace storage {
* Effectively an integration test between the ModifiedBucketChecker storage
* link and the behavior of the filestor component.
*/
-class FileStorModifiedBucketsTest : public FileStorTestFixture
-{
-public:
- void modifiedBucketsSendNotifyBucketChange();
- void fileStorRepliesToRecheckBucketCommands();
-
+struct FileStorModifiedBucketsTest : FileStorTestFixture {
void modifyBuckets(uint32_t first, uint32_t count);
spi::dummy::DummyPersistence& getDummyPersistence() {
return dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider());
}
-
- CPPUNIT_TEST_SUITE(FileStorModifiedBucketsTest);
- CPPUNIT_TEST(modifiedBucketsSendNotifyBucketChange);
- CPPUNIT_TEST(fileStorRepliesToRecheckBucketCommands);
- CPPUNIT_TEST_SUITE_END();
};
-CPPUNIT_TEST_SUITE_REGISTRATION(FileStorModifiedBucketsTest);
-
namespace {
struct BucketCheckerInjector : FileStorTestFixture::StorageLinkInjector
@@ -48,20 +36,19 @@ struct BucketCheckerInjector : FileStorTestFixture::StorageLinkInjector
_fixture(fixture)
{}
void inject(DummyStorageLink& link) const override {
- link.push_back(std::unique_ptr<ModifiedBucketChecker>(
- new ModifiedBucketChecker(_node.getComponentRegister(),
- _node.getPersistenceProvider(),
- _fixture._config->getConfigId())));
+ link.push_back(std::make_unique<ModifiedBucketChecker>(
+ _node.getComponentRegister(),
+ _node.getPersistenceProvider(),
+ _fixture._config->getConfigId()));
}
};
void
assertIsNotifyCommandWithActiveBucket(api::StorageMessage& msg)
{
- api::NotifyBucketChangeCommand& cmd(
- dynamic_cast<api::NotifyBucketChangeCommand&>(msg));
- CPPUNIT_ASSERT(cmd.getBucketInfo().isActive());
- CPPUNIT_ASSERT_EQUAL(
+ auto& cmd = dynamic_cast<api::NotifyBucketChangeCommand&>(msg);
+ ASSERT_TRUE(cmd.getBucketInfo().isActive());
+ ASSERT_EQ(
vespalib::string("StorageMessageAddress(Storage protocol, "
"cluster storage, nodetype distributor, index 0)"),
cmd.getAddress()->toString());
@@ -83,11 +70,9 @@ FileStorModifiedBucketsTest::modifyBuckets(uint32_t first, uint32_t count)
getDummyPersistence().setModifiedBuckets(buckets);
}
-void
-FileStorModifiedBucketsTest::modifiedBucketsSendNotifyBucketChange()
-{
+TEST_F(FileStorModifiedBucketsTest, modified_buckets_send_notify_bucket_change) {
BucketCheckerInjector bcj(*_node, *this);
- TestFileStorComponents c(*this, "modifiedBucketsSendNotifyBucketChange", bcj);
+ TestFileStorComponents c(*this, bcj);
setClusterState("storage:1 distributor:1");
uint32_t numBuckets = 10;
@@ -104,21 +89,19 @@ FileStorModifiedBucketsTest::modifiedBucketsSendNotifyBucketChange()
c.top.waitForMessages(numBuckets, MSG_WAIT_TIME);
for (uint32_t i = 0; i < 10; ++i) {
- assertIsNotifyCommandWithActiveBucket(*c.top.getReply(i));
+ ASSERT_NO_FATAL_FAILURE(assertIsNotifyCommandWithActiveBucket(*c.top.getReply(i)));
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
document::BucketId(16, i), "foo"));
- CPPUNIT_ASSERT(entry->info.isActive());
+ EXPECT_TRUE(entry->info.isActive());
}
}
-void
-FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
-{
+TEST_F(FileStorModifiedBucketsTest, file_stor_replies_to_recheck_bucket_commands) {
BucketCheckerInjector bcj(*_node, *this);
- TestFileStorComponents c(*this, "fileStorRepliesToRecheckBucketCommands", bcj);
+ TestFileStorComponents c(*this, bcj);
setClusterState("storage:1 distributor:1");
document::BucketId bucket(16, 0);
@@ -129,7 +112,7 @@ FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
modifyBuckets(0, 1);
c.top.waitForMessages(1, MSG_WAIT_TIME);
- assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));
+ ASSERT_NO_FATAL_FAILURE(assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0)));
// If we don't reply to the recheck bucket commands, we won't trigger
// a new round of getModifiedBuckets and recheck commands.
@@ -137,7 +120,7 @@ FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
createBucket(makeSpiBucket(document::BucketId(16, 1)));
modifyBuckets(1, 1);
c.top.waitForMessages(1, MSG_WAIT_TIME);
- assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));
+ ASSERT_NO_FATAL_FAILURE(assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0)));
}
} // storage
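
Note on the ASSERT_NO_FATAL_FAILURE wrapping used in this file: in googletest, a fatal ASSERT_* failure inside a void helper only aborts that helper, so the call site must be wrapped for the failure to abort the calling test as well. A minimal self-contained sketch of the idiom (the helper and values are illustrative only, not part of this patch; assumes linking against gtest_main):

#include <gtest/gtest.h>

// Illustrative helper only. A fatal failure here aborts just this function;
// the caller keeps running unless it wraps the call site.
void assertIsPositive(int value) {
    ASSERT_GT(value, 0) << "value must be positive";
}

TEST(AssertNoFatalFailureSketch, fatal_failure_in_helper_aborts_wrapped_caller) {
    // With the wrapper, a failed ASSERT_GT inside the helper also marks this
    // test as failed and stops executing the remaining statements below.
    ASSERT_NO_FATAL_FAILURE(assertIsPositive(1));
    ASSERT_NO_FATAL_FAILURE(assertIsPositive(2));
}
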
diff --git a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
index d4cec415937..d9582cec585 100644
--- a/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/mergeblockingtest.cpp
@@ -1,20 +1,18 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vector>
-#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/persistence/messages.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <tests/persistence/common/filestortestfixture.h>
-#include <vespa/document/test/make_document_bucket.h>
+#include <vector>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class MergeBlockingTest : public FileStorTestFixture
-{
-public:
+struct MergeBlockingTest : public FileStorTestFixture {
void setupDisks() {
FileStorTestFixture::setupPersistenceThreads(1);
_node->setPersistenceProvider(
@@ -22,32 +20,11 @@ public:
new spi::dummy::DummyPersistence(_node->getTypeRepo(), 1)));
}
-public:
- void testRejectMergeForInconsistentInnerBucket();
- void testRejectMergeForInconsistentLeafBucket();
- void testRejectGetBucketDiffWithInconsistentBucket();
- void testRejectApplyDiffWhenBucketHasBecomeInconsistent();
- void testRejectApplyReplyWhenBucketHasBecomeInconsistent();
- void testRejectGetDiffReplyWhenBucketHasBecomeInconsistent();
- void testRejectMergeWhenLowUsedBitCount();
-
- void setUp() override;
-
- CPPUNIT_TEST_SUITE(MergeBlockingTest);
- CPPUNIT_TEST(testRejectMergeForInconsistentInnerBucket);
- CPPUNIT_TEST(testRejectMergeForInconsistentLeafBucket);
- CPPUNIT_TEST(testRejectGetBucketDiffWithInconsistentBucket);
- CPPUNIT_TEST(testRejectApplyDiffWhenBucketHasBecomeInconsistent);
- CPPUNIT_TEST(testRejectApplyReplyWhenBucketHasBecomeInconsistent);
- CPPUNIT_TEST(testRejectGetDiffReplyWhenBucketHasBecomeInconsistent);
- CPPUNIT_TEST(testRejectMergeWhenLowUsedBitCount);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(MergeBlockingTest);
-
void
-MergeBlockingTest::setUp()
+MergeBlockingTest::SetUp()
{
setupDisks();
}
@@ -67,25 +44,18 @@ assignCommandMeta(api::StorageCommand& msg) {
std::vector<api::MergeBucketCommand::Node>
getNodes() {
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(0);
- nodes.push_back(1);
- return nodes;
+ return std::vector<api::MergeBucketCommand::Node>({0, 1});
}
std::vector<api::MergeBucketCommand::Node>
getNodesWithForwarding() {
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(0);
- nodes.push_back(1);
- nodes.push_back(2);
- return nodes;
+ return std::vector<api::MergeBucketCommand::Node>({0, 1, 2});
}
std::shared_ptr<api::MergeBucketCommand>
createMerge(const document::BucketId& bucket) {
- std::shared_ptr<api::MergeBucketCommand> cmd(
- new api::MergeBucketCommand(makeDocumentBucket(bucket), getNodes(), api::Timestamp(1000)));
+ auto cmd = std::make_shared<api::MergeBucketCommand>(
+ makeDocumentBucket(bucket), getNodes(), api::Timestamp(1000));
assignCommandMeta(*cmd);
return cmd;
}
@@ -94,8 +64,8 @@ std::shared_ptr<api::GetBucketDiffCommand>
createGetDiff(const document::BucketId& bucket,
const std::vector<api::MergeBucketCommand::Node>& nodes)
{
- std::shared_ptr<api::GetBucketDiffCommand> cmd(
- new api::GetBucketDiffCommand(makeDocumentBucket(bucket), nodes, api::Timestamp(1000)));
+ auto cmd = std::make_shared<api::GetBucketDiffCommand>(
+ makeDocumentBucket(bucket), nodes, api::Timestamp(1000));
assignCommandMeta(*cmd);
return cmd;
}
@@ -103,8 +73,7 @@ createGetDiff(const document::BucketId& bucket,
std::shared_ptr<api::ApplyBucketDiffCommand>
createApplyDiff(const document::BucketId& bucket,
const std::vector<api::MergeBucketCommand::Node>& nodes) {
- std::shared_ptr<api::ApplyBucketDiffCommand> cmd(
- new api::ApplyBucketDiffCommand(makeDocumentBucket(bucket), nodes, 1024*1024));
+ auto cmd = std::make_shared<api::ApplyBucketDiffCommand>(makeDocumentBucket(bucket), nodes, 1024*1024);
assignCommandMeta(*cmd);
return cmd;
}
@@ -115,127 +84,104 @@ const document::BucketId innerBucket2(15, 1);
}
-void
-MergeBlockingTest::testRejectMergeForInconsistentInnerBucket()
-{
- TestFileStorComponents c(*this, "testRejectMergeForInconsistentInnerBucket");
+TEST_F(MergeBlockingTest, reject_merge_for_inconsistent_inner_bucket) {
+ TestFileStorComponents c(*this);
createBucket(leafBucket);
- std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(innerBucket));
+ auto cmd = createMerge(innerBucket);
c.top.sendDown(cmd);
- expectAbortedReply<api::MergeBucketReply>(c.top);
- CPPUNIT_ASSERT(!bucketExistsInDb(innerBucket));
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::MergeBucketReply>(c.top));
+ EXPECT_FALSE(bucketExistsInDb(innerBucket));
}
-void
-MergeBlockingTest::testRejectMergeForInconsistentLeafBucket()
-{
- TestFileStorComponents c(*this, "testRejectMergeForInconsistentInnerBucket");
+TEST_F(MergeBlockingTest, reject_merge_for_inconsistent_leaf_bucket) {
+ TestFileStorComponents c(*this);
createBucket(innerBucket);
- std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(leafBucket));
+ auto cmd = createMerge(leafBucket);
c.top.sendDown(cmd);
- expectAbortedReply<api::MergeBucketReply>(c.top);
- CPPUNIT_ASSERT(!bucketExistsInDb(leafBucket));
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::MergeBucketReply>(c.top));
+ EXPECT_FALSE(bucketExistsInDb(leafBucket));
}
-void
-MergeBlockingTest::testRejectGetBucketDiffWithInconsistentBucket()
-{
- TestFileStorComponents c(*this, "testRejectGetBucketDiffWithInconsistentBucket");
- CPPUNIT_ASSERT(innerBucket.contains(leafBucket));
+TEST_F(MergeBlockingTest, reject_get_diff_with_inconsistent_bucket) {
+ TestFileStorComponents c(*this);
+ ASSERT_TRUE(innerBucket.contains(leafBucket));
createBucket(innerBucket);
- std::shared_ptr<api::GetBucketDiffCommand> cmd(createGetDiff(leafBucket, getNodes()));
+ auto cmd = createGetDiff(leafBucket, getNodes());
c.top.sendDown(cmd);
- expectAbortedReply<api::GetBucketDiffReply>(c.top);
- CPPUNIT_ASSERT(!bucketExistsInDb(leafBucket));
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::GetBucketDiffReply>(c.top));
+ EXPECT_FALSE(bucketExistsInDb(leafBucket));
}
-void
-MergeBlockingTest::testRejectApplyDiffWhenBucketHasBecomeInconsistent()
-{
- TestFileStorComponents c(*this, "testRejectApplyDiffWhenBucketHasBecomeInconsistent");
+TEST_F(MergeBlockingTest, reject_apply_diff_when_bucket_has_become_inconsistent) {
+ TestFileStorComponents c(*this);
createBucket(leafBucket);
createBucket(innerBucket);
- std::shared_ptr<api::ApplyBucketDiffCommand> applyDiff(
- createApplyDiff(innerBucket, getNodes()));
+ auto applyDiff = createApplyDiff(innerBucket, getNodes());
c.top.sendDown(applyDiff);
- expectAbortedReply<api::ApplyBucketDiffReply>(c.top);
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::ApplyBucketDiffReply>(c.top));
}
-void
-MergeBlockingTest::testRejectApplyReplyWhenBucketHasBecomeInconsistent()
-{
- TestFileStorComponents c(*this, "testRejectApplyReplyWhenBucketHasBecomeInconsistent");
+TEST_F(MergeBlockingTest, reject_apply_diff_reply_when_bucket_has_become_inconsistent) {
+ TestFileStorComponents c(*this);
createBucket(innerBucket);
- std::shared_ptr<api::ApplyBucketDiffCommand> applyDiff(
- createApplyDiff(innerBucket, getNodesWithForwarding()));
+ auto applyDiff = createApplyDiff(innerBucket, getNodesWithForwarding());
c.top.sendDown(applyDiff);
c.top.waitForMessages(1, MSG_WAIT_TIME);
- api::StorageMessage::SP fwdDiff(
- c.top.getAndRemoveMessage(api::MessageType::APPLYBUCKETDIFF));
- api::ApplyBucketDiffCommand& diffCmd(
- dynamic_cast<api::ApplyBucketDiffCommand&>(*fwdDiff));
+ auto fwdDiff = c.top.getAndRemoveMessage(api::MessageType::APPLYBUCKETDIFF);
+ auto& diffCmd = dynamic_cast<api::ApplyBucketDiffCommand&>(*fwdDiff);
- api::ApplyBucketDiffReply::SP diffReply(
- new api::ApplyBucketDiffReply(diffCmd));
+ auto diffReply = std::make_shared<api::ApplyBucketDiffReply>(diffCmd);
createBucket(leafBucket);
c.top.sendDown(diffReply);
- expectAbortedReply<api::ApplyBucketDiffReply>(c.top);
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::ApplyBucketDiffReply>(c.top));
}
-void
-MergeBlockingTest::testRejectGetDiffReplyWhenBucketHasBecomeInconsistent()
-{
- TestFileStorComponents c(*this, "testRejectGetDiffReplyWhenBucketHasBecomeInconsistent");
+TEST_F(MergeBlockingTest, reject_get_diff_reply_when_bucket_has_become_inconsistent) {
+ TestFileStorComponents c(*this);
createBucket(innerBucket);
- std::shared_ptr<api::GetBucketDiffCommand> getDiff(
- createGetDiff(innerBucket, getNodesWithForwarding()));
+ auto getDiff = createGetDiff(innerBucket, getNodesWithForwarding());
c.top.sendDown(getDiff);
c.top.waitForMessages(1, MSG_WAIT_TIME);
- api::StorageMessage::SP fwdDiff(
- c.top.getAndRemoveMessage(api::MessageType::GETBUCKETDIFF));
- api::GetBucketDiffCommand& diffCmd(
- dynamic_cast<api::GetBucketDiffCommand&>(*fwdDiff));
+ auto fwdDiff = c.top.getAndRemoveMessage(api::MessageType::GETBUCKETDIFF);
+ auto& diffCmd = dynamic_cast<api::GetBucketDiffCommand&>(*fwdDiff);
- api::GetBucketDiffReply::SP diffReply(
- new api::GetBucketDiffReply(diffCmd));
+ auto diffReply = std::make_shared<api::GetBucketDiffReply>(diffCmd);
createBucket(innerBucket2);
c.top.sendDown(diffReply);
- expectAbortedReply<api::GetBucketDiffReply>(c.top);
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::GetBucketDiffReply>(c.top));
}
/**
* Test case for buckets in ticket 6389558, comment #4.
*/
-void
-MergeBlockingTest::testRejectMergeWhenLowUsedBitCount()
-{
+TEST_F(MergeBlockingTest, reject_merge_when_low_used_bit_count) {
document::BucketId superBucket(1, 0x1);
document::BucketId subBucket(2, 0x1);
- CPPUNIT_ASSERT(superBucket.contains(subBucket));
+ ASSERT_TRUE(superBucket.contains(subBucket));
- TestFileStorComponents c(*this, "testRejectMergeWithInconsistentBucket");
+ TestFileStorComponents c(*this);
createBucket(superBucket);
- std::shared_ptr<api::MergeBucketCommand> cmd(createMerge(subBucket));
+ auto cmd = createMerge(subBucket);
c.top.sendDown(cmd);
- expectAbortedReply<api::MergeBucketReply>(c.top);
- CPPUNIT_ASSERT(!bucketExistsInDb(subBucket));
+ ASSERT_NO_FATAL_FAILURE(expectAbortedReply<api::MergeBucketReply>(c.top));
+ EXPECT_FALSE(bucketExistsInDb(subBucket));
}
} // ns storage
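
For reference, the fixture pattern the MergeBlockingTest conversion above follows: a struct derived from ::testing::Test (here indirectly via FileStorTestFixture) plus TEST_F replaces the CppUnit suite declaration and registration macros, and SetUp() replaces setUp(). A minimal sketch under purely illustrative names (not from this patch; assumes gtest_main):

#include <gtest/gtest.h>
#include <vector>

// Illustrative fixture only. SetUp() runs before every TEST_F body, so no
// explicit test registration boilerplate is needed.
struct MergeFixtureSketch : ::testing::Test {
    std::vector<int> nodes;
    void SetUp() override { nodes = {0, 1}; }
};

TEST_F(MergeFixtureSketch, fixture_is_initialized_per_test) {
    ASSERT_EQ(2u, nodes.size());
    EXPECT_EQ(0, nodes[0]);
}

TEST_F(MergeFixtureSketch, each_test_gets_a_fresh_fixture_instance) {
    nodes.push_back(2);
    EXPECT_EQ(3u, nodes.size());
}
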
diff --git a/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp b/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp
index 7895d9e4cd0..1660fed9e38 100644
--- a/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp
+++ b/storage/src/tests/persistence/filestorage/modifiedbucketcheckertest.cpp
@@ -1,39 +1,25 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <tests/common/testhelper.h>
-#include <tests/common/storagelinktest.h>
+#include <tests/common/dummystoragelink.h>
#include <tests/common/teststorageapp.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
#include <vespa/config/common/exceptions.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace ::testing;
namespace storage {
-class ModifiedBucketCheckerTest : public CppUnit::TestFixture
-{
-public:
+struct ModifiedBucketCheckerTest : Test {
enum {
MESSAGE_WAIT_TIME = 60*2
};
- void setUp() override;
- void tearDown() override;
-
- void testModifiedBucketThreadSendsRecheckBucketCommands();
- void testDoNotCheckModifiedBucketsIfAlreadyPending();
- void testBucketCheckerOnlySwallowsRecheckBucketReplies();
- void testRecheckRequestsAreChunked();
- void testInvalidChunkSizeConfigIsRejected();
-
- CPPUNIT_TEST_SUITE(ModifiedBucketCheckerTest);
- CPPUNIT_TEST(testModifiedBucketThreadSendsRecheckBucketCommands);
- CPPUNIT_TEST(testDoNotCheckModifiedBucketsIfAlreadyPending);
- CPPUNIT_TEST(testBucketCheckerOnlySwallowsRecheckBucketReplies);
- CPPUNIT_TEST(testRecheckRequestsAreChunked);
- CPPUNIT_TEST(testInvalidChunkSizeConfigIsRejected);
- CPPUNIT_TEST_SUITE_END();
-private:
+ void SetUp() override;
+ void TearDown() override;
+
spi::dummy::DummyPersistence& getDummyPersistence() {
return static_cast<spi::dummy::DummyPersistence&>(
_node->getPersistenceProvider());
@@ -51,10 +37,8 @@ private:
std::unique_ptr<vdstestlib::DirConfig> _config;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(ModifiedBucketCheckerTest);
-
void
-ModifiedBucketCheckerTest::setUp()
+ModifiedBucketCheckerTest::SetUp()
{
_config.reset(new vdstestlib::DirConfig(getStandardConfig(true)));
_node.reset(new TestServiceLayerApp(DiskCount(1), NodeIndex(0),
@@ -71,12 +55,12 @@ ModifiedBucketCheckerTest::setUp()
}
void
-ModifiedBucketCheckerTest::tearDown()
+ModifiedBucketCheckerTest::TearDown()
{
_top->close();
- _top.reset(0);
- _node.reset(0);
- _config.reset(0);
+ _top.reset();
+ _node.reset();
+ _config.reset();
}
void
@@ -95,10 +79,8 @@ ModifiedBucketCheckerTest::replyToAll(
uint32_t firstBucket)
{
for (uint32_t i = 0; i < messages.size(); ++i) {
- RecheckBucketInfoCommand& cmd(
- dynamic_cast<RecheckBucketInfoCommand&>(*messages[i]));
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, i+firstBucket),
- cmd.getBucketId());
+ auto& cmd = dynamic_cast<RecheckBucketInfoCommand&>(*messages[i]);
+ ASSERT_EQ(document::BucketId(16, i + firstBucket), cmd.getBucketId());
_bottom->sendUp(cmd.makeReply());
}
}
@@ -108,114 +90,94 @@ ModifiedBucketCheckerTest::expectCommandsAndSendReplies(
uint32_t count, uint32_t firstBucket)
{
std::vector<api::StorageMessage::SP> messages(_bottom->getCommandsOnce());
- CPPUNIT_ASSERT_EQUAL(size_t(count), messages.size());
+ ASSERT_EQ(count, messages.size());
replyToAll(messages, firstBucket);
}
-void
-ModifiedBucketCheckerTest::testModifiedBucketThreadSendsRecheckBucketCommands()
-{
+TEST_F(ModifiedBucketCheckerTest, modified_bucket_thread_sends_recheck_bucket_commands) {
_top->open(); // Multi-threaded test
modifyBuckets(3, 0);
// Should now get 3 RecheckBucketInfo commands down the dummy link.
_bottom->waitForMessages(3, MESSAGE_WAIT_TIME);
- expectCommandsAndSendReplies(3, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(3, 0));
// No replies should reach top link
- CPPUNIT_ASSERT_EQUAL(size_t(0), _top->getNumReplies());
+ EXPECT_EQ(0, _top->getNumReplies());
}
-void
-ModifiedBucketCheckerTest::testDoNotCheckModifiedBucketsIfAlreadyPending()
-{
+TEST_F(ModifiedBucketCheckerTest, do_not_check_modified_buckets_if_already_pending) {
_handler->setUnitTestingSingleThreadedMode();
_top->open();
modifyBuckets(3, 0);
_handler->tick();
- std::vector<api::StorageMessage::SP> messages(_bottom->getCommandsOnce());
- CPPUNIT_ASSERT_EQUAL(size_t(3), messages.size());
+ auto messages = _bottom->getCommandsOnce();
+ ASSERT_EQ(3, messages.size());
modifyBuckets(3, 3);
_handler->tick();
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
// After replies received, tick should send new requests again.
- replyToAll(messages, 0);
+ ASSERT_NO_FATAL_FAILURE(replyToAll(messages, 0));
_handler->tick(); // global bucket space ==> nothing to do
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
_handler->tick();
- expectCommandsAndSendReplies(3, 3);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(3, 3));
}
-void
-ModifiedBucketCheckerTest::testBucketCheckerOnlySwallowsRecheckBucketReplies()
-{
+TEST_F(ModifiedBucketCheckerTest, bucket_checker_only_swallows_recheck_bucket_replies) {
_top->open();
DestroyIteratorCommand cmd(spi::IteratorId(123));
_bottom->sendUp(api::StorageMessage::SP(cmd.makeReply()));
- CPPUNIT_ASSERT_EQUAL(size_t(1), _top->getNumReplies());
+ ASSERT_EQ(1, _top->getNumReplies());
}
-void
-ModifiedBucketCheckerTest::testRecheckRequestsAreChunked()
-{
+TEST_F(ModifiedBucketCheckerTest, recheck_requests_are_chunked) {
namespace cfgns = vespa::config::content::core;
_handler->setUnitTestingSingleThreadedMode();
_top->open();
cfgns::StorServerConfigBuilder cfgBuilder;
cfgBuilder.bucketRecheckingChunkSize = 2;
- _handler->configure(std::unique_ptr<cfgns::StorServerConfig>(
- new cfgns::StorServerConfig(cfgBuilder)));
+ _handler->configure(std::make_unique<cfgns::StorServerConfig>(cfgBuilder));
modifyBuckets(5, 0);
_handler->tick();
modifyBuckets(1, 10); // should not be checked yet;
// Rechecks should now be done in 3 chunks of 2, 2 and 1 each, respectively.
- expectCommandsAndSendReplies(2, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(2, 0));
_handler->tick();
- expectCommandsAndSendReplies(2, 2);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(2, 2));
_handler->tick();
- expectCommandsAndSendReplies(1, 4);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(1, 4));
_handler->tick(); // global bucket space ==> nothing to do
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
// New round of fetching
_handler->tick();
- expectCommandsAndSendReplies(1, 10);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(1, 10));
_handler->tick(); // global bucket space ==> nothing to do
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
// And done!
_handler->tick();
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
_handler->tick(); // global bucket space ==> nothing to do
- expectCommandsAndSendReplies(0, 0);
+ ASSERT_NO_FATAL_FAILURE(expectCommandsAndSendReplies(0, 0));
}
-
-void
-ModifiedBucketCheckerTest::testInvalidChunkSizeConfigIsRejected()
-{
+TEST_F(ModifiedBucketCheckerTest, invalid_chunk_size_config_is_rejected) {
namespace cfgns = vespa::config::content::core;
_handler->setUnitTestingSingleThreadedMode();
_top->open();
cfgns::StorServerConfigBuilder cfgBuilder;
cfgBuilder.bucketRecheckingChunkSize = 0;
- try {
- _handler->configure(std::unique_ptr<cfgns::StorServerConfig>(
- new cfgns::StorServerConfig(cfgBuilder)));
- CPPUNIT_FAIL("Expected bad config to be rejected");
- } catch (const config::InvalidConfigException&) {
- // Happy days
- } catch (...) {
- CPPUNIT_FAIL("Got unexpected exception");
- }
+ EXPECT_THROW(_handler->configure(std::make_unique<cfgns::StorServerConfig>(cfgBuilder)),
+ config::InvalidConfigException);
}
// RecheckBucketInfoCommand handling is done in persistence threads,
// so that functionality is tested in the filestor tests.
} // ns storage
-
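
The EXPECT_THROW conversion at the end of this file replaces the manual try/catch plus CPPUNIT_FAIL pattern with a single macro. A minimal standalone sketch of that behavior, with an illustrative validation function that is not part of this patch (assumes gtest_main):

#include <gtest/gtest.h>
#include <stdexcept>

// Illustrative stand-in for a configure() call that rejects bad input.
void configureChunkSize(int chunkSize) {
    if (chunkSize <= 0) {
        throw std::invalid_argument("chunk size must be > 0");
    }
}

TEST(ExpectThrowSketch, invalid_chunk_size_is_rejected) {
    // Fails the test if no exception, or an exception of a different type, is thrown.
    EXPECT_THROW(configureChunkSize(0), std::invalid_argument);
    // The happy path must not throw.
    EXPECT_NO_THROW(configureChunkSize(2));
}
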
diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
index e12f48bcdea..0d43f8a9020 100644
--- a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/messages.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
@@ -9,11 +8,13 @@
#include <vespa/vespalib/util/barrier.h>
#include <vespa/vespalib/util/thread.h>
#include <vespa/vespalib/stllike/hash_set_insert.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/log/log.h>
LOG_SETUP(".operationabortingtest");
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
@@ -78,9 +79,7 @@ spi::LoadType defaultLoadType(0, "default");
}
-class OperationAbortingTest : public FileStorTestFixture
-{
-public:
+struct OperationAbortingTest : FileStorTestFixture {
spi::PersistenceProvider::UP _dummyProvider;
BlockingMockProvider* _blockingProvider;
std::unique_ptr<vespalib::Barrier> _queueBarrier;
@@ -99,32 +98,14 @@ public:
const std::vector<document::BucketId>& okReplies,
const std::vector<document::BucketId>& abortedGetDiffs);
- void doTestSpecificOperationsNotAborted(const char* testName,
- const std::vector<api::StorageMessage::SP>& msgs,
+ void doTestSpecificOperationsNotAborted(const std::vector<api::StorageMessage::SP>& msgs,
bool shouldCreateBucketInitially);
api::BucketInfo getBucketInfoFromDB(const document::BucketId&) const;
-public:
- void testAbortMessageClearsRelevantQueuedOperations();
- void testWaitForCurrentOperationCompletionForAbortedBucket();
- void testDoNotAbortCreateBucketCommands();
- void testDoNotAbortRecheckBucketCommands();
- void testDoNotAbortDeleteBucketCommands();
-
- void setUp() override;
-
- CPPUNIT_TEST_SUITE(OperationAbortingTest);
- CPPUNIT_TEST(testAbortMessageClearsRelevantQueuedOperations);
- CPPUNIT_TEST(testWaitForCurrentOperationCompletionForAbortedBucket);
- CPPUNIT_TEST(testDoNotAbortCreateBucketCommands);
- CPPUNIT_TEST(testDoNotAbortRecheckBucketCommands);
- CPPUNIT_TEST(testDoNotAbortDeleteBucketCommands);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(OperationAbortingTest);
-
namespace {
template <typename T, typename Collection>
@@ -136,7 +117,7 @@ existsIn(const T& elem, const Collection& collection) {
}
void
-OperationAbortingTest::setUp()
+OperationAbortingTest::SetUp()
{
}
@@ -146,35 +127,34 @@ OperationAbortingTest::validateReplies(DummyStorageLink& link, size_t repliesTot
const std::vector<document::BucketId>& abortedGetDiffs)
{
link.waitForMessages(repliesTotal, MSG_WAIT_TIME);
- CPPUNIT_ASSERT_EQUAL(repliesTotal, link.getNumReplies());
+ ASSERT_EQ(repliesTotal, link.getNumReplies());
for (uint32_t i = 0; i < repliesTotal; ++i) {
api::StorageReply& reply(dynamic_cast<api::StorageReply&>(*link.getReply(i)));
- LOG(info, "Checking reply %s", reply.toString(true).c_str());
+ LOG(debug, "Checking reply %s", reply.toString(true).c_str());
switch (static_cast<uint32_t>(reply.getType().getId())) {
case api::MessageType::PUT_REPLY_ID:
case api::MessageType::CREATEBUCKET_REPLY_ID:
case api::MessageType::DELETEBUCKET_REPLY_ID:
case api::MessageType::GET_REPLY_ID:
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ ASSERT_EQ(api::ReturnCode::OK, resultOf(reply));
break;
case api::MessageType::GETBUCKETDIFF_REPLY_ID:
{
- api::GetBucketDiffReply& gr(
- static_cast<api::GetBucketDiffReply&>(reply));
+ auto& gr = static_cast<api::GetBucketDiffReply&>(reply);
if (existsIn(gr.getBucketId(), abortedGetDiffs)) {
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::ABORTED, resultOf(reply));
+ ASSERT_EQ(api::ReturnCode::ABORTED, resultOf(reply));
} else {
- CPPUNIT_ASSERT(existsIn(gr.getBucketId(), okReplies));
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ ASSERT_TRUE(existsIn(gr.getBucketId(), okReplies));
+ ASSERT_EQ(api::ReturnCode::OK, resultOf(reply));
}
break;
}
case api::MessageType::INTERNAL_REPLY_ID:
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(reply));
+ ASSERT_EQ(api::ReturnCode::OK, resultOf(reply));
break;
default:
- CPPUNIT_FAIL("got unknown reply type");
+ FAIL() << "got unknown reply type";
}
}
}
@@ -187,12 +167,12 @@ class ExplicitBucketSetPredicate : public AbortBucketOperationsCommand::AbortPre
bool doShouldAbort(const document::Bucket &bucket) const override;
public:
- ~ExplicitBucketSetPredicate();
+ ~ExplicitBucketSetPredicate() override;
template <typename Iterator>
ExplicitBucketSetPredicate(Iterator first, Iterator last)
: _bucketsToAbort(first, last)
- { }
+ {}
const BucketSet& getBucketsToAbort() const {
return _bucketsToAbort;
@@ -204,7 +184,7 @@ ExplicitBucketSetPredicate::doShouldAbort(const document::Bucket &bucket) const
return _bucketsToAbort.find(bucket.getBucketId()) != _bucketsToAbort.end();
}
-ExplicitBucketSetPredicate::~ExplicitBucketSetPredicate() { }
+ExplicitBucketSetPredicate::~ExplicitBucketSetPredicate() = default;
template <typename Container>
AbortBucketOperationsCommand::SP
@@ -216,18 +196,16 @@ makeAbortCmd(const Container& buckets)
}
-void
-OperationAbortingTest::testAbortMessageClearsRelevantQueuedOperations()
-{
+TEST_F(OperationAbortingTest, abort_message_clears_relevant_queued_operations) {
setupProviderAndBarriers(2);
- TestFileStorComponents c(*this, "testAbortMessageClearsRelevantQueuedOperations");
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 1);
createBucket(bucket);
- LOG(info, "Sending put to trigger thread barrier");
+ LOG(debug, "Sending put to trigger thread barrier");
c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
- LOG(info, "waiting for test and persistence thread to reach barriers");
+ LOG(debug, "waiting for test and persistence thread to reach barriers");
_queueBarrier->await();
- LOG(info, "barrier passed");
+ LOG(debug, "barrier passed");
/*
* All load we send down to filestor from now on will be enqueued, as the
* persistence thread is blocked.
@@ -235,12 +213,14 @@ OperationAbortingTest::testAbortMessageClearsRelevantQueuedOperations()
* Cannot abort the bucket we're blocking the thread on since we'll
* deadlock the test if we do.
*/
- std::vector<document::BucketId> bucketsToAbort;
- bucketsToAbort.push_back(document::BucketId(16, 3));
- bucketsToAbort.push_back(document::BucketId(16, 5));
- std::vector<document::BucketId> bucketsToKeep;
- bucketsToKeep.push_back(document::BucketId(16, 2));
- bucketsToKeep.push_back(document::BucketId(16, 4));
+ std::vector<document::BucketId> bucketsToAbort = {
+ document::BucketId(16, 3),
+ document::BucketId(16, 5)
+ };
+ std::vector<document::BucketId> bucketsToKeep = {
+ document::BucketId(16, 2),
+ document::BucketId(16, 4)
+ };
for (uint32_t i = 0; i < bucketsToAbort.size(); ++i) {
createBucket(bucketsToAbort[i]);
@@ -251,17 +231,17 @@ OperationAbortingTest::testAbortMessageClearsRelevantQueuedOperations()
c.sendDummyGetDiff(bucketsToKeep[i]);
}
- AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(bucketsToAbort));
+ auto abortCmd = makeAbortCmd(bucketsToAbort);
c.top.sendDown(abortCmd);
- LOG(info, "waiting on completion barrier");
+ LOG(debug, "waiting on completion barrier");
_completionBarrier->await();
// put+abort+get replies
size_t expectedMsgs(2 + bucketsToAbort.size() + bucketsToKeep.size());
- LOG(info, "barrier passed, waiting for %zu replies", expectedMsgs);
+ LOG(debug, "barrier passed, waiting for %zu replies", expectedMsgs);
- validateReplies(c.top, expectedMsgs, bucketsToKeep, bucketsToAbort);
+ ASSERT_NO_FATAL_FAILURE(validateReplies(c.top, expectedMsgs, bucketsToKeep, bucketsToAbort));
}
namespace {
@@ -302,29 +282,27 @@ public:
* impose sufficient ordering guarantees that it never provides false positives
* as long as the tested functionality is in fact correct.
*/
-void
-OperationAbortingTest::testWaitForCurrentOperationCompletionForAbortedBucket()
-{
+TEST_F(OperationAbortingTest, wait_for_current_operation_completion_for_aborted_bucket) {
setupProviderAndBarriers(3);
- TestFileStorComponents c(*this, "testWaitForCurrentOperationCompletionForAbortedBucket");
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 1);
createBucket(bucket);
- LOG(info, "Sending put to trigger thread barrier");
+ LOG(debug, "Sending put to trigger thread barrier");
c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
std::vector<document::BucketId> abortSet { bucket };
- AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(abortSet));
+ auto abortCmd = makeAbortCmd(abortSet);
SendTask sendTask(abortCmd, *_queueBarrier, c.top);
vespalib::Thread thread(sendTask);
thread.start();
- LOG(info, "waiting for threads to reach barriers");
+ LOG(debug, "waiting for threads to reach barriers");
_queueBarrier->await();
- LOG(info, "barrier passed");
+ LOG(debug, "barrier passed");
- LOG(info, "waiting on completion barrier");
+ LOG(debug, "waiting on completion barrier");
_completionBarrier->await();
thread.stop();
@@ -333,31 +311,27 @@ OperationAbortingTest::testWaitForCurrentOperationCompletionForAbortedBucket()
// If waiting works, put reply shall always be ordered before the internal
// reply, as it must finish processing fully before the abort returns.
c.top.waitForMessages(2, MSG_WAIT_TIME);
- CPPUNIT_ASSERT_EQUAL(size_t(2), c.top.getNumReplies());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::PUT_REPLY, c.top.getReply(0)->getType());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::INTERNAL_REPLY, c.top.getReply(1)->getType());
+ ASSERT_EQ(2, c.top.getNumReplies());
+ EXPECT_EQ(api::MessageType::PUT_REPLY, c.top.getReply(0)->getType());
+ EXPECT_EQ(api::MessageType::INTERNAL_REPLY, c.top.getReply(1)->getType());
}
-void
-OperationAbortingTest::testDoNotAbortCreateBucketCommands()
-{
+TEST_F(OperationAbortingTest, do_not_abort_create_bucket_commands) {
document::BucketId bucket(16, 1);
std::vector<api::StorageMessage::SP> msgs;
- msgs.push_back(api::StorageMessage::SP(new api::CreateBucketCommand(makeDocumentBucket(bucket))));
+ msgs.emplace_back(std::make_shared<api::CreateBucketCommand>(makeDocumentBucket(bucket)));
- bool shouldCreateBucketInitially(false);
- doTestSpecificOperationsNotAborted("testDoNotAbortCreateBucketCommands", msgs, shouldCreateBucketInitially);
+ bool shouldCreateBucketInitially = false;
+ doTestSpecificOperationsNotAborted(msgs, shouldCreateBucketInitially);
}
-void
-OperationAbortingTest::testDoNotAbortRecheckBucketCommands()
-{
+TEST_F(OperationAbortingTest, do_not_abort_recheck_bucket_commands) {
document::BucketId bucket(16, 1);
std::vector<api::StorageMessage::SP> msgs;
- msgs.push_back(api::StorageMessage::SP(new RecheckBucketInfoCommand(makeDocumentBucket(bucket))));
+ msgs.emplace_back(std::make_shared<RecheckBucketInfoCommand>(makeDocumentBucket(bucket)));
- bool shouldCreateBucketInitially(true);
- doTestSpecificOperationsNotAborted("testDoNotAbortRecheckBucketCommands", msgs, shouldCreateBucketInitially);
+ bool shouldCreateBucketInitially = true;
+ doTestSpecificOperationsNotAborted(msgs, shouldCreateBucketInitially);
}
api::BucketInfo
@@ -365,29 +339,25 @@ OperationAbortingTest::getBucketInfoFromDB(const document::BucketId& id) const
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(id, "foo", StorBucketDatabase::CREATE_IF_NONEXISTING));
- CPPUNIT_ASSERT(entry.exist());
+ assert(entry.exist());
return entry->info;
}
-void
-OperationAbortingTest::testDoNotAbortDeleteBucketCommands()
-{
+TEST_F(OperationAbortingTest, do_not_abort_delete_bucket_commands) {
document::BucketId bucket(16, 1);
std::vector<api::StorageMessage::SP> msgs;
- api::DeleteBucketCommand::SP cmd(new api::DeleteBucketCommand(makeDocumentBucket(bucket)));
- msgs.push_back(cmd);
+ msgs.emplace_back(std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bucket)));
- bool shouldCreateBucketInitially(true);
- doTestSpecificOperationsNotAborted("testDoNotAbortRecheckBucketCommands", msgs, shouldCreateBucketInitially);
+ bool shouldCreateBucketInitially = true;
+ doTestSpecificOperationsNotAborted(msgs, shouldCreateBucketInitially);
}
void
-OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
- const std::vector<api::StorageMessage::SP>& msgs,
+OperationAbortingTest::doTestSpecificOperationsNotAborted(const std::vector<api::StorageMessage::SP>& msgs,
bool shouldCreateBucketInitially)
{
setupProviderAndBarriers(2);
- TestFileStorComponents c(*this, testName);
+ TestFileStorComponents c(*this);
document::BucketId bucket(16, 1);
document::BucketId blockerBucket(16, 2);
@@ -395,11 +365,11 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
createBucket(bucket);
}
createBucket(blockerBucket);
- LOG(info, "Sending put to trigger thread barrier");
+ LOG(debug, "Sending put to trigger thread barrier");
c.sendPut(blockerBucket, DocumentIndex(0), PutTimestamp(1000));
- LOG(info, "waiting for test and persistence thread to reach barriers");
+ LOG(debug, "waiting for test and persistence thread to reach barriers");
_queueBarrier->await();
- LOG(info, "barrier passed");
+ LOG(debug, "barrier passed");
uint32_t expectedCreateBuckets = 0;
uint32_t expectedDeleteBuckets = 0;
@@ -413,7 +383,7 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
break;
case api::MessageType::DELETEBUCKET_ID:
{
- api::DeleteBucketCommand& delCmd(dynamic_cast<api::DeleteBucketCommand&>(*msgs[i]));
+ auto& delCmd = dynamic_cast<api::DeleteBucketCommand&>(*msgs[i]);
delCmd.setBucketInfo(getBucketInfoFromDB(delCmd.getBucketId()));
}
++expectedDeleteBuckets;
@@ -424,7 +394,7 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
++expectedBucketInfoInvocations;
break;
default:
- CPPUNIT_FAIL("unsupported message type");
+ FAIL() << "unsupported message type";
}
c.top.sendDown(msgs[i]);
}
@@ -433,7 +403,7 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
AbortBucketOperationsCommand::SP abortCmd(makeAbortCmd(abortSet));
c.top.sendDown(abortCmd);
- LOG(info, "waiting on completion barrier");
+ LOG(debug, "waiting on completion barrier");
_completionBarrier->await();
// At this point, the recheck command is still either enqueued, is processing
@@ -443,7 +413,7 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
// put+abort+get + any other creates/deletes/rechecks
size_t expectedMsgs(3 + expectedCreateBuckets + expectedDeleteBuckets + expectedRecheckReplies);
- LOG(info, "barrier passed, waiting for %zu replies", expectedMsgs);
+ LOG(debug, "barrier passed, waiting for %zu replies", expectedMsgs);
std::vector<document::BucketId> okReplies;
okReplies.push_back(bucket);
@@ -451,10 +421,10 @@ OperationAbortingTest::doTestSpecificOperationsNotAborted(const char* testName,
std::vector<document::BucketId> abortedGetDiffs;
validateReplies(c.top, expectedMsgs, okReplies, abortedGetDiffs);
- CPPUNIT_ASSERT_EQUAL(expectedBucketInfoInvocations, _blockingProvider->_bucketInfoInvocations);
- CPPUNIT_ASSERT_EQUAL(expectedCreateBuckets + (shouldCreateBucketInitially ? 2 : 1),
- _blockingProvider->_createBucketInvocations);
- CPPUNIT_ASSERT_EQUAL(expectedDeleteBuckets, _blockingProvider->_deleteBucketInvocations);
+ ASSERT_EQ(expectedBucketInfoInvocations, _blockingProvider->_bucketInfoInvocations);
+ ASSERT_EQ(expectedCreateBuckets + (shouldCreateBucketInitially ? 2 : 1),
+ _blockingProvider->_createBucketInvocations);
+ ASSERT_EQ(expectedDeleteBuckets, _blockingProvider->_deleteBucketInvocations);
}
} // storage
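
Editor's note: the hunks above follow the CppUnit-to-Google-Test conversion pattern used throughout this change — the CPPUNIT_TEST_SUITE registration boilerplate disappears in favour of TEST_F, assertions become ASSERT_*/EXPECT_* with streamed diagnostics, and helpers no longer take a test-name string. A minimal, self-contained sketch of that pattern (the fixture and test names below are illustrative only, not taken from the patch; link against gtest_main):

    #include <gtest/gtest.h>
    #include <vector>

    // Hypothetical fixture; mirrors how FileStorTestFixture subclasses drop the
    // CPPUNIT_TEST_SUITE registration and simply inherit the GTest fixture base.
    struct DemoAbortTest : ::testing::Test {
        std::vector<int> _queue;
        void SetUp() override { _queue = {1, 2, 3}; }   // replaces CppUnit's setUp()
    };

    TEST_F(DemoAbortTest, do_not_abort_queued_entries) { // replaces CPPUNIT_TEST(...) registration
        ASSERT_EQ(3u, _queue.size());                    // replaces CPPUNIT_ASSERT_EQUAL(size_t(3), ...)
        if (_queue.empty()) {
            FAIL() << "queue unexpectedly empty";        // replaces CPPUNIT_FAIL("...")
        }
        EXPECT_EQ(1, _queue.front());                    // non-fatal check; the test continues on failure
    }
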
diff --git a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
index 961d2628052..787a63a618c 100644
--- a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
+++ b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/persistence/spi/test.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
@@ -10,26 +9,16 @@
using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class SanityCheckedDeleteTest : public FileStorTestFixture {
-public:
- void delete_bucket_fails_when_provider_out_of_sync();
- void differing_document_sizes_not_considered_out_of_sync();
-
- CPPUNIT_TEST_SUITE(SanityCheckedDeleteTest);
- CPPUNIT_TEST(delete_bucket_fails_when_provider_out_of_sync);
- CPPUNIT_TEST(differing_document_sizes_not_considered_out_of_sync);
- CPPUNIT_TEST_SUITE_END();
-
+struct SanityCheckedDeleteTest : FileStorTestFixture {
spi::BucketInfo send_put_and_get_bucket_info(TestFileStorComponents &c, const spi::Bucket &spiBucket);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(SanityCheckedDeleteTest);
-
-void SanityCheckedDeleteTest::delete_bucket_fails_when_provider_out_of_sync() {
- TestFileStorComponents c(*this, "delete_bucket_fails_when_provider_out_of_sync");
+TEST_F(SanityCheckedDeleteTest, delete_bucket_fails_when_provider_out_of_sync) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(8, 123);
document::BucketId syncBucket(8, 234);
spi::Bucket spiBucket(makeSpiBucket(bucket));
@@ -55,11 +44,10 @@ void SanityCheckedDeleteTest::delete_bucket_fails_when_provider_out_of_sync() {
c.top.sendDown(cmd);
c.top.waitForMessages(1, MSG_WAIT_TIME);
api::StorageMessage::SP reply(c.top.getReply(0));
- api::DeleteBucketReply& deleteReply(
- dynamic_cast<api::DeleteBucketReply&>(*reply));
+ auto& deleteReply = dynamic_cast<api::DeleteBucketReply&>(*reply);
// Reply happens in a filestor manager context and before the sanity
// check kicks in, meaning it will always be OK.
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, resultOf(deleteReply));
+ ASSERT_EQ(api::ReturnCode::OK, resultOf(deleteReply));
// At this point we do not know if the scheduled delete has been
// executed; it may still be in the persistence queue.
// Send a put to another bucket to serialize the operation (guaranteed
@@ -69,8 +57,8 @@ void SanityCheckedDeleteTest::delete_bucket_fails_when_provider_out_of_sync() {
// Should still be able to get identical bucket info for bucket.
spi::BucketInfoResult infoResult(
_node->getPersistenceProvider().getBucketInfo(spiBucket));
- CPPUNIT_ASSERT_MSG(infoResult.getErrorMessage(), !infoResult.hasError());
- CPPUNIT_ASSERT(infoBefore == infoResult.getBucketInfo());
+ ASSERT_FALSE(infoResult.hasError()) << infoResult.getErrorMessage();
+ EXPECT_TRUE(infoBefore == infoResult.getBucketInfo());
}
spi::BucketInfo SanityCheckedDeleteTest::send_put_and_get_bucket_info(
@@ -83,8 +71,8 @@ spi::BucketInfo SanityCheckedDeleteTest::send_put_and_get_bucket_info(
return _node->getPersistenceProvider().getBucketInfo(spiBucket).getBucketInfo();
}
-void SanityCheckedDeleteTest::differing_document_sizes_not_considered_out_of_sync() {
- TestFileStorComponents c(*this, "differing_document_sizes_not_considered_out_of_sync");
+TEST_F(SanityCheckedDeleteTest, differing_document_sizes_not_considered_out_of_sync) {
+ TestFileStorComponents c(*this);
document::BucketId bucket(8, 123);
spi::Bucket spiBucket(makeSpiBucket(bucket));
@@ -100,7 +88,7 @@ void SanityCheckedDeleteTest::differing_document_sizes_not_considered_out_of_syn
// Bucket should now well and truly be gone. Will trigger a getBucketInfo error response.
spi::BucketInfoResult info_post_delete(
_node->getPersistenceProvider().getBucketInfo(spiBucket));
- CPPUNIT_ASSERT_MSG(info_post_delete.getErrorMessage(), info_post_delete.hasError());
+ ASSERT_TRUE(info_post_delete.hasError()) << info_post_delete.getErrorMessage();
}
} // namespace storage
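
Editor's note: the sanitycheckeddeletetest.cpp hunks show how CPPUNIT_ASSERT_MSG(message, condition) becomes an ASSERT_*/EXPECT_* check with the message streamed after it — the argument order flips, with the condition first and the diagnostic following the <<. A hedged sketch of the idiom (the DemoResult type is invented for illustration; it only stands in for something like spi::BucketInfoResult):

    #include <gtest/gtest.h>
    #include <string>

    // Illustrative stand-in for a result object carrying an error flag and message.
    struct DemoResult {
        bool hasError() const { return false; }
        std::string getErrorMessage() const { return "no error"; }
    };

    TEST(DemoResultTest, error_reporting_idiom) {
        DemoResult result;
        // CppUnit: CPPUNIT_ASSERT_MSG(result.getErrorMessage(), !result.hasError());
        // GTest:   condition first, diagnostic streamed after; fatal on failure.
        ASSERT_FALSE(result.hasError()) << result.getErrorMessage();
        // EXPECT_* records the failure but lets the test keep running, which is
        // why plain value checks in the patch tend to use EXPECT_* rather than ASSERT_*.
        EXPECT_TRUE(result.getErrorMessage().find("error") != std::string::npos);
    }
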
diff --git a/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp b/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp
index 8e6340c930c..cda052c787a 100644
--- a/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp
+++ b/storage/src/tests/persistence/filestorage/singlebucketjointest.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/log/log.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
@@ -12,25 +11,15 @@
LOG_SETUP(".singlebucketjointest");
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class SingleBucketJoinTest : public FileStorTestFixture
-{
-public:
- void testPersistenceCanHandleSingleBucketJoin();
-
- CPPUNIT_TEST_SUITE(SingleBucketJoinTest);
- CPPUNIT_TEST(testPersistenceCanHandleSingleBucketJoin);
- CPPUNIT_TEST_SUITE_END();
+struct SingleBucketJoinTest : FileStorTestFixture {
};
-CPPUNIT_TEST_SUITE_REGISTRATION(SingleBucketJoinTest);
-
-void
-SingleBucketJoinTest::testPersistenceCanHandleSingleBucketJoin()
-{
- TestFileStorComponents c(*this, "testPersistenceCanHandleSingleBucketJoin");
+TEST_F(SingleBucketJoinTest, persistence_can_handle_single_bucket_join) {
+ TestFileStorComponents c(*this);
document::BucketId targetBucket(16, 1);
document::BucketId sourceBucket(17, 1);
@@ -47,7 +36,7 @@ SingleBucketJoinTest::testPersistenceCanHandleSingleBucketJoin()
c.top.sendDown(cmd);
// If single bucket join locking is not working properly, this
// will hang forever.
- expectOkReply<api::JoinBucketsReply>(c.top);
+ ASSERT_NO_FATAL_FAILURE(expectOkReply<api::JoinBucketsReply>(c.top));
}
} // namespace storage
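
Editor's note: the ASSERT_NO_FATAL_FAILURE wrapper around expectOkReply<...>() above is needed because a fatal ASSERT_* inside a helper function only aborts that helper, not the calling test; wrapping the call propagates the abort into the test body. A small sketch of the idiom (the helper and its check are hypothetical):

    #include <gtest/gtest.h>

    // Hypothetical helper that uses fatal assertions internally, in the same way
    // the fixture's expectOkReply<T>() helper does in the patch. Such helpers must
    // return void for ASSERT_* to be usable inside them.
    void expectPositive(int value) {
        ASSERT_GT(value, 0) << "value was not positive";  // fatal inside the helper only
    }

    TEST(DemoHelperTest, propagate_fatal_failure_from_helper) {
        int reply = 7;
        // Without the wrapper, a fatal failure in expectPositive() would return from
        // the helper while the rest of the test body kept executing.
        ASSERT_NO_FATAL_FAILURE(expectPositive(reply));
    }
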
diff --git a/storage/src/tests/persistence/mergehandlertest.cpp b/storage/src/tests/persistence/mergehandlertest.cpp
index 4378814d27b..0c291a179ae 100644
--- a/storage/src/tests/persistence/mergehandlertest.cpp
+++ b/storage/src/tests/persistence/mergehandlertest.cpp
@@ -2,23 +2,23 @@
#include <vespa/document/base/testdocman.h>
#include <vespa/storage/persistence/mergehandler.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <tests/persistence/persistencetestutils.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <tests/distributor/messagesenderstub.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/vespalib/objects/nbostream.h>
+#include <gmock/gmock.h>
#include <cmath>
#include <vespa/log/log.h>
LOG_SETUP(".test.persistence.handler.merge");
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-struct MergeHandlerTest : public SingleDiskPersistenceTestUtils
-{
+struct MergeHandlerTest : SingleDiskPersistenceTestUtils {
uint32_t _location; // Location used for all merge tests
document::Bucket _bucket; // Bucket used for all merge tests
uint64_t _maxTimestamp;
@@ -29,77 +29,16 @@ struct MergeHandlerTest : public SingleDiskPersistenceTestUtils
template <typename T>
std::shared_ptr<T> fetchSingleMessage();
- void setUp() override;
+ void SetUp() override;
enum ChainPos { FRONT, MIDDLE, BACK };
void setUpChain(ChainPos);
- // Test a regular merge bucket command fetching data, including
- // puts, removes, unrevertable removes & duplicates.
- void testMergeBucketCommand();
- // Test that a simplistic merge with nothing to actually merge,
- // sends get bucket diff through the entire chain of 3 nodes.
void testGetBucketDiffChain(bool midChain);
- void testGetBucketDiffMidChain() { testGetBucketDiffChain(true); }
- void testGetBucketDiffEndOfChain() { testGetBucketDiffChain(false); }
- // Test that a simplistic merge with nothing to actually merge,
- // sends apply bucket diff through the entire chain of 3 nodes.
void testApplyBucketDiffChain(bool midChain);
- void testApplyBucketDiffMidChain() { testApplyBucketDiffChain(true); }
- void testApplyBucketDiffEndOfChain() { testApplyBucketDiffChain(false); }
- // Test that a simplistic merge with one thing to actually merge,
- // sends correct commands and finish.
- void testMasterMessageFlow();
- // Test that a simplistic merge with 1 doc to actually merge,
- // sends apply bucket diff through the entire chain of 3 nodes.
- void testApplyBucketDiffChain();
- void testMergeUnrevertableRemove();
- void testChunkedApplyBucketDiff();
- void testChunkLimitPartiallyFilledDiff();
- void testMaxTimestamp();
- void testSPIFlushGuard();
- void testBucketNotFoundInDb();
- void testMergeProgressSafeGuard();
- void testSafeGuardNotInvokedWhenHasMaskChanges();
- void testEntryRemovedAfterGetBucketDiff();
-
- void testMergeBucketSPIFailures();
- void testGetBucketDiffSPIFailures();
- void testApplyBucketDiffSPIFailures();
- void testGetBucketDiffReplySPIFailures();
- void testApplyBucketDiffReplySPIFailures();
-
- void testRemoveFromDiff();
-
- void testRemovePutOnExistingTimestamp();
-
- CPPUNIT_TEST_SUITE(MergeHandlerTest);
- CPPUNIT_TEST(testMergeBucketCommand);
- CPPUNIT_TEST(testGetBucketDiffMidChain);
- CPPUNIT_TEST(testGetBucketDiffEndOfChain);
- CPPUNIT_TEST(testApplyBucketDiffMidChain);
- CPPUNIT_TEST(testApplyBucketDiffEndOfChain);
- CPPUNIT_TEST(testMasterMessageFlow);
- CPPUNIT_TEST(testMergeUnrevertableRemove);
- CPPUNIT_TEST(testChunkedApplyBucketDiff);
- CPPUNIT_TEST(testChunkLimitPartiallyFilledDiff);
- CPPUNIT_TEST(testMaxTimestamp);
- CPPUNIT_TEST(testSPIFlushGuard);
- CPPUNIT_TEST(testBucketNotFoundInDb);
- CPPUNIT_TEST(testMergeProgressSafeGuard);
- CPPUNIT_TEST(testSafeGuardNotInvokedWhenHasMaskChanges);
- CPPUNIT_TEST(testEntryRemovedAfterGetBucketDiff);
- CPPUNIT_TEST(testMergeBucketSPIFailures);
- CPPUNIT_TEST(testGetBucketDiffSPIFailures);
- CPPUNIT_TEST(testApplyBucketDiffSPIFailures);
- CPPUNIT_TEST(testGetBucketDiffReplySPIFailures);
- CPPUNIT_TEST(testApplyBucketDiffReplySPIFailures);
- CPPUNIT_TEST(testRemoveFromDiff);
- CPPUNIT_TEST(testRemovePutOnExistingTimestamp);
- CPPUNIT_TEST_SUITE_END();
// @TODO Add test to test that buildBucketInfo and mergeLists create minimal list (wrong sorting screws this up)
-private:
+
void fillDummyApplyDiff(std::vector<api::ApplyBucketDiffCommand::Entry>& diff);
std::shared_ptr<api::ApplyBucketDiffCommand> createDummyApplyDiff(
int timestampOffset,
@@ -119,7 +58,7 @@ private:
class HandlerInvoker
{
public:
- virtual ~HandlerInvoker() {}
+ virtual ~HandlerInvoker() = default;
virtual void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&) {}
virtual void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&) = 0;
virtual std::string afterInvoke(MergeHandlerTest&, MergeHandler&) = 0;
@@ -176,7 +115,7 @@ private:
{
public:
HandleGetBucketDiffReplyInvoker();
- ~HandleGetBucketDiffReplyInvoker();
+ ~HandleGetBucketDiffReplyInvoker() override;
void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&) override;
void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&) override;
std::string afterInvoke(MergeHandlerTest&, MergeHandler&) override;
@@ -200,7 +139,7 @@ private:
{
public:
HandleApplyBucketDiffReplyInvoker();
- ~HandleApplyBucketDiffReplyInvoker();
+ ~HandleApplyBucketDiffReplyInvoker() override;
void beforeInvoke(MergeHandlerTest&, MergeHandler&, spi::Context&) override;
void invoke(MergeHandlerTest&, MergeHandler&, spi::Context&) override;
std::string afterInvoke(MergeHandlerTest&, MergeHandler&) override;
@@ -217,33 +156,30 @@ private:
const ExpectedExceptionSpec& spec);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(MergeHandlerTest);
-
-
-MergeHandlerTest::HandleGetBucketDiffReplyInvoker::HandleGetBucketDiffReplyInvoker() {}
-MergeHandlerTest::HandleGetBucketDiffReplyInvoker::~HandleGetBucketDiffReplyInvoker() {}
+MergeHandlerTest::HandleGetBucketDiffReplyInvoker::HandleGetBucketDiffReplyInvoker() = default;
+MergeHandlerTest::HandleGetBucketDiffReplyInvoker::~HandleGetBucketDiffReplyInvoker() = default;
MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::HandleApplyBucketDiffReplyInvoker()
: _counter(0),
_stub(),
_applyCmd()
{}
-MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::~HandleApplyBucketDiffReplyInvoker() {}
+MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::~HandleApplyBucketDiffReplyInvoker() = default;
void
-MergeHandlerTest::setUp() {
+MergeHandlerTest::SetUp() {
_context.reset(new spi::Context(documentapi::LoadType::DEFAULT, 0, 0));
- SingleDiskPersistenceTestUtils::setUp();
+ SingleDiskPersistenceTestUtils::SetUp();
_location = 1234;
_bucket = makeDocumentBucket(document::BucketId(16, _location));
_maxTimestamp = 11501;
- LOG(info, "Creating %s in bucket database", _bucket.toString().c_str());
+ LOG(debug, "Creating %s in bucket database", _bucket.toString().c_str());
bucketdb::StorageBucketInfo bucketDBEntry;
bucketDBEntry.disk = 0;
getEnv().getBucketDatabase(_bucket.getBucketSpace()).insert(_bucket.getBucketId(), bucketDBEntry, "mergetestsetup");
- LOG(info, "Creating bucket to merge");
+ LOG(debug, "Creating bucket to merge");
createTestBucket(_bucket);
setUpChain(FRONT);
@@ -261,30 +197,28 @@ MergeHandlerTest::setUpChain(ChainPos pos) {
}
}
-void
-MergeHandlerTest::testMergeBucketCommand()
-{
+// Test a regular merge bucket command fetching data, including
+// puts, removes, unrevertable removes & duplicates.
+TEST_F(MergeHandlerTest, merge_bucket_command) {
MergeHandler handler(getPersistenceProvider(), getEnv());
- LOG(info, "Handle a merge bucket command");
+ LOG(debug, "Handle a merge bucket command");
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
cmd.setSourceIndex(1234);
MessageTracker::UP tracker = handler.handleMergeBucket(cmd, *_context);
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::GetBucketDiffCommand& cmd2(dynamic_cast<api::GetBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
- CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
- std::vector<api::GetBucketDiffCommand::Entry> diff(cmd2.getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(17), diff.size());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1234), cmd2.getSourceIndex());
+ LOG(debug, "Check state");
+ ASSERT_EQ(1, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::GETBUCKETDIFF, messageKeeper()._msgs[0]->getType());
+ auto& cmd2 = dynamic_cast<api::GetBucketDiffCommand&>(*messageKeeper()._msgs[0]);
+ EXPECT_THAT(_nodes, ContainerEq(cmd2.getNodes()));
+ auto diff = cmd2.getDiff();
+ EXPECT_EQ(17, diff.size());
+ EXPECT_EQ(1, cmd2.getAddress()->getIndex());
+ EXPECT_EQ(1234, cmd2.getSourceIndex());
tracker->generateReply(cmd);
- CPPUNIT_ASSERT(!tracker->getReply().get());
+ EXPECT_FALSE(tracker->getReply().get());
}
void
@@ -293,288 +227,137 @@ MergeHandlerTest::testGetBucketDiffChain(bool midChain)
setUpChain(midChain ? MIDDLE : BACK);
MergeHandler handler(getPersistenceProvider(), getEnv());
- LOG(info, "Verifying that get bucket diff is sent on");
+ LOG(debug, "Verifying that get bucket diff is sent on");
api::GetBucketDiffCommand cmd(_bucket, _nodes, _maxTimestamp);
MessageTracker::UP tracker1 = handler.handleGetBucketDiff(cmd, *_context);
api::StorageMessage::SP replySent = tracker1->getReply();
if (midChain) {
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::GetBucketDiffCommand& cmd2(
- dynamic_cast<api::GetBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
- CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
- std::vector<api::GetBucketDiffCommand::Entry> diff(cmd2.getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(17), diff.size());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());
-
- LOG(info, "Verifying that replying the diff sends on back");
- api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));
-
- CPPUNIT_ASSERT(!replySent.get());
+ LOG(debug, "Check state");
+ ASSERT_EQ(1, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::GETBUCKETDIFF, messageKeeper()._msgs[0]->getType());
+ auto& cmd2 = dynamic_cast<api::GetBucketDiffCommand&>(*messageKeeper()._msgs[0]);
+ EXPECT_THAT(_nodes, ContainerEq(cmd2.getNodes()));
+ auto diff = cmd2.getDiff();
+ EXPECT_EQ(17, diff.size());
+ EXPECT_EQ(1, cmd2.getAddress()->getIndex());
+
+ LOG(debug, "Verifying that replying the diff sends on back");
+ auto reply = std::make_unique<api::GetBucketDiffReply>(cmd2);
+
+ ASSERT_FALSE(replySent.get());
MessageSenderStub stub;
handler.handleGetBucketDiffReply(*reply, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
+ ASSERT_EQ(1, stub.replies.size());
replySent = stub.replies[0];
}
- api::GetBucketDiffReply::SP reply2(
- std::dynamic_pointer_cast<api::GetBucketDiffReply>(
- replySent));
- CPPUNIT_ASSERT(reply2.get());
-
- CPPUNIT_ASSERT_EQUAL(_nodes, reply2->getNodes());
- std::vector<api::GetBucketDiffCommand::Entry> diff(reply2->getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(17), diff.size());
+ auto reply2 = std::dynamic_pointer_cast<api::GetBucketDiffReply>(replySent);
+ ASSERT_TRUE(reply2.get());
+
+ EXPECT_THAT(_nodes, ContainerEq(reply2->getNodes()));
+ auto diff = reply2->getDiff();
+ EXPECT_EQ(17, diff.size());
+}
+
+TEST_F(MergeHandlerTest, get_bucket_diff_mid_chain) {
+ testGetBucketDiffChain(true);
+}
+
+TEST_F(MergeHandlerTest, get_bucket_diff_end_of_chain) {
+ testGetBucketDiffChain(false);
}
+// Test that a simplistic merge with nothing to actually merge
+// sends an apply bucket diff through the entire chain of 3 nodes.
void
MergeHandlerTest::testApplyBucketDiffChain(bool midChain)
{
setUpChain(midChain ? MIDDLE : BACK);
MergeHandler handler(getPersistenceProvider(), getEnv());
- LOG(info, "Verifying that apply bucket diff is sent on");
+ LOG(debug, "Verifying that apply bucket diff is sent on");
api::ApplyBucketDiffCommand cmd(_bucket, _nodes, _maxTimestamp);
MessageTracker::UP tracker1 = handler.handleApplyBucketDiff(cmd, *_context);
api::StorageMessage::SP replySent = tracker1->getReply();
if (midChain) {
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::ApplyBucketDiffCommand& cmd2(
- dynamic_cast<api::ApplyBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
- CPPUNIT_ASSERT_EQUAL(_nodes, cmd2.getNodes());
- std::vector<api::ApplyBucketDiffCommand::Entry> diff(cmd2.getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(0), diff.size());
- CPPUNIT_ASSERT_EQUAL(uint16_t(1), cmd2.getAddress()->getIndex());
-
- CPPUNIT_ASSERT(!replySent.get());
-
- LOG(info, "Verifying that replying the diff sends on back");
- api::ApplyBucketDiffReply::UP reply(
- new api::ApplyBucketDiffReply(cmd2));
+ LOG(debug, "Check state");
+ ASSERT_EQ(1, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::APPLYBUCKETDIFF, messageKeeper()._msgs[0]->getType());
+ auto& cmd2 = dynamic_cast<api::ApplyBucketDiffCommand&>(*messageKeeper()._msgs[0]);
+ EXPECT_THAT(_nodes, ContainerEq(cmd2.getNodes()));
+ auto diff = cmd2.getDiff();
+ EXPECT_EQ(0, diff.size());
+ EXPECT_EQ(1, cmd2.getAddress()->getIndex());
+
+ EXPECT_FALSE(replySent.get());
+
+ LOG(debug, "Verifying that replying the diff sends on back");
+ auto reply = std::make_unique<api::ApplyBucketDiffReply>(cmd2);
MessageSenderStub stub;
handler.handleApplyBucketDiffReply(*reply, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
+ ASSERT_EQ(1, stub.replies.size());
replySent = stub.replies[0];
}
- api::ApplyBucketDiffReply::SP reply2(
- std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(replySent));
- CPPUNIT_ASSERT(reply2.get());
+ auto reply2 = std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(replySent);
+ ASSERT_TRUE(reply2.get());
- CPPUNIT_ASSERT_EQUAL(_nodes, reply2->getNodes());
- std::vector<api::ApplyBucketDiffCommand::Entry> diff(reply2->getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(0), diff.size());
+ EXPECT_THAT(_nodes, ContainerEq(reply2->getNodes()));
+ auto diff = reply2->getDiff();
+ EXPECT_EQ(0, diff.size());
}
-void
-MergeHandlerTest::testMasterMessageFlow()
-{
+TEST_F(MergeHandlerTest, apply_bucket_diff_mid_chain) {
+ testApplyBucketDiffChain(true);
+}
+
+TEST_F(MergeHandlerTest, apply_bucket_diff_end_of_chain) {
+ testApplyBucketDiffChain(false);
+}
+
+// Test that a simplistic merge with one thing to actually merge
+// sends the correct commands and finishes.
+TEST_F(MergeHandlerTest, master_message_flow) {
MergeHandler handler(getPersistenceProvider(), getEnv());
- LOG(info, "Handle a merge bucket command");
+ LOG(debug, "Handle a merge bucket command");
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::GetBucketDiffCommand& cmd2(dynamic_cast<api::GetBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
-
- api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));
- // End of chain can remove entries all have. This should end up with
- // one entry master node has other node don't have
+ LOG(debug, "Check state");
+ ASSERT_EQ(1, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::GETBUCKETDIFF, messageKeeper()._msgs[0]->getType());
+ auto& cmd2 = dynamic_cast<api::GetBucketDiffCommand&>(*messageKeeper()._msgs[0]);
+
+ auto reply = std::make_unique<api::GetBucketDiffReply>(cmd2);
+    // The end of the chain can remove entries that all nodes have. This should
+    // end up with one entry that the master node has but the other nodes do not.
reply->getDiff().resize(1);
handler.handleGetBucketDiffReply(*reply, messageKeeper());
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(size_t(2), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
- messageKeeper()._msgs[1]->getType());
- api::ApplyBucketDiffCommand& cmd3(
- dynamic_cast<api::ApplyBucketDiffCommand&>(
- *messageKeeper()._msgs[1]));
- api::ApplyBucketDiffReply::UP reply2(new api::ApplyBucketDiffReply(cmd3));
- CPPUNIT_ASSERT_EQUAL(size_t(1), reply2->getDiff().size());
- reply2->getDiff()[0]._entry._hasMask |= 2;
+ LOG(debug, "Check state");
+ ASSERT_EQ(2, messageKeeper()._msgs.size());
+ ASSERT_EQ(api::MessageType::APPLYBUCKETDIFF, messageKeeper()._msgs[1]->getType());
+ auto& cmd3 = dynamic_cast<api::ApplyBucketDiffCommand&>(*messageKeeper()._msgs[1]);
+ auto reply2 = std::make_unique<api::ApplyBucketDiffReply>(cmd3);
+ ASSERT_EQ(1, reply2->getDiff().size());
+ reply2->getDiff()[0]._entry._hasMask |= 2u;
MessageSenderStub stub;
handler.handleApplyBucketDiffReply(*reply2, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
-
- api::MergeBucketReply::SP reply3(
- std::dynamic_pointer_cast<api::MergeBucketReply>(stub.replies[0]));
- CPPUNIT_ASSERT(reply3.get());
-
- CPPUNIT_ASSERT_EQUAL(_nodes, reply3->getNodes());
- CPPUNIT_ASSERT(reply3->getResult().success());
- CPPUNIT_ASSERT(!fsHandler().isMerging(_bucket));
-}
-
-void
-MergeHandlerTest::testMergeUnrevertableRemove()
-{
-/*
- MergeHandler handler(getPersistenceProvider(), getEnv());
-
- LOG(info, "Handle a merge bucket command");
- api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
- {
- MessageTracker tracker;
- handler.handleMergeBucket(cmd, tracker);
- }
-
- LOG(info, "Check state");
- CPPUNIT_ASSERT_EQUAL(uint64_t(1), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::GETBUCKETDIFF,
- messageKeeper()._msgs[0]->getType());
- api::GetBucketDiffCommand& cmd2(
- dynamic_cast<api::GetBucketDiffCommand&>(
- *messageKeeper()._msgs[0]));
-
- api::GetBucketDiffReply::UP reply(new api::GetBucketDiffReply(cmd2));
-
- std::vector<Timestamp> docTimestamps;
- for (int i = 0; i < 4; ++i) {
- docTimestamps.push_back(Timestamp(reply->getDiff()[i]._timestamp));
- }
- CPPUNIT_ASSERT(reply->getDiff().size() >= 4);
- reply->getDiff().resize(4);
- // Add one non-unrevertable entry for existing timestamp which
- // should not be added
- reply->getDiff()[0]._flags |= Types::DELETED;
- reply->getDiff()[0]._bodySize = 0;
- reply->getDiff()[0]._hasMask = 2;
- // Add a unrevertable entry which should be modified
- reply->getDiff()[1]._flags |= Types::DELETED | Types::DELETED_IN_PLACE;
- reply->getDiff()[1]._bodySize = 0;
- reply->getDiff()[1]._hasMask = 2;
- // Add one non-unrevertable entry that is a duplicate put
- // which should not be added or fail the merge.
- LOG(info, "duplicate put has timestamp %zu and flags %u",
- reply->getDiff()[2]._timestamp,
- reply->getDiff()[2]._flags);
- reply->getDiff()[2]._hasMask = 2;
- // Add one unrevertable entry for a timestamp that does not exist
- reply->getDiff()[3]._flags |= Types::DELETED | Types::DELETED_IN_PLACE;
- reply->getDiff()[3]._timestamp = 12345678;
- reply->getDiff()[3]._bodySize = 0;
- reply->getDiff()[3]._hasMask = 2;
- {
- MessageTracker tracker;
- handler.handleGetBucketDiffReply(*reply, tracker);
- }
-
- LOG(info, "%s", reply->toString(true).c_str());
-
- LOG(info, "Create bucket diff reply");
- CPPUNIT_ASSERT_EQUAL(uint64_t(2), messageKeeper()._msgs.size());
- CPPUNIT_ASSERT_EQUAL(api::MessageType::APPLYBUCKETDIFF,
- messageKeeper()._msgs[1]->getType());
- api::ApplyBucketDiffCommand& cmd3(
- dynamic_cast<api::ApplyBucketDiffCommand&>(
- *messageKeeper()._msgs[1]));
- api::ApplyBucketDiffReply::UP reply2(
- new api::ApplyBucketDiffReply(cmd3));
- CPPUNIT_ASSERT_EQUAL(size_t(4), reply2->getDiff().size());
-
- memfile::DataLocation headerLocs[4];
- std::vector<DocumentId> documentIds;
- // So deserialization won't fail, we need some kind of header blob
- // for each entry
-
- for (int i = 0; i < 4; ++i) {
- api::ApplyBucketDiffReply::Entry& entry = reply2->getDiff()[i];
- CPPUNIT_ASSERT_EQUAL(uint16_t(2), entry._entry._hasMask);
-
- memfile::MemFilePtr file(getMemFile(_bucket));
- const memfile::MemSlot* slot = file->getSlotAtTime(docTimestamps[i]);
- CPPUNIT_ASSERT(slot != NULL);
- LOG(info, "Processing slot %s", slot->toString().c_str());
- CPPUNIT_ASSERT(slot->hasBodyContent());
- documentIds.push_back(file->getDocumentId(*slot));
- entry._docName = documentIds.back().toString();
- headerLocs[i] = slot->getLocation(HEADER);
-
- document::Document::UP doc(file->getDocument(*slot, ALL));
- {
- vespalib::nbostream stream;
- doc->serializeHeader(stream);
- std::vector<char> buf(
- stream.peek(), stream.peek() + stream.size());
- entry._headerBlob.swap(buf);
- }
- // Put duplicate needs body blob as well
- if (i == 2) {
- vespalib::nbostream stream;
- doc->serializeBody(stream);
- std::vector<char> buf(
- stream.peek(), stream.peek() + stream.size());
- entry._bodyBlob.swap(buf);
- }
- }
-
- LOG(info, "%s", reply2->toString(true).c_str());
-
- MessageTracker tracker;
- handler.handleApplyBucketDiffReply(*reply2, tracker);
-
- CPPUNIT_ASSERT(tracker._sendReply);
- api::MergeBucketReply::SP reply3(
- std::dynamic_pointer_cast<api::MergeBucketReply>(
- tracker._reply));
- CPPUNIT_ASSERT(reply3.get());
-
- CPPUNIT_ASSERT_EQUAL(_nodes, reply3->getNodes());
- CPPUNIT_ASSERT(reply3->getResult().success());
-
- memfile::MemFilePtr file(getMemFile(_bucket));
- // Existing timestamp should not be modified by
- // non-unrevertable entry
- {
- const memfile::MemSlot* slot = file->getSlotAtTime(
- Timestamp(reply->getDiff()[0]._timestamp));
- CPPUNIT_ASSERT(slot != NULL);
- CPPUNIT_ASSERT(!slot->deleted());
- }
- // Ensure unrevertable remove for existing put was merged in OK
- {
- const memfile::MemSlot* slot = file->getSlotAtTime(
- Timestamp(reply->getDiff()[1]._timestamp));
- CPPUNIT_ASSERT(slot != NULL);
- CPPUNIT_ASSERT(slot->deleted());
- CPPUNIT_ASSERT(slot->deletedInPlace());
- CPPUNIT_ASSERT(!slot->hasBodyContent());
- // Header location should not have changed
- CPPUNIT_ASSERT_EQUAL(headerLocs[1], slot->getLocation(HEADER));
- }
+ ASSERT_EQ(1, stub.replies.size());
- // Non-existing timestamp unrevertable remove should be added as
- // entry with doc id-only header
- {
- const memfile::MemSlot* slot = file->getSlotAtTime(
- Timestamp(reply->getDiff()[3]._timestamp));
- CPPUNIT_ASSERT(slot != NULL);
- CPPUNIT_ASSERT(slot->deleted());
- CPPUNIT_ASSERT(slot->deletedInPlace());
- CPPUNIT_ASSERT(!slot->hasBodyContent());
- CPPUNIT_ASSERT_EQUAL(documentIds[3], file->getDocumentId(*slot));
- }
+ auto reply3 = std::dynamic_pointer_cast<api::MergeBucketReply>(stub.replies[0]);
+ ASSERT_TRUE(reply3.get());
-*/
+ EXPECT_THAT(_nodes, ContainerEq(reply3->getNodes()));
+ EXPECT_TRUE(reply3->getResult().success());
+ EXPECT_FALSE(fsHandler().isMerging(_bucket));
}
template <typename T>
@@ -631,9 +414,7 @@ getFilledDataSize(const std::vector<api::ApplyBucketDiffCommand::Entry>& diff)
}
-void
-MergeHandlerTest::testChunkedApplyBucketDiff()
-{
+TEST_F(MergeHandlerTest, chunked_apply_bucket_diff) {
uint32_t docSize = 1024;
uint32_t docCount = 10;
uint32_t maxChunkSize = docSize * 3;
@@ -643,14 +424,12 @@ MergeHandlerTest::testChunkedApplyBucketDiff()
MergeHandler handler(getPersistenceProvider(), getEnv(), maxChunkSize);
- LOG(info, "Handle a merge bucket command");
+ LOG(debug, "Handle a merge bucket command");
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
- api::GetBucketDiffReply::UP getBucketDiffReply(
- new api::GetBucketDiffReply(*getBucketDiffCmd));
+ auto getBucketDiffCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
+ auto getBucketDiffReply = std::make_unique<api::GetBucketDiffReply>(*getBucketDiffCmd);
handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());
@@ -659,14 +438,12 @@ MergeHandlerTest::testChunkedApplyBucketDiff()
api::MergeBucketReply::SP reply;
while (seen.size() != totalDiffs) {
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- fetchSingleMessage<api::ApplyBucketDiffCommand>());
+ auto applyBucketDiffCmd = fetchSingleMessage<api::ApplyBucketDiffCommand>();
- LOG(info, "Test that we get chunked diffs in ApplyBucketDiff");
- std::vector<api::ApplyBucketDiffCommand::Entry>& diff(
- applyBucketDiffCmd->getDiff());
- CPPUNIT_ASSERT(getFilledCount(diff) < totalDiffs);
- CPPUNIT_ASSERT(getFilledDataSize(diff) <= maxChunkSize);
+ LOG(debug, "Test that we get chunked diffs in ApplyBucketDiff");
+ auto& diff = applyBucketDiffCmd->getDiff();
+ ASSERT_LT(getFilledCount(diff), totalDiffs);
+ ASSERT_LE(getFilledDataSize(diff), maxChunkSize);
// Include node 1 in hasmask for all diffs to indicate it's done
// Also remember the diffs we've seen thus far to ensure chunking
@@ -675,39 +452,33 @@ MergeHandlerTest::testChunkedApplyBucketDiff()
if (!diff[i].filled()) {
continue;
}
- diff[i]._entry._hasMask |= 2;
- std::pair<std::set<spi::Timestamp>::iterator, bool> inserted(
- seen.insert(spi::Timestamp(diff[i]._entry._timestamp)));
+ diff[i]._entry._hasMask |= 2u;
+ auto inserted = seen.emplace(spi::Timestamp(diff[i]._entry._timestamp));
if (!inserted.second) {
- std::ostringstream ss;
- ss << "Diff for " << diff[i]
- << " has already been seen in another ApplyBucketDiff";
- CPPUNIT_FAIL(ss.str());
+ FAIL() << "Diff for " << diff[i]
+ << " has already been seen in another ApplyBucketDiff";
}
}
- api::ApplyBucketDiffReply::UP applyBucketDiffReply(
- new api::ApplyBucketDiffReply(*applyBucketDiffCmd));
+ auto applyBucketDiffReply = std::make_unique<api::ApplyBucketDiffReply>(*applyBucketDiffCmd);
{
handler.handleApplyBucketDiffReply(*applyBucketDiffReply, messageKeeper());
- if (messageKeeper()._msgs.size()) {
- CPPUNIT_ASSERT(!reply.get());
+ if (!messageKeeper()._msgs.empty()) {
+ ASSERT_FALSE(reply.get());
reply = std::dynamic_pointer_cast<api::MergeBucketReply>(
messageKeeper()._msgs[messageKeeper()._msgs.size() - 1]);
}
}
}
- LOG(info, "Done with applying diff");
+ LOG(debug, "Done with applying diff");
- CPPUNIT_ASSERT(reply.get());
- CPPUNIT_ASSERT_EQUAL(_nodes, reply->getNodes());
- CPPUNIT_ASSERT(reply->getResult().success());
+ ASSERT_TRUE(reply.get());
+ EXPECT_THAT(_nodes, ContainerEq(reply->getNodes()));
+ EXPECT_TRUE(reply->getResult().success());
}
-void
-MergeHandlerTest::testChunkLimitPartiallyFilledDiff()
-{
+TEST_F(MergeHandlerTest, chunk_limit_partially_filled_diff) {
setUpChain(FRONT);
uint32_t docSize = 1024;
@@ -731,24 +502,20 @@ MergeHandlerTest::testChunkLimitPartiallyFilledDiff()
}
setUpChain(MIDDLE);
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- new api::ApplyBucketDiffCommand(_bucket, _nodes, maxChunkSize));
+ auto applyBucketDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(_bucket, _nodes, maxChunkSize);
applyBucketDiffCmd->getDiff() = applyDiff;
MergeHandler handler(
getPersistenceProvider(), getEnv(), maxChunkSize);
handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
- std::shared_ptr<api::ApplyBucketDiffCommand> fwdDiffCmd(
- fetchSingleMessage<api::ApplyBucketDiffCommand>());
+ auto fwdDiffCmd = fetchSingleMessage<api::ApplyBucketDiffCommand>();
// Should not fill up more than chunk size allows for
- CPPUNIT_ASSERT_EQUAL(size_t(2), getFilledCount(fwdDiffCmd->getDiff()));
- CPPUNIT_ASSERT(getFilledDataSize(fwdDiffCmd->getDiff()) <= maxChunkSize);
+ EXPECT_EQ(2, getFilledCount(fwdDiffCmd->getDiff()));
+ EXPECT_LE(getFilledDataSize(fwdDiffCmd->getDiff()), maxChunkSize);
}
-void
-MergeHandlerTest::testMaxTimestamp()
-{
+TEST_F(MergeHandlerTest, max_timestamp) {
doPut(1234, spi::Timestamp(_maxTimestamp + 10), 1024, 1024);
MergeHandler handler(getPersistenceProvider(), getEnv());
@@ -756,11 +523,10 @@ MergeHandlerTest::testMaxTimestamp()
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
+ auto getCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
- CPPUNIT_ASSERT(!getCmd->getDiff().empty());
- CPPUNIT_ASSERT(getCmd->getDiff().back()._timestamp <= _maxTimestamp);
+ ASSERT_FALSE(getCmd->getDiff().empty());
+ EXPECT_LE(getCmd->getDiff().back()._timestamp, _maxTimestamp);
}
void
@@ -819,8 +585,7 @@ MergeHandlerTest::createDummyApplyDiff(int timestampOffset,
fillDummyApplyDiff(applyDiff);
}
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ auto applyBucketDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(_bucket, _nodes, 1024*1024);
applyBucketDiffCmd->getDiff() = applyDiff;
return applyBucketDiffCmd;
}
@@ -855,109 +620,86 @@ MergeHandlerTest::createDummyGetBucketDiff(int timestampOffset,
diff.push_back(e);
}
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- new api::GetBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ auto getBucketDiffCmd = std::make_shared<api::GetBucketDiffCommand>(_bucket, _nodes, 1024*1024);
getBucketDiffCmd->getDiff() = diff;
return getBucketDiffCmd;
}
-void
-MergeHandlerTest::testSPIFlushGuard()
-{
+TEST_F(MergeHandlerTest, spi_flush_guard) {
PersistenceProviderWrapper providerWrapper(
getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
setUpChain(MIDDLE);
// Fail applying unrevertable remove
providerWrapper.setFailureMask(
PersistenceProviderWrapper::FAIL_REMOVE);
providerWrapper.clearOperationLog();
+
try {
handler.handleApplyBucketDiff(*createDummyApplyDiff(6000), *_context);
- CPPUNIT_FAIL("No exception thrown on failing in-place remove");
+ FAIL() << "No exception thrown on failing in-place remove";
} catch (const std::runtime_error& e) {
- CPPUNIT_ASSERT(std::string(e.what()).find("Failed remove")
- != std::string::npos);
+ EXPECT_TRUE(std::string(e.what()).find("Failed remove") != std::string::npos);
}
// Test that we always flush after applying diff locally, even when
// errors are encountered.
const std::vector<std::string>& opLog(providerWrapper.getOperationLog());
- CPPUNIT_ASSERT(!opLog.empty());
- CPPUNIT_ASSERT_EQUAL(
- std::string("flush(Bucket(0x40000000000004d2, partition 0))"),
- opLog.back());
+ ASSERT_FALSE(opLog.empty());
+ EXPECT_EQ("flush(Bucket(0x40000000000004d2, partition 0))", opLog.back());
}
-void
-MergeHandlerTest::testBucketNotFoundInDb()
-{
+TEST_F(MergeHandlerTest, bucket_not_found_in_db) {
MergeHandler handler(getPersistenceProvider(), getEnv());
// Send merge for unknown bucket
api::MergeBucketCommand cmd(makeDocumentBucket(document::BucketId(16, 6789)), _nodes, _maxTimestamp);
MessageTracker::UP tracker = handler.handleMergeBucket(cmd, *_context);
- CPPUNIT_ASSERT(tracker->getResult().isBucketDisappearance());
+ EXPECT_TRUE(tracker->getResult().isBucketDisappearance());
}
-void
-MergeHandlerTest::testMergeProgressSafeGuard()
-{
+TEST_F(MergeHandlerTest, merge_progress_safe_guard) {
MergeHandler handler(getPersistenceProvider(), getEnv());
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
- api::GetBucketDiffReply::UP getBucketDiffReply(
- new api::GetBucketDiffReply(*getBucketDiffCmd));
+ auto getBucketDiffCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
+ auto getBucketDiffReply = std::make_unique<api::GetBucketDiffReply>(*getBucketDiffCmd);
handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- fetchSingleMessage<api::ApplyBucketDiffCommand>());
- api::ApplyBucketDiffReply::UP applyBucketDiffReply(
- new api::ApplyBucketDiffReply(*applyBucketDiffCmd));
+ auto applyBucketDiffCmd = fetchSingleMessage<api::ApplyBucketDiffCommand>();
+ auto applyBucketDiffReply = std::make_unique<api::ApplyBucketDiffReply>(*applyBucketDiffCmd);
MessageSenderStub stub;
handler.handleApplyBucketDiffReply(*applyBucketDiffReply, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.replies.size());
+ ASSERT_EQ(1, stub.replies.size());
- api::MergeBucketReply::SP mergeReply(
- std::dynamic_pointer_cast<api::MergeBucketReply>(
- stub.replies[0]));
- CPPUNIT_ASSERT(mergeReply.get());
- CPPUNIT_ASSERT(mergeReply->getResult().getResult()
- == api::ReturnCode::INTERNAL_FAILURE);
+ auto mergeReply = std::dynamic_pointer_cast<api::MergeBucketReply>(stub.replies[0]);
+ ASSERT_TRUE(mergeReply.get());
+ EXPECT_EQ(mergeReply->getResult().getResult(), api::ReturnCode::INTERNAL_FAILURE);
}
-void
-MergeHandlerTest::testSafeGuardNotInvokedWhenHasMaskChanges()
-{
+TEST_F(MergeHandlerTest, safe_guard_not_invoked_when_has_mask_changes) {
MergeHandler handler(getPersistenceProvider(), getEnv());
_nodes.clear();
- _nodes.push_back(api::MergeBucketCommand::Node(0, false));
- _nodes.push_back(api::MergeBucketCommand::Node(1, false));
- _nodes.push_back(api::MergeBucketCommand::Node(2, false));
+ _nodes.emplace_back(0, false);
+ _nodes.emplace_back(1, false);
+ _nodes.emplace_back(2, false);
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
- api::GetBucketDiffReply::UP getBucketDiffReply(
- new api::GetBucketDiffReply(*getBucketDiffCmd));
+ auto getBucketDiffCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
+ auto getBucketDiffReply = std::make_unique<api::GetBucketDiffReply>(*getBucketDiffCmd);
handler.handleGetBucketDiffReply(*getBucketDiffReply, messageKeeper());
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- fetchSingleMessage<api::ApplyBucketDiffCommand>());
- api::ApplyBucketDiffReply::UP applyBucketDiffReply(
- new api::ApplyBucketDiffReply(*applyBucketDiffCmd));
- CPPUNIT_ASSERT(!applyBucketDiffReply->getDiff().empty());
+ auto applyBucketDiffCmd = fetchSingleMessage<api::ApplyBucketDiffCommand>();
+ auto applyBucketDiffReply = std::make_unique<api::ApplyBucketDiffReply>(*applyBucketDiffCmd);
+ ASSERT_FALSE(applyBucketDiffReply->getDiff().empty());
// Change a hasMask to indicate something changed during merging.
applyBucketDiffReply->getDiff()[0]._entry._hasMask = 0x5;
@@ -965,21 +707,15 @@ MergeHandlerTest::testSafeGuardNotInvokedWhenHasMaskChanges()
LOG(debug, "sending apply bucket diff reply");
handler.handleApplyBucketDiffReply(*applyBucketDiffReply, stub);
- CPPUNIT_ASSERT_EQUAL(1, (int)stub.commands.size());
+ ASSERT_EQ(1, stub.commands.size());
- api::ApplyBucketDiffCommand::SP applyBucketDiffCmd2(
- std::dynamic_pointer_cast<api::ApplyBucketDiffCommand>(
- stub.commands[0]));
- CPPUNIT_ASSERT(applyBucketDiffCmd2.get());
- CPPUNIT_ASSERT_EQUAL(applyBucketDiffCmd->getDiff().size(),
- applyBucketDiffCmd2->getDiff().size());
- CPPUNIT_ASSERT_EQUAL(uint16_t(0x5),
- applyBucketDiffCmd2->getDiff()[0]._entry._hasMask);
+ auto applyBucketDiffCmd2 = std::dynamic_pointer_cast<api::ApplyBucketDiffCommand>(stub.commands[0]);
+ ASSERT_TRUE(applyBucketDiffCmd2.get());
+ ASSERT_EQ(applyBucketDiffCmd->getDiff().size(), applyBucketDiffCmd2->getDiff().size());
+ EXPECT_EQ(0x5, applyBucketDiffCmd2->getDiff()[0]._entry._hasMask);
}
-void
-MergeHandlerTest::testEntryRemovedAfterGetBucketDiff()
-{
+TEST_F(MergeHandlerTest, entry_removed_after_get_bucket_diff) {
MergeHandler handler(getPersistenceProvider(), getEnv());
std::vector<api::ApplyBucketDiffCommand::Entry> applyDiff;
{
@@ -990,22 +726,18 @@ MergeHandlerTest::testEntryRemovedAfterGetBucketDiff()
applyDiff.push_back(e);
}
setUpChain(BACK);
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ auto applyBucketDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(_bucket, _nodes, 1024*1024);
applyBucketDiffCmd->getDiff() = applyDiff;
- MessageTracker::UP tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
+ auto tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
- api::ApplyBucketDiffReply::SP applyBucketDiffReply(
- std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(
- tracker->getReply()));
- CPPUNIT_ASSERT(applyBucketDiffReply.get());
-
- std::vector<api::ApplyBucketDiffCommand::Entry>& diff(
- applyBucketDiffReply->getDiff());
- CPPUNIT_ASSERT_EQUAL(size_t(1), diff.size());
- CPPUNIT_ASSERT(!diff[0].filled());
- CPPUNIT_ASSERT_EQUAL(uint16_t(0x0), diff[0]._entry._hasMask);
+ auto applyBucketDiffReply = std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(tracker->getReply());
+ ASSERT_TRUE(applyBucketDiffReply.get());
+
+ auto& diff = applyBucketDiffReply->getDiff();
+ ASSERT_EQ(1, diff.size());
+ EXPECT_FALSE(diff[0].filled());
+ EXPECT_EQ(0x0, diff[0]._entry._hasMask);
}
std::string
@@ -1090,15 +822,11 @@ MergeHandlerTest::HandleMergeBucketInvoker::invoke(
handler.handleMergeBucket(cmd, context);
}
-void
-MergeHandlerTest::testMergeBucketSPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, merge_bucket_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
setUpChain(MIDDLE);
ExpectedExceptionSpec exceptions[] = {
@@ -1112,11 +840,7 @@ MergeHandlerTest::testMergeBucketSPIFailures()
for (ExceptionIterator it = exceptions; it != last; ++it) {
HandleMergeBucketInvoker invoker;
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
}
}
@@ -1130,15 +854,11 @@ MergeHandlerTest::HandleGetBucketDiffInvoker::invoke(
handler.handleGetBucketDiff(cmd, context);
}
-void
-MergeHandlerTest::testGetBucketDiffSPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, get_bucket_diff_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
setUpChain(MIDDLE);
ExpectedExceptionSpec exceptions[] = {
@@ -1153,11 +873,7 @@ MergeHandlerTest::testGetBucketDiffSPIFailures()
for (ExceptionIterator it = exceptions; it != last; ++it) {
HandleGetBucketDiffInvoker invoker;
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
}
}
@@ -1173,15 +889,11 @@ MergeHandlerTest::HandleApplyBucketDiffInvoker::invoke(
handler.handleApplyBucketDiff(*cmd, context);
}
-void
-MergeHandlerTest::testApplyBucketDiffSPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, apply_bucket_diff_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
setUpChain(MIDDLE);
ExpectedExceptionSpec exceptions[] = {
@@ -1197,15 +909,11 @@ MergeHandlerTest::testApplyBucketDiffSPIFailures()
for (ExceptionIterator it = exceptions; it != last; ++it) {
HandleApplyBucketDiffInvoker invoker;
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
// Casual, in-place testing of bug 6752085.
// This will fail if we give NaN to the metric in question.
- CPPUNIT_ASSERT(std::isfinite(getEnv()._metrics
- .mergeAverageDataReceivedNeeded.getLast()));
+ EXPECT_TRUE(std::isfinite(getEnv()._metrics
+ .mergeAverageDataReceivedNeeded.getLast()));
}
}
@@ -1248,15 +956,11 @@ MergeHandlerTest::HandleGetBucketDiffReplyInvoker::afterInvoke(
api::ReturnCode::INTERNAL_FAILURE);
}
-void
-MergeHandlerTest::testGetBucketDiffReplySPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, get_bucket_diff_reply_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
HandleGetBucketDiffReplyInvoker invoker;
setUpChain(FRONT);
@@ -1270,11 +974,7 @@ MergeHandlerTest::testGetBucketDiffReplySPIFailures()
ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
for (ExceptionIterator it = exceptions; it != last; ++it) {
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
}
}
@@ -1289,23 +989,20 @@ MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::beforeInvoke(
if (getChainPos() == FRONT) {
api::MergeBucketCommand cmd(test._bucket, test._nodes, test._maxTimestamp);
handler.handleMergeBucket(cmd, context);
- std::shared_ptr<api::GetBucketDiffCommand> diffCmd(
- test.fetchSingleMessage<api::GetBucketDiffCommand>());
- std::shared_ptr<api::GetBucketDiffCommand> dummyDiff(
- test.createDummyGetBucketDiff(100000 * _counter, 0x4));
+ auto diffCmd = test.fetchSingleMessage<api::GetBucketDiffCommand>();
+ auto dummyDiff = test.createDummyGetBucketDiff(100000 * _counter, 0x4);
diffCmd->getDiff() = dummyDiff->getDiff();
api::GetBucketDiffReply diffReply(*diffCmd);
handler.handleGetBucketDiffReply(diffReply, _stub);
- CPPUNIT_ASSERT_EQUAL(size_t(1), _stub.commands.size());
+ assert(_stub.commands.size() == 1);
_applyCmd = std::dynamic_pointer_cast<api::ApplyBucketDiffCommand>(
_stub.commands[0]);
} else {
// Pretend last node in chain has data and that it will be fetched when
// chain is unwinded.
- std::shared_ptr<api::ApplyBucketDiffCommand> cmd(
- test.createDummyApplyDiff(100000 * _counter, 0x4, false));
+ auto cmd = test.createDummyApplyDiff(100000 * _counter, 0x4, false);
handler.handleApplyBucketDiff(*cmd, context);
_applyCmd = test.fetchSingleMessage<api::ApplyBucketDiffCommand>();
}
@@ -1345,11 +1042,8 @@ MergeHandlerTest::HandleApplyBucketDiffReplyInvoker::afterInvoke(
}
}
-void
-MergeHandlerTest::testApplyBucketDiffReplySPIFailures()
-{
- PersistenceProviderWrapper providerWrapper(
- getPersistenceProvider());
+TEST_F(MergeHandlerTest, apply_bucket_diff_reply_spi_failures) {
+ PersistenceProviderWrapper providerWrapper(getPersistenceProvider());
HandleApplyBucketDiffReplyInvoker invoker;
for (int i = 0; i < 2; ++i) {
ChainPos pos(i == 0 ? FRONT : MIDDLE);
@@ -1357,8 +1051,7 @@ MergeHandlerTest::testApplyBucketDiffReplySPIFailures()
invoker.setChainPos(pos);
MergeHandler handler(providerWrapper, getEnv());
providerWrapper.setResult(
- spi::Result(spi::Result::PERMANENT_ERROR,
- "who you gonna call?"));
+ spi::Result(spi::Result::PERMANENT_ERROR, "who you gonna call?"));
ExpectedExceptionSpec exceptions[] = {
{ PersistenceProviderWrapper::FAIL_CREATE_ITERATOR, "create iterator" },
@@ -1372,18 +1065,12 @@ MergeHandlerTest::testApplyBucketDiffReplySPIFailures()
ExceptionIterator last = exceptions + sizeof(exceptions)/sizeof(exceptions[0]);
for (ExceptionIterator it = exceptions; it != last; ++it) {
- CPPUNIT_ASSERT_EQUAL(std::string(),
- doTestSPIException(handler,
- providerWrapper,
- invoker,
- *it));
+ EXPECT_EQ("", doTestSPIException(handler, providerWrapper, invoker, *it));
}
}
}
-void
-MergeHandlerTest::testRemoveFromDiff()
-{
+TEST_F(MergeHandlerTest, remove_from_diff) {
framework::defaultimplementation::FakeClock clock;
MergeStatus status(clock, documentapi::LoadType::DEFAULT, 0, 0);
@@ -1408,8 +1095,8 @@ MergeHandlerTest::testRemoveFromDiff()
applyDiff[1]._entry._flags = 0x3;
applyDiff[1]._entry._hasMask = 0x7;
- CPPUNIT_ASSERT(status.removeFromDiff(applyDiff, 0x7));
- CPPUNIT_ASSERT(status.diff.empty());
+ EXPECT_TRUE(status.removeFromDiff(applyDiff, 0x7));
+ EXPECT_TRUE(status.diff.empty());
}
status.diff.insert(status.diff.end(), diff.begin(), diff.end());
@@ -1424,8 +1111,8 @@ MergeHandlerTest::testRemoveFromDiff()
applyDiff[1]._entry._flags = 0x3;
applyDiff[1]._entry._hasMask = 0x6;
- CPPUNIT_ASSERT(!status.removeFromDiff(applyDiff, 0x7));
- CPPUNIT_ASSERT_EQUAL(size_t(2), status.diff.size());
+ EXPECT_FALSE(status.removeFromDiff(applyDiff, 0x7));
+ EXPECT_EQ(2, status.diff.size());
}
status.diff.clear();
@@ -1442,14 +1129,12 @@ MergeHandlerTest::testRemoveFromDiff()
applyDiff[1]._entry._flags = 0x3;
applyDiff[1]._entry._hasMask = 0x5;
- CPPUNIT_ASSERT(status.removeFromDiff(applyDiff, 0x7));
- CPPUNIT_ASSERT_EQUAL(size_t(2), status.diff.size());
+ EXPECT_TRUE(status.removeFromDiff(applyDiff, 0x7));
+ EXPECT_EQ(2, status.diff.size());
}
}
-void
-MergeHandlerTest::testRemovePutOnExistingTimestamp()
-{
+TEST_F(MergeHandlerTest, remove_put_on_existing_timestamp) {
setUpChain(BACK);
document::TestDocMan docMan;
@@ -1469,22 +1154,20 @@ MergeHandlerTest::testRemovePutOnExistingTimestamp()
applyDiff.push_back(e);
}
- std::shared_ptr<api::ApplyBucketDiffCommand> applyBucketDiffCmd(
- new api::ApplyBucketDiffCommand(_bucket, _nodes, 1024*1024));
+ auto applyBucketDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(_bucket, _nodes, 1024*1024);
applyBucketDiffCmd->getDiff() = applyDiff;
- MessageTracker::UP tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
+ auto tracker = handler.handleApplyBucketDiff(*applyBucketDiffCmd, *_context);
- api::ApplyBucketDiffReply::SP applyBucketDiffReply(
+ auto applyBucketDiffReply =
std::dynamic_pointer_cast<api::ApplyBucketDiffReply>(
- tracker->getReply()));
- CPPUNIT_ASSERT(applyBucketDiffReply.get());
+ tracker->getReply());
+ ASSERT_TRUE(applyBucketDiffReply.get());
api::MergeBucketCommand cmd(_bucket, _nodes, _maxTimestamp);
handler.handleMergeBucket(cmd, *_context);
- std::shared_ptr<api::GetBucketDiffCommand> getBucketDiffCmd(
- fetchSingleMessage<api::GetBucketDiffCommand>());
+ auto getBucketDiffCmd = fetchSingleMessage<api::GetBucketDiffCommand>();
// Timestamp should now be a regular remove
bool foundTimestamp = false;
@@ -1492,14 +1175,14 @@ MergeHandlerTest::testRemovePutOnExistingTimestamp()
const api::GetBucketDiffCommand::Entry& e(
getBucketDiffCmd->getDiff()[i]);
if (e._timestamp == ts) {
- CPPUNIT_ASSERT_EQUAL(
+ EXPECT_EQ(
uint16_t(MergeHandler::IN_USE | MergeHandler::DELETED),
e._flags);
foundTimestamp = true;
break;
}
}
- CPPUNIT_ASSERT(foundTimestamp);
+ EXPECT_TRUE(foundTimestamp);
}
} // storage
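
Editor's note: two further details recur in the mergehandlertest.cpp conversion — vector comparisons move from CPPUNIT_ASSERT_EQUAL to gmock's ContainerEq matcher (which reports the differing elements on failure), and the renamed SetUp() override still has to invoke the base fixture's SetUp() explicitly, as MergeHandlerTest::SetUp() does above. A compact sketch under those assumptions (all names below are illustrative):

    #include <gtest/gtest.h>
    #include <gmock/gmock.h>
    #include <vector>

    using ::testing::ContainerEq;

    // Illustrative base/derived fixtures mirroring the SetUp() chaining in the patch.
    struct DemoBaseFixture : ::testing::Test {
        std::vector<int> _nodes;
        void SetUp() override { _nodes = {0, 1, 2}; }
    };

    struct DemoMergeFixture : DemoBaseFixture {
        int _maxTimestamp = 0;
        void SetUp() override {
            DemoBaseFixture::SetUp();   // the base SetUp() is not called automatically
            _maxTimestamp = 11501;
        }
    };

    TEST_F(DemoMergeFixture, container_comparison_idiom) {
        std::vector<int> expected{0, 1, 2};
        // CppUnit: CPPUNIT_ASSERT_EQUAL(expected, actualNodes);
        // gmock:   ContainerEq gives element-wise diagnostics on mismatch.
        EXPECT_THAT(_nodes, ContainerEq(expected));
        EXPECT_EQ(11501, _maxTimestamp);
    }
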
diff --git a/storage/src/tests/persistence/persistencequeuetest.cpp b/storage/src/tests/persistence/persistencequeuetest.cpp
index a212e65efe8..be276dd7f9d 100644
--- a/storage/src/tests/persistence/persistencequeuetest.cpp
+++ b/storage/src/tests/persistence/persistencequeuetest.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/log/log.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
@@ -12,31 +11,16 @@
LOG_SETUP(".persistencequeuetest");
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
class PersistenceQueueTest : public FileStorTestFixture {
public:
- void testFetchNextUnlockedMessageIfBucketLocked();
- void shared_locked_operations_allow_concurrent_bucket_access();
- void exclusive_locked_operation_not_started_if_shared_op_active();
- void shared_locked_operation_not_started_if_exclusive_op_active();
- void exclusive_locked_operation_not_started_if_exclusive_op_active();
- void operation_batching_not_allowed_across_different_lock_modes();
-
std::shared_ptr<api::StorageMessage> createPut(uint64_t bucket, uint64_t docIdx);
std::shared_ptr<api::StorageMessage> createGet(uint64_t bucket) const;
- void setUp() override;
-
- CPPUNIT_TEST_SUITE(PersistenceQueueTest);
- CPPUNIT_TEST(testFetchNextUnlockedMessageIfBucketLocked);
- CPPUNIT_TEST(shared_locked_operations_allow_concurrent_bucket_access);
- CPPUNIT_TEST(exclusive_locked_operation_not_started_if_shared_op_active);
- CPPUNIT_TEST(shared_locked_operation_not_started_if_exclusive_op_active);
- CPPUNIT_TEST(exclusive_locked_operation_not_started_if_exclusive_op_active);
- CPPUNIT_TEST(operation_batching_not_allowed_across_different_lock_modes);
- CPPUNIT_TEST_SUITE_END();
+ void SetUp() override;
struct Fixture {
FileStorTestFixture& parent;
@@ -55,8 +39,6 @@ public:
static constexpr uint16_t _disk = 0;
};
-CPPUNIT_TEST_SUITE_REGISTRATION(PersistenceQueueTest);
-
PersistenceQueueTest::Fixture::Fixture(FileStorTestFixture& parent_)
: parent(parent_),
top(),
@@ -82,7 +64,7 @@ PersistenceQueueTest::Fixture::Fixture(FileStorTestFixture& parent_)
PersistenceQueueTest::Fixture::~Fixture() = default;
-void PersistenceQueueTest::setUp() {
+void PersistenceQueueTest::SetUp() {
setupPersistenceThreads(1);
_node->setPersistenceProvider(std::make_unique<spi::dummy::DummyPersistence>(_node->getTypeRepo(), 1));
}
@@ -103,7 +85,7 @@ std::shared_ptr<api::StorageMessage> PersistenceQueueTest::createGet(uint64_t bu
return cmd;
}
-void PersistenceQueueTest::testFetchNextUnlockedMessageIfBucketLocked() {
+TEST_F(PersistenceQueueTest, fetch_next_unlocked_message_if_bucket_locked) {
Fixture f(*this);
// Send 2 puts, 2 to the first bucket, 1 to the second. Calling
// getNextMessage 2 times should then return a lock on the first bucket,
@@ -114,91 +96,91 @@ void PersistenceQueueTest::testFetchNextUnlockedMessageIfBucketLocked() {
f.filestorHandler->schedule(createPut(5432, 0), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 1234),
- dynamic_cast<api::PutCommand&>(*lock0.second).getBucketId());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(document::BucketId(16, 1234),
+ dynamic_cast<api::PutCommand&>(*lock0.second).getBucketId());
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock1.first.get());
- CPPUNIT_ASSERT_EQUAL(document::BucketId(16, 5432),
- dynamic_cast<api::PutCommand&>(*lock1.second).getBucketId());
+ ASSERT_TRUE(lock1.first.get());
+ EXPECT_EQ(document::BucketId(16, 5432),
+ dynamic_cast<api::PutCommand&>(*lock1.second).getBucketId());
}
-void PersistenceQueueTest::shared_locked_operations_allow_concurrent_bucket_access() {
+TEST_F(PersistenceQueueTest, shared_locked_operations_allow_concurrent_bucket_access) {
Fixture f(*this);
f.filestorHandler->schedule(createGet(1234), _disk);
f.filestorHandler->schedule(createGet(1234), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Shared, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(api::LockingRequirements::Shared, lock0.first->lockingRequirements());
// Even though we already have a lock on the bucket, Gets allow shared locking and we
// should therefore be able to get another lock.
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock1.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Shared, lock1.first->lockingRequirements());
+ ASSERT_TRUE(lock1.first.get());
+ EXPECT_EQ(api::LockingRequirements::Shared, lock1.first->lockingRequirements());
}
-void PersistenceQueueTest::exclusive_locked_operation_not_started_if_shared_op_active() {
+TEST_F(PersistenceQueueTest, exclusive_locked_operation_not_started_if_shared_op_active) {
Fixture f(*this);
f.filestorHandler->schedule(createGet(1234), _disk);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Shared, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(api::LockingRequirements::Shared, lock0.first->lockingRequirements());
// Expected to time out
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(!lock1.first.get());
+ ASSERT_FALSE(lock1.first.get());
}
-void PersistenceQueueTest::shared_locked_operation_not_started_if_exclusive_op_active() {
+TEST_F(PersistenceQueueTest, shared_locked_operation_not_started_if_exclusive_op_active) {
Fixture f(*this);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
f.filestorHandler->schedule(createGet(1234), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
// Expected to time out
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(!lock1.first.get());
+ ASSERT_FALSE(lock1.first.get());
}
-void PersistenceQueueTest::exclusive_locked_operation_not_started_if_exclusive_op_active() {
+TEST_F(PersistenceQueueTest, exclusive_locked_operation_not_started_if_exclusive_op_active) {
Fixture f(*this);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first.get());
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first.get());
+ EXPECT_EQ(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
// Expected to time out
auto lock1 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(!lock1.first.get());
+ ASSERT_FALSE(lock1.first.get());
}
-void PersistenceQueueTest::operation_batching_not_allowed_across_different_lock_modes() {
+TEST_F(PersistenceQueueTest, operation_batching_not_allowed_across_different_lock_modes) {
Fixture f(*this);
f.filestorHandler->schedule(createPut(1234, 0), _disk);
f.filestorHandler->schedule(createGet(1234), _disk);
auto lock0 = f.filestorHandler->getNextMessage(_disk, f.stripeId);
- CPPUNIT_ASSERT(lock0.first);
- CPPUNIT_ASSERT(lock0.second);
- CPPUNIT_ASSERT_EQUAL(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
+ ASSERT_TRUE(lock0.first);
+ ASSERT_TRUE(lock0.second);
+ EXPECT_EQ(api::LockingRequirements::Exclusive, lock0.first->lockingRequirements());
f.filestorHandler->getNextMessage(_disk, f.stripeId, lock0);
- CPPUNIT_ASSERT(!lock0.second);
+ ASSERT_FALSE(lock0.second);
}
} // namespace storage
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index 327deaf7e82..e32fc056413 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -64,13 +64,8 @@ PersistenceTestEnvironment::PersistenceTestEnvironment(DiskCount numDisks, const
}
}
-PersistenceTestUtils::PersistenceTestUtils()
-{
-}
-
-PersistenceTestUtils::~PersistenceTestUtils()
-{
-}
+PersistenceTestUtils::PersistenceTestUtils() = default;
+PersistenceTestUtils::~PersistenceTestUtils() = default;
std::string
PersistenceTestUtils::dumpBucket(const document::BucketId& bid, uint16_t disk) {
diff --git a/storage/src/tests/persistence/persistencetestutils.h b/storage/src/tests/persistence/persistencetestutils.h
index 8f883115e9d..e418765ecac 100644
--- a/storage/src/tests/persistence/persistencetestutils.h
+++ b/storage/src/tests/persistence/persistencetestutils.h
@@ -11,6 +11,7 @@
#include <vespa/persistence/spi/persistenceprovider.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <vespa/document/base/testdocman.h>
+#include <vespa/vespalib/gtest/gtest.h>
namespace storage {
@@ -34,7 +35,7 @@ struct PersistenceTestEnvironment {
std::vector<std::unique_ptr<PersistenceUtil> > _diskEnvs;
};
-class PersistenceTestUtils : public CppUnit::TestFixture {
+class PersistenceTestUtils : public testing::Test {
public:
std::unique_ptr<PersistenceTestEnvironment> _env;
@@ -50,7 +51,7 @@ public:
void setupDisks(uint32_t disks);
- void tearDown() override {
+ void TearDown() override {
_env.reset();
}
@@ -202,7 +203,7 @@ public:
class SingleDiskPersistenceTestUtils : public PersistenceTestUtils
{
public:
- void setUp() override {
+ void SetUp() override {
setupDisks(1);
}
};
diff --git a/storage/src/tests/persistence/persistencethread_splittest.cpp b/storage/src/tests/persistence/persistencethread_splittest.cpp
index 9c10b9987e0..ea7dce96e0c 100644
--- a/storage/src/tests/persistence/persistencethread_splittest.cpp
+++ b/storage/src/tests/persistence/persistencethread_splittest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/persistencethread.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <vespa/persistence/spi/test.h>
@@ -9,14 +8,14 @@
using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
namespace {
- spi::LoadType defaultLoadType(0, "default");
+spi::LoadType defaultLoadType(0, "default");
}
-struct PersistenceThread_SplitTest : public SingleDiskPersistenceTestUtils
-{
+struct PersistenceThreadSplitTest : public SingleDiskPersistenceTestUtils {
enum SplitCase {
TOO_MANY_DOCS_SPLIT_ONCE, // Only one split needed to divide
TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS, // Multiple bits needed to divide
@@ -26,7 +25,7 @@ struct PersistenceThread_SplitTest : public SingleDiskPersistenceTestUtils
TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS,
TOO_LARGE_DOCS_SINGLE_DOC, // Cannot split single doc even if too large
TOO_LARGE_DOCS_ACTUALLY_NOT, // Other copy is too large, not this one
- // Need to split to X bits to get in line with other copy or distr.
+ // Need to split to X bits to get in line with other copy or distr.
SPLIT_TOO_LITTLE_SINGLE_SPLIT, // Split all to one target
SPLIT_TOO_LITTLE_JUST_RIGHT, // Just manage to split in two at that lvl
SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH, // Has to split shorter
@@ -35,54 +34,60 @@ struct PersistenceThread_SplitTest : public SingleDiskPersistenceTestUtils
};
void doTest(SplitCase);
+};
- void testTooManyDocsSplitOnce()
- { doTest(TOO_MANY_DOCS_SPLIT_ONCE); }
- void testTooManyDocsSplitMulti()
- { doTest(TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS); }
- void testTooManyDocsActuallyNot()
- { doTest(TOO_MANY_DOCS_ACTUALLY_NOT); }
- void testTooLargeDocsSplitOnce()
- { doTest(TOO_LARGE_DOCS_SPLIT_ONCE); }
- void testTooLargeDocsSplitMulti()
- { doTest(TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS); }
- void testTooLargeDocsSingleDoc()
- { doTest(TOO_LARGE_DOCS_SINGLE_DOC); }
- void testTooLargeDocsActuallyNot()
- { doTest(TOO_LARGE_DOCS_ACTUALLY_NOT); }
- void testSplitTooLittleSingleSplit()
- { doTest(SPLIT_TOO_LITTLE_SINGLE_SPLIT); }
- void testSplitTooLittleJustRight()
- { doTest(SPLIT_TOO_LITTLE_JUST_RIGHT); }
- void testSplitTooLittleSplitTowardsEnough()
- { doTest(SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH); }
- void testInconsistentSplitHasOneBitFallbackWhen1Doc() {
- doTest(SPLIT_INCONSISTENT_1_DOC);
- }
- void testInconsistentSplitHasOneBitFallbackWhenAllDocsHaveSameGid() {
- doTest(SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID);
- }
+TEST_F(PersistenceThreadSplitTest, split_single_bit_for_too_many_docs) {
+ doTest(TOO_MANY_DOCS_SPLIT_ONCE);
+}
- CPPUNIT_TEST_SUITE(PersistenceThread_SplitTest);
- CPPUNIT_TEST(testTooManyDocsSplitOnce);
- CPPUNIT_TEST(testTooManyDocsSplitMulti);
- CPPUNIT_TEST(testTooManyDocsActuallyNot);
- CPPUNIT_TEST(testTooLargeDocsSplitOnce);
- CPPUNIT_TEST(testTooLargeDocsSplitMulti);
- CPPUNIT_TEST(testTooLargeDocsSingleDoc);
- CPPUNIT_TEST(testTooLargeDocsActuallyNot);
- CPPUNIT_TEST(testSplitTooLittleSingleSplit);
- CPPUNIT_TEST(testSplitTooLittleJustRight);
- CPPUNIT_TEST(testSplitTooLittleSplitTowardsEnough);
- CPPUNIT_TEST(testInconsistentSplitHasOneBitFallbackWhen1Doc);
- CPPUNIT_TEST(testInconsistentSplitHasOneBitFallbackWhenAllDocsHaveSameGid);
- CPPUNIT_TEST_SUITE_END();
-};
+TEST_F(PersistenceThreadSplitTest, bucket_split_requires_multiple_bit_increase_for_too_many_docs) {
+ doTest(TOO_MANY_DOCS_SPLIT_MULTIPLE_BITS);
+}
-CPPUNIT_TEST_SUITE_REGISTRATION(PersistenceThread_SplitTest);
+TEST_F(PersistenceThreadSplitTest, false_positive_too_many_docs) {
+ doTest(TOO_MANY_DOCS_ACTUALLY_NOT);
+}
+
+TEST_F(PersistenceThreadSplitTest, split_single_bit_for_too_large_docs) {
+ doTest(TOO_LARGE_DOCS_SPLIT_ONCE);
+}
+
+TEST_F(PersistenceThreadSplitTest, bucket_split_requires_multiple_bit_increase_for_too_large_docs) {
+ doTest(TOO_LARGE_DOCS_SPLIT_MULTIPLE_BITS);
+}
+
+TEST_F(PersistenceThreadSplitTest, cannot_split_bucket_with_single_too_large_document) {
+ doTest(TOO_LARGE_DOCS_SINGLE_DOC);
+}
+
+TEST_F(PersistenceThreadSplitTest, false_positive_too_large_docs) {
+ doTest(TOO_LARGE_DOCS_ACTUALLY_NOT);
+}
+
+TEST_F(PersistenceThreadSplitTest, request_can_specify_minimum_split_bit_count) {
+ doTest(SPLIT_TOO_LITTLE_SINGLE_SPLIT);
+}
+
+// TODO verify that the name actually matches what the test does...
+TEST_F(PersistenceThreadSplitTest, can_split_into_2_targets_at_max_split_level) {
+ doTest(SPLIT_TOO_LITTLE_JUST_RIGHT);
+}
+
+// TODO verify that the name actually matches what the test does...
+TEST_F(PersistenceThreadSplitTest, actual_split_level_can_be_lower_than_max_level) {
+ doTest(SPLIT_TOO_LITTLE_SPLIT_TOWARDS_ENOUGH);
+}
+
+TEST_F(PersistenceThreadSplitTest, inconsistent_split_has_one_bit_fallback_when_1_doc) {
+ doTest(SPLIT_INCONSISTENT_1_DOC);
+}
+
+TEST_F(PersistenceThreadSplitTest, inconsistent_split_has_one_bit_fallback_when_all_docs_have_same_gid) {
+ doTest(SPLIT_INCONSISTENT_ALL_DOCS_SAME_GID);
+}
void
-PersistenceThread_SplitTest::doTest(SplitCase splitCase)
+PersistenceThreadSplitTest::doTest(SplitCase splitCase)
{
uint32_t maxCount = 4;
uint32_t maxSize = 1000 * 1000;
@@ -119,8 +124,8 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
resultSplitLevel = 3;
break;
case TOO_LARGE_DOCS_SINGLE_DOC:
- // It is possible for bucket to be inconsistent being big enough
- // to split in other copy but this copy has only 1 too big doc.
+            // It is possible for the bucket to be inconsistent: it may be big enough
+            // to split in the other copy while this copy has only 1 doc that is too big.
docCount = 1;
docSize = 3000 * 1000;
splitLevelToDivide = 3;
@@ -176,7 +181,7 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
}
uint64_t location = 0;
- uint64_t splitMask = 1 << (splitLevelToDivide - 1);
+ uint64_t splitMask = 1ULL << (splitLevelToDivide - 1);
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
spi::Bucket bucket(makeSpiBucket(document::BucketId(currentSplitLevel, 1)));
@@ -201,8 +206,7 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
std::unique_ptr<PersistenceThread> thread(createPersistenceThread(0));
getNode().getStateUpdater().setClusterState(
- lib::ClusterState::CSP(
- new lib::ClusterState("distributor:1 storage:1")));
+ std::make_shared<lib::ClusterState>("distributor:1 storage:1"));
api::SplitBucketCommand cmd(makeDocumentBucket(document::BucketId(currentSplitLevel, 1)));
cmd.setMaxSplitBits(maxBits);
cmd.setMinSplitBits(minBits);
@@ -211,10 +215,11 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
cmd.setSourceIndex(0);
MessageTracker::UP result(thread->handleSplitBucket(cmd));
api::ReturnCode code(result->getResult());
- CPPUNIT_ASSERT_EQUAL(error, code);
- if (!code.success()) return;
- api::SplitBucketReply& reply(
- dynamic_cast<api::SplitBucketReply&>(*result->getReply()));
+ EXPECT_EQ(error, code);
+ if (!code.success()) {
+ return;
+ }
+ auto& reply = dynamic_cast<api::SplitBucketReply&>(*result->getReply());
std::set<std::string> expected;
for (uint32_t i=0; i<resultBuckets; ++i) {
document::BucketId b(resultSplitLevel,
@@ -230,7 +235,7 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
ost << b << " - " << b.getUsedBits();
actual.insert(ost.str());
}
- CPPUNIT_ASSERT_EQUAL(expected, actual);
+ EXPECT_EQ(expected, actual);
}
} // storage
diff --git a/storage/src/tests/persistence/processalltest.cpp b/storage/src/tests/persistence/processalltest.cpp
index 11754d50961..2bf7f7c3855 100644
--- a/storage/src/tests/persistence/processalltest.cpp
+++ b/storage/src/tests/persistence/processalltest.cpp
@@ -2,44 +2,20 @@
#include <vespa/document/base/testdocman.h>
#include <vespa/storage/persistence/processallhandler.h>
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/messages.h>
#include <vespa/documentapi/loadtypes/loadtype.h>
#include <tests/persistence/persistencetestutils.h>
#include <vespa/document/test/make_document_bucket.h>
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class ProcessAllHandlerTest : public SingleDiskPersistenceTestUtils
-{
- CPPUNIT_TEST_SUITE(ProcessAllHandlerTest);
- CPPUNIT_TEST(testRemoveLocation);
- CPPUNIT_TEST(testRemoveLocationDocumentSubset);
- CPPUNIT_TEST(testRemoveLocationUnknownDocType);
- CPPUNIT_TEST(testRemoveLocationBogusSelection);
- CPPUNIT_TEST(testStat);
- CPPUNIT_TEST(testStatWithRemove);
- CPPUNIT_TEST(testStatWholeBucket);
- CPPUNIT_TEST_SUITE_END();
-
-public:
- void testRemoveLocation();
- void testRemoveLocationDocumentSubset();
- void testRemoveLocationUnknownDocType();
- void testRemoveLocationEmptySelection();
- void testRemoveLocationBogusSelection();
- void testStat();
- void testStatWithRemove();
- void testStatWholeBucket();
+class ProcessAllHandlerTest : public SingleDiskPersistenceTestUtils {
};
-CPPUNIT_TEST_SUITE_REGISTRATION(ProcessAllHandlerTest);
-
-void
-ProcessAllHandlerTest::testRemoveLocation()
-{
+TEST_F(ProcessAllHandlerTest, remove_location) {
document::BucketId bucketId(16, 4);
doPut(4, spi::Timestamp(1234));
doPut(4, spi::Timestamp(2345));
@@ -49,16 +25,12 @@ ProcessAllHandlerTest::testRemoveLocation()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
handler.handleRemoveLocation(removeLocation, context);
- CPPUNIT_ASSERT_EQUAL(
- std::string(
- "DocEntry(1234, 1, id:mail:testdoctype1:n=4:3619.html)\n"
- "DocEntry(2345, 1, id:mail:testdoctype1:n=4:4008.html)\n"),
- dumpBucket(bucketId));
+ EXPECT_EQ("DocEntry(1234, 1, id:mail:testdoctype1:n=4:3619.html)\n"
+ "DocEntry(2345, 1, id:mail:testdoctype1:n=4:4008.html)\n",
+ dumpBucket(bucketId));
}
-void
-ProcessAllHandlerTest::testRemoveLocationDocumentSubset()
-{
+TEST_F(ProcessAllHandlerTest, remove_location_document_subset) {
document::BucketId bucketId(16, 4);
ProcessAllHandler handler(getEnv(), getPersistenceProvider());
@@ -74,70 +46,49 @@ ProcessAllHandlerTest::testRemoveLocationDocumentSubset()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
handler.handleRemoveLocation(removeLocation, context);
- CPPUNIT_ASSERT_EQUAL(
- std::string("DocEntry(100, 1, id:mail:testdoctype1:n=4:3619.html)\n"
- "DocEntry(101, 0, Doc(id:mail:testdoctype1:n=4:33113.html))\n"
- "DocEntry(102, 1, id:mail:testdoctype1:n=4:62608.html)\n"
- "DocEntry(103, 0, Doc(id:mail:testdoctype1:n=4:26566.html))\n"
- "DocEntry(104, 1, id:mail:testdoctype1:n=4:56061.html)\n"
- "DocEntry(105, 0, Doc(id:mail:testdoctype1:n=4:20019.html))\n"
- "DocEntry(106, 1, id:mail:testdoctype1:n=4:49514.html)\n"
- "DocEntry(107, 0, Doc(id:mail:testdoctype1:n=4:13472.html))\n"
- "DocEntry(108, 1, id:mail:testdoctype1:n=4:42967.html)\n"
- "DocEntry(109, 0, Doc(id:mail:testdoctype1:n=4:6925.html))\n"),
- dumpBucket(bucketId));
+ EXPECT_EQ("DocEntry(100, 1, id:mail:testdoctype1:n=4:3619.html)\n"
+ "DocEntry(101, 0, Doc(id:mail:testdoctype1:n=4:33113.html))\n"
+ "DocEntry(102, 1, id:mail:testdoctype1:n=4:62608.html)\n"
+ "DocEntry(103, 0, Doc(id:mail:testdoctype1:n=4:26566.html))\n"
+ "DocEntry(104, 1, id:mail:testdoctype1:n=4:56061.html)\n"
+ "DocEntry(105, 0, Doc(id:mail:testdoctype1:n=4:20019.html))\n"
+ "DocEntry(106, 1, id:mail:testdoctype1:n=4:49514.html)\n"
+ "DocEntry(107, 0, Doc(id:mail:testdoctype1:n=4:13472.html))\n"
+ "DocEntry(108, 1, id:mail:testdoctype1:n=4:42967.html)\n"
+ "DocEntry(109, 0, Doc(id:mail:testdoctype1:n=4:6925.html))\n",
+ dumpBucket(bucketId));
}
-void
-ProcessAllHandlerTest::testRemoveLocationUnknownDocType()
-{
+TEST_F(ProcessAllHandlerTest, remove_location_throws_exception_on_unknown_doc_type) {
document::BucketId bucketId(16, 4);
doPut(4, spi::Timestamp(1234));
api::RemoveLocationCommand
removeLocation("unknowndoctype.headerval % 2 == 0", makeDocumentBucket(bucketId));
- bool gotException = false;
- try {
- ProcessAllHandler handler(getEnv(), getPersistenceProvider());
- spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
- handler.handleRemoveLocation(removeLocation, context);
- } catch (...) {
- gotException = true;
- }
- CPPUNIT_ASSERT(gotException);
+ ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+ spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+ ASSERT_THROW(handler.handleRemoveLocation(removeLocation, context), std::exception);
- CPPUNIT_ASSERT_EQUAL(
- std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"),
- dumpBucket(bucketId));
+ EXPECT_EQ("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n",
+ dumpBucket(bucketId));
}
-void
-ProcessAllHandlerTest::testRemoveLocationBogusSelection()
-{
+TEST_F(ProcessAllHandlerTest, remove_location_throws_exception_on_bogus_selection) {
document::BucketId bucketId(16, 4);
doPut(4, spi::Timestamp(1234));
api::RemoveLocationCommand removeLocation("id.bogus != badgers", makeDocumentBucket(bucketId));
- bool gotException = false;
- try {
- ProcessAllHandler handler(getEnv(), getPersistenceProvider());
- spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
- handler.handleRemoveLocation(removeLocation, context);
- } catch (...) {
- gotException = true;
- }
- CPPUNIT_ASSERT(gotException);
+ ProcessAllHandler handler(getEnv(), getPersistenceProvider());
+ spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
+ ASSERT_THROW(handler.handleRemoveLocation(removeLocation, context), std::exception);
- CPPUNIT_ASSERT_EQUAL(
- std::string("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n"),
- dumpBucket(bucketId));
+ EXPECT_EQ("DocEntry(1234, 0, Doc(id:mail:testdoctype1:n=4:3619.html))\n",
+ dumpBucket(bucketId));
}
-void
-ProcessAllHandlerTest::testStat()
-{
+TEST_F(ProcessAllHandlerTest, bucket_stat_request_returns_document_metadata_matching_selection) {
document::BucketId bucketId(16, 4);
ProcessAllHandler handler(getEnv(), getPersistenceProvider());
@@ -153,10 +104,9 @@ ProcessAllHandlerTest::testStat()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
- CPPUNIT_ASSERT(tracker->getReply().get());
- api::StatBucketReply& reply =
- dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+ ASSERT_TRUE(tracker->getReply().get());
+ auto& reply = dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+ EXPECT_EQ(api::ReturnCode::OK, reply.getResult().getResult());
vespalib::string expected =
"Persistence bucket BucketId(0x4000000000000004), partition 0\n"
@@ -167,12 +117,10 @@ ProcessAllHandlerTest::testStat()
" Timestamp: 108, Doc(id:mail:testdoctype1:n=4:42967.html), gid(0x04000000f19ece1668e6de48), size: 206\n";
- CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+ EXPECT_EQ(expected, reply.getResults());
}
-void
-ProcessAllHandlerTest::testStatWithRemove()
-{
+TEST_F(ProcessAllHandlerTest, stat_bucket_request_can_return_removed_entries) {
document::BucketId bucketId(16, 4);
ProcessAllHandler handler(getEnv(), getPersistenceProvider());
@@ -191,10 +139,9 @@ ProcessAllHandlerTest::testStatWithRemove()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
- CPPUNIT_ASSERT(tracker->getReply().get());
- api::StatBucketReply& reply =
- dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+ ASSERT_TRUE(tracker->getReply().get());
+ auto& reply = dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+ EXPECT_EQ(api::ReturnCode::OK, reply.getResult().getResult());
vespalib::string expected =
"Persistence bucket BucketId(0x4000000000000004), partition 0\n"
@@ -219,13 +166,11 @@ ProcessAllHandlerTest::testStatWithRemove()
" Timestamp: 208, id:mail:testdoctype1:n=4:42967.html, gid(0x04000000f19ece1668e6de48) (remove)\n"
" Timestamp: 209, id:mail:testdoctype1:n=4:6925.html, gid(0x04000000667c0b3cada830be) (remove)\n";
- CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+ EXPECT_EQ(expected, reply.getResults());
}
-
-void
-ProcessAllHandlerTest::testStatWholeBucket()
-{
+// TODO is this test necessary? It does not seem to test anything more than the tests above
+TEST_F(ProcessAllHandlerTest, bucket_stat_request_can_return_all_put_entries_in_bucket) {
document::BucketId bucketId(16, 4);
ProcessAllHandler handler(getEnv(), getPersistenceProvider());
@@ -240,10 +185,9 @@ ProcessAllHandlerTest::testStatWholeBucket()
spi::Context context(documentapi::LoadType::DEFAULT, 0, 0);
MessageTracker::UP tracker = handler.handleStatBucket(statBucket, context);
- CPPUNIT_ASSERT(tracker->getReply().get());
- api::StatBucketReply& reply =
- dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
- CPPUNIT_ASSERT_EQUAL(api::ReturnCode::OK, reply.getResult().getResult());
+ ASSERT_TRUE(tracker->getReply().get());
+ auto& reply = dynamic_cast<api::StatBucketReply&>(*tracker->getReply().get());
+ EXPECT_EQ(api::ReturnCode::OK, reply.getResult().getResult());
vespalib::string expected =
"Persistence bucket BucketId(0x4000000000000004), partition 0\n"
@@ -258,7 +202,7 @@ ProcessAllHandlerTest::testStatWholeBucket()
" Timestamp: 108, Doc(id:mail:testdoctype1:n=4:42967.html), gid(0x04000000f19ece1668e6de48), size: 206\n"
" Timestamp: 109, Doc(id:mail:testdoctype1:n=4:6925.html), gid(0x04000000667c0b3cada830be), size: 130\n";
- CPPUNIT_ASSERT_EQUAL(expected, reply.getResults());
+ EXPECT_EQ(expected, reply.getResults());
}
}
diff --git a/storage/src/tests/persistence/provider_error_wrapper_test.cpp b/storage/src/tests/persistence/provider_error_wrapper_test.cpp
index b6b87b33666..36238abb238 100644
--- a/storage/src/tests/persistence/provider_error_wrapper_test.cpp
+++ b/storage/src/tests/persistence/provider_error_wrapper_test.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/persistence/spi/test.h>
#include <tests/persistence/persistencetestutils.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
@@ -9,25 +8,9 @@ using storage::spi::test::makeSpiBucket;
namespace storage {
-class ProviderErrorWrapperTest : public SingleDiskPersistenceTestUtils {
-public:
- CPPUNIT_TEST_SUITE(ProviderErrorWrapperTest);
- CPPUNIT_TEST(fatal_error_invokes_listener);
- CPPUNIT_TEST(resource_exhaustion_error_invokes_listener);
- CPPUNIT_TEST(listener_not_invoked_on_success);
- CPPUNIT_TEST(listener_not_invoked_on_regular_errors);
- CPPUNIT_TEST(multiple_listeners_can_be_registered);
- CPPUNIT_TEST_SUITE_END();
-
- void fatal_error_invokes_listener();
- void resource_exhaustion_error_invokes_listener();
- void listener_not_invoked_on_success();
- void listener_not_invoked_on_regular_errors();
- void multiple_listeners_can_be_registered();
+struct ProviderErrorWrapperTest : SingleDiskPersistenceTestUtils {
};
-CPPUNIT_TEST_SUITE_REGISTRATION(ProviderErrorWrapperTest);
-
namespace {
struct MockErrorListener : ProviderErrorListener {
@@ -70,61 +53,61 @@ struct Fixture {
void check_no_listener_invoked_for_error(MockErrorListener& listener, spi::Result::ErrorType error) {
providerWrapper.setResult(spi::Result(error, "beep boop"));
perform_spi_operation();
- CPPUNIT_ASSERT(!listener._seen_fatal_error);
- CPPUNIT_ASSERT(!listener._seen_resource_exhaustion_error);
+ EXPECT_FALSE(listener._seen_fatal_error);
+ EXPECT_FALSE(listener._seen_resource_exhaustion_error);
}
};
}
-void ProviderErrorWrapperTest::fatal_error_invokes_listener() {
+TEST_F(ProviderErrorWrapperTest, fatal_error_invokes_listener) {
Fixture f(getPersistenceProvider());
auto listener = std::make_shared<MockErrorListener>();
f.errorWrapper.register_error_listener(listener);
f.providerWrapper.setResult(spi::Result(spi::Result::FATAL_ERROR, "eject! eject!"));
- CPPUNIT_ASSERT(!listener->_seen_fatal_error);
+ EXPECT_FALSE(listener->_seen_fatal_error);
f.perform_spi_operation();
- CPPUNIT_ASSERT(!listener->_seen_resource_exhaustion_error);
- CPPUNIT_ASSERT(listener->_seen_fatal_error);
- CPPUNIT_ASSERT_EQUAL(vespalib::string("eject! eject!"), listener->_fatal_error);
+ EXPECT_FALSE(listener->_seen_resource_exhaustion_error);
+ EXPECT_TRUE(listener->_seen_fatal_error);
+ EXPECT_EQ(vespalib::string("eject! eject!"), listener->_fatal_error);
}
-void ProviderErrorWrapperTest::resource_exhaustion_error_invokes_listener() {
+TEST_F(ProviderErrorWrapperTest, resource_exhaustion_error_invokes_listener) {
Fixture f(getPersistenceProvider());
auto listener = std::make_shared<MockErrorListener>();
f.errorWrapper.register_error_listener(listener);
f.providerWrapper.setResult(spi::Result(spi::Result::RESOURCE_EXHAUSTED, "out of juice"));
- CPPUNIT_ASSERT(!listener->_seen_resource_exhaustion_error);
+ EXPECT_FALSE(listener->_seen_resource_exhaustion_error);
f.perform_spi_operation();
- CPPUNIT_ASSERT(!listener->_seen_fatal_error);
- CPPUNIT_ASSERT(listener->_seen_resource_exhaustion_error);
- CPPUNIT_ASSERT_EQUAL(vespalib::string("out of juice"), listener->_resource_exhaustion_error);
+ EXPECT_FALSE(listener->_seen_fatal_error);
+ EXPECT_TRUE(listener->_seen_resource_exhaustion_error);
+ EXPECT_EQ(vespalib::string("out of juice"), listener->_resource_exhaustion_error);
}
-void ProviderErrorWrapperTest::listener_not_invoked_on_success() {
+TEST_F(ProviderErrorWrapperTest, listener_not_invoked_on_success) {
Fixture f(getPersistenceProvider());
auto listener = std::make_shared<MockErrorListener>();
f.errorWrapper.register_error_listener(listener);
f.perform_spi_operation();
- CPPUNIT_ASSERT(!listener->_seen_fatal_error);
- CPPUNIT_ASSERT(!listener->_seen_resource_exhaustion_error);
+ EXPECT_FALSE(listener->_seen_fatal_error);
+ EXPECT_FALSE(listener->_seen_resource_exhaustion_error);
}
-void ProviderErrorWrapperTest::listener_not_invoked_on_regular_errors() {
+TEST_F(ProviderErrorWrapperTest, listener_not_invoked_on_regular_errors) {
Fixture f(getPersistenceProvider());
auto listener = std::make_shared<MockErrorListener>();
f.errorWrapper.register_error_listener(listener);
- f.check_no_listener_invoked_for_error(*listener, spi::Result::TRANSIENT_ERROR);
- f.check_no_listener_invoked_for_error(*listener, spi::Result::PERMANENT_ERROR);
+ EXPECT_NO_FATAL_FAILURE(f.check_no_listener_invoked_for_error(*listener, spi::Result::TRANSIENT_ERROR));
+ EXPECT_NO_FATAL_FAILURE(f.check_no_listener_invoked_for_error(*listener, spi::Result::PERMANENT_ERROR));
}
-void ProviderErrorWrapperTest::multiple_listeners_can_be_registered() {
+TEST_F(ProviderErrorWrapperTest, multiple_listeners_can_be_registered) {
Fixture f(getPersistenceProvider());
auto listener1 = std::make_shared<MockErrorListener>();
auto listener2 = std::make_shared<MockErrorListener>();
@@ -134,8 +117,8 @@ void ProviderErrorWrapperTest::multiple_listeners_can_be_registered() {
f.providerWrapper.setResult(spi::Result(spi::Result::RESOURCE_EXHAUSTED, "out of juice"));
f.perform_spi_operation();
- CPPUNIT_ASSERT(listener1->_seen_resource_exhaustion_error);
- CPPUNIT_ASSERT(listener2->_seen_resource_exhaustion_error);
+ EXPECT_TRUE(listener1->_seen_resource_exhaustion_error);
+ EXPECT_TRUE(listener2->_seen_resource_exhaustion_error);
}
} // ns storage
diff --git a/storage/src/tests/persistence/splitbitdetectortest.cpp b/storage/src/tests/persistence/splitbitdetectortest.cpp
index 01baa8f4e98..a2d17117886 100644
--- a/storage/src/tests/persistence/splitbitdetectortest.cpp
+++ b/storage/src/tests/persistence/splitbitdetectortest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/storage/persistence/splitbitdetector.h>
#include <vespa/vespalib/io/fileutil.h>
@@ -9,67 +8,37 @@
#include <vespa/document/base/testdocman.h>
#include <vespa/document/bucket/bucketidfactory.h>
#include <vespa/metrics/loadmetric.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <algorithm>
using storage::spi::test::makeSpiBucket;
+using namespace ::testing;
namespace storage {
namespace {
- spi::LoadType defaultLoadType(0, "default");
+spi::LoadType defaultLoadType(0, "default");
}
-struct SplitBitDetectorTest : public CppUnit::TestFixture {
- void testSingleUser();
- void testTwoUsers();
- void testMaxBits();
- void testMaxBitsOneBelowMax();
- void testUnsplittable();
- void testUnsplittableMinCount();
- void testEmpty();
- void testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc();
- void testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision();
- void findBucketCollisionIds();
-
- spi::DocEntry::UP
- generateDocEntry(uint32_t userId,
- uint32_t docNum,
- spi::Timestamp timestamp)
+struct SplitBitDetectorTest : Test {
+ document::TestDocMan testDocMan;
+ spi::dummy::DummyPersistence provider;
+ spi::Bucket bucket;
+ spi::Context context;
+
+ SplitBitDetectorTest()
+ : testDocMan(),
+ provider(testDocMan.getTypeRepoSP(), 1),
+ bucket(makeSpiBucket(document::BucketId(1, 1))),
+ context(defaultLoadType, spi::Priority(0),
+ spi::Trace::TraceLevel(0))
{
- std::ostringstream ost;
- ost << "id:storage_test:testdoctype1:n=" << userId << ":" << docNum;
- return spi::DocEntry::UP(new spi::DocEntry(
- timestamp, 0, document::DocumentId(ost.str())));
- };
-
- CPPUNIT_TEST_SUITE(SplitBitDetectorTest);
- CPPUNIT_TEST(testSingleUser);
- CPPUNIT_TEST(testTwoUsers);
- CPPUNIT_TEST(testMaxBits);
- CPPUNIT_TEST(testMaxBitsOneBelowMax);
- CPPUNIT_TEST(testUnsplittable);
- CPPUNIT_TEST(testUnsplittableMinCount);
- CPPUNIT_TEST(testEmpty);
- CPPUNIT_TEST(testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc);
- CPPUNIT_TEST(testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision);
- CPPUNIT_TEST_DISABLED(findBucketCollisionIds);
- CPPUNIT_TEST_SUITE_END();
+ provider.getPartitionStates();
+ provider.createBucket(bucket, context);
+ }
};
-CPPUNIT_TEST_SUITE_REGISTRATION(SplitBitDetectorTest);
-
-void
-SplitBitDetectorTest::testTwoUsers()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, two_users) {
std::vector<spi::DocEntry::UP> entries;
for (uint32_t i = 0; i < 5; ++i) {
document::Document::SP doc(
@@ -85,24 +54,12 @@ SplitBitDetectorTest::testTwoUsers()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(2: BucketId(0x0800000000000001), "
- "BucketId(0x0800000000000003))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(2: BucketId(0x0800000000000001), "
+ "BucketId(0x0800000000000003))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testSingleUser()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, single_user) {
std::vector<spi::DocEntry::UP> entries;
for (uint32_t i = 0; i < 10; ++i) {
document::Document::SP doc(
@@ -112,25 +69,14 @@ SplitBitDetectorTest::testSingleUser()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(33: BucketId(0x8400000000000001), "
- "BucketId(0x8400000100000001))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(33: BucketId(0x8400000000000001), "
+ "BucketId(0x8400000100000001))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testMaxBits()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
+TEST_F(SplitBitDetectorTest, max_bits) {
int minContentSize = 1, maxContentSize = 1;
- provider.createBucket(bucket, context);
-
std::vector<spi::DocEntry::UP> entries;
for (uint32_t seed = 0; seed < 10; ++seed) {
int location = 1;
@@ -141,62 +87,39 @@ SplitBitDetectorTest::testMaxBits()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 3, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(3: BucketId(0x0c00000000000001), "
- "[ BucketId(0x0c00000000000005) ])"),
- result.toString());
+ EXPECT_EQ("SplitTargets(3: BucketId(0x0c00000000000001), "
+ "[ BucketId(0x0c00000000000005) ])",
+ result.toString());
}
-void
-SplitBitDetectorTest::testMaxBitsOneBelowMax()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(15, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
+TEST_F(SplitBitDetectorTest, max_bits_one_below_max) {
+ spi::Bucket my_bucket(makeSpiBucket(document::BucketId(15, 1)));
int minContentSize = 1, maxContentSize = 1;
- provider.createBucket(bucket, context);
+ provider.createBucket(my_bucket, context);
std::vector<spi::DocEntry::UP> entries;
for (uint32_t seed = 0; seed < 10; ++seed) {
int location = 1 | (seed % 2 == 0 ? 0x8000 : 0);
document::Document::SP doc(testDocMan.createRandomDocumentAtLocation(
location, seed, minContentSize, maxContentSize));
- provider.put(bucket, spi::Timestamp(1000 + seed), doc, context);
+ provider.put(my_bucket, spi::Timestamp(1000 + seed), doc, context);
}
- //std::cerr << provider.dumpBucket(bucket) << "\n";
-
SplitBitDetector::Result result(
- SplitBitDetector::detectSplit(provider, bucket, 15, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(error: No use in trying to split "
- "Bucket(0x3c00000000000001, partition 0) when max split"
- " bit is set to 15.)"),
- result.toString());
-
- result = SplitBitDetector::detectSplit(provider, bucket, 16, context);
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(16: BucketId(0x4000000000000001), "
- "BucketId(0x4000000000008001))"),
- result.toString());
+ SplitBitDetector::detectSplit(provider, my_bucket, 15, context));
+ EXPECT_EQ("SplitTargets(error: No use in trying to split "
+ "Bucket(0x3c00000000000001, partition 0) when max split"
+ " bit is set to 15.)",
+ result.toString());
+
+ result = SplitBitDetector::detectSplit(provider, my_bucket, 16, context);
+ EXPECT_EQ("SplitTargets(16: BucketId(0x4000000000000001), "
+ "BucketId(0x4000000000008001))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testUnsplittable()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, unsplittable) {
std::vector<spi::DocEntry::UP> entries;
for (uint32_t i = 0; i < 10; ++i) {
@@ -207,24 +130,12 @@ SplitBitDetectorTest::testUnsplittable()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context, 100));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(58: BucketId(0xe94c074f00000001), "
- "BucketId(0xeb4c074f00000001))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(58: BucketId(0xe94c074f00000001), "
+ "BucketId(0xeb4c074f00000001))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testUnsplittableMinCount()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, unsplittable_min_count) {
std::vector<spi::DocEntry::UP> entries;
for (uint32_t i = 0; i < 10; ++i) {
@@ -236,66 +147,30 @@ SplitBitDetectorTest::testUnsplittableMinCount()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context, 5, 0));
// Still no other choice than split out to 58 bits regardless of minCount.
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(58: BucketId(0xe94c074f00000001), "
- "BucketId(0xeb4c074f00000001))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(58: BucketId(0xe94c074f00000001), "
+ "BucketId(0xeb4c074f00000001))",
+ result.toString());
}
-
-void
-SplitBitDetectorTest::testEmpty()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
-
+TEST_F(SplitBitDetectorTest, empty) {
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(source empty)"),
- result.toString());
+ EXPECT_EQ("SplitTargets(source empty)", result.toString());
}
-void
-SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
+TEST_F(SplitBitDetectorTest, zero_doc_limit_falls_back_to_one_bit_increase_with_1_doc) {
document::Document::SP doc(
testDocMan.createRandomDocumentAtLocation(1, 0, 1, 1));
provider.put(bucket, spi::Timestamp(1000), doc, context);
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context, 0, 0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(2: BucketId(0x0800000000000001), "
- "BucketId(0x0800000000000003))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(2: BucketId(0x0800000000000001), "
+ "BucketId(0x0800000000000003))",
+ result.toString());
}
-void
-SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision()
-{
- document::TestDocMan testDocMan;
- spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
- provider.getPartitionStates();
- spi::Bucket bucket(makeSpiBucket(document::BucketId(1, 1)));
- spi::Context context(defaultLoadType, spi::Priority(0),
- spi::Trace::TraceLevel(0));
-
- provider.createBucket(bucket, context);
+TEST_F(SplitBitDetectorTest, zero_doc_limit_falls_back_to_one_bit_increase_on_gid_collision) {
document::Document::SP doc(
testDocMan.createRandomDocumentAtLocation(1, 0, 1, 1));
provider.put(bucket, spi::Timestamp(1000), doc, context);
@@ -303,10 +178,9 @@ SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision()
SplitBitDetector::Result result(
SplitBitDetector::detectSplit(provider, bucket, 58, context, 0, 0));
- CPPUNIT_ASSERT_EQUAL(
- std::string("SplitTargets(2: BucketId(0x0800000000000001), "
- "BucketId(0x0800000000000003))"),
- result.toString());
+ EXPECT_EQ("SplitTargets(2: BucketId(0x0800000000000001), "
+ "BucketId(0x0800000000000003))",
+ result.toString());
}
/**
@@ -314,9 +188,7 @@ SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision()
* document IDs that map to the same 58-bit bucket ID. Disabled by default since
* it costs CPU to do this and is not necessary during normal testing.
*/
-void
-SplitBitDetectorTest::findBucketCollisionIds()
-{
+TEST_F(SplitBitDetectorTest, DISABLED_find_bucket_collision_ids) {
using document::DocumentId;
using document::BucketId;
diff --git a/storage/src/tests/persistence/testandsettest.cpp b/storage/src/tests/persistence/testandsettest.cpp
index 197aa95fc22..4c4a7c9a0be 100644
--- a/storage/src/tests/persistence/testandsettest.cpp
+++ b/storage/src/tests/persistence/testandsettest.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// @author Vegard Sjonfjell
-#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/persistencethread.h>
#include <tests/persistence/persistencetestutils.h>
#include <vespa/document/test/make_document_bucket.h>
@@ -14,14 +13,13 @@
using std::unique_ptr;
using std::shared_ptr;
-using namespace std::string_literals;
using storage::spi::test::makeSpiBucket;
using document::test::makeDocumentBucket;
+using namespace ::testing;
namespace storage {
-class TestAndSetTest : public SingleDiskPersistenceTestUtils
-{
+struct TestAndSetTest : SingleDiskPersistenceTestUtils {
static constexpr int MIN_DOCUMENT_SIZE = 0;
static constexpr int MAX_DOCUMENT_SIZE = 128;
static constexpr int RANDOM_SEED = 1234;
@@ -36,9 +34,8 @@ class TestAndSetTest : public SingleDiskPersistenceTestUtils
shared_ptr<document::Document> testDoc;
document::DocumentId testDocId;
-public:
- void setUp() override {
- SingleDiskPersistenceTestUtils::setUp();
+ void SetUp() override {
+ SingleDiskPersistenceTestUtils::SetUp();
spi::Context context(
spi::LoadType(0, "default"),
@@ -55,38 +52,11 @@ public:
testDocId = testDoc->getId();
}
- void tearDown() override {
+ void TearDown() override {
thread.reset(nullptr);
- SingleDiskPersistenceTestUtils::tearDown();
+ SingleDiskPersistenceTestUtils::TearDown();
}
- void conditional_put_not_executed_on_condition_mismatch();
- void conditional_put_executed_on_condition_match();
- void conditional_remove_not_executed_on_condition_mismatch();
- void conditional_remove_executed_on_condition_match();
- void conditional_update_not_executed_on_condition_mismatch();
- void conditional_update_executed_on_condition_match();
- void conditional_update_not_executed_when_no_document_and_no_auto_create();
- void conditional_update_executed_when_no_document_but_auto_create_is_enabled();
- void invalid_document_selection_should_fail();
- void conditional_put_to_non_existing_document_should_fail();
- void document_with_no_type_should_fail();
-
- CPPUNIT_TEST_SUITE(TestAndSetTest);
- CPPUNIT_TEST(conditional_put_not_executed_on_condition_mismatch);
- CPPUNIT_TEST(conditional_put_executed_on_condition_match);
- CPPUNIT_TEST(conditional_remove_not_executed_on_condition_mismatch);
- CPPUNIT_TEST(conditional_remove_executed_on_condition_match);
- CPPUNIT_TEST(conditional_update_not_executed_on_condition_mismatch);
- CPPUNIT_TEST(conditional_update_executed_on_condition_match);
- CPPUNIT_TEST(conditional_update_not_executed_when_no_document_and_no_auto_create);
- CPPUNIT_TEST(conditional_update_executed_when_no_document_but_auto_create_is_enabled);
- CPPUNIT_TEST(invalid_document_selection_should_fail);
- CPPUNIT_TEST(conditional_put_to_non_existing_document_should_fail);
- CPPUNIT_TEST(document_with_no_type_should_fail);
- CPPUNIT_TEST_SUITE_END();
-
-protected:
std::unique_ptr<api::UpdateCommand> conditional_update_test(
bool createIfMissing,
api::Timestamp updateTimestamp);
@@ -103,32 +73,29 @@ protected:
spi::DocumentMetaFlags removeFlag = spi::NONE);
};
-CPPUNIT_TEST_SUITE_REGISTRATION(TestAndSetTest);
-
-void TestAndSetTest::conditional_put_not_executed_on_condition_mismatch()
-{
+TEST_F(TestAndSetTest, conditional_put_not_executed_on_condition_mismatch) {
// Put document with mismatching header
api::Timestamp timestampOne = 0;
putTestDocument(false, timestampOne);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Conditionally replace document, but fail due to lack of woofy dog
api::Timestamp timestampTwo = 1;
api::PutCommand putTwo(makeDocumentBucket(BUCKET_ID), testDoc, timestampTwo);
setTestCondition(putTwo);
- CPPUNIT_ASSERT(thread->handlePut(putTwo)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handlePut(putTwo)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
}
-void TestAndSetTest::conditional_put_executed_on_condition_match()
-{
+TEST_F(TestAndSetTest, conditional_put_executed_on_condition_match) {
// Put document with matching header
api::Timestamp timestampOne = 0;
putTestDocument(true, timestampOne);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Update content of document
testDoc->setValue(testDoc->getField("content"), NEW_CONTENT);
@@ -138,51 +105,50 @@ void TestAndSetTest::conditional_put_executed_on_condition_match()
api::PutCommand putTwo(makeDocumentBucket(BUCKET_ID), testDoc, timestampTwo);
setTestCondition(putTwo);
- CPPUNIT_ASSERT(thread->handlePut(putTwo)->getResult() == api::ReturnCode::Result::OK);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
- expectedDocEntryString(timestampTwo, testDocId),
- dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handlePut(putTwo)->getResult().getResult(), api::ReturnCode::Result::OK);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId) +
+ expectedDocEntryString(timestampTwo, testDocId),
+ dumpBucket(BUCKET_ID));
assertTestDocumentFoundAndMatchesContent(NEW_CONTENT);
}
-void TestAndSetTest::conditional_remove_not_executed_on_condition_mismatch()
-{
+TEST_F(TestAndSetTest, conditional_remove_not_executed_on_condition_mismatch) {
// Put document with mismatching header
api::Timestamp timestampOne = 0;
putTestDocument(false, timestampOne);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Conditionally remove document, fail in doing so
api::Timestamp timestampTwo = 1;
api::RemoveCommand remove(makeDocumentBucket(BUCKET_ID), testDocId, timestampTwo);
setTestCondition(remove);
- CPPUNIT_ASSERT(thread->handleRemove(remove)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleRemove(remove)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Assert that the document is still there
retrieveTestDocument();
}
-void TestAndSetTest::conditional_remove_executed_on_condition_match()
-{
+TEST_F(TestAndSetTest, conditional_remove_executed_on_condition_match) {
// Put document with matching header
api::Timestamp timestampOne = 0;
putTestDocument(true, timestampOne);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
// Conditionally remove document, succeed in doing so
api::Timestamp timestampTwo = 1;
api::RemoveCommand remove(makeDocumentBucket(BUCKET_ID), testDocId, timestampTwo);
setTestCondition(remove);
- CPPUNIT_ASSERT(thread->handleRemove(remove)->getResult() == api::ReturnCode::Result::OK);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
- expectedDocEntryString(timestampTwo, testDocId, spi::REMOVE_ENTRY),
- dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleRemove(remove)->getResult().getResult(), api::ReturnCode::Result::OK);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId) +
+ expectedDocEntryString(timestampTwo, testDocId, spi::REMOVE_ENTRY),
+ dumpBucket(BUCKET_ID));
}
std::unique_ptr<api::UpdateCommand> TestAndSetTest::conditional_update_test(
@@ -200,66 +166,63 @@ std::unique_ptr<api::UpdateCommand> TestAndSetTest::conditional_update_test(
return updateUp;
}
-void TestAndSetTest::conditional_update_not_executed_on_condition_mismatch()
-{
+TEST_F(TestAndSetTest, conditional_update_not_executed_on_condition_mismatch) {
api::Timestamp timestampOne = 0;
api::Timestamp timestampTwo = 1;
putTestDocument(false, timestampOne);
auto updateUp = conditional_update_test(false, timestampTwo);
- CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId),
- dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleUpdate(*updateUp)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId), dumpBucket(BUCKET_ID));
assertTestDocumentFoundAndMatchesContent(OLD_CONTENT);
}
-void TestAndSetTest::conditional_update_executed_on_condition_match()
-{
+TEST_F(TestAndSetTest, conditional_update_executed_on_condition_match) {
api::Timestamp timestampOne = 0;
api::Timestamp timestampTwo = 1;
putTestDocument(true, timestampOne);
auto updateUp = conditional_update_test(false, timestampTwo);
- CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::OK);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(timestampOne, testDocId) +
- expectedDocEntryString(timestampTwo, testDocId),
- dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleUpdate(*updateUp)->getResult().getResult(), api::ReturnCode::Result::OK);
+ EXPECT_EQ(expectedDocEntryString(timestampOne, testDocId) +
+ expectedDocEntryString(timestampTwo, testDocId),
+ dumpBucket(BUCKET_ID));
assertTestDocumentFoundAndMatchesContent(NEW_CONTENT);
}
-void TestAndSetTest::conditional_update_not_executed_when_no_document_and_no_auto_create() {
+TEST_F(TestAndSetTest, conditional_update_not_executed_when_no_document_and_no_auto_create) {
api::Timestamp updateTimestamp = 200;
auto updateUp = conditional_update_test(false, updateTimestamp);
- CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleUpdate(*updateUp)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
-void TestAndSetTest::conditional_update_executed_when_no_document_but_auto_create_is_enabled() {
+TEST_F(TestAndSetTest, conditional_update_executed_when_no_document_but_auto_create_is_enabled) {
api::Timestamp updateTimestamp = 200;
auto updateUp = conditional_update_test(true, updateTimestamp);
- CPPUNIT_ASSERT(thread->handleUpdate(*updateUp)->getResult() == api::ReturnCode::Result::OK);
- CPPUNIT_ASSERT_EQUAL(expectedDocEntryString(updateTimestamp, testDocId), dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handleUpdate(*updateUp)->getResult().getResult(), api::ReturnCode::Result::OK);
+ EXPECT_EQ(expectedDocEntryString(updateTimestamp, testDocId), dumpBucket(BUCKET_ID));
assertTestDocumentFoundAndMatchesContent(NEW_CONTENT);
}
-void TestAndSetTest::invalid_document_selection_should_fail()
-{
+TEST_F(TestAndSetTest, invalid_document_selection_should_fail) {
// Conditionally replace nonexisting document
// Fail early since document selection is invalid
api::Timestamp timestamp = 0;
api::PutCommand put(makeDocumentBucket(BUCKET_ID), testDoc, timestamp);
put.setCondition(documentapi::TestAndSetCondition("bjarne"));
- CPPUNIT_ASSERT(thread->handlePut(put)->getResult() == api::ReturnCode::Result::ILLEGAL_PARAMETERS);
- CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handlePut(put)->getResult().getResult(), api::ReturnCode::Result::ILLEGAL_PARAMETERS);
+ EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
-void TestAndSetTest::conditional_put_to_non_existing_document_should_fail()
-{
+TEST_F(TestAndSetTest, conditional_put_to_non_existing_document_should_fail) {
// Conditionally replace nonexisting document
// Fail since no document exists to match with test and set
api::Timestamp timestamp = 0;
@@ -267,12 +230,12 @@ void TestAndSetTest::conditional_put_to_non_existing_document_should_fail()
setTestCondition(put);
thread->handlePut(put);
- CPPUNIT_ASSERT(thread->handlePut(put)->getResult() == api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
- CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+ ASSERT_EQ(thread->handlePut(put)->getResult().getResult(),
+ api::ReturnCode::Result::TEST_AND_SET_CONDITION_FAILED);
+ EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
-void TestAndSetTest::document_with_no_type_should_fail()
-{
+TEST_F(TestAndSetTest, document_with_no_type_should_fail) {
// Conditionally replace nonexisting document
// Fail since no document exists to match with test and set
api::Timestamp timestamp = 0;
@@ -281,9 +244,9 @@ void TestAndSetTest::document_with_no_type_should_fail()
setTestCondition(remove);
auto code = thread->handleRemove(remove)->getResult();
- CPPUNIT_ASSERT(code == api::ReturnCode::Result::ILLEGAL_PARAMETERS);
- CPPUNIT_ASSERT(code.getMessage() == "Document id has no doctype");
- CPPUNIT_ASSERT_EQUAL(""s, dumpBucket(BUCKET_ID));
+ EXPECT_EQ(code.getResult(), api::ReturnCode::Result::ILLEGAL_PARAMETERS);
+ EXPECT_EQ(code.getMessage(), "Document id has no doctype");
+ EXPECT_EQ("", dumpBucket(BUCKET_ID));
}
document::Document::SP
@@ -306,10 +269,10 @@ document::Document::SP TestAndSetTest::retrieveTestDocument()
{
api::GetCommand get(makeDocumentBucket(BUCKET_ID), testDocId, "[all]");
auto tracker = thread->handleGet(get);
- CPPUNIT_ASSERT(tracker->getResult() == api::ReturnCode::Result::OK);
+ assert(tracker->getResult() == api::ReturnCode::Result::OK);
auto & reply = static_cast<api::GetReply &>(*tracker->getReply());
- CPPUNIT_ASSERT(reply.wasFound());
+ assert(reply.wasFound());
return reply.getDocument();
}
@@ -333,7 +296,7 @@ void TestAndSetTest::assertTestDocumentFoundAndMatchesContent(const document::Fi
auto doc = retrieveTestDocument();
auto & field = doc->getField("content");
- CPPUNIT_ASSERT_EQUAL(*doc->getValue(field), value);
+ EXPECT_EQ(*doc->getValue(field), value);
}
std::string TestAndSetTest::expectedDocEntryString(
diff --git a/storage/src/tests/storageserver/bucketintegritycheckertest.cpp b/storage/src/tests/storageserver/bucketintegritycheckertest.cpp
index 4ff980b7950..a4a6cbae9cf 100644
--- a/storage/src/tests/storageserver/bucketintegritycheckertest.cpp
+++ b/storage/src/tests/storageserver/bucketintegritycheckertest.cpp
@@ -8,7 +8,7 @@
#include <vespa/storage/storageserver/bucketintegritychecker.h>
#include <vespa/storageapi/message/persistence.h>
#include <tests/common/testhelper.h>
-#include <tests/common/storagelinktest.h>
+#include <tests/common/dummystoragelink.h>
#include <vespa/vespalib/io/fileutil.h>
#include <tests/common/teststorageapp.h>
@@ -218,13 +218,13 @@ void BucketIntegrityCheckerTest::testBasicFunctionality()
    // Answering a message on disk with no more buckets does not trigger a new repair
std::shared_ptr<RepairBucketReply> reply1(
new RepairBucketReply(*cmd3));
- CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply1));
+ CPPUNIT_ASSERT(checker.onUp(reply1));
FastOS_Thread::Sleep(10); // Give next message chance to come
ASSERT_COMMAND_COUNT(4, *dummyLink);
    // Answering a message on disk with more buckets triggers a new repair
std::shared_ptr<RepairBucketReply> reply2(
new RepairBucketReply(*cmd2));
- CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply2));
+ CPPUNIT_ASSERT(checker.onUp(reply2));
dummyLink->waitForMessages(5, _timeout);
FastOS_Thread::Sleep(10); // Give 6th message chance to come
ASSERT_COMMAND_COUNT(5, *dummyLink);
@@ -238,7 +238,7 @@ void BucketIntegrityCheckerTest::testBasicFunctionality()
std::shared_ptr<RepairBucketReply> reply3(
new RepairBucketReply(*cmd1));
reply3->setResult(api::ReturnCode(api::ReturnCode::IGNORED));
- CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply3));
+ CPPUNIT_ASSERT(checker.onUp(reply3));
dummyLink->waitForMessages(6, _timeout);
FastOS_Thread::Sleep(10); // Give 7th message chance to come
ASSERT_COMMAND_COUNT(6, *dummyLink);
@@ -252,7 +252,7 @@ void BucketIntegrityCheckerTest::testBasicFunctionality()
std::shared_ptr<RepairBucketReply> reply4(
new RepairBucketReply(*cmd4));
reply3->setResult(api::ReturnCode(api::ReturnCode::BUCKET_NOT_FOUND));
- CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply4));
+ CPPUNIT_ASSERT(checker.onUp(reply4));
FastOS_Thread::Sleep(10); // Give 7th message chance to come
ASSERT_COMMAND_COUNT(6, *dummyLink);
@@ -261,13 +261,13 @@ void BucketIntegrityCheckerTest::testBasicFunctionality()
std::shared_ptr<RepairBucketReply> reply5(
new RepairBucketReply(*cmd5, newInfo));
reply5->setAltered(true);
- CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply5));
+ CPPUNIT_ASSERT(checker.onUp(reply5));
// Finish run. New iteration should not start yet as min
// cycle time has not passed
std::shared_ptr<RepairBucketReply> reply6(
new RepairBucketReply(*cmd6));
- CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply6));
+ CPPUNIT_ASSERT(checker.onUp(reply6));
dummyLink->waitForMessages(7, _timeout);
ASSERT_COMMAND_COUNT(7, *dummyLink);
RepairBucketCommand *cmd7 = dynamic_cast<RepairBucketCommand*>(
@@ -277,7 +277,7 @@ void BucketIntegrityCheckerTest::testBasicFunctionality()
cmd7->getBucketId());
std::shared_ptr<RepairBucketReply> reply7(
new RepairBucketReply(*cmd7));
- CPPUNIT_ASSERT(StorageLinkTest::callOnUp(checker, reply7));
+ CPPUNIT_ASSERT(checker.onUp(reply7));
FastOS_Thread::Sleep(10); // Give 8th message chance to come
ASSERT_COMMAND_COUNT(7, *dummyLink);
diff --git a/storage/src/tests/storageserver/mergethrottlertest.cpp b/storage/src/tests/storageserver/mergethrottlertest.cpp
index 3d469fc4252..5815bbd67c8 100644
--- a/storage/src/tests/storageserver/mergethrottlertest.cpp
+++ b/storage/src/tests/storageserver/mergethrottlertest.cpp
@@ -3,7 +3,7 @@
#include <vespa/vespalib/util/document_runnable.h>
#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
#include <tests/common/testhelper.h>
-#include <tests/common/storagelinktest.h>
+#include <tests/common/dummystoragelink.h>
#include <tests/common/teststorageapp.h>
#include <tests/common/dummystoragelink.h>
#include <vespa/document/test/make_document_bucket.h>
@@ -595,7 +595,7 @@ MergeThrottlerTest::test42DistributorBehaviorDoesNotTakeOwnership()
// Flush throttler (synchronously). Should NOT generate a reply
// for the merge command, as it is not owned by the throttler
- StorageLinkTest::callOnFlush(*_throttlers[1], true);
+ _throttlers[1]->onFlush(true);
CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[1]->getNumCommands());
CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[1]->getNumReplies());
@@ -653,7 +653,7 @@ MergeThrottlerTest::testEndOfChainExecutionDoesNotTakeOwnership()
// Flush throttler (synchronously). Should NOT generate a reply
// for the merge command, as it is not owned by the throttler
- StorageLinkTest::callOnFlush(*_throttlers[2], true);
+ _throttlers[2]->onFlush(true);
CPPUNIT_ASSERT_EQUAL(std::size_t(0), _bottomLinks[2]->getNumCommands());
CPPUNIT_ASSERT_EQUAL(std::size_t(0), _topLinks[2]->getNumReplies());
diff --git a/storage/src/vespa/storage/bucketdb/lockablemap.hpp b/storage/src/vespa/storage/bucketdb/lockablemap.hpp
index 440a76b92fd..3cef17c9025 100644
--- a/storage/src/vespa/storage/bucketdb/lockablemap.hpp
+++ b/storage/src/vespa/storage/bucketdb/lockablemap.hpp
@@ -8,6 +8,7 @@
#include <vespa/vespalib/stllike/hash_set.hpp>
#include <thread>
#include <chrono>
+#include <ostream>
namespace storage {
diff --git a/storage/src/vespa/storage/bucketdb/stdmapwrapper.h b/storage/src/vespa/storage/bucketdb/stdmapwrapper.h
index 8cd2d6a7578..889227f1747 100644
--- a/storage/src/vespa/storage/bucketdb/stdmapwrapper.h
+++ b/storage/src/vespa/storage/bucketdb/stdmapwrapper.h
@@ -12,6 +12,7 @@
#include <map>
#include <vespa/vespalib/util/printable.h>
+#include <ostream>
namespace storage {
diff --git a/storage/src/vespa/storage/common/storagelinkqueued.cpp b/storage/src/vespa/storage/common/storagelinkqueued.cpp
index 5b3aacd11de..036a1269958 100644
--- a/storage/src/vespa/storage/common/storagelinkqueued.cpp
+++ b/storage/src/vespa/storage/common/storagelinkqueued.cpp
@@ -65,7 +65,7 @@ void StorageLinkQueued::logError(const char* err) {
};
void StorageLinkQueued::logDebug(const char* err) {
- LOG(info, "%s", err);
+ LOG(debug, "%s", err);
};
template class StorageLinkQueued::Dispatcher<storage::api::StorageMessage>;
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
index 799b3641c64..e143f4d8570 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "simplemaintenancescanner.h"
#include <vespa/storage/distributor/distributor_bucket_space.h>
+#include <ostream>
namespace storage::distributor {
diff --git a/storage/src/vespa/storage/distributor/messagetracker.h b/storage/src/vespa/storage/distributor/messagetracker.h
index 626335e1ba6..75ae287d98f 100644
--- a/storage/src/vespa/storage/distributor/messagetracker.h
+++ b/storage/src/vespa/storage/distributor/messagetracker.h
@@ -4,6 +4,7 @@
#include <vespa/storage/common/messagesender.h>
#include <vector>
#include <map>
+#include <string>
namespace storage::api {
class BucketCommand;
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
index 784ea7253b6..66ce4fc0485 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
@@ -3,6 +3,7 @@
#include <vespa/storage/distributor/idealstatemanager.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/pendingmessagetracker.h>
+#include <array>
#include <vespa/log/bufferedlogger.h>
LOG_SETUP(".distributor.operation.idealstate.merge");
diff --git a/storage/src/vespa/storage/persistence/diskthread.h b/storage/src/vespa/storage/persistence/diskthread.h
index 0489ad3144a..926b766aca0 100644
--- a/storage/src/vespa/storage/persistence/diskthread.h
+++ b/storage/src/vespa/storage/persistence/diskthread.h
@@ -16,6 +16,7 @@
#include <vespa/vespalib/util/document_runnable.h>
#include <vespa/config-stor-filestor.h>
#include <vespa/storageframework/generic/thread/runnable.h>
+#include <ostream>
namespace storage {
diff --git a/storage/src/vespa/storage/persistence/filestorage/mergestatus.cpp b/storage/src/vespa/storage/persistence/filestorage/mergestatus.cpp
index 86c70a4a287..24f4c9cd731 100644
--- a/storage/src/vespa/storage/persistence/filestorage/mergestatus.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/mergestatus.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "mergestatus.h"
+#include <ostream>
#include <vespa/log/log.h>
LOG_SETUP(".mergestatus");
diff --git a/storage/src/vespa/storage/persistence/messages.cpp b/storage/src/vespa/storage/persistence/messages.cpp
index f38d3d0fac3..61a10f71868 100644
--- a/storage/src/vespa/storage/persistence/messages.cpp
+++ b/storage/src/vespa/storage/persistence/messages.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "messages.h"
+#include <ostream>
using document::BucketSpace;
diff --git a/storage/src/vespa/storage/tools/generatedistributionbits.cpp b/storage/src/vespa/storage/tools/generatedistributionbits.cpp
index 73f11b39e67..98c5e56b90c 100644
--- a/storage/src/vespa/storage/tools/generatedistributionbits.cpp
+++ b/storage/src/vespa/storage/tools/generatedistributionbits.cpp
@@ -9,6 +9,7 @@
#include <iomanip>
#include <iostream>
#include <algorithm>
+#include <sstream>
#include <vespa/config-stor-distribution.h>
namespace storage {
diff --git a/storage/src/vespa/storage/visiting/commandqueue.h b/storage/src/vespa/storage/visiting/commandqueue.h
index 8cc565884ec..d129506eb64 100644
--- a/storage/src/vespa/storage/visiting/commandqueue.h
+++ b/storage/src/vespa/storage/visiting/commandqueue.h
@@ -19,6 +19,7 @@
#include <vespa/fastos/timestamp.h>
#include <vespa/storageframework/generic/clock/clock.h>
#include <list>
+#include <ostream>
namespace storage {
diff --git a/storage/src/vespa/storage/visiting/visitor.h b/storage/src/vespa/storage/visiting/visitor.h
index c8d34139364..f53ca5a60a0 100644
--- a/storage/src/vespa/storage/visiting/visitor.h
+++ b/storage/src/vespa/storage/visiting/visitor.h
@@ -24,6 +24,7 @@
#include <vespa/persistence/spi/read_consistency.h>
#include <list>
#include <deque>
+#include <ostream>
namespace document {
class Document;
diff --git a/storageapi/src/vespa/storageapi/message/datagram.cpp b/storageapi/src/vespa/storageapi/message/datagram.cpp
index 66b753185a4..3376761ee41 100644
--- a/storageapi/src/vespa/storageapi/message/datagram.cpp
+++ b/storageapi/src/vespa/storageapi/message/datagram.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "datagram.h"
+#include <ostream>
using document::BucketSpace;
diff --git a/storageapi/src/vespa/storageapi/message/documentsummary.cpp b/storageapi/src/vespa/storageapi/message/documentsummary.cpp
index e1bf84a8e7c..1d81c6a4c16 100644
--- a/storageapi/src/vespa/storageapi/message/documentsummary.cpp
+++ b/storageapi/src/vespa/storageapi/message/documentsummary.cpp
@@ -1,5 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "documentsummary.h"
+#include <ostream>
namespace storage {
namespace api {
diff --git a/storageapi/src/vespa/storageapi/message/queryresult.cpp b/storageapi/src/vespa/storageapi/message/queryresult.cpp
index 67b39d19f6d..bf81083b954 100644
--- a/storageapi/src/vespa/storageapi/message/queryresult.cpp
+++ b/storageapi/src/vespa/storageapi/message/queryresult.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "queryresult.h"
+#include <ostream>
namespace storage {
namespace api {
diff --git a/storageapi/src/vespa/storageapi/message/searchresult.cpp b/storageapi/src/vespa/storageapi/message/searchresult.cpp
index caa698fa67b..4299109e6b5 100644
--- a/storageapi/src/vespa/storageapi/message/searchresult.cpp
+++ b/storageapi/src/vespa/storageapi/message/searchresult.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "searchresult.h"
+#include <ostream>
using vdslib::SearchResult;
diff --git a/storageapi/src/vespa/storageapi/message/visitor.cpp b/storageapi/src/vespa/storageapi/message/visitor.cpp
index f5850de98ad..8adec61a34e 100644
--- a/storageapi/src/vespa/storageapi/message/visitor.cpp
+++ b/storageapi/src/vespa/storageapi/message/visitor.cpp
@@ -3,6 +3,7 @@
#include "visitor.h"
#include <vespa/vespalib/util/array.hpp>
#include <climits>
+#include <ostream>
namespace storage::api {
diff --git a/storageframework/src/vespa/storageframework/generic/clock/time.cpp b/storageframework/src/vespa/storageframework/generic/clock/time.cpp
index 0e8178b03bb..7ead5e50077 100644
--- a/storageframework/src/vespa/storageframework/generic/clock/time.cpp
+++ b/storageframework/src/vespa/storageframework/generic/clock/time.cpp
@@ -5,6 +5,7 @@
#include <iomanip>
#include <vector>
#include <cassert>
+#include <sstream>
namespace storage {
namespace framework {
diff --git a/streamingvisitors/src/vespa/searchvisitor/queryenvironment.h b/streamingvisitors/src/vespa/searchvisitor/queryenvironment.h
index b9391ac838c..db0f95cb6bb 100644
--- a/streamingvisitors/src/vespa/searchvisitor/queryenvironment.h
+++ b/streamingvisitors/src/vespa/searchvisitor/queryenvironment.h
@@ -54,6 +54,8 @@ public:
// inherit documentation
virtual const search::attribute::IAttributeContext & getAttributeContext() const override { return *_attrCtx; }
+ double get_average_field_length(const vespalib::string &) const override { return 1.0; }
+
// inherit documentation
virtual const search::fef::IIndexEnvironment & getIndexEnvironment() const override { return _indexEnv; }
diff --git a/tenant-auth/OWNERS b/tenant-auth/OWNERS
new file mode 100644
index 00000000000..d0a102ecbf4
--- /dev/null
+++ b/tenant-auth/OWNERS
@@ -0,0 +1 @@
+jonmv
diff --git a/tenant-auth/README.md b/tenant-auth/README.md
new file mode 100644
index 00000000000..0514b68400e
--- /dev/null
+++ b/tenant-auth/README.md
@@ -0,0 +1 @@
+# Utilities that authenticate users to the hosted Vespa API, or to hosted Vespa applications.
diff --git a/tenant-auth/pom.xml b/tenant-auth/pom.xml
new file mode 100644
index 00000000000..be8b42dd6c2
--- /dev/null
+++ b/tenant-auth/pom.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>parent</artifactId>
+ <version>7-SNAPSHOT</version>
+ <relativePath>../parent/pom.xml</relativePath>
+ </parent>
+ <artifactId>tenant-auth</artifactId>
+ <description>Provides resources for authenticating with the hosted Vespa API or application containers</description>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>hosted-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>config-provisioning</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>security-utils</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+</project>
diff --git a/tenant-auth/src/main/java/ai/vespa/hosted/auth/ApiAuthenticator.java b/tenant-auth/src/main/java/ai/vespa/hosted/auth/ApiAuthenticator.java
new file mode 100644
index 00000000000..2b5bbb188dc
--- /dev/null
+++ b/tenant-auth/src/main/java/ai/vespa/hosted/auth/ApiAuthenticator.java
@@ -0,0 +1,16 @@
+package ai.vespa.hosted.auth;
+
+import ai.vespa.hosted.api.ControllerHttpClient;
+import ai.vespa.hosted.api.Properties;
+
+public class ApiAuthenticator implements ai.vespa.hosted.api.ApiAuthenticator {
+
+    /** Returns a controller client which uses private key signatures for authentication. */
+ @Override
+ public ControllerHttpClient controller() {
+ return ControllerHttpClient.withSignatureKey(Properties.endpoint(),
+ Properties.privateKeyFile(),
+ Properties.application());
+ }
+
+}
diff --git a/tenant-auth/src/main/java/ai/vespa/hosted/auth/EndpointAuthenticator.java b/tenant-auth/src/main/java/ai/vespa/hosted/auth/EndpointAuthenticator.java
new file mode 100644
index 00000000000..abb4197bda1
--- /dev/null
+++ b/tenant-auth/src/main/java/ai/vespa/hosted/auth/EndpointAuthenticator.java
@@ -0,0 +1,68 @@
+package ai.vespa.hosted.auth;
+
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.security.KeyUtils;
+import com.yahoo.security.SslContextBuilder;
+import com.yahoo.security.X509CertificateUtils;
+
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.net.http.HttpRequest;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.NoSuchAlgorithmException;
+import java.security.PrivateKey;
+import java.security.cert.X509Certificate;
+import java.time.Instant;
+import java.util.Optional;
+
+import static ai.vespa.hosted.api.Properties.getNonBlankProperty;
+
+/**
+ * Authenticates against the hosted Vespa API using private key signatures, and against Vespa applications using mutual TLS.
+ *
+ * @author jonmv
+ */
+public class EndpointAuthenticator implements ai.vespa.hosted.api.EndpointAuthenticator {
+
+ /** Don't touch. */
+ public EndpointAuthenticator(@SuppressWarnings("unused") SystemName __) { }
+
+ /**
+ * If {@code System.getProperty("vespa.test.credentials.root")} is set, key and certificate files
+ * "key" and "cert" in that directory are used; otherwise, the system default SSLContext is returned.
+ */
+ @Override
+ public SSLContext sslContext() {
+ try {
+ Optional<String> credentialsRootProperty = getNonBlankProperty("vespa.test.credentials.root");
+ if (credentialsRootProperty.isEmpty())
+ return SSLContext.getDefault();
+
+ Path credentialsRoot = Path.of(credentialsRootProperty.get());
+ Path certificateFile = credentialsRoot.resolve("cert");
+ Path privateKeyFile = credentialsRoot.resolve("key");
+
+ X509Certificate certificate = X509CertificateUtils.fromPem(new String(Files.readAllBytes(certificateFile)));
+ if ( Instant.now().isBefore(certificate.getNotBefore().toInstant())
+ || Instant.now().isAfter(certificate.getNotAfter().toInstant()))
+ throw new IllegalStateException("Certificate at '" + certificateFile + "' is valid between " +
+ certificate.getNotBefore() + " and " + certificate.getNotAfter() + " — not now.");
+
+ PrivateKey privateKey = KeyUtils.fromPemEncodedPrivateKey(new String(Files.readAllBytes(privateKeyFile)));
+ return new SslContextBuilder().withKeyStore(privateKey, certificate).build();
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ catch (NoSuchAlgorithmException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+ @Override
+ public HttpRequest.Builder authenticated(HttpRequest.Builder request) {
+ return request;
+ }
+
+}
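
A usage sketch, for illustration only and not part of the change above (the credentials path mentioned is made up; passing null is fine here since the SystemName argument is unused by this implementation):

    import javax.net.ssl.SSLContext;
    import ai.vespa.hosted.auth.EndpointAuthenticator;

    public class SslContextSketch {
        public static void main(String[] args) {
            // With "vespa.test.credentials.root" unset, the JDK default SSLContext is returned;
            // setting it, e.g. -Dvespa.test.credentials.root=/path/to/credentials (illustrative path),
            // makes the authenticator load the files "key" and "cert" from that directory instead.
            EndpointAuthenticator authenticator = new EndpointAuthenticator(null);
            SSLContext sslContext = authenticator.sslContext();
        }
    }
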
diff --git a/tenant-auth/src/test/java/ai/vespa/hosted/auth/AuthenticatorTest.java b/tenant-auth/src/test/java/ai/vespa/hosted/auth/AuthenticatorTest.java
new file mode 100644
index 00000000000..ff4bebce3ff
--- /dev/null
+++ b/tenant-auth/src/test/java/ai/vespa/hosted/auth/AuthenticatorTest.java
@@ -0,0 +1,5 @@
+package ai.vespa.hosted.auth;
+
+public class AuthenticatorTest {
+
+}
diff --git a/tenant-base/pom.xml b/tenant-base/pom.xml
index 8d5fb626789..3c48d22085e 100644
--- a/tenant-base/pom.xml
+++ b/tenant-base/pom.xml
@@ -37,11 +37,11 @@
<target_jdk_version>11</target_jdk_version>
<compiler_plugin_version>3.8.0</compiler_plugin_version>
<surefire_version>2.22.0</surefire_version> <!-- NOTE bjorncs 15.06.2017: Version 2.20 has OoM issues -->
+ <endpoint>https://api.vespa-external.aws.oath.cloud:4443</endpoint>
</properties>
<dependencyManagement>
<dependencies>
-
<dependency>
<groupId>com.yahoo.vespa</groupId>
<artifactId>container-dependency-versions</artifactId>
@@ -49,7 +49,6 @@
<type>pom</type>
<scope>import</scope>
</dependency>
-
</dependencies>
</dependencyManagement>
@@ -224,21 +223,9 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>${surefire_version}</version>
- <configuration>
- <groups>com.yahoo.vespa.tenant.cd.SystemTest</groups>
- <excludedGroups />
- <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
- <redirectTestOutputToFile>false</redirectTestOutputToFile>
- <trimStackTrace>false</trimStackTrace>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-report-plugin</artifactId>
- <version>${surefire_version}</version>
<configuration>
- <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
+ <groups>ai.vespa.hosted.cd.SystemTest</groups>
+ <excludedGroups>ai.vespa.hosted.cd.EmptyGroup</excludedGroups>
</configuration>
</plugin>
</plugins>
@@ -252,21 +239,9 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>${surefire_version}</version>
- <configuration>
- <groups>com.yahoo.vespa.tenant.cd.StagingTest</groups>
- <excludedGroups />
- <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
- <redirectTestOutputToFile>false</redirectTestOutputToFile>
- <trimStackTrace>false</trimStackTrace>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-report-plugin</artifactId>
- <version>${surefire_version}</version>
<configuration>
- <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
+ <groups>ai.vespa.hosted.cd.StagingTest</groups>
+ <excludedGroups>ai.vespa.hosted.cd.EmptyGroup</excludedGroups>
</configuration>
</plugin>
</plugins>
@@ -280,21 +255,9 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>${surefire_version}</version>
<configuration>
- <groups>com.yahoo.vespa.tenant.cd.ProductionTest</groups>
- <excludedGroups />
- <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
- <redirectTestOutputToFile>false</redirectTestOutputToFile>
- <trimStackTrace>false</trimStackTrace>
- </configuration>
- </plugin>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-report-plugin</artifactId>
- <version>${surefire_version}</version>
- <configuration>
- <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
+ <groups>ai.vespa.hosted.cd.ProductionTest</groups>
+ <excludedGroups>ai.vespa.hosted.cd.EmptyGroup</excludedGroups>
</configuration>
</plugin>
</plugins>
@@ -304,8 +267,39 @@
<build>
<finalName>${project.artifactId}</finalName>
- <plugins>
+ <pluginManagement>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <version>${surefire_version}</version>
+ <configuration>
+ <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
+ <redirectTestOutputToFile>false</redirectTestOutputToFile>
+ <trimStackTrace>false</trimStackTrace>
+ <systemPropertyVariables>
+ <application>${application}</application>
+ <tenant>${tenant}</tenant>
+ <instance>${instance}</instance>
+ <endpoint>${endpoint}</endpoint>
+ <privateKeyFile>${privateKeyFile}</privateKeyFile>
+ <certificateFile>${certificateFile}</certificateFile>
+ </systemPropertyVariables>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-surefire-report-plugin</artifactId>
+ <version>${surefire_version}</version>
+ <configuration>
+ <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
+ </configuration>
+ </plugin>
+ </plugins>
+ </pluginManagement>
+
+ <plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
@@ -348,6 +342,12 @@
<plugin>
<groupId>com.yahoo.vespa</groupId>
+ <artifactId>vespa-maven-plugin</artifactId>
+ <version>${vespaversion}</version>
+ </plugin>
+
+ <plugin>
+ <groupId>com.yahoo.vespa</groupId>
<artifactId>vespa-application-maven-plugin</artifactId>
<version>${vespaversion}</version>
<executions>
@@ -369,25 +369,14 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
- <version>${surefire_version}</version>
<configuration>
- <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
<excludedGroups>
- com.yahoo.vespa.tenant.cd.SystemTest,
- com.yahoo.vespa.tenant.cd.StagingTest,
- com.yahoo.vespa.tenant.cd.ProductionTest
+ ai.vespa.hosted.cd.SystemTest,
+ ai.vespa.hosted.cd.StagingTest,
+ ai.vespa.hosted.cd.ProductionTest
</excludedGroups>
</configuration>
</plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-report-plugin</artifactId>
- <version>${surefire_version}</version>
- <configuration>
- <reportsDirectory>${env.TEST_DIR}</reportsDirectory>
- </configuration>
- </plugin>
</plugins>
</build>
</project>
diff --git a/tenant-cd/pom.xml b/tenant-cd/pom.xml
index 8907e56762c..18c4084a173 100644
--- a/tenant-cd/pom.xml
+++ b/tenant-cd/pom.xml
@@ -5,6 +5,7 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
+ <groupId>com.yahoo.vespa</groupId>
<artifactId>tenant-cd</artifactId>
<name>Hosted Vespa tenant CD</name>
<description>Test library for hosted Vespa applications.</description>
@@ -20,6 +21,36 @@
<dependencies>
<dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>security-utils</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>vespajlib</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>config-provisioning</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>tenant-auth</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>hosted-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+
+ <dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Deployment.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Deployment.java
new file mode 100644
index 00000000000..e0abdde4687
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Deployment.java
@@ -0,0 +1,19 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * A deployment of a Vespa application, which contains endpoints for document and metrics retrieval.
+ *
+ * @author jonmv
+ */
+public interface Deployment {
+
+ /** Returns an Endpoint in the cluster with the "default" id. */
+ Endpoint endpoint();
+
+ /** Returns an Endpoint in the cluster with the given id. */
+ Endpoint endpoint(String id);
+
+ /** Returns a {@link TestDeployment} view of this, or throws if this is a production deployment. */
+ TestDeployment asTestDeployment();
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Digest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Digest.java
new file mode 100644
index 00000000000..dee13fdca13
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Digest.java
@@ -0,0 +1,28 @@
+package ai.vespa.hosted.cd;
+
+import java.util.Set;
+
+/**
+ * An immutable report of the outcome of a {@link Feed} sent to a {@link TestEndpoint}.
+ *
+ * @author jonmv
+ */
+public class Digest {
+
+ public Set<DocumentId> created() {
+ return null;
+ }
+
+ public Set<DocumentId> updated() {
+ return null;
+ }
+
+ public Set<DocumentId> deleted() {
+ return null;
+ }
+
+ public Set<DocumentId> failed() {
+ return null;
+ }
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Document.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Document.java
new file mode 100644
index 00000000000..91adeded65c
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Document.java
@@ -0,0 +1,16 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * A schema-less representation of a generic Vespa document.
+ *
+ * @author jonmv
+ */
+public class Document {
+
+
+ /** Returns a copy of this document, updated with the data in the given document. */
+ public Document updatedBy(Document update) {
+ return null;
+ }
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/DocumentId.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/DocumentId.java
new file mode 100644
index 00000000000..9aa8e80c977
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/DocumentId.java
@@ -0,0 +1,71 @@
+package ai.vespa.hosted.cd;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Unique, immutable ID of a Vespa document, which contains information pertinent to its storage.
+ *
+ * @author jonmv
+ */
+public class DocumentId {
+
+ private final String namespace;
+ private final String documentType;
+ private final String group;
+ private final Long number;
+ private final String userDefined;
+
+ private DocumentId(String namespace, String documentType, String group, Long number, String userDefined) {
+ this.namespace = namespace;
+ this.documentType = documentType;
+ this.group = group;
+ this.number = number;
+ this.userDefined = userDefined;
+ }
+
+ public static DocumentId of(String namespace, String documentType, String id) {
+ return new DocumentId(requireNonEmpty(namespace), requireNonEmpty(documentType), null, null, requireNonEmpty(id));
+ }
+
+ public static DocumentId of(String namespace, String documentType, String group, String id) {
+ return new DocumentId(requireNonEmpty(namespace), requireNonEmpty(documentType), requireNonEmpty(group), null, requireNonEmpty(id));
+ }
+
+ public static DocumentId of(String namespace, String documentType, long number, String id) {
+ return new DocumentId(requireNonEmpty(namespace), requireNonEmpty(documentType), null, number, requireNonEmpty(id));
+ }
+
+ public static DocumentId ofValue(String value) {
+ List<String> parts = Arrays.asList(value.split(":"));
+ String id = String.join(":", parts.subList(4, parts.size()));
+ if ( parts.size() < 5
+ || ! parts.get(0).equals("id")
+ || id.isEmpty()
+ || ! parts.get(3).matches("((n=\\d+)|(g=\\w+))?"))
+            throw new IllegalArgumentException("Document id must be of the form" +
+ " 'id:<namespace>:<document type>:n=<integer>|g=<name>|<empty>:<user defined id>'," +
+ " but was '" + value + "'.");
+
+ if (parts.get(3).matches("n=\\d+"))
+ return of(parts.get(1), parts.get(2), Long.parseLong(parts.get(3).substring(2)), id);
+ if (parts.get(3).matches("g=\\w+"))
+ return of(parts.get(1), parts.get(2), parts.get(3).substring(2), id);
+ return of(parts.get(1), parts.get(2), id);
+ }
+
+ public String asValue() {
+ return "id:" + namespace + ":" + documentType + ":" + grouper() + ":" + userDefined;
+ }
+
+ private String grouper() {
+        return group != null ? "g=" + group : number != null ? "n=" + number : "";
+ }
+
+ private static String requireNonEmpty(String string) {
+ if (string.isEmpty())
+ throw new IllegalArgumentException("The empty string is not allowed.");
+ return string;
+ }
+
+}
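
For illustration only (the ids below are made up), the id format accepted by ofValue and the factories above:

    import ai.vespa.hosted.cd.DocumentId;

    public class DocumentIdSketch {
        public static void main(String[] args) {
            DocumentId grouped = DocumentId.ofValue("id:music:album:g=metal:iron-maiden");  // group "metal"
            DocumentId numbered = DocumentId.ofValue("id:music:album:n=42:some-album");     // number 42
            DocumentId plain = DocumentId.of("music", "album", "some:id:with:colons");      // colons allowed in the user defined part
            System.out.println(plain.asValue());  // prints "id:music:album::some:id:with:colons"
        }
    }
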
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/EmptyGroup.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/EmptyGroup.java
new file mode 100644
index 00000000000..8deca3cfb11
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/EmptyGroup.java
@@ -0,0 +1,9 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * The Surefire configuration element &lt;excludedGroups&gt; requires a non-empty value to override one set elsewhere.
+ * This class exists only to serve as that value; without it, no tests would run in the various integration test profiles.
+ *
+ * @author jonmv
+ */
+public interface EmptyGroup { }
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Endpoint.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Endpoint.java
new file mode 100644
index 00000000000..efeb4214ebd
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Endpoint.java
@@ -0,0 +1,25 @@
+package ai.vespa.hosted.cd;
+
+import ai.vespa.hosted.cd.metric.Metrics;
+
+import java.net.URI;
+
+/**
+ * An endpoint in a Vespa application {@link Deployment}, which allows document and metrics retrieval.
+ *
+ * The endpoint translates {@link Query}s to {@link Search}s, and {@link Selection}s to {@link Visit}s.
+ * It also supplies {@link Metrics}.
+ *
+ * @author jonmv
+ */
+public interface Endpoint {
+
+ URI uri();
+
+ Search search(Query query);
+
+ Visit visit(Selection selection);
+
+ Metrics metrics();
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Feed.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Feed.java
new file mode 100644
index 00000000000..e9a0a0aeff0
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Feed.java
@@ -0,0 +1,25 @@
+package ai.vespa.hosted.cd;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * An immutable set of document feed / update / delete operations, which can be sent to a Vespa {@link TestEndpoint}.
+ *
+ * @author jonmv
+ */
+public class Feed {
+
+ Map<DocumentId, Document> creations() {
+ return null;
+ }
+
+ Map<DocumentId, Document> updates() {
+ return null;
+ }
+
+ Set<DocumentId> deletions() {
+ return null;
+ }
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/FunctionalTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/FunctionalTest.java
new file mode 100644
index 00000000000..e6beb313d28
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/FunctionalTest.java
@@ -0,0 +1,31 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * Tests that compare the behaviour of a Vespa application deployment against a fixed specification.
+ *
+ * These tests are run whenever a change is pushed to a Vespa application, and whenever the Vespa platform
is upgraded, and before any deployments to production zones. When these tests fail, the tested change to
the Vespa application is not rolled out.
+ *
+ * A typical functional test feeds some documents, optionally verifies that they have been processed
as expected, and then checks that queries return the expected results. Another common use is to verify
integration with external services.
+ *
+ * @author jonmv
+ */
+public interface FunctionalTest {
+
+ // Want to feed some documents.
+ // Want to verify document processing and routing is as expected.
+ // Want to check recall on those documents.
+ // Want to verify queries give expected documents.
+ // Want to verify searchers.
+ // Want to verify updates.
+ // Want to verify deletion.
+ // May want to verify reprocessing.
+    // Will likely need to delete documents between tests.
+ // Must be able to feed documents, setting route.
+ // Must be able to search.
+ // Must be able to visit.
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java
index a756b665c1a..6cf5fb07f58 100644
--- a/tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/ProductionTest.java
@@ -1,6 +1,23 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.cd;
-public class ProductionTest {
+/**
+ * Tests that verify the health of production deployments of Vespa applications.
+ *
+ * These tests are typically run some time after deployment to a production zone, to ensure
+ * the deployment is still healthy and working as expected. When these tests fail, deployment
of the tested change is halted until they succeed, or the change is superseded by a remedying one.
+ *
+ * A typical production test is to verify that a set of metrics, measured by the Vespa
+ * deployment itself, are within specified parameters, or that some higher-level measure
+ * of quality, such as engagement among end users of the application, is as expected.
+ *
+ * @author jonmv
+ */
+public interface ProductionTest {
+
+ // Want to verify metrics (Vespa).
+ // Want to verify external metrics (YAMAS, other).
+ // May want to verify search gives expected results.
}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Query.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Query.java
new file mode 100644
index 00000000000..9895f9df3a2
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Query.java
@@ -0,0 +1,70 @@
+package ai.vespa.hosted.cd;
+
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Stream;
+
+import static java.util.Map.copyOf;
+import static java.util.stream.Collectors.joining;
+import static java.util.stream.Collectors.toUnmodifiableMap;
+
+/**
+ * An immutable query to send to a Vespa {@link Endpoint}, to receive a {@link Search}.
+ *
+ * @author jonmv
+ */
+public class Query {
+
+ private final String rawQuery;
+ private final Map<String, String> parameters;
+
+ private Query(String rawQuery, Map<String, String> parameters) {
+ this.rawQuery = rawQuery;
+ this.parameters = parameters;
+ }
+
+ /** Creates a query with the given raw query part, e.g. {@code Query.ofRaw("yql=select * ...")}. */
+ public static Query ofRaw(String rawQuery) {
+ if (rawQuery.isBlank())
+ throw new IllegalArgumentException("Query can not be blank.");
+
+ return new Query(rawQuery,
+ Stream.of(rawQuery.split("&"))
+ .map(pair -> pair.split("="))
+ .collect(toUnmodifiableMap(pair -> pair[0], pair -> pair[1])));
+ }
+
+ /** Creates a query with the given name-value pairs, e.g. {@code Query.ofParameters(Map.of("yql", "select * ..."))}. */
+ public static Query ofParameters(Map<String, String> parameters) {
+ if (parameters.isEmpty())
+ throw new IllegalArgumentException("Parameters can not be empty.");
+
+ return new Query(parameters.entrySet().stream()
+ .map(entry -> entry.getKey() + "=" + entry.getValue())
+ .collect(joining("&")),
+ copyOf(parameters));
+ }
+
+ /** Returns a copy of this with the given name-value pair added, potentially overriding any current value. */
+ public Query withParameter(String name, String value) {
+ return ofParameters(Stream.concat(parameters.entrySet().stream().filter(entry -> ! entry.getKey().equals(name)),
+ Stream.of(Map.entry(name, value)))
+ .collect(toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)));
+ }
+
+ /** Returns the raw string representation of this query. */
+ public String rawQuery() { return rawQuery; }
+
+ /** Returns the parameters of this query. */
+ public Map<String, String> parameters() { return parameters; }
+
+ /** Returns the timeout parameter of the request, if one is set. */
+ public Optional<Duration> timeout() {
+ return Optional.ofNullable(parameters.get("timeout"))
+ .map(timeout -> Duration.of(Long.parseLong(timeout.replaceAll("\\s*m?s", "")),
+ timeout.contains("ms") ? ChronoUnit.MILLIS : ChronoUnit.SECONDS));
+ }
+
+}
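
For illustration only (parameter values are made up), building and inspecting queries with the class above:

    import ai.vespa.hosted.cd.Query;
    import java.util.Map;

    public class QuerySketch {
        public static void main(String[] args) {
            Query query = Query.ofParameters(Map.of("yql", "select * from sources * where true",
                                                    "timeout", "500ms"));
            System.out.println(query.rawQuery());       // e.g. "timeout=500ms&yql=select * from sources * where true"
            System.out.println(query.timeout().get());  // PT0.5S, parsed as 500 milliseconds
            Query withHits = query.withParameter("hits", "10");  // a copy with one more parameter
            System.out.println(withHits.parameters());
        }
    }
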
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Search.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Search.java
new file mode 100644
index 00000000000..ace6262bb7c
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Search.java
@@ -0,0 +1,32 @@
+package ai.vespa.hosted.cd;
+
+import java.util.Map;
+
+/**
+ * The immutable result of sending a {@link Query} to a Vespa {@link Endpoint}.
+ *
+ * @author jonmv
+ */
+public class Search {
+
+ private final String raw;
+
+ public Search(String raw) {
+ this.raw = raw;
+ }
+
+ public String rawOutput() { return raw; }
+
+ // hits
+ // coverage
+ // searched
+ // full?
+ // results?
+ // resultsFull?
+
+ /** Returns the documents that were returned as the result, with iteration order as returned. */
+ Map<DocumentId, Document> documents() {
+ return null;
+ }
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Selection.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Selection.java
new file mode 100644
index 00000000000..158ae279cb6
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Selection.java
@@ -0,0 +1,58 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * A document selection expression, type and cluster, which can be used to visit an {@link Endpoint}.
+ *
+ * @author jonmv
+ */
+public class Selection {
+
+ private final String selection;
+ private final String namespace;
+ private final String type;
+ private final String group;
+ private final String cluster;
+ private final int concurrency;
+
+ private Selection(String selection, String namespace, String type, String group, String cluster, int concurrency) {
+ this.selection = selection;
+ this.namespace = namespace;
+ this.type = type;
+ this.group = group;
+ this.cluster = cluster;
+ this.concurrency = concurrency;
+ }
+
+ /** Returns a new selection which will visit documents in the given cluster. */
+ public static Selection in(String cluster) {
+ if (cluster.isBlank()) throw new IllegalArgumentException("Cluster name can not be blank.");
+        return new Selection(null, null, null, null, cluster, 1);
+ }
+
+ /** Returns a new selection which will visit documents in the given namespace and of the given type. */
+ public static Selection of(String namespace, String type) {
+ if (namespace.isBlank()) throw new IllegalArgumentException("Namespace can not be blank.");
+ if (type.isBlank()) throw new IllegalArgumentException("Document type can not be blank.");
+ return new Selection(null, namespace, type, null, null, 1);
+ }
+
+ /** Returns a copy of this with the given selection criterion set. */
+ public Selection matching(String selection) {
+ if (selection.isBlank()) throw new IllegalArgumentException("Selection can not be blank.");
+        return new Selection(selection, namespace, type, group, cluster, concurrency);
+ }
+
+ /** Returns a copy of this selection, with the group set to the specified value. Requires namespace and type to be set. */
+ public Selection limitedTo(String group) {
+ if (namespace == null || type == null) throw new IllegalArgumentException("Namespace and type must be specified to set group.");
+ if (group.isBlank()) throw new IllegalArgumentException("Group name can not be blank.");
+        return new Selection(selection, namespace, type, group, cluster, concurrency);
+ }
+
+ /** Returns a copy of this, with concurrency set to the given positive value. */
+ public Selection concurrently(int concurrency) {
+ if (concurrency < 1) throw new IllegalArgumentException("Concurrency must be a positive integer.");
+        return new Selection(selection, namespace, type, group, cluster, concurrency);
+ }
+
+}
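
A sketch of how selections might be composed (illustration only; the cluster name, document type, and selection expression are made up):

    import ai.vespa.hosted.cd.Selection;

    public class SelectionSketch {
        public static void main(String[] args) {
            // Visit documents of a namespace and type, restricted by a selection, with 4 visitor threads.
            Selection recent = Selection.of("music", "album")
                                        .matching("album.year > 2000")
                                        .concurrently(4);
            // Or visit everything stored in a named content cluster.
            Selection wholeCluster = Selection.in("my-content-cluster");
        }
    }
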
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java
index 789b9deadb0..ee2ee0add4c 100644
--- a/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java
@@ -1,6 +1,10 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.cd;
+/**
+ * @deprecated Use {@link UpgradeTest}.
+ */
+@Deprecated
public class StagingTest {
}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java
index 889acb8b9c4..6a8d1b4cbe4 100644
--- a/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java
@@ -1,6 +1,10 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.hosted.cd;
+/**
+ * @deprecated use {@link FunctionalTest}.
+ */
+@Deprecated
public class SystemTest {
}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestDeployment.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestDeployment.java
new file mode 100644
index 00000000000..bc00b6e0dc6
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestDeployment.java
@@ -0,0 +1,18 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * A deployment of a Vespa application, which also contains endpoints for document manipulation.
+ *
+ * @author jonmv
+ */
+public interface TestDeployment extends Deployment {
+
+ /** Returns a {@link TestEndpoint} in the cluster with the "default" id. */
+ @Override
+ TestEndpoint endpoint();
+
+ /** Returns a {@link TestEndpoint} in the cluster with the given id. */
+ @Override
+ TestEndpoint endpoint(String id);
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestEndpoint.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestEndpoint.java
new file mode 100644
index 00000000000..f6f8a722f19
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestEndpoint.java
@@ -0,0 +1,13 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * An endpoint in a Vespa application {@link TestDeployment}, which also translates {@link Feed}s to {@link Digest}s.
+ *
+ * @author jonmv
+ */
+public interface TestEndpoint extends Endpoint {
+
+ /** Sends the given Feed to this TestEndpoint, blocking until it is digested, and returns a feed report. */
+ Digest digest(Feed feed);
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestRuntime.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestRuntime.java
new file mode 100644
index 00000000000..4ae1c0b7a5e
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/TestRuntime.java
@@ -0,0 +1,98 @@
+package ai.vespa.hosted.cd;
+
+import ai.vespa.hosted.api.ApiAuthenticator;
+import ai.vespa.hosted.api.EndpointAuthenticator;
+import ai.vespa.hosted.api.ControllerHttpClient;
+import ai.vespa.hosted.api.Properties;
+import ai.vespa.hosted.api.TestConfig;
+import ai.vespa.hosted.cd.http.HttpDeployment;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.zone.ZoneId;
+
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Map;
+
+import static java.util.stream.Collectors.toUnmodifiableMap;
+
+/**
+ * The place to obtain environment-dependent configuration for tests of a Vespa deployment.
+ *
+ * @author jvenstad
+ */
+public class TestRuntime {
+
+ private static TestRuntime theRuntime;
+
+ private final TestConfig config;
+ private final Map<String, Deployment> productionDeployments;
+ private final Deployment deploymentToTest;
+
+ private TestRuntime(TestConfig config, EndpointAuthenticator authenticator) {
+ this.config = config;
+ this.productionDeployments = config.deployments().entrySet().stream()
+ .filter(zoneDeployment -> zoneDeployment.getKey().environment() == Environment.prod)
+ .collect(toUnmodifiableMap(zoneDeployment -> zoneDeployment.getKey().region().value(),
+ zoneDeployment -> new HttpDeployment(zoneDeployment.getValue(),
+ config.zone(),
+ authenticator)));
+ this.deploymentToTest = new HttpDeployment(config.deployments().get(config.zone()), config.zone(), authenticator);
+ }
+
+ /**
+     * Returns the runtime for this test, creating it from test config on first use.
+ *
+ * If the system property {@code "vespa.test.config"} is set (to a file path), a file at that location
+ * is attempted read, and config parsed from it.
+ * Otherwise, config is fetched over HTTP from the hosted Vespa API, assuming the deployment indicated
+ * by the optional {@code "environment"} and {@code "region"} system properties exists.
+ * When environment is not specified, it defaults to {@link Environment#dev},
+ * while region must be set unless the environment is {@link Environment#dev} or {@link Environment#perf}.
+ */
+ public static synchronized TestRuntime get() {
+ if (theRuntime == null) {
+ String configPath = System.getProperty("vespa.test.config");
+ TestConfig config = configPath != null ? fromFile(configPath) : fromController();
+ theRuntime = new TestRuntime(config,
+ new ai.vespa.hosted.auth.EndpointAuthenticator(config.system()));
+ }
+ return theRuntime;
+ }
+
+ /** Returns a copy of this runtime, with the given endpoint authenticator. */
+ public TestRuntime with(EndpointAuthenticator authenticator) {
+ return new TestRuntime(config, authenticator);
+ }
+
+ /** Returns the full id of the application this is testing. */
+ public ApplicationId application() { return config.application(); }
+
+ /** Returns the zone of the deployment this is testing. */
+ public ZoneId zone() { return config.zone(); }
+
+ /** Returns all production deployments of the application this is testing. */
+ public Map<String, Deployment> productionDeployments() { return productionDeployments; }
+
+ /** Returns the deployment this is testing. */
+ public Deployment deploymentToTest() { return deploymentToTest; }
+
+ private static TestConfig fromFile(String path) {
+ try {
+ return TestConfig.fromJson(Files.readAllBytes(Paths.get(path)));
+ }
+ catch (Exception e) {
+ throw new IllegalArgumentException("Failed reading config from '" + path + "'!", e);
+ }
+ }
+
+ private static TestConfig fromController() {
+ ControllerHttpClient controller = new ai.vespa.hosted.auth.ApiAuthenticator().controller();
+ ApplicationId id = Properties.application();
+ Environment environment = Properties.environment().orElse(Environment.dev);
+ ZoneId zone = Properties.region().map(region -> ZoneId.from(environment, region))
+ .orElseGet(() -> controller.defaultZone(environment));
+ return controller.testConfig(id, zone);
+ }
+
+}
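
A sketch of how a test might use the runtime (illustration only; the test class, the query, and the use of a JUnit 4 category as marker are assumptions, not taken from this change):

    import ai.vespa.hosted.cd.Deployment;
    import ai.vespa.hosted.cd.FunctionalTest;
    import ai.vespa.hosted.cd.Query;
    import ai.vespa.hosted.cd.Search;
    import ai.vespa.hosted.cd.TestRuntime;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    import static org.junit.Assert.assertNotNull;

    @Category(FunctionalTest.class)  // surefire's <groups>/<excludedGroups> select tests by such categories
    public class ExampleTest {

        @Test
        public void defaultEndpointAnswersQueries() {
            Deployment deployment = TestRuntime.get().deploymentToTest();
            // The query string is used as-is in the request URI, so it is URL-encoded here.
            Search search = deployment.endpoint().search(Query.ofRaw("yql=select+*+from+sources+*+where+true"));
            assertNotNull(search.rawOutput());
        }

    }
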
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/UpgradeTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/UpgradeTest.java
new file mode 100644
index 00000000000..32083fbd5f6
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/UpgradeTest.java
@@ -0,0 +1,23 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * Tests that assert continuity of behaviour for Vespa application deployments, through upgrades.
+ *
+ * These tests are run whenever a change is pushed to a Vespa application, and whenever the Vespa platform
is upgraded, and before any deployments to production zones. When these tests fail, the tested change to
the Vespa application is not rolled out.
+ *
+ * A typical upgrade test performs some operations against a test deployment prior to upgrade, such as feeding
and searching for some documents and perhaps recording some metrics, then upgrades the deployment,
repeats the exercise, and compares the results from before and after the upgrade.
+ *
+ * TODO Split in platform upgrades and application upgrades?
+ *
+ * @author jonmv
+ */
+public interface UpgradeTest {
+
+ // Want to verify documents are not damaged by upgrade.
+ // May want to verify metrics during upgrade.
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/Visit.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Visit.java
new file mode 100644
index 00000000000..3bb2f59de97
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/Visit.java
@@ -0,0 +1,17 @@
+package ai.vespa.hosted.cd;
+
+import java.util.Map;
+
+/**
+ * A stateful visit operation against an {@link Endpoint}.
+ *
+ * @author jonmv
+ */
+public class Visit {
+
+ // Delegate to a blocking iterator, which can be used for iteration as visit is ongoing.
+ public Map<DocumentId, Document> documents() {
+ return null;
+ }
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/VisitEndpoint.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/VisitEndpoint.java
new file mode 100644
index 00000000000..618a004a571
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/VisitEndpoint.java
@@ -0,0 +1,10 @@
+package ai.vespa.hosted.cd;
+
+/**
+ * A remote endpoint in a Vespa application {@link Deployment}, which translates {@link Selection}s to {@link Visit}s.
+ *
+ * @author jonmv
+ */
+public interface VisitEndpoint {
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/http/HttpDeployment.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/http/HttpDeployment.java
new file mode 100644
index 00000000000..22c622effae
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/http/HttpDeployment.java
@@ -0,0 +1,53 @@
+package ai.vespa.hosted.cd.http;
+
+import ai.vespa.hosted.api.EndpointAuthenticator;
+import ai.vespa.hosted.cd.TestDeployment;
+import ai.vespa.hosted.cd.TestEndpoint;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.zone.ZoneId;
+
+import java.net.URI;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.stream.Collectors;
+
+/**
+ * A remote deployment of a Vespa application, reachable over HTTP. Contains {@link HttpEndpoint}s.
+ *
+ * @author jonmv
+ */
+public class HttpDeployment implements TestDeployment {
+
+ private final ZoneId zone;
+ private final Map<String, HttpEndpoint> endpoints;
+
+ /** Creates a representation of the given deployment endpoints, using the authenticator for data plane access. */
+ public HttpDeployment(Map<String, URI> endpoints, ZoneId zone, EndpointAuthenticator authenticator) {
+ this.zone = zone;
+ this.endpoints = endpoints.entrySet().stream()
+ .collect(Collectors.toUnmodifiableMap(entry -> entry.getKey(),
+ entry -> new HttpEndpoint(entry.getValue(), authenticator)));
+ }
+
+ @Override
+ public TestEndpoint endpoint() {
+ return endpoint("default");
+ }
+
+ @Override
+ public TestEndpoint endpoint(String id) {
+ if ( ! endpoints.containsKey(id))
+ throw new NoSuchElementException("No cluster with id '" + id + "'");
+
+ return endpoints.get(id);
+ }
+
+ @Override
+ public TestDeployment asTestDeployment() {
+ if (zone.environment() == Environment.prod)
+ throw new IllegalArgumentException("Won't return a mutable view of a production deployment");
+
+ return this;
+ }
+
+}
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/http/HttpEndpoint.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/http/HttpEndpoint.java
new file mode 100644
index 00000000000..02c34501dd2
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/http/HttpEndpoint.java
@@ -0,0 +1,97 @@
+package ai.vespa.hosted.cd.http;
+
+import ai.vespa.hosted.api.EndpointAuthenticator;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.JsonDecoder;
+import com.yahoo.slime.Slime;
+import ai.vespa.hosted.cd.Digest;
+import ai.vespa.hosted.cd.Feed;
+import ai.vespa.hosted.cd.Query;
+import ai.vespa.hosted.cd.Search;
+import ai.vespa.hosted.cd.Selection;
+import ai.vespa.hosted.cd.TestEndpoint;
+import ai.vespa.hosted.cd.Visit;
+import ai.vespa.hosted.cd.metric.Metrics;
+
+import java.net.URI;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+import java.time.Duration;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * A remote endpoint in a {@link HttpDeployment} of a Vespa application, reachable over HTTP.
+ *
+ * @author jonmv
+ */
+public class HttpEndpoint implements TestEndpoint {
+
+ static final String metricsPath = "/state/v1/metrics"; // TODO metrics/v1/values?
+ static final String documentApiPath = "/document/v1";
+ static final String searchApiPath = "/search";
+
+ private final URI endpoint;
+ private final HttpClient client;
+ private final EndpointAuthenticator authenticator;
+
+ public HttpEndpoint(URI endpoint, EndpointAuthenticator authenticator) {
+ this.endpoint = requireNonNull(endpoint);
+ this.authenticator = requireNonNull(authenticator);
+ this.client = HttpClient.newBuilder()
+ .sslContext(authenticator.sslContext())
+ .connectTimeout(Duration.ofSeconds(5))
+ .version(HttpClient.Version.HTTP_1_1)
+ .build();
+ }
+
+ @Override
+ public Digest digest(Feed feed) {
+ return null;
+ }
+
+ @Override
+ public URI uri() {
+ return endpoint;
+ }
+
+ @Override
+ public Search search(Query query) {
+ try {
+ URI target = endpoint.resolve(searchApiPath).resolve("?" + query.rawQuery());
+ HttpRequest request = authenticator.authenticated(HttpRequest.newBuilder()
+ .timeout(query.timeout().orElse(Duration.ofMillis(500))
+ .plus(Duration.ofSeconds(1)))
+ .uri(target))
+ .build();
+ HttpResponse<byte[]> response = client.send(request, HttpResponse.BodyHandlers.ofByteArray());
+ if (response.statusCode() / 100 != 2) // TODO consider allowing 504 if specified.
+ throw new RuntimeException("Non-OK status code " + response.statusCode() + " at " + target +
+ ", with response \n" + new String(response.body()));
+
+ return toSearch(response.body());
+ }
+ catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ static Search toSearch(byte[] body) {
+ // TODO jvenstad
+ // Inspector rootObject = new JsonDecoder().decode(new Slime(), body).get();
+ return new Search(new String(body, UTF_8));
+ }
+
+ @Override
+ public Visit visit(Selection selection) {
+ return null;
+ }
+
+ @Override
+ public Metrics metrics() {
+ return null;
+ }
+
+}
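
For illustration (not from the patch): the class above builds on the JDK's java.net.http client, pads the query timeout by one second and rejects non-2xx responses. A stripped-down sketch of that request pattern, without the authenticator, using only JDK classes and a placeholder query string:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;

class SearchRequestSketch {

    static String plainSearch(URI endpoint, String rawQuery) throws Exception {
        HttpClient client = HttpClient.newBuilder()
                                      .connectTimeout(Duration.ofSeconds(5))
                                      .version(HttpClient.Version.HTTP_1_1)
                                      .build();
        URI target = endpoint.resolve("/search").resolve("?" + rawQuery); // same resolution as HttpEndpoint.search()
        HttpRequest request = HttpRequest.newBuilder()
                                         .timeout(Duration.ofMillis(500).plus(Duration.ofSeconds(1))) // default plus padding
                                         .uri(target)
                                         .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        if (response.statusCode() / 100 != 2)
            throw new RuntimeException("Non-OK status code " + response.statusCode() + " at " + target);
        return response.body();
    }

}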
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Metric.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Metric.java
new file mode 100644
index 00000000000..cb3c8e77a9a
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Metric.java
@@ -0,0 +1,87 @@
+package ai.vespa.hosted.cd.metric;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.StringJoiner;
+
+import static java.util.Map.copyOf;
+import static java.util.stream.Collectors.toUnmodifiableMap;
+
+/**
+ * A set of statistics for a metric, indexed by points in a space with named dimensions of arbitrary type.
+ *
+ * @author jonmv
+ */
+public class Metric {
+
+ private final Map<Map<String, ?>, Statistic> statistics;
+
+ private Metric(Map<Map<String, ?>, Statistic> statistics) {
+ this.statistics = statistics;
+ }
+
+ /** Creates a new Metric with a copy of the given data. */
+ public static Metric of(Map<Map<String, ?>, Statistic> data) {
+ if (data.isEmpty())
+ throw new IllegalArgumentException("No data given.");
+
+ Map<Map<String, ?>, Statistic> copies = new HashMap<>();
+ Set<String> dimensions = data.keySet().iterator().next().keySet();
+ data.forEach((point, statistic) -> {
+ if ( ! point.keySet().equals(dimensions))
+ throw new IllegalArgumentException("Given data has inconsistent dimensions: '" + dimensions + "' vs '" + point.keySet() + "'.");
+
+ copies.put(copyOf(point), statistic);
+ });
+
+ return new Metric(copyOf(copies));
+ }
+
+ /** Returns a Metric view of the subset of points in the given hyperplane; its dimensions must be a subset of those of this Metric. */
+ public Metric at(Map<String, ?> hyperplane) {
+ return new Metric(statistics.keySet().stream()
+ .filter(point -> point.entrySet().containsAll(hyperplane.entrySet()))
+ .collect(toUnmodifiableMap(point -> point, statistics::get)));
+ }
+
+ /** Returns a version of this where statistics along the given hyperspace are aggregated. This does not preserve the last, 95th and 99th percentile values. */
+ public Metric collapse(Set<String> hyperspace) {
+ return new Metric(statistics.keySet().stream()
+ .collect(toUnmodifiableMap(point -> point.keySet().stream()
+ .filter(dimension -> ! hyperspace.contains(dimension))
+ .collect(toUnmodifiableMap(dimension -> dimension, point::get)),
+ statistics::get,
+ Statistic::mergedWith)));
+ }
+
+ /** Returns a collapsed version of this, with all statistics aggregated. This does not preserve the last, 95th and 99th percentile values. */
+ public Metric collapse() {
+ return collapse(statistics.keySet().iterator().next().keySet());
+ }
+
+ /** If this Metric contains a single point, returns the Statistic of that point; otherwise, throws an exception. */
+ public Statistic statistic() {
+ if (statistics.size() == 1)
+ return statistics.values().iterator().next();
+
+ if (statistics.isEmpty())
+ throw new NoSuchElementException("This Metric has no data.");
+
+ throw new IllegalStateException("This Metric has more than one point of data.");
+ }
+
+ /** Returns the underlying, unmodifiable Map. */
+ public Map<Map<String, ?>, Statistic> asMap() {
+ return statistics;
+ }
+
+ @Override
+ public String toString() {
+ return new StringJoiner(", ", Metric.class.getSimpleName() + "[", "]")
+ .add("statistics=" + statistics)
+ .toString();
+ }
+
+}
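
For illustration (not from the patch): how the Metric API above composes, using hypothetical dimension names. at() restricts to a hyperplane, collapse() aggregates away dimensions, and statistic() extracts the single remaining point:

import ai.vespa.hosted.cd.metric.Metric;
import ai.vespa.hosted.cd.metric.Statistic;
import ai.vespa.hosted.cd.metric.Type;

import java.util.Map;
import java.util.Set;

class MetricSketch {

    static void example() {
        Metric latency = Metric.of(Map.of(
                Map.of("chain", "default", "documenttype", "music"),
                Statistic.of(Map.of(Type.count, 2.0, Type.average, 10.0)),
                Map.of("chain", "default", "documenttype", "books"),
                Statistic.of(Map.of(Type.count, 3.0, Type.average, 20.0))));

        Metric defaultChain = latency.at(Map.of("chain", "default"));       // keeps both points

        // Aggregating away documenttype merges the two statistics: count 5, average (2*10 + 3*20) / 5 = 16.
        double average = defaultChain.collapse(Set.of("documenttype"))
                                     .statistic()
                                     .get(Type.average);                    // 16.0
    }

}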
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Metrics.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Metrics.java
new file mode 100644
index 00000000000..3aa5a126745
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Metrics.java
@@ -0,0 +1,73 @@
+package ai.vespa.hosted.cd.metric;
+
+import ai.vespa.hosted.cd.Endpoint;
+
+import java.time.Instant;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.StringJoiner;
+
+import static java.util.Map.copyOf;
+
+/**
+ * Metrics from a Vespa application {@link Endpoint}, indexed by their names, and optionally by a set of custom dimensions.
+ *
+ * Metrics are collected from the <a href="https://docs.vespa.ai/documentation/reference/metrics-health-format.html">metrics</a>
+ * API of a Vespa endpoint, and contain the current health status of the endpoint, values for all configured metrics in
+ * that endpoint, and the time interval from which these metrics were sampled.
+ *
+ * Each metric is indexed by a name, and, optionally, along a custom set of dimensions, given by a {@code Map<String, String>}.
+ *
+ * @author jonmv
+ */
+public class Metrics {
+
+ private final Instant start, end;
+ private final Map<String, Metric> metrics;
+
+ private Metrics(Instant start, Instant end, Map<String, Metric> metrics) {
+ this.start = start;
+ this.end = end;
+ this.metrics = metrics;
+ }
+
+ public static Metrics of(Instant start, Instant end, Map<String, Metric> metrics) {
+ if ( ! start.isBefore(end))
+ throw new IllegalArgumentException("Given time interval must be positive: '" + start + "' to '" + end + "'.");
+
+ return new Metrics(start, end, copyOf(metrics));
+ }
+
+ /** Returns the start of the time window from which these metrics were sampled, or throws if the status is {@code Status.down}. */
+ public Instant start() {
+ return start;
+ }
+
+ /** Returns the end of the time window from which these metrics were sampled, or throws if the status is {@code Status.down}. */
+ public Instant end() {
+ return end;
+ }
+
+ /** Returns the metric with the given name, or throws a NoSuchElementException if no such Metric is known. */
+ public Metric get(String name) {
+ if ( ! metrics.containsKey(name))
+ throw new NoSuchElementException("No metric with name '" + name + "'.");
+
+ return metrics.get(name);
+ }
+
+ /** Returns the underlying, unmodifiable Map. */
+ public Map<String, Metric> asMap() {
+ return metrics;
+ }
+
+ @Override
+ public String toString() {
+ return new StringJoiner(", ", Metrics.class.getSimpleName() + "[", "]")
+ .add("start=" + start)
+ .add("end=" + end)
+ .add("metrics=" + metrics)
+ .toString();
+ }
+
+}
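
For illustration (not from the patch): assembling a Metrics instance and looking up a metric by name; the metric name is a placeholder.

import ai.vespa.hosted.cd.metric.Metric;
import ai.vespa.hosted.cd.metric.Metrics;
import ai.vespa.hosted.cd.metric.Type;

import java.time.Instant;
import java.util.Map;

class MetricsSketch {

    static double averageOf(Metric queryLatency) {
        Instant end = Instant.now();
        Metrics metrics = Metrics.of(end.minusSeconds(60), end,             // the window must be positive
                                     Map.of("query_latency", queryLatency));
        return metrics.get("query_latency")                                 // unknown names throw NoSuchElementException
                      .collapse()
                      .statistic()
                      .get(Type.average);
    }

}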
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Space.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Space.java
new file mode 100644
index 00000000000..a1ece1e2aa8
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Space.java
@@ -0,0 +1,44 @@
+package ai.vespa.hosted.cd.metric;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.IntStream;
+
+import static java.util.stream.Collectors.toUnmodifiableMap;
+
+/**
+ * Used to easily generate points (Map&lt;String, ?&gt;) in a space defined by its dimension names.
+ *
+ * @author jonmv
+ */
+public class Space {
+
+ private final List<String> dimensions;
+
+ private Space(List<String> dimensions) {
+ this.dimensions = dimensions;
+ }
+
+ /** Creates a new space with the given named dimensions, in order. */
+ public static Space of(List<String> dimensions) {
+ if (Set.copyOf(dimensions).size() != dimensions.size())
+ throw new IllegalArgumentException("Duplicated dimension names in '" + dimensions + "'.");
+
+ return new Space(List.copyOf(dimensions));
+ }
+
+ /** Returns a point in this space, with the given values along each dimension, in order. */
+ public Map<String, ?> at(List<?> values) {
+ if (dimensions.size() != values.size())
+ throw new IllegalArgumentException("This space has " + dimensions.size() + " dimensions, but " + values.size() + " were given.");
+
+ return IntStream.range(0, dimensions.size()).boxed().collect(toUnmodifiableMap(dimensions::get, values::get));
+ }
+
+ /** Returns a point in this space, with the given values along each dimension, in order. */
+ public Map<String, ?> at(Object... values) {
+ return at(List.of(values));
+ }
+
+}
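
For illustration (not from the patch): generating points with the Space helper above, using hypothetical dimension names.

import ai.vespa.hosted.cd.metric.Space;

import java.util.List;
import java.util.Map;

class SpaceSketch {

    static Map<String, ?> examplePoint() {
        Space space = Space.of(List.of("documenttype", "chain"));  // dimension names must be unique
        return space.at("music", "default");                       // {documenttype=music, chain=default}
        // space.at("music") would throw: 2 dimensions, but 1 value given
    }

}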
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Statistic.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Statistic.java
new file mode 100644
index 00000000000..fc52900bdac
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Statistic.java
@@ -0,0 +1,68 @@
+package ai.vespa.hosted.cd.metric;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.StringJoiner;
+
+import static java.util.Map.copyOf;
+
+/**
+ * Known statistics about a metric, at a certain point.
+ *
+ * @author jonmv
+ */
+public class Statistic {
+
+ private final Map<Type, Double> data;
+
+ /** Creates a new Statistic with a copy of the given data. */
+ private Statistic(Map<Type, Double> data) {
+ this.data = data;
+ }
+
+ public static Statistic of(Map<Type, Double> data) {
+ return new Statistic(copyOf(data));
+ }
+
+ /** Returns the value of the given type, or throws a NoSuchElementException if no such value is known. */
+ public double get(Type key) {
+ if ( ! data.containsKey(key))
+ throw new NoSuchElementException("No value with key '" + key + "' is known.");
+
+ return data.get(key);
+ }
+
+ /** Returns the underlying, unmodifiable Map. */
+ public Map<Type, Double> asMap() {
+ return data;
+ }
+
+ Statistic mergedWith(Statistic other) {
+ if ( ! data.keySet().equals(other.data.keySet()))
+ throw new IllegalArgumentException("Incompatible key sets '" + data.keySet() + "' and '" + other.data.keySet() + "'.");
+
+ Map<Type, Double> merged = new HashMap<>();
+ double n1 = get(Type.count), n2 = other.get(Type.count);
+ for (Type type : data.keySet()) switch (type) {
+ case count: merged.put(type, n1 + n2); break;
+ case rate: merged.put(type, get(Type.rate) + other.get(Type.rate)); break;
+ case max: merged.put(type, Math.max(get(Type.max), other.get(Type.max))); break;
+ case min: merged.put(type, Math.min(get(Type.min), other.get(Type.min))); break;
+ case average: merged.put(type, (n1 * get(Type.average) + n2 * other.get(Type.average)) / (n1 + n2)); break;
+ case last:
+ case percentile95:
+ case percentile99: break;
+ default: throw new IllegalArgumentException("Unexpected type '" + type + "'.");
+ }
+ return of(merged);
+ }
+
+ @Override
+ public String toString() {
+ return new StringJoiner(", ", Statistic.class.getSimpleName() + "[", "]")
+ .add("data=" + data)
+ .toString();
+ }
+
+}
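
For illustration (not from the patch): the merge arithmetic in mergedWith above, which only accepts statistics with matching key sets. Counts and rates add, min/max combine, the average is count-weighted, and last/percentile values are dropped. Since mergedWith is package-private, the sketch assumes it lives in the same package:

package ai.vespa.hosted.cd.metric;

import java.util.Map;

class StatisticMergeSketch {

    static void example() {
        Statistic first  = Statistic.of(Map.of(Type.count, 2.0, Type.average, 10.0, Type.max, 12.0));
        Statistic second = Statistic.of(Map.of(Type.count, 3.0, Type.average, 20.0, Type.max, 25.0));

        Statistic merged = first.mergedWith(second);
        merged.get(Type.count);    // 5.0
        merged.get(Type.average);  // (2 * 10 + 3 * 20) / 5 = 16.0
        merged.get(Type.max);      // 25.0
        // merged.get(Type.last) would throw NoSuchElementException: last is not preserved
    }

}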
diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Type.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Type.java
new file mode 100644
index 00000000000..d48b4566f6d
--- /dev/null
+++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/metric/Type.java
@@ -0,0 +1,32 @@
+package ai.vespa.hosted.cd.metric;
+
+/**
+ * Known statistic types.
+ */
+public enum Type {
+
+ /** 95th percentile measurement. */
+ percentile95,
+
+ /** 99th percentile measurement. */
+ percentile99,
+
+ /** Average over all measurements. */
+ average,
+
+ /** Number of measurements. */
+ count,
+
+ /** Last measurement. */
+ last,
+
+ /** Maximum measurement. */
+ max,
+
+ /** Minimum measurement. */
+ min,
+
+ /** Number of measurements per second. */
+ rate;
+
+}
diff --git a/vbench/src/vbench/http/benchmark_headers.h b/vbench/src/vbench/http/benchmark_headers.h
index 92c4a05271b..04b29813760 100644
--- a/vbench/src/vbench/http/benchmark_headers.h
+++ b/vbench/src/vbench/http/benchmark_headers.h
@@ -5,6 +5,7 @@
#include <vbench/core/string.h>
#include <vespa/vespalib/locale/c.h>
+#include <cerrno>
namespace vbench {
diff --git a/vdslib/src/vespa/vdslib/container/parameters.cpp b/vdslib/src/vespa/vdslib/container/parameters.cpp
index 0803c346b0a..c830f89d5a9 100644
--- a/vdslib/src/vespa/vdslib/container/parameters.cpp
+++ b/vdslib/src/vespa/vdslib/container/parameters.cpp
@@ -5,6 +5,7 @@
#include <vespa/vespalib/objects/hexdump.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/vespalib/util/xmlstream.h>
+#include <ostream>
using namespace vdslib;
diff --git a/vespa-documentgen-plugin/etc/complex/music3.sd b/vespa-documentgen-plugin/etc/complex/music3.sd
index 45ce11fd581..8bd612268df 100644
--- a/vespa-documentgen-plugin/etc/complex/music3.sd
+++ b/vespa-documentgen-plugin/etc/complex/music3.sd
@@ -5,7 +5,7 @@ search music3 {
}
field pos type position {
-
+ indexing: attribute
}
}
}
diff --git a/vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/DeployMojo.java b/vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/DeployMojo.java
index 32ff03ae202..d62ccb1bba4 100644
--- a/vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/DeployMojo.java
+++ b/vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/DeployMojo.java
@@ -4,6 +4,7 @@ import ai.vespa.hosted.api.Deployment;
import ai.vespa.hosted.api.DeploymentLog;
import ai.vespa.hosted.api.DeploymentResult;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.zone.ZoneId;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
@@ -41,7 +42,11 @@ public class DeployMojo extends AbstractVespaMojo {
projectPathOf("target", "application.zip"))));
if (vespaVersion != null) deployment = deployment.atVersion(vespaVersion);
- ZoneId zone = environment == null || region == null ? controller.devZone() : ZoneId.from(environment, region);
+ ZoneId zone = region == null
+ ? controller.defaultZone(environment == null
+ ? Environment.dev
+ : Environment.from(environment))
+ : ZoneId.from(environment, region);
DeploymentResult result = controller.deploy(deployment, id, zone);
getLog().info(result.message());
diff --git a/vespajlib/abi-spec.json b/vespajlib/abi-spec.json
index 04e68e60178..b2b895040bc 100644
--- a/vespajlib/abi-spec.json
+++ b/vespajlib/abi-spec.json
@@ -1352,7 +1352,8 @@
"public static com.yahoo.tensor.TensorType$Value[] values()",
"public static com.yahoo.tensor.TensorType$Value valueOf(java.lang.String)",
"public static com.yahoo.tensor.TensorType$Value largestOf(java.util.List)",
- "public static com.yahoo.tensor.TensorType$Value largestOf(com.yahoo.tensor.TensorType$Value, com.yahoo.tensor.TensorType$Value)"
+ "public static com.yahoo.tensor.TensorType$Value largestOf(com.yahoo.tensor.TensorType$Value, com.yahoo.tensor.TensorType$Value)",
+ "public java.lang.String toString()"
],
"fields": [
"public static final enum com.yahoo.tensor.TensorType$Value DOUBLE",
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/IndexedDoubleTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/IndexedDoubleTensor.java
index 7f1351cc42b..219a3fa2278 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/IndexedDoubleTensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/IndexedDoubleTensor.java
@@ -108,7 +108,13 @@ class IndexedDoubleTensor extends IndexedTensor {
@Override
public void cellByDirectIndex(long index, double value) {
- values[(int)index] = value;
+ try {
+ values[(int) index] = value;
+ }
+ catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException("Can not set the cell at position " + index + " in a tensor " +
+ "of type " + type + ": Index is too large");
+ }
}
}
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
index aeb3da8ac40..aca2bfc1b0f 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
@@ -16,7 +16,7 @@ import java.util.Set;
import java.util.function.DoubleBinaryOperator;
/**
- * An indexed (dense) tensor backed by a double array.
+ * An indexed (dense) tensor backed by an array.
*
* @author bratseth
*/
@@ -143,9 +143,8 @@ public abstract class IndexedTensor implements Tensor {
long valueIndex = 0;
for (int i = 0; i < indexes.length; i++) {
- if (indexes[i] >= sizes.size(i)) {
- throw new IllegalArgumentException(indexes + " are not within bounds");
- }
+ if (indexes[i] >= sizes.size(i))
+ throw new IllegalArgumentException(Arrays.toString(indexes) + " are not within bounds");
valueIndex += productOfDimensionsAfter(i, sizes) * indexes[i];
}
return valueIndex;
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java b/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
index 22ff793e6fa..c2aa155d6bb 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
@@ -333,7 +333,7 @@ public interface Tensor {
} else {
x = Math.nextAfter(x, y);
}
- return x==y;
+ return x == y;
}
// ----------------- Factories
@@ -367,9 +367,7 @@ public interface Tensor {
return TensorParser.tensorFrom(tensorString, Optional.empty());
}
- /**
- * Returns a double as a tensor: A dimensionless tensor containing the value as its cell
- */
+ /** Returns a double as a tensor: A dimensionless tensor containing the value as its cell */
static Tensor from(double value) {
return Tensor.Builder.of(TensorType.empty).cell(value).build();
}
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java b/vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java
index 45a9992c9ad..4d8b34b7dcf 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java
@@ -8,44 +8,59 @@ import java.util.Optional;
*/
class TensorParser {
- static Tensor tensorFrom(String tensorString, Optional<TensorType> type) {
+ static Tensor tensorFrom(String tensorString, Optional<TensorType> explicitType) {
+ Optional<TensorType> type;
+ String valueString;
+
tensorString = tensorString.trim();
- try {
- if (tensorString.startsWith("tensor")) {
- int colonIndex = tensorString.indexOf(':');
- String typeString = tensorString.substring(0, colonIndex);
- String valueString = tensorString.substring(colonIndex + 1);
- TensorType typeFromString = TensorTypeParser.fromSpec(typeString);
- if (type.isPresent() && ! type.get().equals(typeFromString))
- throw new IllegalArgumentException("Got tensor with type string '" + typeString + "', but was " +
- "passed type " + type.get());
- return tensorFromValueString(valueString, typeFromString);
- }
- else if (tensorString.startsWith("{")) {
- return tensorFromValueString(tensorString, type.orElse(typeFromValueString(tensorString)));
- }
- else {
- if (type.isPresent() && ! type.get().equals(TensorType.empty))
- throw new IllegalArgumentException("Got zero-dimensional tensor '" + tensorString +
- "' where type " + type.get() + " is required");
+ if (tensorString.startsWith("tensor")) {
+ int colonIndex = tensorString.indexOf(':');
+ String typeString = tensorString.substring(0, colonIndex);
+ TensorType typeFromString = TensorTypeParser.fromSpec(typeString);
+ if (explicitType.isPresent() && ! explicitType.get().equals(typeFromString))
+ throw new IllegalArgumentException("Got tensor with type string '" + typeString + "', but was " +
+ "passed type " + explicitType.get());
+ type = Optional.of(typeFromString);
+ valueString = tensorString.substring(colonIndex + 1);
+ }
+ else {
+ type = explicitType;
+ valueString = tensorString;
+ }
+
+ valueString = valueString.trim();
+ if (valueString.startsWith("{")) {
+ return tensorFromSparseValueString(valueString, type);
+ }
+ else if (valueString.startsWith("[")) {
+ return tensorFromDenseValueString(valueString, type);
+ }
+ else {
+ if (explicitType.isPresent() && ! explicitType.get().equals(TensorType.empty))
+ throw new IllegalArgumentException("Got a zero-dimensional tensor value ('" + tensorString +
+ "') where type " + explicitType.get() + " is required");
+ try {
return Tensor.Builder.of(TensorType.empty).cell(Double.parseDouble(tensorString)).build();
}
- }
- catch (NumberFormatException e) {
- throw new IllegalArgumentException("Excepted a number or a string starting by { or tensor(, got '" +
- tensorString + "'");
+ catch (NumberFormatException e) {
+ throw new IllegalArgumentException("Excepted a number or a string starting by {, [ or tensor(...):, got '" +
+ tensorString + "'");
+ }
}
}
- /** Derive the tensor type from the first address string in the given tensor string */
- private static TensorType typeFromValueString(String s) {
- s = s.substring(1).trim(); // remove tensor start
+ /** Derives the tensor type from the first address string in the given tensor string */
+ private static TensorType typeFromSparseValueString(String valueString) {
+ String s = valueString.substring(1).trim(); // remove tensor start
int firstKeyOrTensorEnd = s.indexOf('}');
+ if (firstKeyOrTensorEnd < 0)
+ throw new IllegalArgumentException("Excepted a number or a string starting by {, [ or tensor(...):, got '" +
+ valueString + "'");
String addressBody = s.substring(0, firstKeyOrTensorEnd).trim();
if (addressBody.isEmpty()) return TensorType.empty; // Empty tensor
if ( ! addressBody.startsWith("{")) return TensorType.empty; // Single value tensor
- addressBody = addressBody.substring(1); // remove key start
+ addressBody = addressBody.substring(1, addressBody.length()); // remove key start
if (addressBody.isEmpty()) return TensorType.empty; // Empty key
TensorType.Builder builder = new TensorType.Builder(TensorType.Value.DOUBLE);
@@ -60,21 +75,76 @@ class TensorParser {
return builder.build();
}
- private static Tensor tensorFromValueString(String tensorValueString, TensorType type) {
- Tensor.Builder builder = Tensor.Builder.of(type);
- tensorValueString = tensorValueString.trim();
+ private static Tensor tensorFromSparseValueString(String valueString, Optional<TensorType> type) {
try {
- if (tensorValueString.startsWith("{"))
- return fromCellString(builder, tensorValueString);
- else
- return builder.cell(Double.parseDouble(tensorValueString)).build();
+ valueString = valueString.trim();
+ Tensor.Builder builder = Tensor.Builder.of(type.orElse(typeFromSparseValueString(valueString)));
+ return fromCellString(builder, valueString);
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("Excepted a number or a string starting by { or tensor(, got '" +
- tensorValueString + "'");
+ valueString + "'");
}
}
+ private static Tensor tensorFromDenseValueString(String valueString, Optional<TensorType> type) {
+ if (type.isEmpty())
+ throw new IllegalArgumentException("The dense tensor form requires an explicit tensor type " +
+ "on the form 'tensor(dimensions):...");
+ if (type.get().dimensions().stream().anyMatch(d -> ( d.size().isEmpty())))
+ throw new IllegalArgumentException("The dense tensor form requires a tensor type containing " +
+ "only dense dimensions with a given size");
+
+ IndexedTensor.BoundBuilder builder = (IndexedTensor.BoundBuilder)IndexedTensor.Builder.of(type.get());
+ long index = 0;
+ int currentChar;
+ int nextNumberEnd = 0;
+ // Since we know the dimensions, the brackets are just syntactic sugar:
+ while ((currentChar = nextStartCharIndex(nextNumberEnd + 1, valueString)) < valueString.length()) {
+ nextNumberEnd = nextStopCharIndex(currentChar, valueString);
+ if (currentChar == nextNumberEnd) return builder.build();
+
+ TensorType.Value cellValueType = builder.type().valueType();
+ String cellValueString = valueString.substring(currentChar, nextNumberEnd);
+ try {
+ if (cellValueType == TensorType.Value.DOUBLE)
+ builder.cellByDirectIndex(index, Double.parseDouble(cellValueString));
+ else if (cellValueType == TensorType.Value.FLOAT)
+ builder.cellByDirectIndex(index, Float.parseFloat(cellValueString));
+ else
+ throw new IllegalArgumentException(cellValueType + " is not supported");
+ }
+ catch (NumberFormatException e) {
+ throw new IllegalArgumentException("At index " + index + ": '" +
+ cellValueString + "' is not a valid " + cellValueType);
+ }
+ index++;
+ }
+ return builder.build();
+ }
+
+ /** Returns the position of the next character that should contain a number, or the string length if there is none */
+ private static int nextStartCharIndex(int charIndex, String valueString) {
+ for (; charIndex < valueString.length(); charIndex++) {
+ if (valueString.charAt(charIndex) == ']') continue;
+ if (valueString.charAt(charIndex) == '[') continue;
+ if (valueString.charAt(charIndex) == ',') continue;
+ if (valueString.charAt(charIndex) == ' ') continue;
+ return charIndex;
+ }
+ return valueString.length();
+ }
+
+ private static int nextStopCharIndex(int charIndex, String valueString) {
+ while (charIndex < valueString.length()) {
+ if (valueString.charAt(charIndex) == ',') return charIndex;
+ if (valueString.charAt(charIndex) == ']') return charIndex;
+ charIndex++;
+ }
+ throw new IllegalArgumentException("Malformed tensor value '" + valueString +
+ "': Expected a ',' or ']' after position " + charIndex);
+ }
+
private static Tensor fromCellString(Tensor.Builder builder, String s) {
int index = 1;
index = skipSpace(index, s);
@@ -97,8 +167,21 @@ class TensorParser {
}
TensorAddress address = addressBuilder.build();
- Double value = asDouble(address, s.substring(index, valueEnd).trim());
- builder.cell(address, value);
+ TensorType.Value cellValueType = builder.type().valueType();
+ String cellValueString = s.substring(index, valueEnd).trim();
+ try {
+ if (cellValueType == TensorType.Value.DOUBLE)
+ builder.cell(address, Double.parseDouble(cellValueString));
+ else if (cellValueType == TensorType.Value.FLOAT)
+ builder.cell(address, Float.parseFloat(cellValueString));
+ else
+ throw new IllegalArgumentException(cellValueType + " is not supported");
+ }
+ catch (NumberFormatException e) {
+ throw new IllegalArgumentException("At " + address.toString(builder.type()) + ": '" +
+ cellValueString + "' is not a valid " + cellValueType);
+ }
+
index = valueEnd+1;
index = skipSpace(index, s);
}
@@ -130,13 +213,4 @@ class TensorParser {
}
}
- private static Double asDouble(TensorAddress address, String s) {
- try {
- return Double.valueOf(s);
- }
- catch (NumberFormatException e) {
- throw new IllegalArgumentException("At " + address + ": Expected a floating point number, got '" + s + "'");
- }
- }
-
}
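
For illustration (not from the patch): the new dense value form requires an explicit type with bound dimensions, and since the brackets are treated as syntactic sugar, cells are filled in flat (row-major) order over the dimensions as declared; see the parser test cases further down in this patch.

import com.yahoo.tensor.Tensor;

class DenseFormSketch {

    static void example() {
        Tensor vector = Tensor.from("tensor(x[3]):[1.0, 2.0, 3.0]");

        // Cells are assigned in flat order: x=0,y=0 -> 1.0, x=0,y=1 -> 2.0, ..., x=1,y=2 -> 6.0.
        Tensor matrix = Tensor.from("tensor(x[2],y[3]):[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]");

        // Without a type the dense form is rejected:
        // Tensor.from("[1.0, 2.0]") throws IllegalArgumentException.
    }

}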
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/TensorType.java b/vespajlib/src/main/java/com/yahoo/tensor/TensorType.java
index b1c7a2341c0..8e566fac0b6 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/TensorType.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/TensorType.java
@@ -48,6 +48,9 @@ public class TensorType {
return FLOAT;
}
+ @Override
+ public String toString() { return name().toLowerCase(); }
+
};
/** The empty tensor type - which is the same as a double */
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/TensorTypeParser.java b/vespajlib/src/main/java/com/yahoo/tensor/TensorTypeParser.java
index d5f77be0dd0..1f426942c5f 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/TensorTypeParser.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/TensorTypeParser.java
@@ -24,6 +24,7 @@ public class TensorTypeParser {
private static final Pattern mappedPattern = Pattern.compile("(\\w+)\\{\\}");
public static TensorType fromSpec(String specString) {
+ specString = specString.trim();
if ( ! specString.startsWith(START_STRING) || ! specString.endsWith(END_STRING))
throw formatException(specString);
String specBody = specString.substring(START_STRING.length(), specString.length() - END_STRING.length());
@@ -112,9 +113,9 @@ public class TensorTypeParser {
private static IllegalArgumentException formatException(String spec, Optional<String> errorDetail) {
throw new IllegalArgumentException("A tensor type spec must be on the form " +
- "tensor[<valuetype>]?(dimensionidentifier[{}|[length?]*), but was '" + spec + "'. " +
+ "tensor[<valuetype>]?(dimensionidentifier[{}|[length]*), but was '" + spec + "'. " +
errorDetail.map(s -> s + ". ").orElse("") +
- "Examples: tensor(x[]), tensor<float>(name{}, x[10])");
+ "Examples: tensor(x[3]), tensor<float>(name{}, x[10])");
}
}
diff --git a/vespajlib/src/main/java/com/yahoo/text/Lowercase.java b/vespajlib/src/main/java/com/yahoo/text/Lowercase.java
index a04ba1cfe13..6304f7f0a39 100644
--- a/vespajlib/src/main/java/com/yahoo/text/Lowercase.java
+++ b/vespajlib/src/main/java/com/yahoo/text/Lowercase.java
@@ -12,64 +12,6 @@ import java.util.Locale;
*/
public final class Lowercase {
- private static final char[] lowercase = new char[123];
-
- static {
- lowercase[0x41] = 'a';
- lowercase[0x42] = 'b';
- lowercase[0x43] = 'c';
- lowercase[0x44] = 'd';
- lowercase[0x45] = 'e';
- lowercase[0x46] = 'f';
- lowercase[0x47] = 'g';
- lowercase[0x48] = 'h';
- lowercase[0x49] = 'i';
- lowercase[0x4A] = 'j';
- lowercase[0x4B] = 'k';
- lowercase[0x4C] = 'l';
- lowercase[0x4D] = 'm';
- lowercase[0x4E] = 'n';
- lowercase[0x4F] = 'o';
- lowercase[0x50] = 'p';
- lowercase[0x51] = 'q';
- lowercase[0x52] = 'r';
- lowercase[0x53] = 's';
- lowercase[0x54] = 't';
- lowercase[0x55] = 'u';
- lowercase[0x56] = 'v';
- lowercase[0x57] = 'w';
- lowercase[0x58] = 'x';
- lowercase[0x59] = 'y';
- lowercase[0x5A] = 'z';
-
- lowercase[0x61] = 'a';
- lowercase[0x62] = 'b';
- lowercase[0x63] = 'c';
- lowercase[0x64] = 'd';
- lowercase[0x65] = 'e';
- lowercase[0x66] = 'f';
- lowercase[0x67] = 'g';
- lowercase[0x68] = 'h';
- lowercase[0x69] = 'i';
- lowercase[0x6A] = 'j';
- lowercase[0x6B] = 'k';
- lowercase[0x6C] = 'l';
- lowercase[0x6D] = 'm';
- lowercase[0x6E] = 'n';
- lowercase[0x6F] = 'o';
- lowercase[0x70] = 'p';
- lowercase[0x71] = 'q';
- lowercase[0x72] = 'r';
- lowercase[0x73] = 's';
- lowercase[0x74] = 't';
- lowercase[0x75] = 'u';
- lowercase[0x76] = 'v';
- lowercase[0x77] = 'w';
- lowercase[0x78] = 'x';
- lowercase[0x79] = 'y';
- lowercase[0x7A] = 'z';
- }
-
/**
* Return a lowercased version of the given string. Since this is language
* independent, this is more of a case normalization operation than
@@ -80,40 +22,11 @@ public final class Lowercase {
* @return a string containing only lowercase character
*/
public static String toLowerCase(String in) {
- // def is picked from http://docs.oracle.com/javase/6/docs/api/java/lang/String.html#toLowerCase%28%29
- String lower = toLowerCasePrintableAsciiOnly(in);
- return (lower == null) ? in.toLowerCase(Locale.ENGLISH) : lower;
+ return in.toLowerCase(Locale.ENGLISH);
+
}
public static String toUpperCase(String in) {
- // def is picked from http://docs.oracle.com/javase/6/docs/api/java/lang/String.html#toLowerCase%28%29
return in.toUpperCase(Locale.ENGLISH);
}
- private static String toLowerCasePrintableAsciiOnly(String in) {
- boolean anyUpper = false;
- for (int i = 0; i < in.length(); i++) {
- char c = in.charAt(i);
- if (c < 0x41) { //lower than A-Z
- return null;
- }
- if (c > 0x5A && c < 0x61) { //between A-Z and a-z
- return null;
- }
- if (c > 0x7A) { //higher than a-z
- return null;
- }
- if (c != lowercase[c]) {
- anyUpper = true;
- }
- }
- if (!anyUpper) {
- return in;
- }
- StringBuilder builder = new StringBuilder(in.length());
- for (int i = 0; i < in.length(); i++) {
- builder.append((char) (in.charAt(i) | ((char) 0x20)));
- }
- return builder.toString();
- }
-
}
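
For illustration (not from the patch): the rewrite above delegates to String.toLowerCase(Locale.ENGLISH). Pinning the locale matters because default-locale lowercasing is not ASCII-stable; the classic case is the Turkish dotless i, which is standard JDK behavior rather than anything introduced here.

import java.util.Locale;

class LocaleLowercaseSketch {

    static void example() {
        String id = "TITLE";
        String english = id.toLowerCase(Locale.ENGLISH);          // "title"
        String turkish = id.toLowerCase(new Locale("tr", "TR"));  // "tıtle" (dotless ı for I)
        // Lowercase.toLowerCase(id) matches the English variant regardless of the JVM default locale.
    }

}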
diff --git a/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java
index 04ea118280c..63fe40565bd 100644
--- a/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java
+++ b/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java
@@ -9,13 +9,58 @@ import static org.junit.Assert.fail;
public class TensorParserTestCase {
@Test
- public void testParsing() {
+ public void testSparseParsing() {
assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor()")).build(),
Tensor.from("{}"));
assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x{})")).cell(1.0, 0).build(),
Tensor.from("{{x:0}:1.0}"));
assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x{})")).cell().label("x", "l0").value(1.0).build(),
Tensor.from("{{x:l0}:1.0}"));
+ assertEquals("If the type is specified, a dense tensor can be created from the sparse text form",
+ Tensor.Builder.of(TensorType.fromSpec("tensor(x[1])")).cell(1.0, 0).build(),
+ Tensor.from("tensor(x[1]):{{x:0}:1.0}"));
+ }
+
+ @Test
+ public void testDenseParsing() {
+ assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor()")).build(),
+ Tensor.from("tensor():[]"));
+ assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[1])")).cell(1.0, 0).build(),
+ Tensor.from("tensor(x[1]):[1.0]"));
+ assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[2])")).cell(1.0, 0).cell(2.0, 1).build(),
+ Tensor.from("tensor(x[2]):[1.0, 2.0]"));
+ assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[2],y[3])"))
+ .cell(1.0, 0, 0)
+ .cell(2.0, 0, 1)
+ .cell(3.0, 0, 2)
+ .cell(4.0, 1, 0)
+ .cell(5.0, 1, 1)
+ .cell(6.0, 1, 2).build(),
+ Tensor.from("tensor(x[2],y[3]):[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]"));
+ assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[1],y[2],z[3])"))
+ .cell(1.0, 0, 0, 0)
+ .cell(2.0, 0, 0, 1)
+ .cell(3.0, 0, 0, 2)
+ .cell(4.0, 0, 1, 0)
+ .cell(5.0, 0, 1, 1)
+ .cell(6.0, 0, 1, 2).build(),
+ Tensor.from("tensor(x[1],y[2],z[3]):[[[1.0], [2.0]], [[3.0], [4.0]], [[5.0], [6.0]]]"));
+ assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[3],y[2],z[1])"))
+ .cell(1.0, 0, 0, 0)
+ .cell(2.0, 0, 1, 0)
+ .cell(3.0, 1, 0, 0)
+ .cell(4.0, 1, 1, 0)
+ .cell(5.0, 2, 0, 0)
+ .cell(6.0, 2, 1, 0).build(),
+ Tensor.from("tensor(x[3],y[2],z[1]):[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]"));
+ assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[3],y[2],z[1])"))
+ .cell( 1.0, 0, 0, 0)
+ .cell( 2.0, 0, 1, 0)
+ .cell( 3.0, 1, 0, 0)
+ .cell( 4.0, 1, 1, 0)
+ .cell( 5.0, 2, 0, 0)
+ .cell(-6.0, 2, 1, 0).build(),
+ Tensor.from("tensor( x[3],y[2],z[1]) : [ [ [1.0, 2.0, 3.0] , [4.0, 5,-6.0] ] ]"));
}
@Test
@@ -26,6 +71,10 @@ public class TensorParserTestCase {
"{{'x':\"l0\"}:1.0}");
assertIllegal("dimension must be an identifier or integer, not '\"x\"'",
"{{\"x\":\"l0\", \"y\":\"l0\"}:1.0, {\"x\":\"l0\", \"y\":\"l1\"}:2.0}");
+ assertIllegal("At {x:0}: '1-.0' is not a valid double",
+ "{{x:0}:1-.0}");
+ assertIllegal("At index 0: '1-.0' is not a valid double",
+ "tensor(x[1]):[1-.0]");
}
private void assertIllegal(String message, String tensor) {
diff --git a/vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java
index b01d171792c..c53db160806 100644
--- a/vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java
+++ b/vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java
@@ -54,7 +54,7 @@ public class TensorTestCase {
fail("Expected parse error");
}
catch (IllegalArgumentException expected) {
- assertEquals("Excepted a number or a string starting by { or tensor(, got '--'", expected.getMessage());
+ assertEquals("Excepted a number or a string starting by {, [ or tensor(...):, got '--'", expected.getMessage());
}
}
diff --git a/vespajlib/src/test/java/com/yahoo/text/LowercaseTestCase.java b/vespajlib/src/test/java/com/yahoo/text/LowercaseTestCase.java
index 8a3e6ed134d..a1379594ba0 100644
--- a/vespajlib/src/test/java/com/yahoo/text/LowercaseTestCase.java
+++ b/vespajlib/src/test/java/com/yahoo/text/LowercaseTestCase.java
@@ -7,6 +7,7 @@ import org.junit.Test;
import java.util.Locale;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
/**
@@ -49,12 +50,28 @@ public class LowercaseTestCase {
}
@Test
+ public void test7bitAscii() {
+ for(char c = 0; c < 128; c++) {
+ char [] carray = {c};
+ String s = new String(carray);
+ assertEquals(Lowercase.toLowerCase(s), s.toLowerCase(Locale.ENGLISH));
+ assertEquals(Lowercase.toUpperCase(s), s.toUpperCase(Locale.ENGLISH));
+ }
+ }
+
+ @Test
@Ignore
public void performance() {
+ for (int i=0; i < 2; i++) {
+ benchmark(i);
+ }
+ }
+
+ private void benchmark(int i) {
Lowercase.toLowerCase("warmup");
- String lowercaseInput = "abcdefghijklmnopqrstuvwxyz";
- String uppercaseInput = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
- String mixedcaseInput = "AbCDEfGHIJklmnoPQRStuvwXyz";
+ String lowercaseInput = "abcdefghijklmnopqrstuvwxyz" + i;
+ String uppercaseInput = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + i;
+ String mixedcaseInput = "AbCDEfGHIJklmnoPQRStuvwXyz" + i;
System.err.println("Lowercase input: ");
testPerformance(lowercaseInput);
@@ -67,12 +84,14 @@ public class LowercaseTestCase {
}
private void testPerformance(String input) {
- final int NUM = 10000000;
+ final int NUM = 100000000;
long elapsedTimeOwnImpl;
+ long ownCount = 0;
+ long javaCount = 0;
{
long startTimeOwnImpl = System.currentTimeMillis();
for (int i = 0; i < NUM; i++) {
- Lowercase.toLowerCase(input);
+ ownCount += Lowercase.toLowerCase(input).length();
}
elapsedTimeOwnImpl = System.currentTimeMillis() - startTimeOwnImpl;
System.err.println("Own implementation: " + elapsedTimeOwnImpl);
@@ -82,7 +101,7 @@ public class LowercaseTestCase {
{
long startTimeJava = System.currentTimeMillis();
for (int i = 0; i < NUM; i++) {
- input.toLowerCase(Locale.ENGLISH);
+ javaCount += input.toLowerCase(Locale.ENGLISH).length();
}
elapsedTimeJava = System.currentTimeMillis() - startTimeJava;
System.err.println("Java's implementation: " + elapsedTimeJava);
@@ -90,7 +109,6 @@ public class LowercaseTestCase {
long diff = elapsedTimeJava - elapsedTimeOwnImpl;
double diffPercentage = (((double) diff) / ((double) elapsedTimeJava)) * 100.0;
- System.err.println("Own implementation is " + diffPercentage + " % faster.");
-
+ System.err.println("Own implementation is " + diffPercentage + " % faster. owncount=" + ownCount + " javaCount=" + javaCount);
}
}
diff --git a/vespalib/src/tests/stllike/asciistream_test.cpp b/vespalib/src/tests/stllike/asciistream_test.cpp
index b0f6d8cb455..b1ba70e6ae2 100644
--- a/vespalib/src/tests/stllike/asciistream_test.cpp
+++ b/vespalib/src/tests/stllike/asciistream_test.cpp
@@ -4,7 +4,9 @@
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/locale/c.h>
#include <iomanip>
+#include <float.h>
using namespace vespalib;
@@ -26,6 +28,7 @@ public:
void testMoveIsWellDefined();
void testIllegalNumbers();
void testDouble();
+ void testFloat();
void testStateSaver();
};
@@ -83,10 +86,10 @@ AsciistreamTest::testIllegalNumbers()
is << "777777777777";
EXPECT_EQUAL(24u, is.size());
uint64_t l(0);
- EXPECT_EXCEPTION(is >> l, IllegalArgumentException, "value is outside of range '777777777777777777777777'");
+ EXPECT_EXCEPTION(is >> l, IllegalArgumentException, "value '777777777777777777777777' is outside of range");
EXPECT_EQUAL(24u, is.size());
int64_t li(0);
- EXPECT_EXCEPTION(is >> li, IllegalArgumentException, "value is outside of range '777777777777777777777777'");
+ EXPECT_EXCEPTION(is >> li, IllegalArgumentException, "value '777777777777777777777777' is outside of range");
EXPECT_EQUAL(24u, is.size());
}
{
@@ -102,13 +105,13 @@ AsciistreamTest::testIllegalNumbers()
asciistream is("7777777777777777777777777777777777777777");
EXPECT_EQUAL(40u, is.size());
float f(0);
- EXPECT_EXCEPTION(is >> f, IllegalArgumentException, "float value is outside of range '7777777777777777777777777777777777777777'");
+ EXPECT_EXCEPTION(is >> f, IllegalArgumentException, "float value '7777777777777777777777777777777777777777' is outside of range");
EXPECT_EQUAL(40u, is.size());
vespalib::string tmp = is.str();
is << "e" << tmp;
EXPECT_EQUAL(81u, is.size());
double d(0);
- EXPECT_EXCEPTION(is >> d, IllegalArgumentException, "double value is outside of range '7777777777777777777777777777777777777777e7777777777777777777777777777777777777777'");
+ EXPECT_EXCEPTION(is >> d, IllegalArgumentException, "double value '7777777777777777777777777777777777777777e7777777777777777777777777777777777777777' is outside of range");
EXPECT_EQUAL(81u, is.size());
}
{
@@ -481,6 +484,68 @@ AsciistreamTest::testDouble() {
VERIFY_DOUBLE_SERIALIZATION(0.0, "0.0", automatic << forcedot, 1);
VERIFY_DOUBLE_SERIALIZATION(0.0, "0.0", automatic << forcedot, 16);
VERIFY_DOUBLE_SERIALIZATION(maxInteger, "9007199254740992.0", automatic << forcedot, 16);
+
+ asciistream as;
+ as.clear();
+ as << (3 * std::numeric_limits<double>::min());
+ double dv = 0;
+ as >> dv;
+ EXPECT_TRUE(dv > 0);
+
+ as.clear();
+ as << (3 * std::numeric_limits<double>::denorm_min());
+ dv = 0;
+ as >> dv;
+ EXPECT_TRUE(dv > 0);
+
+ as.clear();
+ as << "1.0e-325";
+ dv = 42.0;
+ as >> dv;
+ EXPECT_EQUAL(dv, 0.0);
+
+ as.clear();
+ as << "1.0e666";
+ dv = 42.0;
+ EXPECT_EXCEPTION(as >> dv, IllegalArgumentException, "double value '1.0e666' is outside of range.");
+ EXPECT_EQUAL(dv, 42.0);
+}
+
+void
+AsciistreamTest::testFloat() {
+ float f = 0;
+ asciistream as("-5.490412E-39");
+ as >> f;
+ EXPECT_EQUAL(f, -5.490412E-39f);
+
+ as.clear();
+ as << "0.0001E-50";
+ f = 42.0;
+ as >> f;
+ EXPECT_EQUAL(f, 0.0);
+
+ as.clear();
+ as << "123.4E50";
+ f = 42.0;
+ EXPECT_EXCEPTION(as >> f, IllegalArgumentException, "float value '123.4E50' is outside of range.");
+ EXPECT_EQUAL(f, 42.0);
+
+ errno = 0;
+ char *ep;
+ f = locale::c::strtof_au("-5.490412E-39", &ep);
+ EXPECT_EQUAL(f, -5.490412E-39f);
+ EXPECT_EQUAL(errno, 0);
+ EXPECT_EQUAL(*ep, 0);
+
+ f = locale::c::strtof_au("0.0001E-50", &ep);
+ EXPECT_EQUAL(f, 0.0);
+ EXPECT_EQUAL(errno, 0);
+ EXPECT_EQUAL(*ep, 0);
+
+ f = locale::c::strtof_au("123.4E50", &ep);
+ EXPECT_EQUAL(f, HUGE_VALF);
+ EXPECT_EQUAL(errno, ERANGE);
+ EXPECT_EQUAL(*ep, 0);
}
void
@@ -547,6 +612,7 @@ AsciistreamTest::Main()
testGetLine();
testIllegalNumbers();
testDouble();
+ testFloat();
testStateSaver();
TEST_DONE();
}
diff --git a/vespalib/src/vespa/vespalib/btree/btree.hpp b/vespalib/src/vespa/vespalib/btree/btree.hpp
index 928d8d6cfcd..7bba2e936f3 100644
--- a/vespalib/src/vespa/vespalib/btree/btree.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btree.hpp
@@ -4,8 +4,7 @@
#include "btree.h"
-namespace search {
-namespace btree {
+namespace search::btree {
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, class AggrCalcT>
@@ -24,7 +23,4 @@ BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::~BTree()
_alloc.clearHoldLists();
}
-
-} // namespace search::btree
-} // namespace search
-
+}
diff --git a/vespalib/src/vespa/vespalib/btree/btreeinserter.cpp b/vespalib/src/vespa/vespalib/btree/btreeinserter.cpp
index f307c474f90..c00d2342eed 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeinserter.cpp
+++ b/vespalib/src/vespa/vespalib/btree/btreeinserter.cpp
@@ -6,16 +6,11 @@
#include "btreeinserter.hpp"
#include "btreenode.hpp"
-#include <vespa/log/log.h>
-LOG_SETUP(".searchlib.btree.btreeinserter");
-
namespace search::btree {
template class BTreeInserter<uint32_t, uint32_t, NoAggregated>;
template class BTreeInserter<uint32_t, BTreeNoLeafData, NoAggregated>;
template class BTreeInserter<uint32_t, int32_t, MinMaxAggregated,
- std::less<uint32_t>,
- BTreeDefaultTraits,
- MinMaxAggrCalc>;
+ std::less<uint32_t>, BTreeDefaultTraits, MinMaxAggrCalc>;
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreeinserter.h b/vespalib/src/vespa/vespalib/btree/btreeinserter.h
index a3fa2916a88..a66a7bc3f92 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeinserter.h
+++ b/vespalib/src/vespa/vespalib/btree/btreeinserter.h
@@ -10,11 +10,7 @@
#include "minmaxaggrcalc.h"
#include "btreeiterator.h"
-namespace search
-{
-
-namespace btree
-{
+namespace search::btree {
template <typename KeyT,
typename DataT,
@@ -32,12 +28,9 @@ public:
TraitsT::INTERNAL_SLOTS,
TraitsT::LEAF_SLOTS,
AggrCalcT> Aggregator;
- typedef BTreeIterator<KeyT, DataT, AggrT,
- CompareT, TraitsT> Iterator;
- typedef BTreeInternalNode<KeyT, AggrT, TraitsT::INTERNAL_SLOTS>
- InternalNodeType;
- typedef BTreeLeafNode<KeyT, DataT, AggrT, TraitsT::LEAF_SLOTS>
- LeafNodeType;
+ typedef BTreeIterator<KeyT, DataT, AggrT, CompareT, TraitsT> Iterator;
+ typedef BTreeInternalNode<KeyT, AggrT, TraitsT::INTERNAL_SLOTS> InternalNodeType;
+ typedef BTreeLeafNode<KeyT, DataT, AggrT, TraitsT::LEAF_SLOTS> LeafNodeType;
typedef KeyT KeyType;
typedef DataT DataType;
typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair;
@@ -49,19 +42,12 @@ private:
public:
static void
- insert(BTreeNode::Ref &root,
- Iterator &itr,
- const KeyType &key, const DataType &data,
- const AggrCalcT &aggrCalc);
+ insert(BTreeNode::Ref &root, Iterator &itr, const KeyType &key, const DataType &data, const AggrCalcT &aggrCalc);
};
extern template class BTreeInserter<uint32_t, uint32_t, NoAggregated>;
extern template class BTreeInserter<uint32_t, BTreeNoLeafData, NoAggregated>;
extern template class BTreeInserter<uint32_t, int32_t, MinMaxAggregated,
- std::less<uint32_t>,
- BTreeDefaultTraits,
- MinMaxAggrCalc>;
-
-} // namespace search::btree
-} // namespace search
+ std::less<uint32_t>, BTreeDefaultTraits, MinMaxAggrCalc>;
+}
diff --git a/vespalib/src/vespa/vespalib/btree/btreeinserter.hpp b/vespalib/src/vespa/vespalib/btree/btreeinserter.hpp
index d1da94c1b17..b24874088a2 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeinserter.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreeinserter.hpp
@@ -7,8 +7,7 @@
#include "btreeiterator.hpp"
#include <vespa/vespalib/stllike/asciistream.h>
-namespace search {
-namespace btree {
+namespace search::btree {
namespace {
@@ -178,7 +177,4 @@ insert(BTreeNode::Ref &root,
}
}
-
-} // namespace search::btree
-} // namespace search
-
+}
diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp b/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp
index b26f249c51b..13d41ef61f3 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreeiterator.hpp
@@ -9,8 +9,6 @@
namespace search::btree {
-#define STRICT_BTREE_ITERATOR_SEEK
-
template <typename KeyT, typename DataT, typename AggrT,
uint32_t INTERNAL_SLOTS, uint32_t LEAF_SLOTS, uint32_t PATH_SIZE>
BTreeIteratorBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS, PATH_SIZE>::
@@ -81,10 +79,7 @@ operator=(const BTreeIteratorBase &other)
template <typename KeyT, typename DataT, typename AggrT,
uint32_t INTERNAL_SLOTS, uint32_t LEAF_SLOTS, uint32_t PATH_SIZE>
-BTreeIteratorBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS, PATH_SIZE>::
-~BTreeIteratorBase()
-{
-}
+BTreeIteratorBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS, PATH_SIZE>::~BTreeIteratorBase() = default;
template <typename KeyT, typename DataT, typename AggrT,
uint32_t INTERNAL_SLOTS, uint32_t LEAF_SLOTS, uint32_t PATH_SIZE>
@@ -674,11 +669,7 @@ BTreeConstIterator<KeyT, DataT, AggrT, CompareT, TraitsT>::
binarySeek(const KeyType & key, CompareT comp)
{
const LeafNodeType *lnode = _leaf.getNode();
- uint32_t lidx = _leaf.getIdx();
-#ifdef STRICT_BTREE_ITERATOR_SEEK
- assert(_leaf.valid() && comp(lnode->getKey(lidx), key));
-#endif
- ++lidx;
+ uint32_t lidx = _leaf.getIdx() + 1;
if (lidx < lnode->validSlots()) {
if (!comp(lnode->getKey(lidx), key)) {
_leaf.setIdx(lidx);
@@ -723,11 +714,7 @@ BTreeConstIterator<KeyT, DataT, AggrT, CompareT, TraitsT>::
linearSeek(const KeyType & key, CompareT comp)
{
const LeafNodeType *lnode = _leaf.getNode();
- uint32_t lidx = _leaf.getIdx();
-#ifdef STRICT_BTREE_ITERATOR_SEEK
- assert(_leaf.valid() && comp(lnode->getKey(lidx), key));
-#endif
- ++lidx;
+ uint32_t lidx = _leaf.getIdx() + 1;
if (lidx < lnode->validSlots()) {
if (!comp(lnode->getKey(lidx), key)) {
_leaf.setIdx(lidx);
@@ -792,11 +779,7 @@ BTreeConstIterator<KeyT, DataT, AggrT, CompareT, TraitsT>::
binarySeekPast(const KeyType & key, CompareT comp)
{
const LeafNodeType *lnode = _leaf.getNode();
- uint32_t lidx = _leaf.getIdx();
-#ifdef STRICT_BTREE_ITERATOR_SEEK
- assert(_leaf.valid() && !comp(key, lnode->getKey(lidx)));
-#endif
- ++lidx;
+ uint32_t lidx = _leaf.getIdx() + 1;
if (lidx < lnode->validSlots()) {
if (comp(key, lnode->getKey(lidx))) {
_leaf.setIdx(lidx);
@@ -841,11 +824,8 @@ BTreeConstIterator<KeyT, DataT, AggrT, CompareT, TraitsT>::
linearSeekPast(const KeyType & key, CompareT comp)
{
const LeafNodeType *lnode = _leaf.getNode();
- uint32_t lidx = _leaf.getIdx();
-#ifdef STRICT_BTREE_ITERATOR_SEEK
- assert(_leaf.valid() && !comp(key, lnode->getKey(lidx)));
-#endif
- ++lidx;
+ uint32_t lidx = _leaf.getIdx() + 1;
+
if (lidx < lnode->validSlots()) {
if (comp(key, lnode->getKey(lidx))) {
_leaf.setIdx(lidx);
diff --git a/vespalib/src/vespa/vespalib/btree/btreeremover.cpp b/vespalib/src/vespa/vespalib/btree/btreeremover.cpp
index 2322eebf784..f5ada77fed6 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeremover.cpp
+++ b/vespalib/src/vespa/vespalib/btree/btreeremover.cpp
@@ -11,8 +11,6 @@ namespace search::btree {
template class BTreeRemover<uint32_t, uint32_t, NoAggregated>;
template class BTreeRemover<uint32_t, BTreeNoLeafData, NoAggregated>;
template class BTreeRemover<uint32_t, int32_t, MinMaxAggregated,
- std::less<uint32_t>,
- BTreeDefaultTraits,
- MinMaxAggrCalc>;
+ std::less<uint32_t>, BTreeDefaultTraits, MinMaxAggrCalc>;
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreeremover.h b/vespalib/src/vespa/vespalib/btree/btreeremover.h
index 87355aa4ce7..bbb825d6299 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeremover.h
+++ b/vespalib/src/vespa/vespalib/btree/btreeremover.h
@@ -10,11 +10,7 @@
#include "minmaxaggrcalc.h"
#include "btreeiterator.h"
-namespace search
-{
-
-namespace btree
-{
+namespace search::btree {
template <typename KeyT,
typename DataT,
@@ -82,23 +78,15 @@ public:
typedef DataT DataType;
typedef typename InternalNodeType::RefPair InternalNodeTypeRefPair;
typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair;
- typedef BTreeIterator<KeyT, DataT, AggrT,
- CompareT, TraitsT> Iterator;
+ typedef BTreeIterator<KeyT, DataT, AggrT, CompareT, TraitsT> Iterator;
static void
- remove(BTreeNode::Ref &root,
- Iterator &itr,
- const AggrCalcT &aggrCalc);
+ remove(BTreeNode::Ref &root, Iterator &itr, const AggrCalcT &aggrCalc);
};
extern template class BTreeRemover<uint32_t, uint32_t, NoAggregated>;
extern template class BTreeRemover<uint32_t, BTreeNoLeafData, NoAggregated>;
-extern template class BTreeRemover<uint32_t, int32_t,
- MinMaxAggregated,
- std::less<uint32_t>,
- BTreeDefaultTraits,
- MinMaxAggrCalc>;
-
-} // namespace search::btree
-} // namespace search
+extern template class BTreeRemover<uint32_t, int32_t, MinMaxAggregated,
+ std::less<uint32_t>, BTreeDefaultTraits, MinMaxAggrCalc>;
+}
diff --git a/vespalib/src/vespa/vespalib/btree/btreeremover.hpp b/vespalib/src/vespa/vespalib/btree/btreeremover.hpp
index c304ea13016..2281fd99f6d 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeremover.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreeremover.hpp
@@ -6,11 +6,7 @@
#include "btreerootbase.hpp"
#include <vespa/vespalib/stllike/asciistream.h>
-namespace search
-{
-
-namespace btree
-{
+namespace search::btree {
template <typename KeyT, typename DataT, typename AggrT, size_t INTERNAL_SLOTS,
size_t LEAF_SLOTS, class AggrCalcT>
@@ -179,7 +175,4 @@ remove(BTreeNode::Ref &root,
++itr;
}
-
-} // namespace search::btree
-} // namespace search
-
+}
diff --git a/vespalib/src/vespa/vespalib/data/databuffer.cpp b/vespalib/src/vespa/vespalib/data/databuffer.cpp
index 9b04724b601..758922aec6d 100644
--- a/vespalib/src/vespa/vespalib/data/databuffer.cpp
+++ b/vespalib/src/vespa/vespalib/data/databuffer.cpp
@@ -1,5 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "databuffer.h"
+#include <algorithm>
namespace vespalib {
diff --git a/vespalib/src/vespa/vespalib/data/slime/json_format.cpp b/vespalib/src/vespa/vespalib/data/slime/json_format.cpp
index 637be8db999..d2f953b38c8 100644
--- a/vespalib/src/vespa/vespalib/data/slime/json_format.cpp
+++ b/vespalib/src/vespa/vespalib/data/slime/json_format.cpp
@@ -434,7 +434,7 @@ JsonDecoder::decodeNumber(Inserter &inserter)
default:
char *endp;
int errorCode = insertNumber(inserter, isLong, value, &endp);
- if (errorCode != 0) {
+ if ((endp == value.c_str()) || (errorCode != 0)) {
std::stringstream ss;
ss << "error inserting number " << value << ". error code: " << errorCode << ". endp - value: " << (endp - value.c_str());
in.fail(ss.str());
@@ -450,11 +450,11 @@ insertNumber(Inserter &inserter, bool isLong, const vespalib::string & value, ch
int errorCode = 0;
errno = 0;
if (isLong) {
- long val = strtol(value.c_str(), endp, 0);
+ long val = strtol(value.c_str(), endp, 10);
errorCode = errno;
inserter.insertLong(val);
} else {
- double val = locale::c::strtod(value.c_str(), endp);
+ double val = locale::c::strtod_au(value.c_str(), endp);
errorCode = errno;
inserter.insertDouble(val);
}
diff --git a/vespalib/src/vespa/vespalib/locale/c.cpp b/vespalib/src/vespa/vespalib/locale/c.cpp
index af228ce55c3..74e7485f158 100644
--- a/vespalib/src/vespa/vespalib/locale/c.cpp
+++ b/vespalib/src/vespa/vespalib/locale/c.cpp
@@ -3,6 +3,7 @@
#include "c.h"
#include "locale.h"
#include <cstdlib>
+#include <errno.h>
namespace vespalib::locale::c {
@@ -20,5 +21,22 @@ float strtof(const char *startp, char **endp) {
return strtof_l(startp, endp, _G_C_Locale.get());
}
+double strtod_au(const char *startp, char **endp) {
+ int was = errno;
+ double v = strtod_l(startp, endp, _G_C_Locale.get());
+ if (errno == ERANGE) {
+ if ((-1.0 < v) && (v < 1.0)) errno = was;
+ }
+ return v;
}
+float strtof_au(const char *startp, char **endp) {
+ int was = errno;
+ float v = strtof_l(startp, endp, _G_C_Locale.get());
+ if (errno == ERANGE) {
+ if ((-1.0 < v) && (v < 1.0)) errno = was;
+ }
+ return v;
+}
+
+}
diff --git a/vespalib/src/vespa/vespalib/locale/c.h b/vespalib/src/vespa/vespalib/locale/c.h
index ff7d0c18639..08cf6185e7f 100644
--- a/vespalib/src/vespa/vespalib/locale/c.h
+++ b/vespalib/src/vespa/vespalib/locale/c.h
@@ -7,6 +7,10 @@ namespace vespalib::locale::c {
double strtod(const char *nptr, char **endptr);
float strtof(const char *nptr, char **endptr);
+// allow underflow variants
+double strtod_au(const char *nptr, char **endptr);
+float strtof_au(const char *nptr, char **endptr);
+
inline double atof(const char *nptr) { return strtod(nptr, nullptr); }
}
diff --git a/vespalib/src/vespa/vespalib/stllike/asciistream.cpp b/vespalib/src/vespa/vespalib/stllike/asciistream.cpp
index 30a963c374c..7d585cf1cf6 100644
--- a/vespalib/src/vespa/vespalib/stllike/asciistream.cpp
+++ b/vespalib/src/vespa/vespalib/stllike/asciistream.cpp
@@ -10,6 +10,7 @@
#include <limits>
#include <stdexcept>
#include <cassert>
+#include <math.h>
#include <vespa/log/log.h>
LOG_SETUP(".vespalib.stllike.asciistream");
@@ -154,7 +155,7 @@ void throwInputError(int e, const char * t, const char * buf)
if (e == 0) {
throw IllegalArgumentException("Failed decoding a " + string(t) + " from '" + string(buf) + "'.", VESPA_STRLOC);
} else if (errno == ERANGE) {
- throw IllegalArgumentException(string(t) + " value is outside of range '" + string(buf) + "'.", VESPA_STRLOC);
+ throw IllegalArgumentException(string(t) + " value '" + string(buf) + "' is outside of range.", VESPA_STRLOC);
} else if (errno == EINVAL) {
throw IllegalArgumentException("Illegal " + string(t) + " value '" + string(buf) + "'.", VESPA_STRLOC);
} else {
@@ -171,7 +172,7 @@ int getValue(double & val, const char *buf)
{
char *ebuf;
errno = 0;
- val = locale::c::strtod(buf, &ebuf);
+ val = locale::c::strtod_au(buf, &ebuf);
if ((errno != 0) || (buf == ebuf)) {
throwInputError(errno, "double", buf);
}
@@ -182,7 +183,7 @@ int getValue(float & val, const char *buf)
{
char *ebuf;
errno = 0;
- val = locale::c::strtof(buf, &ebuf);
+ val = locale::c::strtof_au(buf, &ebuf);
if ((errno != 0) || (buf == ebuf)) {
throwInputError(errno, "float", buf);
}
diff --git a/vespalib/src/vespa/vespalib/util/benchmark_timer.h b/vespalib/src/vespa/vespalib/util/benchmark_timer.h
index 8d4147907fc..8e0821d6127 100644
--- a/vespalib/src/vespa/vespalib/util/benchmark_timer.h
+++ b/vespalib/src/vespa/vespalib/util/benchmark_timer.h
@@ -3,6 +3,7 @@
#include <chrono>
#include <functional>
+#include <algorithm>
namespace vespalib {
diff --git a/vespalog/src/test/threads/testthreads.cpp b/vespalog/src/test/threads/testthreads.cpp
index 1723f35e432..802514285b6 100644
--- a/vespalog/src/test/threads/testthreads.cpp
+++ b/vespalog/src/test/threads/testthreads.cpp
@@ -3,6 +3,7 @@
#include <vespa/fastos/time.h>
#include <vespa/fastos/thread.h>
#include <vespa/log/bufferedlogger.h>
+#include <array>
#include <iostream>
#include <thread>
#include <chrono>
diff --git a/vespalog/src/vespa/log/log_message.h b/vespalog/src/vespa/log/log_message.h
index 832b5f6d47d..ac00e4237ac 100644
--- a/vespalog/src/vespa/log/log_message.h
+++ b/vespalog/src/vespa/log/log_message.h
@@ -4,6 +4,7 @@
#include "log.h"
#include <string_view>
+#include <string>
namespace ns_log {