summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--application/abi-spec.json2
-rw-r--r--application/src/main/java/com/yahoo/application/container/Processing.java12
-rw-r--r--application/src/main/java/com/yahoo/application/container/ProcessingBase.java16
-rw-r--r--application/src/main/java/com/yahoo/application/container/Search.java13
-rw-r--r--client/go/cmd/test.go1
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java1
-rw-r--r--config-model/.gitignore1
-rw-r--r--config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java9
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java70
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java28
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java16
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java2
-rw-r--r--config-model/src/test/configmodel/types/documentmanager.cfg218
-rw-r--r--config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg27
-rw-r--r--config-model/src/test/derived/inheritance/mother/documentmanager.cfg176
-rw-r--r--config-model/src/test/examples/fieldoftypedocument.cfg60
-rw-r--r--[-rwxr-xr-x]config-model/src/test/examples/structresult.cfg40
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg2
-rw-r--r--config-model/src/test/java/helpers/CompareConfigTestHelper.java4
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java4
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ClusterResources.java7
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java27
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java16
-rw-r--r--config/src/vespa/config/helper/configfetcher.cpp6
-rw-r--r--config/src/vespa/config/retriever/simpleconfigurer.cpp4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java22
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java31
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java21
-rwxr-xr-xconfigserver/src/main/sh/start-configserver1
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java3
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java75
-rw-r--r--container-core/abi-spec.json42
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java2
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java7
-rw-r--r--container-core/src/main/java/com/yahoo/processing/Response.java37
-rw-r--r--container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java3
-rw-r--r--container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java20
-rw-r--r--container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java31
-rw-r--r--container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java47
-rw-r--r--container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java22
-rw-r--r--container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java25
-rw-r--r--container-core/src/main/java/com/yahoo/processing/response/DataList.java7
-rw-r--r--container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java15
-rw-r--r--container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java24
-rw-r--r--container-core/src/main/java/com/yahoo/processing/response/IncomingData.java21
-rw-r--r--container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java20
-rw-r--r--container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java2
-rw-r--r--container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java8
-rw-r--r--container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java3
-rw-r--r--container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java17
-rw-r--r--container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java9
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java4
-rw-r--r--container-search/abi-spec.json7
-rw-r--r--container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java26
-rw-r--r--container-search/src/main/java/com/yahoo/search/rendering/Renderer.java13
-rw-r--r--container-search/src/main/java/com/yahoo/search/result/HitGroup.java12
-rw-r--r--container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java2
-rw-r--r--container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java54
-rw-r--r--container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java4
-rw-r--r--container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java22
-rw-r--r--container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java32
-rw-r--r--container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java11
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java46
-rw-r--r--default_build_settings.cmake20
-rw-r--r--dist/vespa.spec15
-rw-r--r--document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java282
-rw-r--r--document/src/main/java/com/yahoo/document/annotation/Annotation.java1
-rw-r--r--document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java17
-rw-r--r--document/src/test/document/documentmanager.cfg201
-rw-r--r--document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java8
-rw-r--r--document/src/test/vespaxmlparser/alltypes.cfg101
-rw-r--r--document/src/test/vespaxmlparser/documentmanager.cfg109
-rw-r--r--document/src/tests/data/defaultdocument.cfg94
-rw-r--r--document/src/vespa/document/config/documentmanager.def156
-rw-r--r--eval/CMakeLists.txt1
-rw-r--r--eval/src/tests/instruction/l2_distance/CMakeLists.txt10
-rw-r--r--eval/src/tests/instruction/l2_distance/l2_distance_test.cpp96
-rw-r--r--eval/src/vespa/eval/eval/optimize_tensor_function.cpp7
-rw-r--r--eval/src/vespa/eval/eval/typed_cells.h4
-rw-r--r--eval/src/vespa/eval/instruction/CMakeLists.txt1
-rw-r--r--eval/src/vespa/eval/instruction/l2_distance.cpp96
-rw-r--r--eval/src/vespa/eval/instruction/l2_distance.h21
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java11
-rw-r--r--jdisc_core/abi-spec.json327
-rw-r--r--jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java1
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java3
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java17
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java3
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java41
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java64
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java10
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java144
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java28
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java17
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java49
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java10
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java10
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json2
-rw-r--r--pom.xml1
-rw-r--r--screwdriver.yaml2
-rw-r--r--searchcommon/src/vespa/searchcommon/attribute/config.h3
-rw-r--r--searchcommon/src/vespa/searchcommon/common/CMakeLists.txt1
-rw-r--r--searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp15
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_test.cpp3
-rw-r--r--searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp2
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp27
-rw-r--r--searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h1
-rw-r--r--searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp2
-rw-r--r--searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h10
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp85
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h45
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp3
-rw-r--r--searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp8
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp16
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h4
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h11
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/test.h1
-rw-r--r--searchlib/abi-spec.json2
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java24
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java20
-rw-r--r--searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java13
-rw-r--r--searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp2
-rw-r--r--searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp5
-rw-r--r--searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp2
-rw-r--r--searchlib/src/tests/attribute/enumstore/enumstore_test.cpp227
-rw-r--r--searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp6
-rw-r--r--searchlib/src/tests/attribute/posting_store/posting_store_test.cpp5
-rw-r--r--searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp2
-rw-r--r--searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp2
-rw-r--r--searchlib/src/tests/docstore/document_store/document_store_test.cpp1
-rw-r--r--searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp15
-rw-r--r--searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp11
-rw-r--r--searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp12
-rw-r--r--searchlib/src/tests/transactionlog/translogclient_test.cpp47
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_header.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_header.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp159
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h3
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumattribute.hpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.h4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.hpp32
-rw-r--r--searchlib/src/vespa/searchlib/attribute/i_enum_store.h16
-rw-r--r--searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h19
-rw-r--r--searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h15
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp16
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h9
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingstore.cpp214
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingstore.h15
-rw-r--r--searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp17
-rw-r--r--searchlib/src/vespa/searchlib/attribute/reference_attribute.h3
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp13
-rw-r--r--searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/compacter.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/documentstore.cpp10
-rw-r--r--searchlib/src/vespa/searchlib/docstore/documentstore.h5
-rw-r--r--searchlib/src/vespa/searchlib/docstore/idatastore.h12
-rw-r--r--searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/docstore/idocumentstore.h31
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.cpp92
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.h18
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdocumentstore.h3
-rw-r--r--searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp34
-rw-r--r--searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h15
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hamming_distance.h10
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp34
-rw-r--r--searchlib/src/vespa/searchlib/tensor/hnsw_index.h4
-rw-r--r--searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h7
-rw-r--r--searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domain.cpp44
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domain.h2
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp65
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domainpart.h10
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp47
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/ichunk.h23
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp5
-rw-r--r--slobrok/src/vespa/slobrok/server/slobrokserver.cpp9
-rw-r--r--staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp34
-rw-r--r--staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp2
-rw-r--r--staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp15
-rw-r--r--staging_vespalib/src/vespa/vespalib/util/singleexecutor.h5
-rwxr-xr-xstandalone-container/src/main/sh/standalone-container.sh1
-rw-r--r--storage/src/tests/distributor/getoperationtest.cpp20
-rw-r--r--storage/src/tests/distributor/putoperationtest.cpp32
-rw-r--r--storage/src/tests/persistence/filestorage/operationabortingtest.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/getoperation.cpp5
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp1
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.cpp7
-rw-r--r--vbench/src/apps/vbench/vbench.cpp7
-rw-r--r--vbench/src/tests/dispatcher/dispatcher_test.cpp7
-rw-r--r--vbench/src/tests/handler_thread/handler_thread_test.cpp4
-rw-r--r--vbench/src/vbench/core/handler_thread.h2
-rw-r--r--vbench/src/vbench/core/handler_thread.hpp4
-rw-r--r--vbench/src/vbench/vbench/request_scheduler.cpp10
-rw-r--r--vbench/src/vbench/vbench/vbench.cpp4
-rw-r--r--vbench/src/vbench/vbench/worker.cpp4
-rw-r--r--vespa-feed-client-api/abi-spec.json (renamed from vespa-feed-client/abi-spec.json)120
-rw-r--r--vespa-feed-client-api/pom.xml57
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/DocumentId.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java)0
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClient.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java)0
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java128
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedException.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java)0
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/HttpResponse.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java)2
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java)14
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParameters.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java)0
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParseException.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java)2
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationStats.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java)0
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java23
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultException.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultException.java)4
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultParseException.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java)3
-rw-r--r--vespa-feed-client-api/src/main/java/ai/vespa/feed/client/package-info.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java)0
-rw-r--r--vespa-feed-client-api/src/test/java/ai/vespa/feed/client/JsonFeederTest.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java)11
-rw-r--r--vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java)0
-rw-r--r--vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java)0
-rw-r--r--vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java)0
-rw-r--r--vespa-feed-client-cli/pom.xml2
-rw-r--r--vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java (renamed from vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliArguments.java)2
-rw-r--r--vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java (renamed from vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java)9
-rwxr-xr-xvespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh2
-rwxr-xr-xvespa-feed-client-cli/src/main/sh/vespa-feed-client.sh2
-rw-r--r--vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java (renamed from vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java)3
-rw-r--r--vespa-feed-client/pom.xml11
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java)8
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/BenchmarkingCluster.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java)5
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Cluster.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java)6
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DryrunCluster.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/DryrunCluster.java)4
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java)7
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java)94
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreaker.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java)5
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java)19
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequest.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java)2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java)11
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/RequestStrategy.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java)5
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ResultImpl.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java)22
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/SslContextBuilder.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java)2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java)6
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Throttler.java (renamed from vespa-feed-client/src/main/java/ai/vespa/feed/client/Throttler.java)4
-rw-r--r--vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder2
-rwxr-xr-xvespa-feed-client/src/main/sh/vespa-version-generator.sh2
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DocumentIdTest.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/DocumentIdTest.java)8
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreakerTest.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java)2
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java)13
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java)16
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/SslContextBuilderTest.java (renamed from vespa-feed-client/src/test/java/ai/vespa/feed/client/SslContextBuilderTest.java)11
-rw-r--r--vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java18
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java49
-rw-r--r--vespalib/src/tests/btree/btree_store/btree_store_test.cpp101
-rw-r--r--vespalib/src/tests/btree/btree_test.cpp5
-rw-r--r--vespalib/src/tests/datastore/array_store/array_store_test.cpp6
-rw-r--r--vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp186
-rw-r--r--vespalib/src/tests/datastore/unique_store/unique_store_test.cpp6
-rw-r--r--vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp4
-rw-r--r--vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp124
-rw-r--r--vespalib/src/tests/thread/thread_test.cpp8
-rw-r--r--vespalib/src/tests/util/rcuvector/rcuvector_test.cpp16
-rw-r--r--vespalib/src/vespa/vespalib/btree/btree.h4
-rw-r--r--vespalib/src/vespa/vespalib/btree/btree.hpp4
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreeiterator.h6
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenode.h5
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodeallocator.h3
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodestore.h3
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodestore.hpp5
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreestore.h13
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreestore.hpp53
-rw-r--r--vespalib/src/vespa/vespalib/datastore/CMakeLists.txt2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/array_store.h2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/array_store.hpp49
-rw-r--r--vespalib/src/vespa/vespalib/datastore/compaction_spec.h29
-rw-r--r--vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp37
-rw-r--r--vespalib/src/vespa/vespalib/datastore/compaction_strategy.h (renamed from searchcommon/src/vespa/searchcommon/common/compaction_strategy.h)40
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.cpp8
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.h5
-rw-r--r--vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp28
-rw-r--r--vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h35
-rw-r--r--vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp106
-rw-r--r--vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h21
-rw-r--r--vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h6
-rw-r--r--vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp29
-rw-r--r--vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h5
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store.h4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store.hpp11
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp12
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h33
-rw-r--r--vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp60
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp66
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/peer_policies.h7
-rw-r--r--vespalib/src/vespa/vespalib/util/rcuvector.h6
-rw-r--r--vespalib/src/vespa/vespalib/util/rcuvector.hpp32
-rw-r--r--vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp19
-rw-r--r--vespalib/src/vespa/vespalib/util/simple_thread_bundle.h10
-rw-r--r--vespalib/src/vespa/vespalib/util/thread.cpp12
-rw-r--r--vespalib/src/vespa/vespalib/util/thread.h6
334 files changed, 4419 insertions, 3016 deletions
diff --git a/application/abi-spec.json b/application/abi-spec.json
index 5c298471b9c..2138f12854c 100644
--- a/application/abi-spec.json
+++ b/application/abi-spec.json
@@ -347,7 +347,7 @@
"public final com.yahoo.processing.Response process(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request)",
"protected abstract com.yahoo.processing.Response doProcess(com.yahoo.component.chain.Chain, com.yahoo.processing.Request)",
"public final byte[] processAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request)",
- "protected abstract com.google.common.util.concurrent.ListenableFuture doProcessAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request, com.yahoo.processing.rendering.Renderer, java.io.ByteArrayOutputStream)",
+ "protected abstract java.util.concurrent.CompletableFuture doProcessAndRender(com.yahoo.component.ComponentSpecification, com.yahoo.processing.Request, com.yahoo.processing.rendering.Renderer, java.io.ByteArrayOutputStream)",
"protected com.yahoo.component.chain.Chain getChain(com.yahoo.component.ComponentSpecification)",
"protected final com.yahoo.processing.rendering.Renderer getRenderer(com.yahoo.component.ComponentSpecification)",
"protected abstract com.yahoo.processing.rendering.Renderer doGetRenderer(com.yahoo.component.ComponentSpecification)"
diff --git a/application/src/main/java/com/yahoo/application/container/Processing.java b/application/src/main/java/com/yahoo/application/container/Processing.java
index 1f96fe2294b..4ca367ea720 100644
--- a/application/src/main/java/com/yahoo/application/container/Processing.java
+++ b/application/src/main/java/com/yahoo/application/container/Processing.java
@@ -2,7 +2,6 @@
package com.yahoo.application.container;
import com.yahoo.api.annotations.Beta;
-import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.component.ComponentSpecification;
import com.yahoo.component.chain.Chain;
import com.yahoo.processing.Processor;
@@ -15,6 +14,7 @@ import com.yahoo.processing.rendering.Renderer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
/**
* @author Einar M R Rosenvinge
@@ -41,14 +41,14 @@ public final class Processing extends ProcessingBase<Request, Response, Processo
}
@Override
- protected ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec,
- Request request,
- Renderer<Response> renderer,
- ByteArrayOutputStream stream) throws IOException {
+ protected CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec,
+ Request request,
+ Renderer<Response> renderer,
+ ByteArrayOutputStream stream) throws IOException {
Execution execution = handler.createExecution(getChain(chainSpec), request);
Response response = execution.process(request);
- return renderer.render(stream, response, execution, request);
+ return renderer.renderResponse(stream, response, execution, request);
}
@Override
diff --git a/application/src/main/java/com/yahoo/application/container/ProcessingBase.java b/application/src/main/java/com/yahoo/application/container/ProcessingBase.java
index 2b4ea822d03..96866b94e29 100644
--- a/application/src/main/java/com/yahoo/application/container/ProcessingBase.java
+++ b/application/src/main/java/com/yahoo/application/container/ProcessingBase.java
@@ -2,20 +2,18 @@
package com.yahoo.application.container;
import com.yahoo.api.annotations.Beta;
-import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.component.ComponentSpecification;
import com.yahoo.component.chain.Chain;
import com.yahoo.processing.Processor;
import com.yahoo.processing.Request;
import com.yahoo.processing.Response;
import com.yahoo.processing.execution.chain.ChainRegistry;
-import com.yahoo.processing.rendering.AsynchronousRenderer;
import com.yahoo.processing.rendering.Renderer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
/**
* @author gjoranv
@@ -45,13 +43,13 @@ public abstract class ProcessingBase<REQUEST extends Request, RESPONSE extends R
REQUEST request) throws IOException {
ByteArrayOutputStream stream = new ByteArrayOutputStream();
Renderer<RESPONSE> renderer = getRenderer(rendererSpec);
- ListenableFuture<Boolean> renderTask = doProcessAndRender(chainSpec, request, renderer, stream);
+ CompletableFuture<Boolean> renderTask = doProcessAndRender(chainSpec, request, renderer, stream);
awaitFuture(renderTask);
return stream.toByteArray();
}
- private void awaitFuture(ListenableFuture<Boolean> renderTask) {
+ private void awaitFuture(CompletableFuture<Boolean> renderTask) {
try {
renderTask.get();
} catch (InterruptedException | ExecutionException e) {
@@ -59,10 +57,10 @@ public abstract class ProcessingBase<REQUEST extends Request, RESPONSE extends R
}
}
- protected abstract ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec,
- REQUEST request,
- Renderer<RESPONSE> renderer,
- ByteArrayOutputStream stream) throws IOException ;
+ protected abstract CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec,
+ REQUEST request,
+ Renderer<RESPONSE> renderer,
+ ByteArrayOutputStream stream) throws IOException ;
protected Chain<PROCESSOR> getChain(ComponentSpecification chainSpec) {
Chain<PROCESSOR> chain = getChains().getComponent(chainSpec);
diff --git a/application/src/main/java/com/yahoo/application/container/Search.java b/application/src/main/java/com/yahoo/application/container/Search.java
index 3535b660b78..6a2f728fbcc 100644
--- a/application/src/main/java/com/yahoo/application/container/Search.java
+++ b/application/src/main/java/com/yahoo/application/container/Search.java
@@ -2,7 +2,6 @@
package com.yahoo.application.container;
import com.yahoo.api.annotations.Beta;
-import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.component.ComponentSpecification;
import com.yahoo.component.chain.Chain;
import com.yahoo.processing.execution.chain.ChainRegistry;
@@ -12,10 +11,10 @@ import com.yahoo.search.Result;
import com.yahoo.search.Searcher;
import com.yahoo.search.handler.HttpSearchResponse;
import com.yahoo.search.handler.SearchHandler;
-import com.yahoo.search.searchchain.SearchChainRegistry;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
/**
* @author Einar M R Rosenvinge
@@ -41,12 +40,12 @@ public final class Search extends ProcessingBase<Query, Result, Searcher> {
}
@Override
- protected ListenableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec,
- Query request,
- Renderer<Result> renderer,
- ByteArrayOutputStream stream) throws IOException {
+ protected CompletableFuture<Boolean> doProcessAndRender(ComponentSpecification chainSpec,
+ Query request,
+ Renderer<Result> renderer,
+ ByteArrayOutputStream stream) throws IOException {
Result result = process(chainSpec, request);
- return HttpSearchResponse.waitableRender(result, result.getQuery(), renderer, stream);
+ return HttpSearchResponse.asyncRender(result, result.getQuery(), renderer, stream);
}
@Override
diff --git a/client/go/cmd/test.go b/client/go/cmd/test.go
index b8e028ee763..ec445de2b9d 100644
--- a/client/go/cmd/test.go
+++ b/client/go/cmd/test.go
@@ -420,7 +420,6 @@ func validateRelativePath(relPath string) error {
return fmt.Errorf("path must be relative, but was '%s'", relPath)
}
cleanPath := filepath.Clean(relPath)
- fmt.Println(cleanPath)
if strings.HasPrefix(cleanPath, "../../../") {
return fmt.Errorf("path may not point outside src/test/application, but '%s' does", relPath)
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 6ed2f3daa30..3df93f7d08d 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -109,6 +109,7 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"arnej"}) default boolean ignoreThreadStackSizes() { return false; }
@ModelFeatureFlag(owners = {"vekterli", "geirst"}) default boolean unorderedMergeChaining() { return false; }
@ModelFeatureFlag(owners = {"arnej"}) default boolean useV8GeoPositions() { return false; }
+ @ModelFeatureFlag(owners = {"arnej", "baldersheim"}) default boolean useV8DocManagerCfg() { return false; }
}
/** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! */
diff --git a/config-model/.gitignore b/config-model/.gitignore
index b0f358e8113..4cf50da0853 100644
--- a/config-model/.gitignore
+++ b/config-model/.gitignore
@@ -4,3 +4,4 @@
/target
/src/test/integration/*/copy/
/src/test/integration/*/models.generated/
+*.cfg.actual
diff --git a/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java b/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java
index 118714ca2b1..8848759b415 100644
--- a/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java
+++ b/config-model/src/main/java/com/yahoo/documentmodel/DataTypeRepo.java
@@ -48,9 +48,14 @@ public class DataTypeRepo implements DataTypeCollection {
{
throw new IllegalStateException("Data type '" + type.getName() + "' is not registered.");
}
- typeByName.remove(type.getName());
+ var oldByName = typeByName.remove(type.getName());
+ var oldById = typeById.remove(type.getId());
+ if (oldByName != oldById) {
+ throw new IllegalStateException("Data type '" + type.getName() +
+ "' inconsistent replace, by name: " + oldByName
+ + " but by id: " + oldById);
+ }
typeByName.put(type.getName(), type);
- typeById.remove(type.getId());
typeById.put(type.getId(), type);
return this;
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
index 8809cdeacc8..170753a6ff1 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
@@ -26,6 +26,7 @@ import com.yahoo.vespa.documentmodel.FieldView;
import com.yahoo.vespa.documentmodel.SearchDef;
import com.yahoo.vespa.documentmodel.SearchField;
+import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -184,57 +185,94 @@ public class DocumentModelBuilder {
}
}
}
+
+ // This is how you make a "Pair" class in java....
+ private static class TypeReplacement extends AbstractMap.SimpleEntry<DataType,DataType> {
+ DataType oldType() { return getKey(); }
+ DataType newType() { return getValue(); }
+ public TypeReplacement(DataType oldType, DataType newType) {
+ super(oldType, newType);
+ }
+ }
+
private void addDocumentTypes(List<SDDocumentType> docList) {
LinkedList<NewDocumentType> lst = new LinkedList<>();
for (SDDocumentType doc : docList) {
lst.add(convert(doc));
model.getDocumentManager().add(lst.getLast());
}
+ Set<TypeReplacement> replacements = new HashSet<>();
+ for(NewDocumentType doc : lst) {
+ resolveTemporaries(doc.getAllTypes(), lst, replacements);
+ }
for(NewDocumentType doc : lst) {
- resolveTemporaries(doc.getAllTypes(), lst);
+ for (var entry : replacements) {
+ var old = entry.oldType();
+ if (doc.getDataType(old.getId()) == old) {
+ doc.replace(entry.newType());
+ }
+ }
}
}
- private static void resolveTemporaries(DataTypeCollection dtc, Collection<NewDocumentType> docs) {
+
+ private static void resolveTemporaries(DataTypeCollection dtc,
+ Collection<NewDocumentType> docs,
+ Set<TypeReplacement> replacements)
+ {
for (DataType type : dtc.getTypes()) {
- resolveTemporariesRecurse(type, dtc, docs);
+ resolveTemporariesRecurse(type, dtc, docs, replacements);
}
}
@SuppressWarnings("deprecation")
private static DataType resolveTemporariesRecurse(DataType type, DataTypeCollection repo,
- Collection<NewDocumentType> docs) {
+ Collection<NewDocumentType> docs,
+ Set<TypeReplacement> replacements)
+ {
+ DataType original = type;
if (type instanceof TemporaryStructuredDataType) {
- DataType struct = repo.getDataType(type.getId());
- if (struct != null)
- type = struct;
- else
- type = getDocumentType(docs, type.getId());
- }
- else if (type instanceof StructDataType) {
+ DataType other = repo.getDataType(type.getId());
+ if (other == null || other == type) {
+ other = getDocumentType(docs, type.getId());
+ }
+ // maybe warning if null here?
+ if (other != null) {
+ type = other;
+ }
+ } else if (type instanceof DocumentType || type instanceof NewDocumentType) {
+ DataType other = getDocumentType(docs, type.getId());
+ // maybe warning if null here?
+ if (other != null) {
+ type = other;
+ }
+ } else if (type instanceof StructDataType) {
StructDataType dt = (StructDataType) type;
for (com.yahoo.document.Field field : dt.getFields()) {
if (field.getDataType() != type) {
// XXX deprecated:
- field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs));
+ field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs, replacements));
}
}
}
else if (type instanceof MapDataType) {
MapDataType t = (MapDataType) type;
- t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs));
- t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs));
+ t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs, replacements));
+ t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs, replacements));
}
else if (type instanceof CollectionDataType) {
CollectionDataType t = (CollectionDataType) type;
- t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs));
+ t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs, replacements));
}
else if (type instanceof ReferenceDataType) {
ReferenceDataType t = (ReferenceDataType) type;
if (t.getTargetType() instanceof TemporaryStructuredDataType) {
- DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs);
+ DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs, replacements);
t.setTargetType((StructuredDataType) targetType);
}
}
+ if (type != original) {
+ replacements.add(new TypeReplacement(original, type));
+ }
return type;
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
index e2af71ebbf3..fef7ff56763 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/MapEvaluationTypeContext.java
@@ -17,7 +17,6 @@ import com.yahoo.tensor.TensorType;
import com.yahoo.tensor.evaluation.TypeContext;
import java.util.ArrayDeque;
-import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
@@ -65,7 +64,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
globallyResolvedTypes = new HashMap<>();
}
- private MapEvaluationTypeContext(ImmutableMap<String, ExpressionFunction> functions,
+ private MapEvaluationTypeContext(Map<String, ExpressionFunction> functions,
Map<String, String> bindings,
Optional<MapEvaluationTypeContext> parent,
Map<Reference, TensorType> featureTypes,
@@ -250,7 +249,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
private Optional<ExpressionFunction> functionInvocation(Reference reference) {
if (reference.output() != null) return Optional.empty();
- ExpressionFunction function = functions().get(reference.name());
+ ExpressionFunction function = getFunctions().get(reference.name());
if (function == null) return Optional.empty();
if (function.arguments().size() != reference.arguments().size()) return Optional.empty();
return Optional.of(function);
@@ -348,7 +347,7 @@ public class MapEvaluationTypeContext extends FunctionReferenceContext implement
@Override
public MapEvaluationTypeContext withBindings(Map<String, String> bindings) {
- return new MapEvaluationTypeContext(functions(),
+ return new MapEvaluationTypeContext(getFunctions(),
bindings,
Optional.of(this),
featureTypes,
diff --git a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java
index fdbb1d8c8e0..4cfd5c84550 100644
--- a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java
+++ b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java
@@ -11,8 +11,11 @@ import com.yahoo.documentmodel.NewDocumentType;
import com.yahoo.documentmodel.VespaDocumentType;
import com.yahoo.searchdefinition.document.FieldSet;
import com.yahoo.vespa.documentmodel.DocumentModel;
+import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
/**
@@ -35,7 +38,8 @@ public class DocumentManager {
for(NewDocumentType documentType : model.getDocumentManager().getTypes()) {
buildConfig(documentType, documentConfigBuilder, handled);
buildConfig(documentType.getAnnotations(), documentConfigBuilder);
- if ( documentType != VespaDocumentType.INSTANCE) {
+ if (documentType != VespaDocumentType.INSTANCE && ! handled.contains(documentType)) {
+ handled.add(documentType);
DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder();
documentConfigBuilder.datatype(dataTypeBuilder);
buildConfig(documentType, dataTypeBuilder);
@@ -46,10 +50,16 @@ public class DocumentManager {
@SuppressWarnings("deprecation")
private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) {
- for (DataType dataType : type.getTypes()) {
+ List<DataType> todo = new ArrayList<>(type.getTypes());
+ Collections.sort(todo, (a, b) -> (a.getName().equals(b.getName())
+ ? a.getId() - b.getId()
+ : a.getName().compareTo(b.getName())));
+ for (DataType dataType : todo) {
if (built.contains(dataType)) continue;
built.add(dataType);
- if (dataType instanceof TemporaryStructuredDataType) continue;
+ if (dataType instanceof TemporaryStructuredDataType) {
+ throw new IllegalArgumentException("Can not create config for temporary data type: " + dataType.getName());
+ }
if ((dataType.getId() < 0) || (dataType.getId()> DataType.lastPredefinedDataTypeId())) {
Datatype.Builder dataTypeBuilder = new Datatype.Builder();
documentConfigBuilder.datatype(dataTypeBuilder);
@@ -98,15 +108,7 @@ public class DocumentManager {
keytype(mtype.getKeyType().getId()).
valtype(mtype.getValueType().getId()));
} else if (type instanceof DocumentType) {
- DocumentType dt = (DocumentType) type;
- Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder();
- builder.documenttype(doc);
- doc.
- name(dt.getName()).
- headerstruct(dt.contentStruct().getId());
- for (DocumentType inherited : dt.getInheritedTypes()) {
- doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName()));
- }
+ throw new IllegalArgumentException("Can not create config for unadorned document type: " + type.getName());
} else if (type instanceof NewDocumentType) {
NewDocumentType dt = (NewDocumentType) type;
Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder();
@@ -120,7 +122,7 @@ public class DocumentManager {
buildConfig(dt.getFieldSets(), doc);
buildImportedFieldsConfig(dt.getImportedFieldNames(), doc);
} else if (type instanceof TemporaryStructuredDataType) {
- //Ignored
+ throw new IllegalArgumentException("Can not create config for temporary data type: " + type.getName());
} else if (type instanceof StructDataType) {
StructDataType structType = (StructDataType) type;
Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 973f7f5cc40..12f0f717a19 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -429,6 +429,7 @@ public class VespaMetricSet {
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
+ addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");
// jobs
metrics.add(new Metric("content.proton.documentdb.job.total.average"));
@@ -583,6 +584,15 @@ public class VespaMetricSet {
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));
+ // feeding
+ metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max"));
+ metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum"));
+ metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count"));
+ metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate"));
+ metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max"));
+ metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum"));
+ metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count"));
+
return metrics;
}
@@ -618,6 +628,12 @@ public class VespaMetricSet {
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count"));
metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.average")); // TODO: Remove in Vespa 8
+ metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max"));
+ metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum"));
+ metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count"));
+ metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max"));
+ metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum"));
+ metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum"));
metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count"));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
index 10d97cbb58c..d1dc2b84c8a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/QuotaValidator.java
@@ -39,7 +39,7 @@ public class QuotaValidator extends Validator {
var maxSpend = model.allClusters().stream()
.filter(id -> !adminClusterIds(model).contains(id))
.map(id -> model.provisioned().all().getOrDefault(id, zeroCapacity))
- .mapToDouble(c -> c.maxResources().cost())
+ .mapToDouble(c -> c.maxResources().cost()) // TODO: This may be unspecified -> 0
.sum();
var actualSpend = model.allocatedHosts().getHosts().stream()
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java
index 25a570e44a2..14fb903a547 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ClusterSizeReductionValidator.java
@@ -41,7 +41,7 @@ public class ClusterSizeReductionValidator implements ChangeValidator {
int currentSize = current.minResources().nodes();
int nextSize = next.minResources().nodes();
// don't allow more than 50% reduction, but always allow to reduce size with 1
- if ( nextSize < ((double)currentSize) * 0.5 && nextSize != currentSize - 1)
+ if ( nextSize < currentSize * 0.5 && nextSize != currentSize - 1)
overrides.invalid(ValidationId.clusterSizeReduction,
"Size reduction in '" + clusterId.value() + "' is too large: " +
"New min size must be at least 50% of the current min size. " +
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java
index 636a3f44369..e85bbba2dca 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java
@@ -31,8 +31,7 @@ public class RedundancyOnFirstDeploymentValidator extends Validator {
if ( ! deployState.zone().environment().isProduction()) return;
for (ContentCluster cluster : model.getContentClusters().values()) {
- if (cluster.redundancy().finalRedundancy() == 1
- && cluster.redundancy().totalNodes() > cluster.redundancy().groups())
+ if (cluster.redundancy().finalRedundancy() == 1 && cluster.redundancy().groups() == 1)
deployState.validationOverrides().invalid(ValidationId.redundancyOne,
cluster + " has redundancy 1, which will cause it to lose data " +
"if a node fails. This requires an override on first deployment " +
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 73138d15559..562ccc44a37 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -313,7 +313,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
if (deploymentSpec.isEmpty()) return;
for (var deprecatedElement : deploymentSpec.get().deprecatedElements()) {
- deployLogger.log(WARNING, deprecatedElement.humanReadableString());
+ deployLogger.logApplicationPackage(WARNING, deprecatedElement.humanReadableString());
}
addIdentityProvider(cluster,
diff --git a/config-model/src/test/configmodel/types/documentmanager.cfg b/config-model/src/test/configmodel/types/documentmanager.cfg
index 66002968586..8b93e3a4665 100644
--- a/config-model/src/test/configmodel/types/documentmanager.cfg
+++ b/config-model/src/test/configmodel/types/documentmanager.cfg
@@ -13,117 +13,117 @@ datatype[0].structtype[0].field[0].detailedtype ""
datatype[0].structtype[0].field[1].name "y"
datatype[0].structtype[0].field[1].datatype 0
datatype[0].structtype[0].field[1].detailedtype ""
-datatype[1].id -1865479609
-datatype[1].maptype[0].keytype 2
-datatype[1].maptype[0].valtype 4
-datatype[2].id 294108848
-datatype[2].structtype[0].name "folder"
-datatype[2].structtype[0].version 0
-datatype[2].structtype[0].compresstype NONE
-datatype[2].structtype[0].compresslevel 0
-datatype[2].structtype[0].compressthreshold 95
-datatype[2].structtype[0].compressminsize 800
-datatype[2].structtype[0].field[0].name "Version"
-datatype[2].structtype[0].field[0].datatype 0
-datatype[2].structtype[0].field[0].detailedtype ""
-datatype[2].structtype[0].field[1].name "Name"
-datatype[2].structtype[0].field[1].datatype 2
-datatype[2].structtype[0].field[1].detailedtype ""
-datatype[2].structtype[0].field[2].name "FlagsCounter"
-datatype[2].structtype[0].field[2].datatype -1865479609
-datatype[2].structtype[0].field[2].detailedtype ""
-datatype[2].structtype[0].field[3].name "anotherfolder"
-datatype[2].structtype[0].field[3].datatype 294108848
-datatype[2].structtype[0].field[3].detailedtype ""
-datatype[3].id 109267174
-datatype[3].structtype[0].name "sct"
-datatype[3].structtype[0].version 0
-datatype[3].structtype[0].compresstype NONE
-datatype[3].structtype[0].compresslevel 0
-datatype[3].structtype[0].compressthreshold 95
-datatype[3].structtype[0].compressminsize 800
-datatype[3].structtype[0].field[0].name "s1"
-datatype[3].structtype[0].field[0].datatype 2
-datatype[3].structtype[0].field[0].detailedtype ""
-datatype[3].structtype[0].field[1].name "s2"
-datatype[3].structtype[0].field[1].datatype 2
-datatype[3].structtype[0].field[1].detailedtype ""
-datatype[4].id 49942803
-datatype[4].arraytype[0].datatype 16
-datatype[5].id 339965458
-datatype[5].maptype[0].keytype 2
-datatype[5].maptype[0].valtype 2
-datatype[6].id -2092985853
-datatype[6].structtype[0].name "mystruct"
-datatype[6].structtype[0].version 0
-datatype[6].structtype[0].compresstype NONE
-datatype[6].structtype[0].compresslevel 0
-datatype[6].structtype[0].compressthreshold 95
-datatype[6].structtype[0].compressminsize 800
-datatype[6].structtype[0].field[0].name "bytearr"
-datatype[6].structtype[0].field[0].datatype 49942803
-datatype[6].structtype[0].field[0].detailedtype ""
-datatype[6].structtype[0].field[1].name "mymap"
-datatype[6].structtype[0].field[1].datatype 339965458
-datatype[6].structtype[0].field[1].detailedtype ""
-datatype[6].structtype[0].field[2].name "title"
-datatype[6].structtype[0].field[2].datatype 2
-datatype[6].structtype[0].field[2].detailedtype ""
-datatype[6].structtype[0].field[3].name "structfield"
-datatype[6].structtype[0].field[3].datatype 2
-datatype[6].structtype[0].field[3].detailedtype ""
-datatype[7].id -1245117006
-datatype[7].arraytype[0].datatype 0
-datatype[8].id 1328286588
-datatype[8].weightedsettype[0].datatype 2
-datatype[8].weightedsettype[0].createifnonexistant false
-datatype[8].weightedsettype[0].removeifzero false
-datatype[9].id 2125328771
-datatype[9].weightedsettype[0].datatype 2
-datatype[9].weightedsettype[0].createifnonexistant false
-datatype[9].weightedsettype[0].removeifzero true
-datatype[10].id 2065577986
-datatype[10].weightedsettype[0].datatype 2
-datatype[10].weightedsettype[0].createifnonexistant true
-datatype[10].weightedsettype[0].removeifzero false
-datatype[11].id -1244829667
-datatype[11].arraytype[0].datatype 109267174
-datatype[12].id -1584287606
-datatype[12].maptype[0].keytype 2
-datatype[12].maptype[0].valtype 0
-datatype[13].id 2125154557
-datatype[13].maptype[0].keytype 2
-datatype[13].maptype[0].valtype 1
-datatype[14].id -1715531035
+datatype[1].id -794985308
+datatype[1].arraytype[0].datatype 1707615575
+datatype[2].id 1707615575
+datatype[2].arraytype[0].datatype -1486737430
+datatype[3].id 1416345047
+datatype[3].arraytype[0].datatype -372512406
+datatype[4].id 69621385
+datatype[4].arraytype[0].datatype 339965458
+datatype[5].id 49942803
+datatype[5].arraytype[0].datatype 16
+datatype[6].id -1245117006
+datatype[6].arraytype[0].datatype 0
+datatype[7].id 759956026
+datatype[7].arraytype[0].datatype -2092985853
+datatype[8].id -1244829667
+datatype[8].arraytype[0].datatype 109267174
+datatype[9].id -1486737430
+datatype[9].arraytype[0].datatype 2
+datatype[10].id -372512406
+datatype[10].maptype[0].keytype 0
+datatype[10].maptype[0].valtype 1707615575
+datatype[11].id 2138385264
+datatype[11].maptype[0].keytype 0
+datatype[11].maptype[0].valtype 5
+datatype[12].id -389833101
+datatype[12].maptype[0].keytype 0
+datatype[12].maptype[0].valtype 294108848
+datatype[13].id -1715531035
+datatype[13].maptype[0].keytype 0
+datatype[13].maptype[0].valtype 4
+datatype[14].id 1901258752
datatype[14].maptype[0].keytype 0
-datatype[14].maptype[0].valtype 4
-datatype[15].id 2138385264
-datatype[15].maptype[0].keytype 0
-datatype[15].maptype[0].valtype 5
-datatype[16].id 435886609
+datatype[14].maptype[0].valtype -2092985853
+datatype[15].id 435886609
+datatype[15].maptype[0].keytype 2
+datatype[15].maptype[0].valtype -1245117006
+datatype[16].id 2125154557
datatype[16].maptype[0].keytype 2
-datatype[16].maptype[0].valtype -1245117006
-datatype[17].id -1486737430
-datatype[17].arraytype[0].datatype 2
-datatype[18].id 1707615575
-datatype[18].arraytype[0].datatype -1486737430
-datatype[19].id -794985308
-datatype[19].arraytype[0].datatype 1707615575
-datatype[20].id 69621385
-datatype[20].arraytype[0].datatype 339965458
-datatype[21].id -372512406
-datatype[21].maptype[0].keytype 0
-datatype[21].maptype[0].valtype 1707615575
-datatype[22].id 1416345047
-datatype[22].arraytype[0].datatype -372512406
-datatype[23].id 1901258752
-datatype[23].maptype[0].keytype 0
-datatype[23].maptype[0].valtype -2092985853
-datatype[24].id 759956026
-datatype[24].arraytype[0].datatype -2092985853
-datatype[25].id -389833101
-datatype[25].maptype[0].keytype 0
-datatype[25].maptype[0].valtype 294108848
+datatype[16].maptype[0].valtype 1
+datatype[17].id -1584287606
+datatype[17].maptype[0].keytype 2
+datatype[17].maptype[0].valtype 0
+datatype[18].id -1865479609
+datatype[18].maptype[0].keytype 2
+datatype[18].maptype[0].valtype 4
+datatype[19].id 339965458
+datatype[19].maptype[0].keytype 2
+datatype[19].maptype[0].valtype 2
+datatype[20].id 1328286588
+datatype[20].weightedsettype[0].datatype 2
+datatype[20].weightedsettype[0].createifnonexistant false
+datatype[20].weightedsettype[0].removeifzero false
+datatype[21].id 2065577986
+datatype[21].weightedsettype[0].datatype 2
+datatype[21].weightedsettype[0].createifnonexistant true
+datatype[21].weightedsettype[0].removeifzero false
+datatype[22].id 2125328771
+datatype[22].weightedsettype[0].datatype 2
+datatype[22].weightedsettype[0].createifnonexistant false
+datatype[22].weightedsettype[0].removeifzero true
+datatype[23].id 294108848
+datatype[23].structtype[0].name "folder"
+datatype[23].structtype[0].version 0
+datatype[23].structtype[0].compresstype NONE
+datatype[23].structtype[0].compresslevel 0
+datatype[23].structtype[0].compressthreshold 95
+datatype[23].structtype[0].compressminsize 800
+datatype[23].structtype[0].field[0].name "Version"
+datatype[23].structtype[0].field[0].datatype 0
+datatype[23].structtype[0].field[0].detailedtype ""
+datatype[23].structtype[0].field[1].name "Name"
+datatype[23].structtype[0].field[1].datatype 2
+datatype[23].structtype[0].field[1].detailedtype ""
+datatype[23].structtype[0].field[2].name "FlagsCounter"
+datatype[23].structtype[0].field[2].datatype -1865479609
+datatype[23].structtype[0].field[2].detailedtype ""
+datatype[23].structtype[0].field[3].name "anotherfolder"
+datatype[23].structtype[0].field[3].datatype 294108848
+datatype[23].structtype[0].field[3].detailedtype ""
+datatype[24].id -2092985853
+datatype[24].structtype[0].name "mystruct"
+datatype[24].structtype[0].version 0
+datatype[24].structtype[0].compresstype NONE
+datatype[24].structtype[0].compresslevel 0
+datatype[24].structtype[0].compressthreshold 95
+datatype[24].structtype[0].compressminsize 800
+datatype[24].structtype[0].field[0].name "bytearr"
+datatype[24].structtype[0].field[0].datatype 49942803
+datatype[24].structtype[0].field[0].detailedtype ""
+datatype[24].structtype[0].field[1].name "mymap"
+datatype[24].structtype[0].field[1].datatype 339965458
+datatype[24].structtype[0].field[1].detailedtype ""
+datatype[24].structtype[0].field[2].name "title"
+datatype[24].structtype[0].field[2].datatype 2
+datatype[24].structtype[0].field[2].detailedtype ""
+datatype[24].structtype[0].field[3].name "structfield"
+datatype[24].structtype[0].field[3].datatype 2
+datatype[24].structtype[0].field[3].detailedtype ""
+datatype[25].id 109267174
+datatype[25].structtype[0].name "sct"
+datatype[25].structtype[0].version 0
+datatype[25].structtype[0].compresstype NONE
+datatype[25].structtype[0].compresslevel 0
+datatype[25].structtype[0].compressthreshold 95
+datatype[25].structtype[0].compressminsize 800
+datatype[25].structtype[0].field[0].name "s1"
+datatype[25].structtype[0].field[0].datatype 2
+datatype[25].structtype[0].field[0].detailedtype ""
+datatype[25].structtype[0].field[1].name "s2"
+datatype[25].structtype[0].field[1].datatype 2
+datatype[25].structtype[0].field[1].detailedtype ""
datatype[26].id 1328581348
datatype[26].structtype[0].name "types.header"
datatype[26].structtype[0].version 0
diff --git a/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg b/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg
index 9be843c89aa..61c92eee8d1 100644
--- a/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg
+++ b/config-model/src/test/configmodel/types/documenttypes_with_doc_field.cfg
@@ -27,7 +27,7 @@ documenttype[1].version 0
documenttype[1].headerstruct 1328581348
documenttype[1].bodystruct 0
documenttype[1].inherits[0].id 8
-documenttype[1].datatype[0].id -1368624373
+documenttype[1].datatype[0].id 1328581348
documenttype[1].datatype[0].type STRUCT
documenttype[1].datatype[0].array.element.id 0
documenttype[1].datatype[0].map.key.id 0
@@ -36,29 +36,14 @@ documenttype[1].datatype[0].wset.key.id 0
documenttype[1].datatype[0].wset.createifnonexistent false
documenttype[1].datatype[0].wset.removeifzero false
documenttype[1].datatype[0].annotationref.annotation.id 0
-documenttype[1].datatype[0].sstruct.name "other_doc"
+documenttype[1].datatype[0].sstruct.name "types.header"
documenttype[1].datatype[0].sstruct.version 0
documenttype[1].datatype[0].sstruct.compression.type NONE
documenttype[1].datatype[0].sstruct.compression.level 0
documenttype[1].datatype[0].sstruct.compression.threshold 95
documenttype[1].datatype[0].sstruct.compression.minsize 200
-documenttype[1].datatype[1].id 1328581348
-documenttype[1].datatype[1].type STRUCT
-documenttype[1].datatype[1].array.element.id 0
-documenttype[1].datatype[1].map.key.id 0
-documenttype[1].datatype[1].map.value.id 0
-documenttype[1].datatype[1].wset.key.id 0
-documenttype[1].datatype[1].wset.createifnonexistent false
-documenttype[1].datatype[1].wset.removeifzero false
-documenttype[1].datatype[1].annotationref.annotation.id 0
-documenttype[1].datatype[1].sstruct.name "types.header"
-documenttype[1].datatype[1].sstruct.version 0
-documenttype[1].datatype[1].sstruct.compression.type NONE
-documenttype[1].datatype[1].sstruct.compression.level 0
-documenttype[1].datatype[1].sstruct.compression.threshold 95
-documenttype[1].datatype[1].sstruct.compression.minsize 200
-documenttype[1].datatype[1].sstruct.field[0].name "doc_field"
-documenttype[1].datatype[1].sstruct.field[0].id 819293364
-documenttype[1].datatype[1].sstruct.field[0].datatype -1368624373
-documenttype[1].datatype[1].sstruct.field[0].detailedtype ""
+documenttype[1].datatype[0].sstruct.field[0].name "doc_field"
+documenttype[1].datatype[0].sstruct.field[0].id 819293364
+documenttype[1].datatype[0].sstruct.field[0].datatype -1368624373
+documenttype[1].datatype[0].sstruct.field[0].detailedtype ""
documenttype[1].fieldsets{[document]}.fields[0] "doc_field"
diff --git a/config-model/src/test/derived/inheritance/mother/documentmanager.cfg b/config-model/src/test/derived/inheritance/mother/documentmanager.cfg
deleted file mode 100644
index 3cf7eae655d..00000000000
--- a/config-model/src/test/derived/inheritance/mother/documentmanager.cfg
+++ /dev/null
@@ -1,176 +0,0 @@
-enablecompression false
-usev8geopositions false
-datatype[-126593034].id -126593034
-datatype[-126593034].structtype[single].name "child.body"
-datatype[-126593034].structtype[single].version 0
-datatype[-141935690].id -141935690
-datatype[-141935690].structtype[single].name "search_smartsummary"
-datatype[-141935690].structtype[single].version 0
-datatype[-141935690].structtype[single].field[abstract].datatype 2
-datatype[-141935690].structtype[single].field[abstract].name "abstract"
-datatype[-141935690].structtype[single].field[dispurl].datatype 2
-datatype[-141935690].structtype[single].field[dispurl].name "dispurl"
-datatype[-141935690].structtype[single].field[title].datatype 2
-datatype[-141935690].structtype[single].field[title].name "title"
-datatype[-1467672569].id -1467672569
-datatype[-1467672569].structtype[single].name "child_search.body"
-datatype[-1467672569].structtype[single].version 0
-datatype[-154107656].id -154107656
-datatype[-154107656].documenttype[single].bodystruct 978262812
-datatype[-154107656].documenttype[single].headerstruct 990971719
-datatype[-154107656].documenttype[single].name "grandparent"
-datatype[-154107656].documenttype[single].version 0
-datatype[-158393403].id -158393403
-datatype[-158393403].documenttype[single].bodystruct -1989003153
-datatype[-158393403].documenttype[single].headerstruct 1306663898
-datatype[-158393403].documenttype[single].name "mother"
-datatype[-158393403].documenttype[single].version 0
-datatype[-158393403].documenttype[single].inherits[grandparent].name "grandparent"
-datatype[-158393403].documenttype[single].inherits[grandparent].version 0
-datatype[-1740240543].id -1740240543
-datatype[-1740240543].structtype[single].name "search_feature"
-datatype[-1740240543].structtype[single].version 0
-datatype[-1740240543].structtype[single].field[name].datatype 2
-datatype[-1740240543].structtype[single].field[name].name "name"
-datatype[-1740240543].structtype[single].field[value].datatype 5
-datatype[-1740240543].structtype[single].field[value].name "value"
-datatype[-1742340170].id -1742340170
-datatype[-1742340170].structtype[single].name "father.body"
-datatype[-1742340170].structtype[single].version 0
-datatype[-1852215954].id -1852215954
-datatype[-1852215954].structtype[single].name "mother_search.body"
-datatype[-1852215954].structtype[single].version 0
-datatype[-1962244686].id -1962244686
-datatype[-1962244686].structtype[single].name "father_search.header"
-datatype[-1962244686].structtype[single].version 0
-datatype[-1962244686].structtype[single].field[onlyfather].datatype 2
-datatype[-1962244686].structtype[single].field[onlyfather].name "onlyfather"
-datatype[-1962244686].structtype[single].field[onlygrandparent].datatype 0
-datatype[-1962244686].structtype[single].field[onlygrandparent].name "onlygrandparent"
-datatype[-1962244686].structtype[single].field[overridden].datatype 0
-datatype[-1962244686].structtype[single].field[overridden].name "overridden"
-datatype[-1989003153].id -1989003153
-datatype[-1989003153].structtype[single].name "mother.body"
-datatype[-1989003153].structtype[single].version 0
-datatype[-205818510].id -205818510
-datatype[-205818510].structtype[single].name "child_search.header"
-datatype[-205818510].structtype[single].version 0
-datatype[-205818510].structtype[single].field[onlychild].datatype 2
-datatype[-205818510].structtype[single].field[onlychild].name "onlychild"
-datatype[-205818510].structtype[single].field[onlyfather].datatype 2
-datatype[-205818510].structtype[single].field[onlyfather].name "onlyfather"
-datatype[-205818510].structtype[single].field[onlygrandparent].datatype 0
-datatype[-205818510].structtype[single].field[onlygrandparent].name "onlygrandparent"
-datatype[-205818510].structtype[single].field[onlymother].datatype 2
-datatype[-205818510].structtype[single].field[onlymother].name "onlymother"
-datatype[-205818510].structtype[single].field[overridden].datatype 0
-datatype[-205818510].structtype[single].field[overridden].name "overridden"
-datatype[-384824039].id -384824039
-datatype[-384824039].structtype[single].name "mother_search.header"
-datatype[-384824039].structtype[single].version 0
-datatype[-384824039].structtype[single].field[onlygrandparent].datatype 0
-datatype[-384824039].structtype[single].field[onlygrandparent].name "onlygrandparent"
-datatype[-384824039].structtype[single].field[onlymother].datatype 2
-datatype[-384824039].structtype[single].field[onlymother].name "onlymother"
-datatype[-384824039].structtype[single].field[overridden].datatype 0
-datatype[-384824039].structtype[single].field[overridden].name "overridden"
-datatype[-52742073].id -52742073
-datatype[-52742073].structtype[single].name "father_search.body"
-datatype[-52742073].structtype[single].version 0
-datatype[-580592339].id -580592339
-datatype[-580592339].documenttype[single].bodystruct -1467672569
-datatype[-580592339].documenttype[single].headerstruct -205818510
-datatype[-580592339].documenttype[single].name "child_search"
-datatype[-580592339].documenttype[single].version 0
-datatype[-876064862].id -876064862
-datatype[-876064862].structtype[single].name "search_position"
-datatype[-876064862].structtype[single].version 0
-datatype[-876064862].structtype[single].field[x].datatype 0
-datatype[-876064862].structtype[single].field[x].name "x"
-datatype[-876064862].structtype[single].field[y].datatype 0
-datatype[-876064862].structtype[single].field[y].name "y"
-datatype[1306663898].id 1306663898
-datatype[1306663898].structtype[single].name "mother.header"
-datatype[1306663898].structtype[single].version 0
-datatype[1306663898].structtype[single].field[onlymother].datatype 2
-datatype[1306663898].structtype[single].field[onlymother].name "onlymother"
-datatype[1464571117].id 1464571117
-datatype[1464571117].documenttype[single].bodystruct -52742073
-datatype[1464571117].documenttype[single].headerstruct -1962244686
-datatype[1464571117].documenttype[single].name "father_search"
-datatype[1464571117].documenttype[single].version 0
-datatype[147991900].id 147991900
-datatype[147991900].arraytype[single].datatype -1740240543
-datatype[1530060044].id 1530060044
-datatype[1530060044].structtype[single].name "grandparent_search.header"
-datatype[1530060044].structtype[single].version 0
-datatype[1530060044].structtype[single].field[onlygrandparent].datatype 0
-datatype[1530060044].structtype[single].field[onlygrandparent].name "onlygrandparent"
-datatype[1530060044].structtype[single].field[overridden].datatype 0
-datatype[1530060044].structtype[single].field[overridden].name "overridden"
-datatype[1845861921].id 1845861921
-datatype[1845861921].structtype[single].name "grandparent_search.body"
-datatype[1845861921].structtype[single].version 0
-datatype[2126589281].id 2126589281
-datatype[2126589281].structtype[single].name "father.header"
-datatype[2126589281].structtype[single].version 0
-datatype[2126589281].structtype[single].field[onlyfather].datatype 2
-datatype[2126589281].structtype[single].field[onlyfather].name "onlyfather"
-datatype[328953555].id 328953555
-datatype[328953555].documenttype[single].bodystruct 1845861921
-datatype[328953555].documenttype[single].headerstruct 1530060044
-datatype[328953555].documenttype[single].name "grandparent_search"
-datatype[328953555].documenttype[single].version 0
-datatype[464784087].id 464784087
-datatype[464784087].structtype[single].name "search_uri"
-datatype[464784087].structtype[single].version 0
-datatype[464784087].structtype[single].field[all].datatype 2
-datatype[464784087].structtype[single].field[all].name "all"
-datatype[464784087].structtype[single].field[fragment].datatype 2
-datatype[464784087].structtype[single].field[fragment].name "fragment"
-datatype[464784087].structtype[single].field[host].datatype 2
-datatype[464784087].structtype[single].field[host].name "host"
-datatype[464784087].structtype[single].field[path].datatype 2
-datatype[464784087].structtype[single].field[path].name "path"
-datatype[464784087].structtype[single].field[port].datatype 0
-datatype[464784087].structtype[single].field[port].name "port"
-datatype[464784087].structtype[single].field[query].datatype 2
-datatype[464784087].structtype[single].field[query].name "query"
-datatype[464784087].structtype[single].field[scheme].datatype 2
-datatype[464784087].structtype[single].field[scheme].name "scheme"
-datatype[644645734].id 644645734
-datatype[644645734].documenttype[single].bodystruct -1852215954
-datatype[644645734].documenttype[single].headerstruct -384824039
-datatype[644645734].documenttype[single].name "mother_search"
-datatype[644645734].documenttype[single].version 0
-datatype[746267614].id 746267614
-datatype[746267614].documenttype[single].bodystruct -126593034
-datatype[746267614].documenttype[single].headerstruct 81425825
-datatype[746267614].documenttype[single].name "child"
-datatype[746267614].documenttype[single].version 0
-datatype[746267614].documenttype[single].inherits[father].name "father"
-datatype[746267614].documenttype[single].inherits[father].version 0
-datatype[746267614].documenttype[single].inherits[mother].name "mother"
-datatype[746267614].documenttype[single].inherits[mother].version 0
-datatype[81425825].id 81425825
-datatype[81425825].structtype[single].name "child.header"
-datatype[81425825].structtype[single].version 0
-datatype[81425825].structtype[single].field[onlychild].datatype 2
-datatype[81425825].structtype[single].field[onlychild].name "onlychild"
-datatype[978262812].id 978262812
-datatype[978262812].structtype[single].name "grandparent.body"
-datatype[978262812].structtype[single].version 0
-datatype[986686494].id 986686494
-datatype[986686494].documenttype[single].bodystruct -1742340170
-datatype[986686494].documenttype[single].headerstruct 2126589281
-datatype[986686494].documenttype[single].name "father"
-datatype[986686494].documenttype[single].version 0
-datatype[986686494].documenttype[single].inherits[grandparent].name "grandparent"
-datatype[986686494].documenttype[single].inherits[grandparent].version 0
-datatype[990971719].id 990971719
-datatype[990971719].structtype[single].name "grandparent.header"
-datatype[990971719].structtype[single].version 0
-datatype[990971719].structtype[single].field[onlygrandparent].datatype 0
-datatype[990971719].structtype[single].field[onlygrandparent].name "onlygrandparent"
-datatype[990971719].structtype[single].field[overridden].datatype 0
-datatype[990971719].structtype[single].field[overridden].name "overridden"
diff --git a/config-model/src/test/examples/fieldoftypedocument.cfg b/config-model/src/test/examples/fieldoftypedocument.cfg
index 5753ae556a6..82a30012a07 100644
--- a/config-model/src/test/examples/fieldoftypedocument.cfg
+++ b/config-model/src/test/examples/fieldoftypedocument.cfg
@@ -23,37 +23,37 @@ datatype[1].structtype[0].compressminsize 800
datatype[1].structtype[0].field[0].name "soundtrack"
datatype[1].structtype[0].field[0].datatype 1412693671
datatype[1].structtype[0].field[0].detailedtype ""
-datatype[2].id -1383388565
-datatype[2].documenttype[0].name "book"
+datatype[2].id 1412693671
+datatype[2].documenttype[0].name "music"
datatype[2].documenttype[0].version 0
datatype[2].documenttype[0].inherits[0].name "document"
datatype[2].documenttype[0].inherits[0].version 0
-datatype[2].documenttype[0].headerstruct -1344444812
+datatype[2].documenttype[0].headerstruct -1910204744
datatype[2].documenttype[0].bodystruct 0
-datatype[2].documenttype[0].fieldsets{[document]}.fields[0] "soundtrack"
-datatype[3].id -1910204744
-datatype[3].structtype[0].name "music.header"
-datatype[3].structtype[0].version 0
-datatype[3].structtype[0].compresstype NONE
-datatype[3].structtype[0].compresslevel 0
-datatype[3].structtype[0].compressthreshold 95
-datatype[3].structtype[0].compressminsize 800
-datatype[3].structtype[0].field[0].name "intfield"
-datatype[3].structtype[0].field[0].datatype 0
-datatype[3].structtype[0].field[0].detailedtype ""
-datatype[3].structtype[0].field[1].name "stringfield"
-datatype[3].structtype[0].field[1].datatype 2
-datatype[3].structtype[0].field[1].detailedtype ""
-datatype[3].structtype[0].field[2].name "longfield"
-datatype[3].structtype[0].field[2].datatype 4
-datatype[3].structtype[0].field[2].detailedtype ""
-datatype[4].id 1412693671
-datatype[4].documenttype[0].name "music"
-datatype[4].documenttype[0].version 0
-datatype[4].documenttype[0].inherits[0].name "document"
-datatype[4].documenttype[0].inherits[0].version 0
-datatype[4].documenttype[0].headerstruct -1910204744
-datatype[4].documenttype[0].bodystruct 0
-datatype[4].documenttype[0].fieldsets{[document]}.fields[0] "intfield"
-datatype[4].documenttype[0].fieldsets{[document]}.fields[1] "longfield"
-datatype[4].documenttype[0].fieldsets{[document]}.fields[2] "stringfield"
+datatype[2].documenttype[0].fieldsets{[document]}.fields[0] "intfield"
+datatype[2].documenttype[0].fieldsets{[document]}.fields[1] "longfield"
+datatype[2].documenttype[0].fieldsets{[document]}.fields[2] "stringfield"
+datatype[3].id -1383388565
+datatype[3].documenttype[0].name "book"
+datatype[3].documenttype[0].version 0
+datatype[3].documenttype[0].inherits[0].name "document"
+datatype[3].documenttype[0].inherits[0].version 0
+datatype[3].documenttype[0].headerstruct -1344444812
+datatype[3].documenttype[0].bodystruct 0
+datatype[3].documenttype[0].fieldsets{[document]}.fields[0] "soundtrack"
+datatype[4].id -1910204744
+datatype[4].structtype[0].name "music.header"
+datatype[4].structtype[0].version 0
+datatype[4].structtype[0].compresstype NONE
+datatype[4].structtype[0].compresslevel 0
+datatype[4].structtype[0].compressthreshold 95
+datatype[4].structtype[0].compressminsize 800
+datatype[4].structtype[0].field[0].name "intfield"
+datatype[4].structtype[0].field[0].datatype 0
+datatype[4].structtype[0].field[0].detailedtype ""
+datatype[4].structtype[0].field[1].name "stringfield"
+datatype[4].structtype[0].field[1].datatype 2
+datatype[4].structtype[0].field[1].detailedtype ""
+datatype[4].structtype[0].field[2].name "longfield"
+datatype[4].structtype[0].field[2].datatype 4
+datatype[4].structtype[0].field[2].detailedtype ""
diff --git a/config-model/src/test/examples/structresult.cfg b/config-model/src/test/examples/structresult.cfg
index 639d91c892d..b5b90245858 100755..100644
--- a/config-model/src/test/examples/structresult.cfg
+++ b/config-model/src/test/examples/structresult.cfg
@@ -13,32 +13,32 @@ datatype[0].structtype[0].field[0].detailedtype ""
datatype[0].structtype[0].field[1].name "y"
datatype[0].structtype[0].field[1].datatype 0
datatype[0].structtype[0].field[1].detailedtype ""
-datatype[1].id 93505813
-datatype[1].structtype[0].name "bar"
-datatype[1].structtype[0].version 0
-datatype[1].structtype[0].compresstype NONE
-datatype[1].structtype[0].compresslevel 0
-datatype[1].structtype[0].compressthreshold 95
-datatype[1].structtype[0].compressminsize 800
-datatype[1].structtype[0].field[0].name "humbe"
-datatype[1].structtype[0].field[0].datatype 97614088
-datatype[1].structtype[0].field[0].detailedtype ""
-datatype[2].id 97614088
-datatype[2].structtype[0].name "foo"
+datatype[1].id -1245205573
+datatype[1].arraytype[0].datatype 97614088
+datatype[2].id 93505813
+datatype[2].structtype[0].name "bar"
datatype[2].structtype[0].version 0
datatype[2].structtype[0].compresstype NONE
datatype[2].structtype[0].compresslevel 0
datatype[2].structtype[0].compressthreshold 95
datatype[2].structtype[0].compressminsize 800
-datatype[2].structtype[0].field[0].name "fubar"
-datatype[2].structtype[0].field[0].datatype 0
+datatype[2].structtype[0].field[0].name "humbe"
+datatype[2].structtype[0].field[0].datatype 97614088
datatype[2].structtype[0].field[0].detailedtype ""
-datatype[2].structtype[0].field[1].name "bar"
-datatype[2].structtype[0].field[1].id[0].id 1
-datatype[2].structtype[0].field[1].datatype 2
-datatype[2].structtype[0].field[1].detailedtype ""
-datatype[3].id -1245205573
-datatype[3].arraytype[0].datatype 97614088
+datatype[3].id 97614088
+datatype[3].structtype[0].name "foo"
+datatype[3].structtype[0].version 0
+datatype[3].structtype[0].compresstype NONE
+datatype[3].structtype[0].compresslevel 0
+datatype[3].structtype[0].compressthreshold 95
+datatype[3].structtype[0].compressminsize 800
+datatype[3].structtype[0].field[0].name "fubar"
+datatype[3].structtype[0].field[0].datatype 0
+datatype[3].structtype[0].field[0].detailedtype ""
+datatype[3].structtype[0].field[1].name "bar"
+datatype[3].structtype[0].field[1].id[0].id 1
+datatype[3].structtype[0].field[1].datatype 2
+datatype[3].structtype[0].field[1].detailedtype ""
datatype[4].id -1910204744
datatype[4].structtype[0].name "music.header"
datatype[4].structtype[0].version 0
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java
index a08ec110219..8ece5cd0fe4 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/AbstractSchemaTestCase.java
@@ -13,6 +13,10 @@ import static helpers.CompareConfigTestHelper.assertSerializedConfigFileEquals;
public abstract class AbstractSchemaTestCase {
protected static void assertConfigFile(String filename, String cfg) throws IOException {
+ IOUtils.writeFile(filename + ".actual", cfg, false);
+ if (! cfg.endsWith("\n")) {
+ IOUtils.writeFile(filename + ".actual", "\n", true);
+ }
assertSerializedConfigFileEquals(filename, cfg);
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg
index 08b0e6809ce..26aab134699 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/empty.cfg
@@ -1 +1 @@
-enableGroupingSessionCache true \ No newline at end of file
+enableGroupingSessionCache true
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg
index fc77c5d82fa..5f48b7b75c2 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/explicit-reference-override.cfg
@@ -11,4 +11,4 @@ queryprofile[1].property[0].overridable ""
queryprofile[1].reference[0].name "a"
queryprofile[1].reference[0].value "a1"
queryprofile[1].reference[0].overridable ""
-enableGroupingSessionCache true \ No newline at end of file
+enableGroupingSessionCache true
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg
index 337623bc448..954a6b8d68a 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsbe-query-profiles-simple.cfg
@@ -18,4 +18,4 @@ queryprofile[0].queryprofilevariant[0].fordimensionvalues[2] "sc"
queryprofile[0].queryprofilevariant[0].property[0].name "scthumbnail.sourcecountry"
queryprofile[0].queryprofilevariant[0].property[0].value "uk"
queryprofile[0].queryprofilevariant[0].property[0].overridable ""
-enableGroupingSessionCache true \ No newline at end of file
+enableGroupingSessionCache true
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg
index b3e41d88233..33ef2610d1d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/newsfe-query-profiles-simple.cfg
@@ -27,4 +27,4 @@ queryprofile[1].type ""
queryprofile[1].reference[0].name "source.news"
queryprofile[1].reference[0].value "backend/news"
queryprofile[1].reference[0].overridable ""
-enableGroupingSessionCache true \ No newline at end of file
+enableGroupingSessionCache true
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg
index 8cf8385f397..c10e0b93560 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants-configuration.cfg
@@ -46,4 +46,4 @@ queryprofile[2].type ""
queryprofile[2].property[0].name "a"
queryprofile[2].property[0].value "a1"
queryprofile[2].property[0].overridable ""
-enableGroupingSessionCache true \ No newline at end of file
+enableGroupingSessionCache true
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg
index 0bce407e8ef..2f9879dc721 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profile-variants2-configuration.cfg
@@ -33,8 +33,8 @@ queryprofile[1].queryprofilevariant[0].property[0].value "default"
queryprofile[1].queryprofilevariant[0].property[0].overridable ""
queryprofile[1].queryprofilevariant[0].reference[0].name "model"
queryprofile[1].queryprofilevariant[0].reference[0].value "querylove"
-queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "*"
queryprofile[1].queryprofilevariant[0].reference[0].overridable ""
+queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "*"
queryprofile[1].queryprofilevariant[1].fordimensionvalues[1] "default"
queryprofile[1].queryprofilevariant[1].property[0].name "model.defaultIndex"
queryprofile[1].queryprofilevariant[1].property[0].value "default"
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg
index 54997e152f3..18fc48fc7c9 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/query-profiles.cfg
@@ -103,4 +103,4 @@ queryprofiletype[2].field[0].type "string"
queryprofiletype[2].field[0].overridable false
queryprofiletype[2].field[0].mandatory false
queryprofiletype[2].field[0].alias ""
-enableGroupingSessionCache true \ No newline at end of file
+enableGroupingSessionCache true
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg
index fdfd1955491..bb125065671 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound-with-reference.cfg
@@ -20,9 +20,9 @@ queryprofile[2].queryprofilevariant[0].property[0].value "a.b.x1"
queryprofile[2].queryprofilevariant[0].property[0].overridable ""
queryprofile[2].queryprofilevariant[0].reference[0].name "a"
queryprofile[2].queryprofilevariant[0].reference[0].value "a2"
-queryprofile[2].queryprofilevariant[1].fordimensionvalues[0] "x2"
queryprofile[2].queryprofilevariant[0].reference[0].overridable ""
+queryprofile[2].queryprofilevariant[1].fordimensionvalues[0] "x2"
queryprofile[2].queryprofilevariant[1].property[0].name "a.b"
queryprofile[2].queryprofilevariant[1].property[0].value "a.b.x2"
queryprofile[2].queryprofilevariant[1].property[0].overridable ""
-enableGroupingSessionCache true \ No newline at end of file
+enableGroupingSessionCache true
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg
index 6f66a3bd441..f867ca9a56b 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/test/variants-of-explicit-compound.cfg
@@ -17,4 +17,4 @@ queryprofile[1].queryprofilevariant[1].fordimensionvalues[0] "x2"
queryprofile[1].queryprofilevariant[1].property[0].name "a.b"
queryprofile[1].queryprofilevariant[1].property[0].value "a.b.x2"
queryprofile[1].queryprofilevariant[1].property[0].overridable ""
-enableGroupingSessionCache true \ No newline at end of file
+enableGroupingSessionCache true
diff --git a/config-model/src/test/java/helpers/CompareConfigTestHelper.java b/config-model/src/test/java/helpers/CompareConfigTestHelper.java
index 18c0723c6a2..ba06ecc9397 100644
--- a/config-model/src/test/java/helpers/CompareConfigTestHelper.java
+++ b/config-model/src/test/java/helpers/CompareConfigTestHelper.java
@@ -19,6 +19,10 @@ import static org.junit.Assert.assertEquals;
public class CompareConfigTestHelper {
public static void assertSerializedConfigFileEquals(String filename, String actual) throws IOException {
+ IOUtils.writeFile(filename + ".actual", actual, false);
+ if (! actual.endsWith("\n")) {
+ IOUtils.writeFile(filename + ".actual", "\n", true);
+ }
assertSerializedConfigEquals(IOUtils.readFile(new File(filename)), actual, false);
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
index 182b924e877..958a37e1432 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
@@ -58,8 +58,8 @@ public final class Capacity {
*/
public NodeType type() { return type; }
- public Capacity withGroups(int groups) {
- return new Capacity(min.withGroups(groups), max.withGroups(groups), required, canFail, type);
+ public Capacity withLimits(ClusterResources min, ClusterResources max) {
+ return new Capacity(min, max, required, canFail, type);
}
@Override
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterResources.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterResources.java
index 9938823768b..66e03a9f5fa 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterResources.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterResources.java
@@ -48,8 +48,10 @@
public boolean isWithin(ClusterResources min, ClusterResources max) {
if (this.smallerThan(min)) return false;
if (max.smallerThan(this)) return false;
- if ( ! this.nodeResources.justNonNumbers().compatibleWith(min.nodeResources.justNonNumbers())) return false;
- if ( ! this.nodeResources.justNonNumbers().compatibleWith(max.nodeResources.justNonNumbers())) return false;
+ if ( ! min.nodeResources().isUnspecified()
+ && ! this.nodeResources.justNonNumbers().compatibleWith(min.nodeResources.justNonNumbers())) return false;
+ if ( ! max.nodeResources().isUnspecified()
+ && ! this.nodeResources.justNonNumbers().compatibleWith(max.nodeResources.justNonNumbers())) return false;
return true;
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
index b887a2a93e6..c753c22e3c6 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeResources.java
@@ -17,6 +17,7 @@ public class NodeResources {
private static final double diskUnitCost = 0.0003;
private static final NodeResources zero = new NodeResources(0, 0, 0, 0);
+ private static final NodeResources unspecified = new NodeResources(0, 0, 0, 0);
public enum DiskSpeed {
@@ -125,46 +126,56 @@ public class NodeResources {
}
public NodeResources withVcpu(double vcpu) {
+ ensureSpecified();
if (vcpu == this.vcpu) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType);
}
public NodeResources withMemoryGb(double memoryGb) {
+ ensureSpecified();
if (memoryGb == this.memoryGb) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType);
}
public NodeResources withDiskGb(double diskGb) {
+ ensureSpecified();
if (diskGb == this.diskGb) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType);
}
public NodeResources withBandwidthGbps(double bandwidthGbps) {
+ ensureSpecified();
if (bandwidthGbps == this.bandwidthGbps) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType);
}
public NodeResources with(DiskSpeed diskSpeed) {
+ ensureSpecified();
if (diskSpeed == this.diskSpeed) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType);
}
public NodeResources with(StorageType storageType) {
+ ensureSpecified();
if (storageType == this.storageType) return this;
return new NodeResources(vcpu, memoryGb, diskGb, bandwidthGbps, diskSpeed, storageType);
}
/** Returns this with disk speed and storage type set to any */
public NodeResources justNumbers() {
+ if (isUnspecified()) return unspecified();
return with(NodeResources.DiskSpeed.any).with(StorageType.any);
}
/** Returns this with all numbers set to 0 */
public NodeResources justNonNumbers() {
+ if (isUnspecified()) return unspecified();
return withVcpu(0).withMemoryGb(0).withDiskGb(0).withBandwidthGbps(0);
}
public NodeResources subtract(NodeResources other) {
+ ensureSpecified();
+ other.ensureSpecified();
if ( ! this.isInterchangeableWith(other))
throw new IllegalArgumentException(this + " and " + other + " are not interchangeable");
return new NodeResources(vcpu - other.vcpu,
@@ -176,6 +187,7 @@ public class NodeResources {
}
public NodeResources add(NodeResources other) {
+ ensureSpecified();
if ( ! this.isInterchangeableWith(other))
throw new IllegalArgumentException(this + " and " + other + " are not interchangeable");
return new NodeResources(vcpu + other.vcpu,
@@ -187,6 +199,8 @@ public class NodeResources {
}
private boolean isInterchangeableWith(NodeResources other) {
+ ensureSpecified();
+ other.ensureSpecified();
if (this.diskSpeed != DiskSpeed.any && other.diskSpeed != DiskSpeed.any && this.diskSpeed != other.diskSpeed)
return false;
if (this.storageType != StorageType.any && other.storageType != StorageType.any && this.storageType != other.storageType)
@@ -248,6 +262,8 @@ public class NodeResources {
/** Returns true if all the resources of this are the same or larger than the given resources */
public boolean satisfies(NodeResources other) {
+ ensureSpecified();
+ other.ensureSpecified();
if (this.vcpu < other.vcpu) return false;
if (this.memoryGb < other.memoryGb) return false;
if (this.diskGb < other.diskGb) return false;
@@ -266,6 +282,8 @@ public class NodeResources {
/** Returns true if all the resources of this are the same as or compatible with the given resources */
public boolean compatibleWith(NodeResources other) {
+ ensureSpecified();
+ other.ensureSpecified();
if ( ! equal(this.vcpu, other.vcpu)) return false;
if ( ! equal(this.memoryGb, other.memoryGb)) return false;
if ( ! equal(this.diskGb, other.diskGb)) return false;
@@ -276,9 +294,14 @@ public class NodeResources {
return true;
}
- public static NodeResources unspecified() { return zero; }
+ public static NodeResources unspecified() { return unspecified; }
- public boolean isUnspecified() { return this.equals(zero); }
+ public boolean isUnspecified() { return this == unspecified; }
+
+ private void ensureSpecified() {
+ if (isUnspecified())
+ throw new IllegalStateException("Cannot perform this on unspecified resources");
+ }
// Returns squared euclidean distance of the relevant numerical values of two node resources
public double distanceTo(NodeResources other) {
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
index 625f1b5fe17..68570722117 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
@@ -4,8 +4,6 @@ package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.config.subscription.ConfigSourceSet;
import com.yahoo.jrt.Supervisor;
-import com.yahoo.vespa.config.ConnectionPool;
-import com.yahoo.vespa.config.JRTConnectionPool;
import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool;
import com.yahoo.vespa.filedistribution.FileDownloader;
@@ -29,9 +27,7 @@ public class FileDistributionAndUrlDownload {
new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup"));
public FileDistributionAndUrlDownload(Supervisor supervisor, ConfigSourceSet source) {
- fileDistributionRpcServer =
- new FileDistributionRpcServer(supervisor,
- new FileDownloader(createConnectionPool(supervisor, source), supervisor, Duration.ofMinutes(5)));
+ fileDistributionRpcServer = new FileDistributionRpcServer(supervisor, createDownloader(supervisor, source));
urlDownloadRpcServer = new UrlDownloadRpcServer(supervisor);
cleanupExecutor.scheduleAtFixedRate(new CachedFilesMaintainer(), delay.toSeconds(), delay.toSeconds(), TimeUnit.SECONDS);
}
@@ -48,12 +44,10 @@ public class FileDistributionAndUrlDownload {
}
}
- private static ConnectionPool createConnectionPool(Supervisor supervisor, ConfigSourceSet source) {
- String useFileDistributionConnectionPool = System.getenv("VESPA_CONFIG_PROXY_USE_FILE_DISTRIBUTION_CONNECTION_POOL");
- if (useFileDistributionConnectionPool != null && useFileDistributionConnectionPool.equalsIgnoreCase("true"))
- return new FileDistributionConnectionPool(source, supervisor);
- else
- return new JRTConnectionPool(source, supervisor);
+ private FileDownloader createDownloader(Supervisor supervisor, ConfigSourceSet source) {
+ return new FileDownloader(new FileDistributionConnectionPool(source, supervisor),
+ supervisor,
+ Duration.ofMinutes(5));
}
}
diff --git a/config/src/vespa/config/helper/configfetcher.cpp b/config/src/vespa/config/helper/configfetcher.cpp
index 7a6f806c6ff..d85308bbcbb 100644
--- a/config/src/vespa/config/helper/configfetcher.cpp
+++ b/config/src/vespa/config/helper/configfetcher.cpp
@@ -9,9 +9,11 @@ LOG_SETUP(".config.helper.configfetcher");
namespace config {
+VESPA_THREAD_STACK_TAG(config_fetcher_thread);
+
ConfigFetcher::ConfigFetcher(const IConfigContext::SP & context)
: _poller(context),
- _thread(std::make_unique<vespalib::Thread>(_poller)),
+ _thread(std::make_unique<vespalib::Thread>(_poller, config_fetcher_thread)),
_closed(false),
_started(false)
{
@@ -19,7 +21,7 @@ ConfigFetcher::ConfigFetcher(const IConfigContext::SP & context)
ConfigFetcher::ConfigFetcher(const SourceSpec & spec)
: _poller(std::make_shared<ConfigContext>(spec)),
- _thread(std::make_unique<vespalib::Thread>(_poller)),
+ _thread(std::make_unique<vespalib::Thread>(_poller, config_fetcher_thread)),
_closed(false),
_started(false)
{
diff --git a/config/src/vespa/config/retriever/simpleconfigurer.cpp b/config/src/vespa/config/retriever/simpleconfigurer.cpp
index 74022cfd6a3..5059b9997f5 100644
--- a/config/src/vespa/config/retriever/simpleconfigurer.cpp
+++ b/config/src/vespa/config/retriever/simpleconfigurer.cpp
@@ -8,10 +8,12 @@ LOG_SETUP(".config.retriever.simpleconfigurer");
namespace config {
+VESPA_THREAD_STACK_TAG(simple_configurer_thread);
+
SimpleConfigurer::SimpleConfigurer(SimpleConfigRetriever::UP retriever, SimpleConfigurable * const configurable)
: _retriever(std::move(retriever)),
_configurable(configurable),
- _thread(*this),
+ _thread(*this, simple_configurer_thread),
_started(false)
{
assert(_retriever);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 00dc1f4d065..69098ea0030 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -1081,7 +1081,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
public double getQuotaUsageRate(ApplicationId applicationId) {
var application = getApplication(applicationId);
return application.getModel().provisioned().all().values().stream()
- .map(Capacity::maxResources)
+ .map(Capacity::maxResources) // TODO: may be unspecified, in which case cost contribution is 0
.mapToDouble(resources -> resources.nodes() * resources.nodeResources().cost())
.sum();
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index a7deac443a5..1b55d17fd36 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -201,6 +201,7 @@ public class ModelContextImpl implements ModelContext {
private final boolean ignoreThreadStackSizes;
private final boolean unorderedMergeChaining;
private final boolean useV8GeoPositions;
+ private final boolean useV8DocManagerCfg;
public FeatureFlags(FlagSource source, ApplicationId appId) {
this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT);
@@ -240,7 +241,7 @@ public class ModelContextImpl implements ModelContext {
this.ignoreThreadStackSizes = flagValue(source, appId, Flags.IGNORE_THREAD_STACK_SIZES);
this.unorderedMergeChaining = flagValue(source, appId, Flags.UNORDERED_MERGE_CHAINING);
this.useV8GeoPositions = flagValue(source, appId, Flags.USE_V8_GEO_POSITIONS);
-
+ this.useV8DocManagerCfg = flagValue(source, appId, Flags.USE_V8_DOC_MANAGER_CFG);
}
@Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; }
@@ -282,6 +283,7 @@ public class ModelContextImpl implements ModelContext {
@Override public boolean ignoreThreadStackSizes() { return ignoreThreadStackSizes; }
@Override public boolean unorderedMergeChaining() { return unorderedMergeChaining; }
@Override public boolean useV8GeoPositions() { return useV8GeoPositions; }
+ @Override public boolean useV8DocManagerCfg() { return useV8DocManagerCfg; }
private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) {
return flag.bindTo(source)
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
index 5f3999a9fdf..b6a7efd3d4d 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
@@ -12,7 +12,6 @@ import com.yahoo.jrt.StringValue;
import com.yahoo.jrt.Supervisor;
import com.yahoo.jrt.Transport;
import com.yahoo.vespa.config.ConnectionPool;
-import com.yahoo.vespa.config.JRTConnectionPool;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.filedistribution.CompressedFileReference;
import com.yahoo.vespa.filedistribution.EmptyFileReferenceData;
@@ -22,8 +21,6 @@ import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
import com.yahoo.vespa.filedistribution.LazyFileReferenceData;
import com.yahoo.vespa.filedistribution.LazyTemporaryStorageFileReferenceData;
-import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.yolean.Exceptions;
import java.io.File;
@@ -40,6 +37,7 @@ import java.util.logging.Logger;
import static com.yahoo.vespa.config.server.filedistribution.FileDistributionUtil.getOtherConfigServersInCluster;
public class FileServer {
+
private static final Logger log = Logger.getLogger(FileServer.class.getName());
private final FileDirectory root;
@@ -77,15 +75,14 @@ public class FileServer {
@SuppressWarnings("WeakerAccess") // Created by dependency injection
@Inject
- public FileServer(ConfigserverConfig configserverConfig, FlagSource flagSource) {
+ public FileServer(ConfigserverConfig configserverConfig) {
this(new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())),
- createFileDownloader(getOtherConfigServersInCluster(configserverConfig),
- Flags.USE_FILE_DISTRIBUTION_CONNECTION_POOL.bindTo(flagSource).value()));
+ createFileDownloader(getOtherConfigServersInCluster(configserverConfig)));
}
// For testing only
public FileServer(File rootDir) {
- this(rootDir, createFileDownloader(List.of(), true));
+ this(rootDir, createFileDownloader(List.of()));
}
public FileServer(File rootDir, FileDownloader fileDownloader) {
@@ -212,22 +209,19 @@ public class FileServer {
executor.shutdown();
}
- private static FileDownloader createFileDownloader(List<String> configServers, boolean useFileDistributionConnectionPool) {
+ private static FileDownloader createFileDownloader(List<String> configServers) {
Supervisor supervisor = new Supervisor(new Transport("filedistribution-pool")).setDropEmptyBuffers(true);
return new FileDownloader(configServers.isEmpty()
? FileDownloader.emptyConnectionPool()
- : createConnectionPool(configServers, supervisor, useFileDistributionConnectionPool),
+ : createConnectionPool(configServers, supervisor),
supervisor);
}
- private static ConnectionPool createConnectionPool(List<String> configServers, Supervisor supervisor, boolean useFileDistributionConnectionPool) {
+ private static ConnectionPool createConnectionPool(List<String> configServers, Supervisor supervisor) {
ConfigSourceSet configSourceSet = new ConfigSourceSet(configServers);
-
if (configServers.size() == 0) return FileDownloader.emptyConnectionPool();
- return useFileDistributionConnectionPool
- ? new FileDistributionConnectionPool(configSourceSet, supervisor)
- : new JRTConnectionPool(configSourceSet, supervisor);
+ return new FileDistributionConnectionPool(configSourceSet, supervisor);
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java
index 48f23a1f7bd..43ed16ab21c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandler.java
@@ -3,8 +3,6 @@ package com.yahoo.vespa.config.server.http.v1;
import com.google.inject.Inject;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Deployer;
-import com.yahoo.config.provision.Deployment;
import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.path.Path;
import com.yahoo.restapi.RestApi;
@@ -20,7 +18,6 @@ import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.yolean.Exceptions;
import java.time.Clock;
-import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.List;
@@ -50,18 +47,16 @@ public class RoutingStatusApiHandler extends RestApiRequestHandler<RoutingStatus
private final Curator curator;
private final Clock clock;
- private final Deployer deployer;
@Inject
- public RoutingStatusApiHandler(Context context, Curator curator, Deployer deployer) {
- this(context, curator, Clock.systemUTC(), deployer);
+ public RoutingStatusApiHandler(Context context, Curator curator) {
+ this(context, curator, Clock.systemUTC());
}
- RoutingStatusApiHandler(Context context, Curator curator, Clock clock, Deployer deployer) {
+ RoutingStatusApiHandler(Context context, Curator curator, Clock clock) {
super(context, RoutingStatusApiHandler::createRestApiDefinition);
this.curator = Objects.requireNonNull(curator);
this.clock = Objects.requireNonNull(clock);
- this.deployer = Objects.requireNonNull(deployer);
curator.create(DEPLOYMENT_STATUS_ROOT);
}
@@ -122,24 +117,8 @@ public class RoutingStatusApiHandler extends RestApiRequestHandler<RoutingStatus
log.log(Level.INFO, "Changing routing status of " + instance + " from " +
currentStatus.status() + " to " + wantedStatus.status());
boolean needsChange = currentStatuses.stream().anyMatch(status -> status.status() != wantedStatus.status());
- if (!needsChange) {
- return new SlimeJsonResponse(toSlime(wantedStatus));
- }
- changeStatus(upstreamNames, wantedStatus);
- try {
- Optional<Deployment> deployment = deployer.deployFromLocalActive(instance, Duration.ofMinutes(1));
- if (deployment.isEmpty()) throw new IllegalArgumentException("No deployment of " + instance + " found");
- deployment.get().activate();
- } catch (Exception e) {
- log.log(Level.SEVERE, "Failed to redeploy " + instance + ". Reverting routing status to " +
- currentStatus.status(), e);
- changeStatus(upstreamNames, currentStatus);
- throw new RestApiException.InternalServerError("Failed to change status to " +
- wantedStatus.status() + ", reverting to "
- + currentStatus.status() +
- " because redeployment of " +
- instance + " failed: " +
- Exceptions.toMessageString(e));
+ if (needsChange) {
+ changeStatus(upstreamNames, wantedStatus);
}
return new SlimeJsonResponse(toSlime(wantedStatus));
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
index cf082cb32b1..53007566a62 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
@@ -8,18 +8,16 @@ import com.yahoo.config.subscription.ConfigSourceSet;
import com.yahoo.jrt.Supervisor;
import com.yahoo.jrt.Transport;
import com.yahoo.vespa.config.ConnectionPool;
-import com.yahoo.vespa.config.JRTConnectionPool;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.session.Session;
import com.yahoo.vespa.config.server.session.SessionRepository;
import com.yahoo.vespa.config.server.tenant.Tenant;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.defaults.Defaults;
+import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
-import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import java.io.File;
import java.time.Duration;
@@ -54,11 +52,7 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
this.applicationRepository = applicationRepository;
this.configserverConfig = applicationRepository.configserverConfig();
this.downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
- boolean useFileDistributionConnectionPool = Flags.USE_FILE_DISTRIBUTION_CONNECTION_POOL.bindTo(flagSource).value();
- this.fileDownloader = createFileDownloader(configserverConfig,
- useFileDistributionConnectionPool,
- downloadDirectory,
- supervisor);
+ this.fileDownloader = createFileDownloader(configserverConfig, downloadDirectory, supervisor);
}
@Override
@@ -99,19 +93,14 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
}
private static FileDownloader createFileDownloader(ConfigserverConfig configserverConfig,
- boolean useFileDistributionConnectionPool,
File downloadDirectory,
Supervisor supervisor) {
List<String> otherConfigServersInCluster = getOtherConfigServersInCluster(configserverConfig);
ConfigSourceSet configSourceSet = new ConfigSourceSet(otherConfigServersInCluster);
- ConnectionPool connectionPool;
- if (otherConfigServersInCluster.isEmpty())
- connectionPool = FileDownloader.emptyConnectionPool();
- else
- connectionPool = useFileDistributionConnectionPool
- ? new FileDistributionConnectionPool(configSourceSet, supervisor)
- : new JRTConnectionPool(configSourceSet, supervisor);
+ ConnectionPool connectionPool = (otherConfigServersInCluster.isEmpty())
+ ? FileDownloader.emptyConnectionPool()
+ : new FileDistributionConnectionPool(configSourceSet, supervisor);
return new FileDownloader(connectionPool, supervisor, downloadDirectory, Duration.ofSeconds(30));
}
diff --git a/configserver/src/main/sh/start-configserver b/configserver/src/main/sh/start-configserver
index efee86be29f..317af4b2fea 100755
--- a/configserver/src/main/sh/start-configserver
+++ b/configserver/src/main/sh/start-configserver
@@ -177,7 +177,6 @@ vespa-run-as-vespa-user vespa-runserver -s configserver -r 30 -p $pidfile -- \
--add-opens=java.base/java.nio=ALL-UNNAMED \
--add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
- --add-opens=java.base/sun.security.util=ALL-UNNAMED \
-Djava.io.tmpdir=${VESPA_HOME}/tmp \
-Djava.library.path=${VESPA_HOME}/lib64 \
-Djava.awt.headless=true \
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
index f85ca37a351..29ec11bad26 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
@@ -10,7 +10,6 @@ import com.yahoo.net.HostName;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -118,7 +117,7 @@ public class FileServerTest {
private FileServer createFileServer(ConfigserverConfig.Builder configBuilder) throws IOException {
File fileReferencesDir = temporaryFolder.newFolder();
configBuilder.fileReferencesDir(fileReferencesDir.getAbsolutePath());
- return new FileServer(new ConfigserverConfig(configBuilder), new InMemoryFlagSource());
+ return new FileServer(new ConfigserverConfig(configBuilder));
}
private static class FileReceiver implements FileServer.Receiver {
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java
index 8dd7cf4d6fc..e2b45d33cbc 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v1/RoutingStatusApiHandlerTest.java
@@ -2,9 +2,6 @@
package com.yahoo.vespa.config.server.http.v1;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Deployer;
-import com.yahoo.config.provision.Deployment;
-import com.yahoo.config.provision.HostFilter;
import com.yahoo.container.jdisc.HttpRequestBuilder;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.jdisc.http.HttpRequest.Method;
@@ -19,17 +16,11 @@ import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
-import java.time.Clock;
-import java.time.Duration;
import java.time.Instant;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
-import java.util.Optional;
import static com.yahoo.yolean.Exceptions.uncheck;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* @author bjorncs
@@ -42,7 +33,6 @@ public class RoutingStatusApiHandlerTest {
private final Curator curator = new MockCurator();
private final ManualClock clock = new ManualClock();
- private final MockDeployer deployer = new MockDeployer(clock);
private RestApiTestDriver testDriver;
@@ -50,8 +40,7 @@ public class RoutingStatusApiHandlerTest {
public void before() {
RoutingStatusApiHandler requestHandler = new RoutingStatusApiHandler(RestApiTestDriver.createHandlerTestContext(),
curator,
- clock,
- deployer);
+ clock);
testDriver = RestApiTestDriver.newBuilder(requestHandler).build();
}
@@ -77,14 +66,6 @@ public class RoutingStatusApiHandlerTest {
String response = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(),
statusOut()));
assertEquals(response("OUT", "issue-XXX", "operator", clock.instant()), response);
- assertTrue("Re-deployed " + instance, deployer.lastDeployed.containsKey(instance));
-
- // Status is reverted if redeployment fails
- deployer.failNextDeployment(true);
- response = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreamName + "?application=" + instance.serializedForm(),
- requestContent("IN", "all good")));
- assertEquals("{\"error-code\":\"INTERNAL_SERVER_ERROR\",\"message\":\"Failed to change status to in, reverting to out because redeployment of t1.a1.i1 failed: Deployment failed\"}",
- response);
// Read status stored in old format (path exists, but without content)
curator.set(Path.fromString("/routing/v1/status/" + upstreamName), new byte[0]);
@@ -92,7 +73,6 @@ public class RoutingStatusApiHandlerTest {
assertEquals(response("OUT", "", "", clock.instant()), response);
// Change status of multiple upstreams
- deployer.failNextDeployment(false);
String upstreamName2 = "upstream2";
String upstreams = upstreamName + "," + upstreamName2 + "," + upstreamName2;
response = responseAsString(executeRequest(Method.PUT, "/routing/v1/status/" + upstreams + "?application=" + instance.serializedForm(),
@@ -172,57 +152,4 @@ public class RoutingStatusApiHandlerTest {
return "{\"status\":\"" + status + "\",\"cause\":\"" + reason + "\",\"agent\":\"" + agent + "\",\"lastUpdate\":" + instant.getEpochSecond() + "}";
}
- private static class MockDeployer implements Deployer {
-
- private final Map<ApplicationId, Instant> lastDeployed = new HashMap<>();
- private final Clock clock;
-
- private boolean failNextDeployment = false;
-
- public MockDeployer(Clock clock) {
- this.clock = clock;
- }
-
- public MockDeployer failNextDeployment(boolean fail) {
- this.failNextDeployment = fail;
- return this;
- }
-
- @Override
- public Optional<Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap) {
- return deployFromLocalActive(application, Duration.ZERO, false);
- }
-
- @Override
- public Optional<Deployment> deployFromLocalActive(ApplicationId application, Duration timeout, boolean bootstrap) {
- if (failNextDeployment) {
- throw new RuntimeException("Deployment failed");
- }
- return Optional.of(new Deployment() {
- @Override
- public void prepare() {}
-
- @Override
- public long activate() {
- lastDeployed.put(application, clock.instant());
- return 1L;
- }
-
- @Override
- public void restart(HostFilter filter) {}
- });
- }
-
- @Override
- public Optional<Instant> lastDeployTime(ApplicationId application) {
- return Optional.ofNullable(lastDeployed.get(application));
- }
-
- @Override
- public Duration serverDeployTimeout() {
- return Duration.ZERO;
- }
-
- }
-
}
diff --git a/container-core/abi-spec.json b/container-core/abi-spec.json
index 6bee1f2b4fb..ed53543fcbc 100644
--- a/container-core/abi-spec.json
+++ b/container-core/abi-spec.json
@@ -2321,8 +2321,7 @@
"public"
],
"methods": [
- "public void <init>(com.yahoo.jdisc.http.HttpRequest)",
- "public com.yahoo.jdisc.http.HttpRequest getParentRequest()"
+ "public void <init>(com.yahoo.jdisc.http.HttpRequest)"
],
"fields": []
},
@@ -2840,6 +2839,7 @@
"public void <init>(com.yahoo.processing.Request, com.yahoo.processing.request.ErrorMessage)",
"public void mergeWith(com.yahoo.processing.Response)",
"public com.yahoo.processing.response.DataList data()",
+ "public static java.util.concurrent.CompletableFuture recursiveFuture(com.yahoo.processing.response.DataList)",
"public static com.google.common.util.concurrent.ListenableFuture recursiveComplete(com.yahoo.processing.response.DataList)"
],
"fields": []
@@ -3138,8 +3138,9 @@
"public abstract void endResponse()",
"public void <init>()",
"public void <init>(java.util.concurrent.Executor)",
- "public final com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
+ "public final java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
"public void deconstruct()",
+ "public final java.util.concurrent.CompletableFuture renderResponseBeforeHandover(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
"public final com.google.common.util.concurrent.ListenableFuture renderBeforeHandover(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
"public com.yahoo.processing.execution.Execution getExecution()",
"public com.yahoo.processing.Response getResponse()",
@@ -3185,7 +3186,8 @@
"public void <init>()",
"public com.yahoo.processing.rendering.Renderer clone()",
"public void init()",
- "public abstract com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
+ "public com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
+ "public java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
"public abstract java.lang.String getEncoding()",
"public abstract java.lang.String getMimeType()",
"public bridge synthetic com.yahoo.component.AbstractComponent clone()",
@@ -3389,7 +3391,7 @@
"fields": []
},
"com.yahoo.processing.response.AbstractDataList$DrainOnGetFuture": {
- "superClass": "com.google.common.util.concurrent.AbstractFuture",
+ "superClass": "com.yahoo.processing.impl.ProcessingFuture",
"interfaces": [],
"attributes": [
"public",
@@ -3401,8 +3403,8 @@
"public boolean isCancelled()",
"public com.yahoo.processing.response.DataList get()",
"public com.yahoo.processing.response.DataList get(long, java.util.concurrent.TimeUnit)",
- "public bridge synthetic java.lang.Object get()",
- "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)"
+ "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)",
+ "public bridge synthetic java.lang.Object get()"
],
"fields": []
},
@@ -3424,6 +3426,7 @@
"public com.yahoo.processing.Request request()",
"public com.yahoo.processing.response.IncomingData incoming()",
"public com.google.common.util.concurrent.ListenableFuture complete()",
+ "public java.util.concurrent.CompletableFuture completeFuture()",
"public boolean isOrdered()",
"public boolean isStreamed()",
"public java.lang.String toString()"
@@ -3482,6 +3485,7 @@
"public abstract com.yahoo.processing.response.Data get(int)",
"public abstract java.util.List asList()",
"public abstract com.yahoo.processing.response.IncomingData incoming()",
+ "public abstract java.util.concurrent.CompletableFuture completeFuture()",
"public abstract com.google.common.util.concurrent.ListenableFuture complete()",
"public abstract void addDataListener(java.lang.Runnable)",
"public void close()"
@@ -3502,6 +3506,7 @@
"public final void assignOwner(com.yahoo.processing.response.DataList)",
"public com.yahoo.processing.response.DataList getOwner()",
"public com.google.common.util.concurrent.ListenableFuture completed()",
+ "public java.util.concurrent.CompletableFuture completedFuture()",
"public synchronized boolean isComplete()",
"public synchronized void addLast(com.yahoo.processing.response.Data)",
"public synchronized void add(com.yahoo.processing.response.Data)",
@@ -3515,26 +3520,29 @@
"fields": []
},
"com.yahoo.processing.response.FutureResponse": {
- "superClass": "com.google.common.util.concurrent.ForwardingFuture",
- "interfaces": [],
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.util.concurrent.Future"
+ ],
"attributes": [
"public"
],
"methods": [
"public void <init>(java.util.concurrent.Callable, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
- "public com.google.common.util.concurrent.ListenableFutureTask delegate()",
+ "public java.util.concurrent.FutureTask delegate()",
+ "public boolean cancel(boolean)",
+ "public boolean isCancelled()",
+ "public boolean isDone()",
"public com.yahoo.processing.Response get()",
"public com.yahoo.processing.Response get(long, java.util.concurrent.TimeUnit)",
"public com.yahoo.processing.Request getRequest()",
"public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)",
- "public bridge synthetic java.lang.Object get()",
- "public bridge synthetic java.util.concurrent.Future delegate()",
- "public bridge synthetic java.lang.Object delegate()"
+ "public bridge synthetic java.lang.Object get()"
],
"fields": []
},
"com.yahoo.processing.response.IncomingData$NullIncomingData$ImmediateFuture": {
- "superClass": "com.google.common.util.concurrent.AbstractFuture",
+ "superClass": "com.yahoo.processing.impl.ProcessingFuture",
"interfaces": [],
"attributes": [
"public"
@@ -3546,8 +3554,8 @@
"public boolean isDone()",
"public com.yahoo.processing.response.DataList get()",
"public com.yahoo.processing.response.DataList get(long, java.util.concurrent.TimeUnit)",
- "public bridge synthetic java.lang.Object get()",
- "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)"
+ "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)",
+ "public bridge synthetic java.lang.Object get()"
],
"fields": []
},
@@ -3563,6 +3571,7 @@
"methods": [
"public void <init>(com.yahoo.processing.response.DataList)",
"public com.google.common.util.concurrent.ListenableFuture completed()",
+ "public java.util.concurrent.CompletableFuture completedFuture()",
"public com.yahoo.processing.response.DataList getOwner()",
"public boolean isComplete()",
"public void addLast(com.yahoo.processing.response.Data)",
@@ -3586,6 +3595,7 @@
],
"methods": [
"public abstract com.yahoo.processing.response.DataList getOwner()",
+ "public abstract java.util.concurrent.CompletableFuture completedFuture()",
"public abstract com.google.common.util.concurrent.ListenableFuture completed()",
"public abstract boolean isComplete()",
"public abstract void addLast(com.yahoo.processing.response.Data)",
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
index 8ac2305f5df..2580b4a6ac0 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
@@ -73,7 +73,7 @@ public class DiscFilterRequest {
*/
@Deprecated(forRemoval = true, since = "7.511")
public HttpRequest getParentRequest() {
- throw new UnsupportedOperationException("getParentRequest is not supported for " + parent.getClass().getName());
+ return parent;
}
/**
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java
index aa4050dd963..74c3b8adc7d 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/filter/JdiscFilterRequest.java
@@ -10,15 +10,8 @@ import com.yahoo.jdisc.http.HttpRequest;
@Deprecated(forRemoval = true, since = "7.511")
public class JdiscFilterRequest extends DiscFilterRequest {
- private final HttpRequest parent;
-
public JdiscFilterRequest(HttpRequest parent) {
super(parent);
- this.parent = parent;
}
- @SuppressWarnings("removal")
- @Override
- public HttpRequest getParentRequest() { return parent; }
-
}
diff --git a/container-core/src/main/java/com/yahoo/processing/Response.java b/container-core/src/main/java/com/yahoo/processing/Response.java
index 0319a36f2f8..cf54d043c5f 100644
--- a/container-core/src/main/java/com/yahoo/processing/Response.java
+++ b/container-core/src/main/java/com/yahoo/processing/Response.java
@@ -1,12 +1,12 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.processing;
-import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
import com.yahoo.component.provider.ListenableFreezableClass;
+import com.yahoo.concurrent.CompletableFutures;
import com.yahoo.concurrent.SystemTimer;
import com.yahoo.processing.execution.ResponseReceiver;
+import com.yahoo.processing.impl.ProcessingFuture;
import com.yahoo.processing.request.CompoundName;
import com.yahoo.processing.request.ErrorMessage;
import com.yahoo.processing.response.ArrayDataList;
@@ -15,8 +15,8 @@ import com.yahoo.processing.response.DataList;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -57,7 +57,7 @@ public class Response extends ListenableFreezableClass {
if (freezeListener != null) {
if (freezeListener instanceof ResponseReceiver)
((ResponseReceiver)freezeListener).setResponse(this);
- data.addFreezeListener(freezeListener, MoreExecutors.directExecutor());
+ data.addFreezeListener(freezeListener, Runnable::run);
}
}
@@ -96,15 +96,22 @@ public class Response extends ListenableFreezableClass {
* @param rootDataList the list to complete recursively
* @return the future in which all data in and below this list is complete, as the given root dataList for convenience
*/
- public static <D extends Data> ListenableFuture<DataList<D>> recursiveComplete(DataList<D> rootDataList) {
- List<ListenableFuture<DataList<D>>> futures = new ArrayList<>();
+ public static <D extends Data> CompletableFuture<DataList<D>> recursiveFuture(DataList<D> rootDataList) {
+ List<CompletableFuture<DataList<D>>> futures = new ArrayList<>();
collectCompletionFutures(rootDataList, futures);
return new CompleteAllOnGetFuture<D>(futures);
}
+ /** @deprecated Use {@link #recursiveFuture(DataList)} instead */
+ @Deprecated(forRemoval = true, since = "7")
+ @SuppressWarnings("removal")
+ public static <D extends Data> ListenableFuture<DataList<D>> recursiveComplete(DataList<D> rootDataList) {
+ return CompletableFutures.toGuavaListenableFuture(recursiveFuture(rootDataList));
+ }
+
@SuppressWarnings("unchecked")
- private static <D extends Data> void collectCompletionFutures(DataList<D> dataList, List<ListenableFuture<DataList<D>>> futures) {
- futures.add(dataList.complete());
+ private static <D extends Data> void collectCompletionFutures(DataList<D> dataList, List<CompletableFuture<DataList<D>>> futures) {
+ futures.add(dataList.completeFuture());
for (D data : dataList.asList()) {
if (data instanceof DataList)
collectCompletionFutures((DataList<D>) data, futures);
@@ -115,24 +122,24 @@ public class Response extends ListenableFreezableClass {
* A future which on get calls get on all its given futures and sets the value returned from the
* first given future as its result.
*/
- private static class CompleteAllOnGetFuture<D extends Data> extends AbstractFuture<DataList<D>> {
+ private static class CompleteAllOnGetFuture<D extends Data> extends ProcessingFuture<DataList<D>> {
- private final List<ListenableFuture<DataList<D>>> futures;
+ private final List<CompletableFuture<DataList<D>>> futures;
- public CompleteAllOnGetFuture(List<ListenableFuture<DataList<D>>> futures) {
+ public CompleteAllOnGetFuture(List<CompletableFuture<DataList<D>>> futures) {
this.futures = new ArrayList<>(futures);
}
@Override
public DataList<D> get() throws InterruptedException, ExecutionException {
DataList<D> result = null;
- for (ListenableFuture<DataList<D>> future : futures) {
+ for (CompletableFuture<DataList<D>> future : futures) {
if (result == null)
result = future.get();
else
future.get();
}
- set(result);
+ complete(result);
return result;
}
@@ -141,7 +148,7 @@ public class Response extends ListenableFreezableClass {
DataList<D> result = null;
long timeLeft = unit.toMillis(timeout);
long currentCallStart = SystemTimer.INSTANCE.milliTime();
- for (ListenableFuture<DataList<D>> future : futures) {
+ for (CompletableFuture<DataList<D>> future : futures) {
if (result == null)
result = future.get(timeLeft, TimeUnit.MILLISECONDS);
else
@@ -151,7 +158,7 @@ public class Response extends ListenableFreezableClass {
if (timeLeft <= 0) break;
currentCallStart = currentCallEnd;
}
- set(result);
+ complete(result);
return result;
}
diff --git a/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java b/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java
index 5119e69f72e..9b9224e70ef 100644
--- a/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java
+++ b/container-core/src/main/java/com/yahoo/processing/handler/AbstractProcessingHandler.java
@@ -244,7 +244,8 @@ public abstract class AbstractProcessingHandler<COMPONENT extends Processor> ext
// Render if we have a renderer capable of it
if (getRenderer() instanceof AsynchronousSectionedRenderer) {
- ((AsynchronousSectionedRenderer) getRenderer()).renderBeforeHandover(new ContentChannelOutputStream(channel), response, execution, request);
+ ((AsynchronousSectionedRenderer) getRenderer()).renderResponseBeforeHandover(
+ new ContentChannelOutputStream(channel), response, execution, request);
}
}
diff --git a/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java b/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java
index 54fbce9e177..28645b4bde0 100644
--- a/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java
+++ b/container-core/src/main/java/com/yahoo/processing/handler/ProcessingResponse.java
@@ -1,19 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.processing.handler;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Collections;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Executor;
-
import com.google.common.collect.ImmutableList;
import com.yahoo.container.jdisc.AsyncHttpResponse;
-import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.VespaHeaders;
-import com.yahoo.container.logging.AccessLogEntry;
import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
import com.yahoo.processing.Request;
@@ -26,6 +16,14 @@ import com.yahoo.processing.request.ErrorMessage;
import com.yahoo.processing.response.Data;
import com.yahoo.processing.response.DataList;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Executor;
+
/**
* A response from running a request through processing. This response is just a
* wrapper of the knowhow needed to render the Response from processing.
@@ -62,7 +60,7 @@ public class ProcessingResponse extends AsyncHttpResponse {
AsynchronousRenderer asyncRenderer = (AsynchronousRenderer)renderer;
asyncRenderer.setNetworkWiring(channel, completionHandler);
}
- renderer.render(stream, processingResponse, execution, processingRequest);
+ renderer.renderResponse(stream, processingResponse, execution, processingRequest);
// the stream is closed in AsynchronousSectionedRenderer, after all data
// has arrived
}
diff --git a/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java b/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java
new file mode 100644
index 00000000000..ab597fffaff
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/processing/impl/ProcessingFuture.java
@@ -0,0 +1,31 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.processing.impl;
+
+import com.google.common.util.concurrent.ListenableFuture;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * A {@link CompletableFuture} where {@link #get()}/{@link #get(long, TimeUnit)} may have side-effects (e.g trigger the underlying computation).
+ *
+ * @author bjorncs
+ */
+// TODO Vespa 8 remove ListenableFuture implementation
+public abstract class ProcessingFuture<V> extends CompletableFuture<V> implements ListenableFuture<V> {
+
+ @Override public boolean cancel(boolean mayInterruptIfRunning) { return false; }
+ @Override public boolean isCancelled() { return false; }
+
+ @Override public abstract V get() throws InterruptedException, ExecutionException;
+ @Override public abstract V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException;
+
+ @Override
+ public void addListener(Runnable listener, Executor executor) {
+ whenCompleteAsync((__, ___) -> listener.run(), executor);
+ }
+
+}
diff --git a/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java b/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java
index b77d493ea30..bb5fe7a1f76 100644
--- a/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java
+++ b/container-core/src/main/java/com/yahoo/processing/rendering/AsynchronousSectionedRenderer.java
@@ -2,12 +2,10 @@
package com.yahoo.processing.rendering;
import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
+import com.yahoo.concurrent.CompletableFutures;
import com.yahoo.concurrent.ThreadFactoryFactory;
import com.yahoo.jdisc.handler.CompletionHandler;
import com.yahoo.jdisc.handler.ContentChannel;
-import java.util.logging.Level;
import com.yahoo.processing.Request;
import com.yahoo.processing.Response;
import com.yahoo.processing.execution.Execution;
@@ -23,12 +21,14 @@ import java.util.ArrayDeque;
import java.util.Collections;
import java.util.Deque;
import java.util.List;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
@@ -126,7 +126,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e
return executor;
}
- private SettableFuture<Boolean> success;
+ private CompletableFuture<Boolean> success;
private ContentChannel channel;
private CompletionHandler completionHandler;
@@ -173,8 +173,8 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e
* @return a future indicating whether rendering was successful
*/
@Override
- public final ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response,
- Execution execution, Request request) {
+ public final CompletableFuture<Boolean> renderResponse(OutputStream stream, RESPONSE response,
+ Execution execution, Request request) {
if (beforeHandoverMode) { // rendering has already started or is already complete
beforeHandoverMode = false;
if ( ! dataListListenerStack.isEmpty() &&
@@ -215,22 +215,31 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e
* At this point the worker thread still owns the Response, so all this rendering must happen
* on the caller thread invoking freeze (that is, on the thread calling this).
*/
- public final ListenableFuture<Boolean> renderBeforeHandover(OutputStream stream, RESPONSE response,
- Execution execution, Request request) {
+ public final CompletableFuture<Boolean> renderResponseBeforeHandover(OutputStream stream, RESPONSE response,
+ Execution execution, Request request) {
beforeHandoverMode = true;
if ( ! isInitialized) throw new IllegalStateException("render() invoked before init().");
return startRender(stream, response, execution, request);
}
- private ListenableFuture<Boolean> startRender(OutputStream stream, RESPONSE response,
+
+ /** @deprecated Use {@link #renderResponseBeforeHandover(OutputStream, Response, Execution, Request)} */
+ @Deprecated(forRemoval = true, since = "7")
+ @SuppressWarnings("removal")
+ public final ListenableFuture<Boolean> renderBeforeHandover(OutputStream stream, RESPONSE response,
+ Execution execution, Request request) {
+ return CompletableFutures.toGuavaListenableFuture(renderResponseBeforeHandover(stream, response, execution, request));
+ }
+
+ private CompletableFuture<Boolean> startRender(OutputStream stream, RESPONSE response,
Execution execution, Request request) {
this.response = response;
this.stream = stream;
this.execution = execution;
DataListListener parentOfTopLevelListener = new DataListListener(new ParentOfTopLevel(request,response.data()), null);
dataListListenerStack.addFirst(parentOfTopLevelListener);
- success = SettableFuture.create();
+ success = new CompletableFuture<>();
try {
getExecutor().execute(parentOfTopLevelListener);
} catch (RejectedExecutionException e) {
@@ -247,7 +256,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e
* inadvertently work ends up in async data producing threads in some cases.
*/
Executor getExecutor() {
- return beforeHandoverMode ? MoreExecutors.directExecutor() : renderingExecutor;
+ return beforeHandoverMode ? Runnable::run : renderingExecutor;
}
/** For inspection only; use getExecutor() for execution */
Executor getRenderingExecutor() { return renderingExecutor; }
@@ -350,10 +359,10 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e
return; // Called on completion of a list which is not frozen yet - hold off until frozen
if ( ! beforeHandoverMode)
- list.complete().get(); // trigger completion if not done already to invoke any listeners on that event
+ list.completeFuture().get(); // trigger completion if not done already to invoke any listeners on that event
boolean startedRendering = renderData();
if ( ! startedRendering || uncompletedChildren > 0) return; // children must render to completion first
- if (list.complete().isDone()) // might not be when in before handover mode
+ if (list.completeFuture().isDone()) // might not be when in before handover mode
endListLevel();
else
stream.flush();
@@ -435,8 +444,8 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e
flushIfLikelyToSuspend(subList);
subList.addFreezeListener(listListener, getExecutor());
- subList.complete().addListener(listListener, getExecutor());
- subList.incoming().completed().addListener(listListener, getExecutor());
+ subList.completeFuture().whenCompleteAsync((__, ___) -> listListener.run(), getExecutor());
+ subList.incoming().completedFuture().whenCompleteAsync((__, ___) -> listListener.run(), getExecutor());
}
private boolean isOrdered(DataList dataList) {
@@ -471,11 +480,11 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e
logger.log(Level.WARNING, "Exception caught while closing stream to client.", e);
} finally {
if (failed != null) {
- success.setException(failed);
+ success.completeExceptionally(failed);
} else if (closeException != null) {
- success.setException(closeException);
+ success.completeExceptionally(closeException);
} else {
- success.set(true);
+ success.complete(true);
}
if (channel != null) {
channel.close(completionHandler);
@@ -541,7 +550,7 @@ public abstract class AsynchronousSectionedRenderer<RESPONSE extends Response> e
} catch (Exception ignored) {
}
}
- success.setException(e);
+ success.completeExceptionally(e);
}
}
} catch (Error e) {
diff --git a/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java b/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java
index 14ec3002b0a..8db4ed4f624 100644
--- a/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java
+++ b/container-core/src/main/java/com/yahoo/processing/rendering/Renderer.java
@@ -3,11 +3,13 @@ package com.yahoo.processing.rendering;
import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.component.AbstractComponent;
+import com.yahoo.concurrent.CompletableFutures;
import com.yahoo.processing.Request;
import com.yahoo.processing.Response;
import com.yahoo.processing.execution.Execution;
import java.io.OutputStream;
+import java.util.concurrent.CompletableFuture;
/**
* Renders a response to a stream. The renderers are cloned just before
@@ -41,6 +43,17 @@ public abstract class Renderer<RESPONSE extends Response> extends AbstractCompon
}
/**
+ * @deprecated Use/implement {@link #renderResponse(OutputStream, Response, Execution, Request)} instead.
+ * Return type changed from {@link ListenableFuture} to {@link CompletableFuture}.
+ */
+ @Deprecated(forRemoval = true, since = "7")
+ @SuppressWarnings("removal")
+ public ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response, Execution execution,
+ Request request) {
+ return CompletableFutures.toGuavaListenableFuture(renderResponse(stream, response, execution, request));
+ }
+
+ /**
* Render a response to a stream. The stream also exposes a ByteBuffer API
* for efficient transactions to JDisc. The returned future will throw the
* exception causing failure wrapped in an ExecutionException if rendering
@@ -50,10 +63,13 @@ public abstract class Renderer<RESPONSE extends Response> extends AbstractCompon
* @param response the response to render
* @param execution the execution which created this response
* @param request the request matching the response
- * @return a ListenableFuture containing a boolean where true indicates a successful rendering
+ * @return a {@link CompletableFuture} containing a boolean where true indicates a successful rendering
*/
- public abstract ListenableFuture<Boolean> render(OutputStream stream, RESPONSE response,
- Execution execution, Request request);
+ @SuppressWarnings("removal")
+ public CompletableFuture<Boolean> renderResponse(OutputStream stream, RESPONSE response,
+ Execution execution, Request request) {
+ return CompletableFutures.toCompletableFuture(render(stream, response, execution, request));
+ }
/**
* Name of the output encoding, if applicable.
diff --git a/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java b/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java
index 4633ac5ec1c..b1ce0643487 100644
--- a/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java
+++ b/container-core/src/main/java/com/yahoo/processing/response/AbstractDataList.java
@@ -1,15 +1,13 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.processing.response;
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.ExecutionList;
import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
import com.yahoo.component.provider.ListenableFreezableClass;
+import com.yahoo.concurrent.CompletableFutures;
import com.yahoo.processing.Request;
+import com.yahoo.processing.impl.ProcessingFuture;
-import java.util.ArrayList;
-import java.util.List;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -34,7 +32,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable
*/
private final IncomingData<DATATYPE> incomingData;
- private final ListenableFuture<DataList<DATATYPE>> completedFuture;
+ private final CompletableFuture<DataList<DATATYPE>> completedFuture;
/**
* Creates a simple data list which does not allow late incoming data
@@ -94,10 +92,15 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable
return incomingData;
}
+ @Override
+ @SuppressWarnings("removal")
+ @Deprecated(forRemoval = true, since = "7")
public ListenableFuture<DataList<DATATYPE>> complete() {
- return completedFuture;
+ return CompletableFutures.toGuavaListenableFuture(completedFuture);
}
+ @Override public CompletableFuture<DataList<DATATYPE>> completeFuture() { return completedFuture; }
+
@Override
public boolean isOrdered() { return ordered; }
@@ -108,7 +111,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable
return super.toString() + (complete().isDone() ? " [completed]" : " [incomplete, " + incoming() + "]");
}
- public static final class DrainOnGetFuture<DATATYPE extends Data> extends AbstractFuture<DataList<DATATYPE>> {
+ public static final class DrainOnGetFuture<DATATYPE extends Data> extends ProcessingFuture<DataList<DATATYPE>> {
private final DataList<DATATYPE> owner;
@@ -137,7 +140,7 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable
*/
@Override
public DataList<DATATYPE> get() throws InterruptedException, ExecutionException {
- return drain(owner.incoming().completed().get());
+ return drain(owner.incoming().completedFuture().get());
}
/**
@@ -146,13 +149,13 @@ public abstract class AbstractDataList<DATATYPE extends Data> extends Listenable
*/
@Override
public DataList<DATATYPE> get(long timeout, TimeUnit timeUnit) throws InterruptedException, ExecutionException, TimeoutException {
- return drain(owner.incoming().completed().get(timeout, timeUnit));
+ return drain(owner.incoming().completedFuture().get(timeout, timeUnit));
}
private DataList<DATATYPE> drain(DataList<DATATYPE> dataList) {
for (DATATYPE item : dataList.incoming().drain())
dataList.add(item);
- set(dataList); // Signal completion to listeners
+ complete(dataList); // Signal completion to listeners
return dataList;
}
diff --git a/container-core/src/main/java/com/yahoo/processing/response/DataList.java b/container-core/src/main/java/com/yahoo/processing/response/DataList.java
index d566e201375..dbda8983f12 100644
--- a/container-core/src/main/java/com/yahoo/processing/response/DataList.java
+++ b/container-core/src/main/java/com/yahoo/processing/response/DataList.java
@@ -1,11 +1,10 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.processing.response;
-import com.google.common.util.concurrent.ExecutionList;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
-import java.util.concurrent.Executor;
+import java.util.concurrent.CompletableFuture;
/**
* A list of data items created due to a processing request.
@@ -73,6 +72,10 @@ public interface DataList<DATATYPE extends Data> extends Data {
* Making this call on a list which does not support future data always returns immediately and
* causes no memory synchronization cost.
*/
+ CompletableFuture<DataList<DATATYPE>> completeFuture();
+
+ /** @deprecated Use {@link #completeFuture()} instead */
+ @Deprecated(forRemoval = true, since = "7")
ListenableFuture<DataList<DATATYPE>> complete();
/**
diff --git a/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java b/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java
index 619e554f45c..813d6ac54d8 100644
--- a/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java
+++ b/container-core/src/main/java/com/yahoo/processing/response/DefaultIncomingData.java
@@ -2,12 +2,13 @@
package com.yahoo.processing.response;
import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
import com.yahoo.collections.Tuple2;
+import com.yahoo.concurrent.CompletableFutures;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
/**
@@ -19,7 +20,7 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData<
private DataList<DATATYPE> owner = null;
- private final SettableFuture<DataList<DATATYPE>> completionFuture;
+ private final CompletableFuture<DataList<DATATYPE>> completionFuture;
private final List<DATATYPE> dataList = new ArrayList<>();
@@ -35,7 +36,7 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData<
public DefaultIncomingData(DataList<DATATYPE> owner) {
assignOwner(owner);
- completionFuture = SettableFuture.create();
+ completionFuture = new CompletableFuture<>();
}
/** Assigns the owner of this. Throws an exception if the owner is already set. */
@@ -50,10 +51,14 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData<
}
@Override
+ @Deprecated(forRemoval = true, since = "7")
+ @SuppressWarnings("removal")
public ListenableFuture<DataList<DATATYPE>> completed() {
- return completionFuture;
+ return CompletableFutures.toGuavaListenableFuture(completionFuture);
}
+ @Override public CompletableFuture<DataList<DATATYPE>> completedFuture() { return completionFuture; }
+
/** Returns whether the data in this is complete */
@Override
public synchronized boolean isComplete() {
@@ -92,7 +97,7 @@ public class DefaultIncomingData<DATATYPE extends Data> implements IncomingData<
@Override
public synchronized void markComplete() {
complete = true;
- completionFuture.set(owner);
+ completionFuture.complete(owner);
}
/**
diff --git a/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java b/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java
index d589b7dd195..25c230e383f 100644
--- a/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java
+++ b/container-core/src/main/java/com/yahoo/processing/response/FutureResponse.java
@@ -1,8 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.processing.response;
-import com.google.common.util.concurrent.ForwardingFuture;
-import com.google.common.util.concurrent.ListenableFutureTask;
import com.yahoo.processing.Request;
import com.yahoo.processing.Response;
import com.yahoo.processing.execution.Execution;
@@ -10,6 +8,8 @@ import com.yahoo.processing.request.ErrorMessage;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
@@ -20,9 +20,10 @@ import java.util.logging.Logger;
*
* @author bratseth
*/
-public class FutureResponse extends ForwardingFuture<Response> {
+public class FutureResponse implements Future<Response> {
private final Request request;
+ private final FutureTask<Response> task;
/**
* Only used for generating messages
@@ -31,24 +32,23 @@ public class FutureResponse extends ForwardingFuture<Response> {
private final static Logger log = Logger.getLogger(FutureResponse.class.getName());
- private final ListenableFutureTask<Response> futureTask;
-
public FutureResponse(final Callable<Response> callable, Execution execution, final Request request) {
- this.futureTask = ListenableFutureTask.create(callable);
+ this.task = new FutureTask<>(callable);
this.request = request;
this.execution = execution;
}
- @Override
- public ListenableFutureTask<Response> delegate() {
- return futureTask;
- }
+ public FutureTask<Response> delegate() { return task; }
+
+ @Override public boolean cancel(boolean mayInterruptIfRunning) { return task.cancel(mayInterruptIfRunning); }
+ @Override public boolean isCancelled() { return task.isCancelled(); }
+ @Override public boolean isDone() { return task.isDone(); }
public
@Override
Response get() {
try {
- return super.get();
+ return task.get();
} catch (InterruptedException e) {
return new Response(request, new ErrorMessage("'" + execution + "' was interrupted", e));
} catch (ExecutionException e) {
@@ -61,7 +61,7 @@ public class FutureResponse extends ForwardingFuture<Response> {
@Override
Response get(long timeout, TimeUnit timeunit) {
try {
- return super.get(timeout, timeunit);
+ return task.get(timeout, timeunit);
} catch (InterruptedException e) {
return new Response(request, new ErrorMessage("'" + execution + "' was interrupted", e));
} catch (ExecutionException e) {
diff --git a/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java b/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java
index 371c1bca45f..54ba0fa8031 100644
--- a/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java
+++ b/container-core/src/main/java/com/yahoo/processing/response/IncomingData.java
@@ -1,11 +1,13 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.processing.response;
-import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.ListenableFuture;
+import com.yahoo.concurrent.CompletableFutures;
+import com.yahoo.processing.impl.ProcessingFuture;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
@@ -35,6 +37,10 @@ public interface IncomingData<DATATYPE extends Data> {
* <p>
* This return the list owning this for convenience.
*/
+ CompletableFuture<DataList<DATATYPE>> completedFuture();
+
+ /** @deprecated Use {@link #completedFuture()} instead */
+ @Deprecated(forRemoval = true, since = "7")
ListenableFuture<DataList<DATATYPE>> completed();
/**
@@ -108,10 +114,15 @@ public interface IncomingData<DATATYPE extends Data> {
completionFuture = new ImmediateFuture<>(owner);
}
+ @Override
+ @SuppressWarnings("removal")
+ @Deprecated(forRemoval = true, since = "7")
public ListenableFuture<DataList<DATATYPE>> completed() {
- return completionFuture;
+ return CompletableFutures.toGuavaListenableFuture(completionFuture);
}
+ @Override public CompletableFuture<DataList<DATATYPE>> completedFuture() { return completionFuture; }
+
@Override
public DataList<DATATYPE> getOwner() {
return owner;
@@ -178,13 +189,13 @@ public interface IncomingData<DATATYPE extends Data> {
* This is semantically the same as Futures.immediateFuture but contrary to it,
* this never causes any memory synchronization when accessed.
*/
- public static class ImmediateFuture<DATATYPE extends Data> extends AbstractFuture<DataList<DATATYPE>> {
+ public static class ImmediateFuture<DATATYPE extends Data> extends ProcessingFuture<DataList<DATATYPE>> {
- private DataList<DATATYPE> owner;
+ private final DataList<DATATYPE> owner;
public ImmediateFuture(DataList<DATATYPE> owner) {
this.owner = owner; // keep here to avoid memory synchronization for access
- set(owner); // Signal completion (for future listeners)
+ complete(owner); // Signal completion (for future listeners)
}
@Override
diff --git a/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java b/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java
index aebbc3f538d..ee8dbd8dccb 100644
--- a/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java
+++ b/container-core/src/main/java/com/yahoo/processing/test/ProcessorLibrary.java
@@ -1,8 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.processing.test;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
import com.yahoo.component.chain.Chain;
import com.yahoo.processing.Processor;
import com.yahoo.processing.Request;
@@ -15,6 +13,7 @@ import com.yahoo.processing.request.ErrorMessage;
import com.yahoo.processing.response.*;
import java.util.*;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
/**
@@ -288,7 +287,7 @@ public class ProcessorLibrary {
private final boolean ordered, streamed;
/** The incoming data this has created */
- public final SettableFuture<IncomingData> incomingData = SettableFuture.create();
+ public final CompletableFuture<IncomingData> incomingData = new CompletableFuture<>();
/** Create an instance which returns ordered, streamable data */
public ListenableFutureDataSource() { this(true, true); }
@@ -307,7 +306,7 @@ public class ProcessorLibrary {
dataList = ArrayDataList.createAsyncNonstreamed(request);
else
dataList = ArrayDataList.createAsync(request);
- incomingData.set(dataList.incoming());
+ incomingData.complete(dataList.incoming());
return new Response(dataList);
}
@@ -317,12 +316,12 @@ public class ProcessorLibrary {
public static class RequestCounter extends Processor {
/** The incoming data this has created */
- public final SettableFuture<IncomingData> incomingData = SettableFuture.create();
+ public final CompletableFuture<IncomingData> incomingData = new CompletableFuture<>();
@Override
public Response process(Request request, Execution execution) {
ArrayDataList dataList = ArrayDataList.createAsync(request);
- incomingData.set(dataList.incoming());
+ incomingData.complete(dataList.incoming());
return new Response(dataList);
}
@@ -354,7 +353,7 @@ public class ProcessorLibrary {
// wait for other executions and merge the responses
for (Response additionalResponse : AsyncExecution.waitForAll(futures, 1000)) {
- additionalResponse.data().complete().get(); // block until we have all the data elements
+ additionalResponse.data().completeFuture().get(); // block until we have all the data elements
for (Object item : additionalResponse.data().asList())
response.data().add((Data) item);
response.mergeWith(additionalResponse);
@@ -382,9 +381,10 @@ public class ProcessorLibrary {
public Response process(Request request, Execution execution) {
Response response = execution.process(request);
// TODO: Consider for to best provide helpers for this
- response.data().complete().addListener(new RunnableExecution(request,
- new ExecutionWithResponse(asyncChain, response, execution)),
- MoreExecutors.directExecutor());
+ response.data().completeFuture().whenComplete(
+ (__, ___) ->
+ new RunnableExecution(request, new ExecutionWithResponse(asyncChain, response, execution))
+ .run());
return response;
}
diff --git a/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java b/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java
index 0f16aed3d0b..efcf608b6f0 100644
--- a/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java
+++ b/container-core/src/test/java/com/yahoo/processing/ResponseTestCase.java
@@ -22,7 +22,7 @@ public class ResponseTestCase {
* Check the recursive toString printing along the way.
* List variable names ends by numbers specifying the index of the list at each level.
*/
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"unchecked", "removal"})
@Test
public void testRecursiveCompletionAndToString() throws InterruptedException, ExecutionException {
// create lists
diff --git a/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java b/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java
index 40e7384c745..2fb32271419 100644
--- a/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java
+++ b/container-core/src/test/java/com/yahoo/processing/execution/test/FutureDataTestCase.java
@@ -25,7 +25,7 @@ import static org.junit.Assert.assertEquals;
public class FutureDataTestCase {
/** Run a chain which ends in a processor which returns a response containing future data. */
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"unchecked", "removal"})
@Test
public void testFutureDataPassThrough() throws InterruptedException, ExecutionException, TimeoutException {
// Set up
@@ -52,7 +52,7 @@ public class FutureDataTestCase {
}
/** Federate to one source which returns data immediately and one who return future data */
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"unchecked", "removal"})
@Test
public void testFederateSyncAndAsyncData() throws InterruptedException, ExecutionException, TimeoutException {
// Set up
@@ -88,7 +88,7 @@ public class FutureDataTestCase {
}
/** Register a chain which will be called when some async data is available */
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"unchecked", "removal"})
@Test
public void testAsyncDataProcessing() throws InterruptedException, ExecutionException, TimeoutException {
// Set up
@@ -120,7 +120,7 @@ public class FutureDataTestCase {
* When the first of the futures are done one additional chain is to be run.
* When both are done another chain is to be run.
*/
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"unchecked", "removal"})
@Test
public void testAsyncDataProcessingOfFederatedResult() throws InterruptedException, ExecutionException, TimeoutException {
// Set up
diff --git a/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java b/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java
index 1ebf01c5f33..bd1307ff77c 100644
--- a/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java
+++ b/container-core/src/test/java/com/yahoo/processing/execution/test/StreamingTestCase.java
@@ -13,7 +13,6 @@ import com.yahoo.processing.test.ProcessorLibrary;
import org.junit.Test;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -27,7 +26,7 @@ import static org.junit.Assert.assertEquals;
public class StreamingTestCase {
/** Tests adding a chain which is called every time new data is added to a data list */
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"unchecked", "removal"})
@Test
public void testStreamingData() throws InterruptedException, ExecutionException, TimeoutException {
// Set up
diff --git a/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java b/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java
index ce2b54ba6ff..627081e0d3b 100644
--- a/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java
+++ b/container-core/src/test/java/com/yahoo/processing/rendering/AsynchronousSectionedRendererTest.java
@@ -15,7 +15,6 @@ import com.yahoo.processing.response.DataList;
import com.yahoo.processing.response.IncomingData;
import com.yahoo.text.Utf8;
import org.junit.Test;
-import static org.junit.Assert.*;
import java.io.IOException;
import java.io.OutputStream;
@@ -23,10 +22,16 @@ import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
-import java.util.concurrent.*;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
/**
* @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
@@ -222,7 +227,7 @@ public class AsynchronousSectionedRendererTest {
return render(renderer, data);
}
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"unchecked", "removal"})
public String render(Renderer renderer, DataList data) throws InterruptedException, IOException {
TestContentChannel contentChannel = new TestContentChannel();
@@ -403,6 +408,7 @@ public class AsynchronousSectionedRendererTest {
}
@Override
+ @SuppressWarnings("removal")
public ListenableFuture<DataList<StringData>> complete() {
return new ListenableFuture<DataList<StringData>>() {
@Override
@@ -438,6 +444,11 @@ public class AsynchronousSectionedRendererTest {
}
@Override
+ public CompletableFuture<DataList<StringData>> completeFuture() {
+ return CompletableFuture.completedFuture(this);
+ }
+
+ @Override
public String getString() {
return list.toString();
}
diff --git a/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java b/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java
index 67a6634b659..21731f7d714 100644
--- a/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java
+++ b/container-core/src/test/java/com/yahoo/processing/test/documentation/AsyncDataProcessingInitiator.java
@@ -3,8 +3,12 @@ package com.yahoo.processing.test.documentation;
import com.google.common.util.concurrent.MoreExecutors;
import com.yahoo.component.chain.Chain;
-import com.yahoo.processing.*;
-import com.yahoo.processing.execution.*;
+import com.yahoo.processing.Processor;
+import com.yahoo.processing.Request;
+import com.yahoo.processing.Response;
+import com.yahoo.processing.execution.Execution;
+import com.yahoo.processing.execution.ExecutionWithResponse;
+import com.yahoo.processing.execution.RunnableExecution;
/**
* A processor which registers a listener on the future completion of
@@ -18,6 +22,7 @@ public class AsyncDataProcessingInitiator extends Processor {
this.asyncChain=asyncChain;
}
+ @SuppressWarnings({"removal"})
@Override
public Response process(Request request, Execution execution) {
Response response=execution.process(request);
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java b/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java
index 7bb01e76b43..879778487f5 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/metric/GarbageCollectionMetrics.java
@@ -62,10 +62,10 @@ public class GarbageCollectionMetrics {
for(Iterator<Map.Entry<String, LinkedList<GcStats>>> it = gcStatistics.entrySet().iterator(); it.hasNext(); ) {
Map.Entry<String, LinkedList<GcStats>> entry = it.next();
LinkedList<GcStats> history = entry.getValue();
- while(history.isEmpty() == false && oldestToKeep.isAfter(history.getFirst().when)) {
+ while( ! history.isEmpty() && oldestToKeep.isAfter(history.getFirst().when)) {
history.removeFirst();
}
- if(history.isEmpty()) {
+ if (history.isEmpty()) {
it.remove();
}
}
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index 183bb33b4f4..f7176b0982a 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -4301,6 +4301,8 @@
"public void <init>(int, com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer)",
"public com.google.common.util.concurrent.ListenableFuture waitableRender(java.io.OutputStream)",
"public static com.google.common.util.concurrent.ListenableFuture waitableRender(com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer, java.io.OutputStream)",
+ "public java.util.concurrent.CompletableFuture asyncRender(java.io.OutputStream)",
+ "public static java.util.concurrent.CompletableFuture asyncRender(com.yahoo.search.Result, com.yahoo.search.Query, com.yahoo.processing.rendering.Renderer, java.io.OutputStream)",
"public void render(java.io.OutputStream, com.yahoo.jdisc.handler.ContentChannel, com.yahoo.jdisc.handler.CompletionHandler)",
"public void populateAccessLogEntry(com.yahoo.container.logging.AccessLogEntry)",
"public java.lang.String getParsedQuery()",
@@ -7201,13 +7203,13 @@
],
"methods": [
"public void <init>()",
- "public final com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.search.Result, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
+ "public final java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.search.Result, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
"protected abstract void render(java.io.Writer, com.yahoo.search.Result)",
"public java.lang.String getCharacterEncoding(com.yahoo.search.Result)",
"public java.lang.String getDefaultSummaryClass()",
"public final java.lang.String getRequestedEncoding(com.yahoo.search.Query)",
"public com.yahoo.search.rendering.Renderer clone()",
- "public bridge synthetic com.google.common.util.concurrent.ListenableFuture render(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
+ "public bridge synthetic java.util.concurrent.CompletableFuture renderResponse(java.io.OutputStream, com.yahoo.processing.Response, com.yahoo.processing.execution.Execution, com.yahoo.processing.Request)",
"public bridge synthetic com.yahoo.processing.rendering.Renderer clone()",
"public bridge synthetic com.yahoo.component.AbstractComponent clone()",
"public bridge synthetic java.lang.Object clone()"
@@ -7703,6 +7705,7 @@
"public java.util.Set getFilled()",
"public com.yahoo.processing.response.IncomingData incoming()",
"public com.google.common.util.concurrent.ListenableFuture complete()",
+ "public java.util.concurrent.CompletableFuture completeFuture()",
"public void addDataListener(java.lang.Runnable)",
"public void close()",
"public bridge synthetic com.yahoo.search.result.Hit clone()",
diff --git a/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java b/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java
index 5c897245e64..64e7403fa1a 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/HttpSearchResponse.java
@@ -3,6 +3,7 @@ package com.yahoo.search.handler;
import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.collections.ListMap;
+import com.yahoo.concurrent.CompletableFutures;
import com.yahoo.container.handler.Coverage;
import com.yahoo.container.handler.Timing;
import com.yahoo.container.jdisc.ExtendedResponse;
@@ -25,6 +26,7 @@ import java.io.OutputStream;
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.CompletableFuture;
/**
* Wrap the result of a query as an HTTP response.
@@ -75,20 +77,36 @@ public class HttpSearchResponse extends ExtendedResponse {
}
}
+ /** @deprecated Use {@link #asyncRender(OutputStream)} instead */
+ @Deprecated(forRemoval = true, since = "7")
public ListenableFuture<Boolean> waitableRender(OutputStream stream) throws IOException {
return waitableRender(result, query, rendererCopy, stream);
}
+ /** @deprecated Use {@link #asyncRender(Result, Query, Renderer, OutputStream)} instead */
+ @Deprecated(forRemoval = true, since = "7")
+ @SuppressWarnings("removal")
public static ListenableFuture<Boolean> waitableRender(Result result,
Query query,
Renderer<Result> renderer,
OutputStream stream) throws IOException {
+ return CompletableFutures.toGuavaListenableFuture(asyncRender(result, query, renderer, stream));
+ }
+
+ public CompletableFuture<Boolean> asyncRender(OutputStream stream) {
+ return asyncRender(result, query, rendererCopy, stream);
+ }
+
+ public static CompletableFuture<Boolean> asyncRender(Result result,
+ Query query,
+ Renderer<Result> renderer,
+ OutputStream stream) {
SearchResponse.trimHits(result);
SearchResponse.removeEmptySummaryFeatureFields(result);
- return renderer.render(stream, result, query.getModel().getExecution(), query);
-
+ return renderer.renderResponse(stream, result, query.getModel().getExecution(), query);
}
+
@Override
public void render(OutputStream output, ContentChannel networkChannel, CompletionHandler handler) throws IOException {
if (rendererCopy instanceof AsynchronousSectionedRenderer) {
@@ -98,9 +116,9 @@ public class HttpSearchResponse extends ExtendedResponse {
try {
try {
long nanoStart = System.nanoTime();
- ListenableFuture<Boolean> promise = waitableRender(output);
+ CompletableFuture<Boolean> promise = asyncRender(output);
if (metric != null) {
- promise.addListener(new RendererLatencyReporter(nanoStart), Runnable::run);
+ promise.whenComplete((__, ___) -> new RendererLatencyReporter(nanoStart).run());
}
} finally {
if (!(rendererCopy instanceof AsynchronousSectionedRenderer)) {
diff --git a/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java b/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java
index b8a7f0d1978..6ff8f003f7e 100644
--- a/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java
+++ b/container-search/src/main/java/com/yahoo/search/rendering/Renderer.java
@@ -1,19 +1,18 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.rendering;
-import com.yahoo.search.Query;
-import com.yahoo.search.Result;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
import com.yahoo.io.ByteWriter;
import com.yahoo.processing.Request;
import com.yahoo.processing.execution.Execution;
+import com.yahoo.search.Query;
+import com.yahoo.search.Result;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Writer;
import java.nio.charset.Charset;
import java.nio.charset.CharsetEncoder;
+import java.util.concurrent.CompletableFuture;
/**
* Renders a search result to a writer synchronously
@@ -37,7 +36,7 @@ abstract public class Renderer extends com.yahoo.processing.rendering.Renderer<R
* @return a future which is always completed to true
*/
@Override
- public final ListenableFuture<Boolean> render(OutputStream stream, Result response, Execution execution, Request request) {
+ public final CompletableFuture<Boolean> renderResponse(OutputStream stream, Result response, Execution execution, Request request) {
Writer writer = null;
try {
writer = createWriter(stream, response);
@@ -50,8 +49,8 @@ abstract public class Renderer extends com.yahoo.processing.rendering.Renderer<R
if (writer != null)
try { writer.close(); } catch (IOException e2) {};
}
- SettableFuture<Boolean> completed = SettableFuture.create();
- completed.set(true);
+ CompletableFuture<Boolean> completed = new CompletableFuture<>();
+ completed.complete(true);
return completed;
}
diff --git a/container-search/src/main/java/com/yahoo/search/result/HitGroup.java b/container-search/src/main/java/com/yahoo/search/result/HitGroup.java
index 1ae3f4e60cc..6d09bf66175 100644
--- a/container-search/src/main/java/com/yahoo/search/result/HitGroup.java
+++ b/container-search/src/main/java/com/yahoo/search/result/HitGroup.java
@@ -5,6 +5,7 @@ import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.collections.ListenableArrayList;
+import com.yahoo.concurrent.CompletableFutures;
import com.yahoo.net.URI;
import com.yahoo.prelude.fastsearch.SortDataHitSorter;
import com.yahoo.processing.response.ArrayDataList;
@@ -19,6 +20,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
/**
@@ -84,7 +86,7 @@ public class HitGroup extends Hit implements DataList<Hit>, Cloneable, Iterable<
*/
private DefaultErrorHit errorHit = null;
- private final ListenableFuture<DataList<Hit>> completedFuture;
+ private final CompletableFuture<DataList<Hit>> completedFuture;
private final IncomingData<Hit> incomingHits;
@@ -965,7 +967,13 @@ public class HitGroup extends Hit implements DataList<Hit>, Cloneable, Iterable<
public IncomingData<Hit> incoming() { return incomingHits; }
@Override
- public ListenableFuture<DataList<Hit>> complete() { return completedFuture; }
+ @SuppressWarnings("removal")
+ @Deprecated(forRemoval = true, since = "7")
+ public ListenableFuture<DataList<Hit>> complete() {
+ return CompletableFutures.toGuavaListenableFuture(completedFuture);
+ }
+
+ @Override public CompletableFuture<DataList<Hit>> completeFuture() { return completedFuture; }
@Override
public void addDataListener(Runnable runnable) {
diff --git a/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java b/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java
index 0819cbd72b4..b39c170c6a3 100644
--- a/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/pagetemplates/engine/test/ExecutionAbstractTestCase.java
@@ -53,7 +53,7 @@ public class ExecutionAbstractTestCase {
assertRendered(result,resultFileName,false);
}
- @SuppressWarnings("deprecation")
+ @SuppressWarnings({"deprecation", "removal"})
protected void assertRendered(Result result, String resultFileName, boolean print) {
try {
PageTemplatesXmlRenderer renderer = new PageTemplatesXmlRenderer();
diff --git a/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java
index 359aed85d30..7db29568d5b 100644
--- a/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/rendering/AsyncGroupPopulationTestCase.java
@@ -1,23 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.rendering;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.junit.Test;
-
-import com.fasterxml.jackson.core.JsonParseException;
-import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.concurrent.Receiver;
import com.yahoo.processing.response.Data;
import com.yahoo.processing.response.DataList;
@@ -29,6 +13,20 @@ import com.yahoo.search.result.HitGroup;
import com.yahoo.search.result.Relevance;
import com.yahoo.search.searchchain.Execution;
import com.yahoo.text.Utf8;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.function.BiConsumer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
/**
* Test adding hits to a hit group during rendering.
@@ -36,18 +34,20 @@ import com.yahoo.text.Utf8;
* @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a>
*/
public class AsyncGroupPopulationTestCase {
- private static class WrappedFuture<F> implements ListenableFuture<F> {
+ private static class WrappedFuture<F> extends CompletableFuture<F> {
Receiver<Boolean> isListening = new Receiver<>();
- private ListenableFuture<F> wrapped;
+ private final CompletableFuture<F> wrapped;
- WrappedFuture(ListenableFuture<F> wrapped) {
+ WrappedFuture(CompletableFuture<F> wrapped) {
this.wrapped = wrapped;
}
- public void addListener(Runnable listener, Executor executor) {
- wrapped.addListener(listener, executor);
+ @Override
+ public CompletableFuture<F> whenCompleteAsync(BiConsumer<? super F, ? super Throwable> action, Executor executor) {
+ wrapped.whenCompleteAsync(action);
isListening.put(Boolean.TRUE);
+ return this;
}
public boolean cancel(boolean mayInterruptIfRunning) {
@@ -73,14 +73,14 @@ public class AsyncGroupPopulationTestCase {
}
private static class ObservableIncoming<DATATYPE extends Data> extends DefaultIncomingData<DATATYPE> {
- WrappedFuture<DataList<DATATYPE>> waitForIt = null;
+ volatile WrappedFuture<DataList<DATATYPE>> waitForIt = null;
private final Object lock = new Object();
@Override
- public ListenableFuture<DataList<DATATYPE>> completed() {
+ public CompletableFuture<DataList<DATATYPE>> completedFuture() {
synchronized (lock) {
if (waitForIt == null) {
- waitForIt = new WrappedFuture<>(super.completed());
+ waitForIt = new WrappedFuture<>(super.completedFuture());
}
}
return waitForIt;
@@ -99,7 +99,7 @@ public class AsyncGroupPopulationTestCase {
@Test
public final void test() throws InterruptedException, ExecutionException,
- JsonParseException, JsonMappingException, IOException {
+ IOException {
String rawExpected = "{"
+ " \"root\": {"
+ " \"children\": ["
@@ -125,10 +125,10 @@ public class AsyncGroupPopulationTestCase {
JsonRenderer renderer = new JsonRenderer();
Result result = new Result(new Query(), h);
renderer.init();
- ListenableFuture<Boolean> f = renderer.render(out, result,
+ CompletableFuture<Boolean> f = renderer.renderResponse(out, result,
new Execution(Execution.Context.createContextStub()),
result.getQuery());
- WrappedFuture<DataList<Hit>> x = (WrappedFuture<DataList<Hit>>) h.incoming().completed();
+ WrappedFuture<DataList<Hit>> x = (WrappedFuture<DataList<Hit>>) h.incoming().completedFuture();
x.isListening.get(86_400_000);
h.incoming().add(new Hit("yahoo2"));
h.incoming().markComplete();
diff --git a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
index 7395b4802a0..f3a71af0b9e 100644
--- a/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/rendering/JsonRendererTestCase.java
@@ -364,6 +364,7 @@ public class JsonRendererTestCase {
}
@Test
+ @SuppressWarnings("removal")
public void testEmptyTracing() throws IOException, InterruptedException, ExecutionException {
String expected = "{"
+ " \"root\": {"
@@ -391,7 +392,7 @@ public class JsonRendererTestCase {
assertEqualJson(expected, summary);
}
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({"unchecked", "removal"})
@Test
public void testTracingWithEmptySubtree() throws IOException, InterruptedException, ExecutionException {
String expected = "{"
@@ -1372,6 +1373,7 @@ public class JsonRendererTestCase {
return render(execution, r);
}
+ @SuppressWarnings("removal")
private String render(Execution execution, Result r) throws InterruptedException, ExecutionException {
ByteArrayOutputStream bs = new ByteArrayOutputStream();
ListenableFuture<Boolean> f = renderer.render(bs, r, execution, null);
diff --git a/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java
index ae1eade12d3..99911276f50 100644
--- a/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/rendering/SyncDefaultRendererTestCase.java
@@ -1,17 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.rendering;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.concurrent.ExecutionException;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.component.chain.Chain;
import com.yahoo.prelude.fastsearch.FastHit;
@@ -26,6 +15,15 @@ import com.yahoo.search.statistics.ElapsedTimeTestCase.CreativeTimeSource;
import com.yahoo.search.statistics.ElapsedTimeTestCase.UselessSearcher;
import com.yahoo.search.statistics.TimeTracker;
import com.yahoo.text.Utf8;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.util.concurrent.ExecutionException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
/**
* Check the legacy sync default renderer doesn't spontaneously combust.
@@ -56,7 +54,7 @@ public class SyncDefaultRendererTestCase {
assertEquals("text/xml", d.getMimeType());
}
- @SuppressWarnings("deprecation")
+ @SuppressWarnings({"deprecation", "removal"})
@Test
public void testRenderWriterResult() throws InterruptedException, ExecutionException {
Query q = new Query("/?query=a&tracelevel=5");
diff --git a/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java
index 0fad449763f..b3534d580d8 100644
--- a/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/rendering/XMLRendererTestCase.java
@@ -1,39 +1,36 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.rendering;
-import static org.junit.Assert.*;
-
-import java.io.ByteArrayOutputStream;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
+import com.google.common.util.concurrent.ListenableFuture;
import com.yahoo.component.ComponentId;
+import com.yahoo.component.chain.Chain;
import com.yahoo.container.QrSearchersConfig;
import com.yahoo.prelude.Index;
import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.IndexModel;
import com.yahoo.prelude.SearchDefinition;
-import com.yahoo.prelude.searcher.JuniperSearcher;
-import com.yahoo.search.result.Hit;
-import com.yahoo.search.result.Relevance;
-import com.yahoo.search.searchchain.Execution;
-import com.yahoo.search.searchchain.testutil.DocumentSourceSearcher;
-import org.junit.Test;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import com.yahoo.component.chain.Chain;
import com.yahoo.prelude.fastsearch.FastHit;
+import com.yahoo.prelude.searcher.JuniperSearcher;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
import com.yahoo.search.Searcher;
import com.yahoo.search.result.Coverage;
import com.yahoo.search.result.ErrorMessage;
+import com.yahoo.search.result.Hit;
import com.yahoo.search.result.HitGroup;
+import com.yahoo.search.result.Relevance;
+import com.yahoo.search.searchchain.Execution;
+import com.yahoo.search.searchchain.testutil.DocumentSourceSearcher;
import com.yahoo.search.statistics.ElapsedTimeTestCase;
-import com.yahoo.search.statistics.TimeTracker;
import com.yahoo.search.statistics.ElapsedTimeTestCase.CreativeTimeSource;
+import com.yahoo.search.statistics.TimeTracker;
import com.yahoo.text.Utf8;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
/**
* Test the XML renderer
@@ -158,6 +155,7 @@ public class XMLRendererTestCase {
assertTrue(summary.contains("<meta type=\"context\">"));
}
+ @SuppressWarnings("removal")
private String render(Result result) throws Exception {
XmlRenderer renderer = new XmlRenderer();
renderer.init();
diff --git a/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java b/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java
index 9c36971f688..2426b18f018 100644
--- a/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java
@@ -2,7 +2,8 @@
package com.yahoo.search.searchchain.test;
import com.yahoo.component.ComponentId;
-import com.yahoo.processing.response.*;
+import com.yahoo.component.chain.Chain;
+import com.yahoo.processing.response.IncomingData;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
import com.yahoo.search.Searcher;
@@ -11,18 +12,18 @@ import com.yahoo.search.federation.sourceref.SearchChainResolver;
import com.yahoo.search.result.Hit;
import com.yahoo.search.result.HitGroup;
import com.yahoo.search.searchchain.Execution;
-
import com.yahoo.search.searchchain.SearchChainRegistry;
import com.yahoo.search.searchchain.model.federation.FederationOptions;
import org.junit.Test;
-import static org.junit.Assert.*;
import java.util.Collections;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
-import com.yahoo.component.chain.Chain;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
/**
* Tests using the async capabilities of the Processing parent framework of searchers.
@@ -31,6 +32,7 @@ import com.yahoo.component.chain.Chain;
*/
public class FutureDataTestCase {
+ @SuppressWarnings("removal")
@Test
public void testAsyncFederation() throws InterruptedException, ExecutionException {
// Setup environment
@@ -77,6 +79,7 @@ public class FutureDataTestCase {
assertEquals("async:1", asyncGroup.get(1).getId().toString());
}
+ @SuppressWarnings("removal")
@Test
public void testFutureData() throws InterruptedException, ExecutionException, TimeoutException {
// Set up
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index c58bb0e5fab..943d6ac7b18 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -74,7 +74,6 @@ public class RoutingController {
private final RoutingPolicies routingPolicies;
private final RotationRepository rotationRepository;
private final BooleanFlag hideSharedRoutingEndpoint;
- private final BooleanFlag changeRoutingStatusOfAllUpstreams;
public RoutingController(Controller controller, RotationsConfig rotationsConfig) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
@@ -83,7 +82,6 @@ public class RoutingController {
controller.applications(),
controller.curator());
this.hideSharedRoutingEndpoint = Flags.HIDE_SHARED_ROUTING_ENDPOINT.bindTo(controller.flagSource());
- this.changeRoutingStatusOfAllUpstreams = Flags.CHANGE_ROUTING_STATUS_OF_ALL_UPSTREAMS.bindTo(controller.flagSource());
}
/** Create a routing context for given deployment */
@@ -92,8 +90,7 @@ public class RoutingController {
return new SharedDeploymentRoutingContext(deployment,
this,
controller.serviceRegistry().configServer(),
- controller.clock(),
- changeRoutingStatusOfAllUpstreams.value());
+ controller.clock());
}
return new ExclusiveDeploymentRoutingContext(deployment, this);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
index 3124d836e54..e5eb1382ccf 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
@@ -72,21 +72,15 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
private final Clock clock;
private final ConfigServer configServer;
- private final boolean changeAllUpstreams;
- public SharedDeploymentRoutingContext(DeploymentId deployment, RoutingController controller, ConfigServer configServer, Clock clock, boolean changeAllUpstreams) {
+ public SharedDeploymentRoutingContext(DeploymentId deployment, RoutingController controller, ConfigServer configServer, Clock clock) {
super(deployment, RoutingMethod.shared, controller);
this.clock = Objects.requireNonNull(clock);
this.configServer = Objects.requireNonNull(configServer);
- this.changeAllUpstreams = changeAllUpstreams;
}
@Override
public void setRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) {
- if (!changeAllUpstreams) {
- setLegacyRoutingStatus(value, agent);
- return;
- }
EndpointStatus newStatus = new EndpointStatus(value == RoutingStatus.Value.in
? EndpointStatus.Status.in
: EndpointStatus.Status.out,
@@ -101,10 +95,6 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
@Override
public RoutingStatus routingStatus() {
- if (!changeAllUpstreams) {
- return legacyRoutingStatus();
- }
-
// In a given deployment, all upstreams (clusters) share the same status, so we can query using any
// upstream name
String upstreamName = upstreamNames().get(0);
@@ -136,40 +126,6 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
return upstreamNames;
}
- private void setLegacyRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) {
- EndpointStatus newStatus = new EndpointStatus(value == RoutingStatus.Value.in
- ? EndpointStatus.Status.in
- : EndpointStatus.Status.out,
- agent.name(),
- clock.instant());
- primaryEndpoint().ifPresent(endpoint -> {
- try {
- configServer.setGlobalRotationStatus(deployment, List.of(endpoint.upstreamName(deployment)), newStatus);
- } catch (Exception e) {
- throw new RuntimeException("Failed to set rotation status of " + endpoint + " in " + deployment, e);
- }
- });
- }
-
- private RoutingStatus legacyRoutingStatus() {
- Optional<EndpointStatus> status = primaryEndpoint().map(endpoint -> {
- var upstreamName = endpoint.upstreamName(deployment);
- return configServer.getGlobalRotationStatus(deployment, upstreamName);
- });
- if (status.isEmpty()) return RoutingStatus.DEFAULT;
- RoutingStatus.Agent agent;
- try {
- agent = RoutingStatus.Agent.valueOf(status.get().agent().toLowerCase());
- } catch (IllegalArgumentException e) {
- agent = RoutingStatus.Agent.unknown;
- }
- return new RoutingStatus(status.get().status() == EndpointStatus.Status.in
- ? RoutingStatus.Value.in
- : RoutingStatus.Value.out,
- agent,
- status.get().changedAt());
- }
-
private Optional<Endpoint> primaryEndpoint() {
return controller.readDeclaredEndpointsOf(deployment.applicationId())
.requiresRotation()
diff --git a/default_build_settings.cmake b/default_build_settings.cmake
index b0dfed2bfd5..599aca098ec 100644
--- a/default_build_settings.cmake
+++ b/default_build_settings.cmake
@@ -32,16 +32,22 @@ function(setup_vespa_default_build_settings_centos_8)
message("-- Setting up default build settings for centos 8")
set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE)
if (VESPA_OS_DISTRO_NAME STREQUAL "CentOS Stream")
- set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE)
+ set(DEFAULT_VESPA_LLVM_VERSION "13" PARENT_SCOPE)
else()
set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE)
endif()
endfunction()
-function(setup_vespa_default_build_settings_rocky_8_4)
- message("-- Setting up default build settings for rocky 8.4")
+function(setup_vespa_default_build_settings_rocky_8_5)
+ message("-- Setting up default build settings for rocky 8.5")
set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE)
- set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE)
+ set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE)
+endfunction()
+
+function(setup_vespa_default_build_settings_almalinux_8_5)
+ message("-- Setting up default build settings for almalinux 8.5")
+ set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" PARENT_SCOPE)
+ set(DEFAULT_VESPA_LLVM_VERSION "12" PARENT_SCOPE)
endfunction()
function(setup_vespa_default_build_settings_darwin)
@@ -192,8 +198,10 @@ function(vespa_use_default_build_settings)
setup_vespa_default_build_settings_centos_7()
elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "centos 8")
setup_vespa_default_build_settings_centos_8()
- elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "rocky 8.4")
- setup_vespa_default_build_settings_rocky_8_4()
+ elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "rocky 8.5")
+ setup_vespa_default_build_settings_rocky_8_5()
+ elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "almalinux 8.5")
+ setup_vespa_default_build_settings_almalinux_8_5()
elseif(VESPA_OS_DISTRO STREQUAL "darwin")
setup_vespa_default_build_settings_darwin()
elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "fedora 32")
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 3c96c6b0ce1..f18c802d5fc 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -62,10 +62,18 @@ BuildRequires: vespa-pybind11-devel
BuildRequires: python3-devel
%endif
%if 0%{?el8}
+%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0)
+%if 0%{?_centos_stream}
+BuildRequires: gcc-toolset-11-gcc-c++
+BuildRequires: gcc-toolset-11-binutils
+BuildRequires: gcc-toolset-11-libatomic-devel
+%define _devtoolset_enable /opt/rh/gcc-toolset-11/enable
+%else
BuildRequires: gcc-toolset-10-gcc-c++
BuildRequires: gcc-toolset-10-binutils
BuildRequires: gcc-toolset-10-libatomic-devel
%define _devtoolset_enable /opt/rh/gcc-toolset-10/enable
+%endif
BuildRequires: maven
BuildRequires: pybind11-devel
BuildRequires: python3-pytest
@@ -102,9 +110,8 @@ BuildRequires: cmake >= 3.11.4-3
BuildRequires: libarchive
%endif
%define _command_cmake cmake
-%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0)
%if 0%{?_centos_stream}
-BuildRequires: (llvm-devel >= 12.0.0 and llvm-devel < 13)
+BuildRequires: (llvm-devel >= 13.0.0 and llvm-devel < 14)
%else
BuildRequires: (llvm-devel >= 12.0.0 and llvm-devel < 13)
%endif
@@ -255,7 +262,7 @@ Requires: vespa-gtest = 1.11.0
%if 0%{?el8}
%if 0%{?centos} || 0%{?rocky}
%if 0%{?_centos_stream}
-%define _vespa_llvm_version 12
+%define _vespa_llvm_version 13
%else
%define _vespa_llvm_version 12
%endif
@@ -379,7 +386,7 @@ Requires: openssl-libs
%if 0%{?el8}
%if 0%{?centos} || 0%{?rocky}
%if 0%{?_centos_stream}
-Requires: (llvm-libs >= 12.0.0 and llvm-libs < 13)
+Requires: (llvm-libs >= 13.0.0 and llvm-libs < 14)
%else
Requires: (llvm-libs >= 12.0.0 and llvm-libs < 13)
%endif
diff --git a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
index 1f9e494aa29..925864b56c9 100644
--- a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
+++ b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
@@ -11,15 +11,19 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
+import java.util.HashSet;
+import java.util.Set;
import java.util.logging.Logger;
import java.util.stream.Collectors;
+import java.util.function.Supplier;
+import com.yahoo.tensor.TensorType;
/**
* Configures the Vespa document manager from a config id.
*
* @author Einar M R Rosenvinge
*/
-public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSubscriber<DocumentmanagerConfig>{
+public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSubscriber<DocumentmanagerConfig> {
private final static Logger log = Logger.getLogger(DocumentTypeManagerConfigurer.class.getName());
@@ -65,20 +69,22 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub
return;
}
new Apply(config, manager);
+ if (config.datatype().size() == 0 && config.annotationtype().size() == 0) {
+ new ApplyNewDoctypeConfig(config, manager);
+ }
}
private static class Apply {
+
public Apply(DocumentmanagerConfig config, DocumentTypeManager manager) {
this.manager = manager;
- this.usev8geopositions = (config == null) ? false : config.usev8geopositions();
- if (config != null) {
- apply(config);
- }
+ this.usev8geopositions = config.usev8geopositions();
+ apply(config);
}
- private Map<Integer, DataType> typesById = new HashMap<>();
- private Map<String, DataType> typesByName = new HashMap<>();
- private Map<Integer, DocumentmanagerConfig.Datatype> configMap = new HashMap<>();
+ private final Map<Integer, DataType> typesById = new HashMap<>();
+ private final Map<String, DataType> typesByName = new HashMap<>();
+ private final Map<Integer, DocumentmanagerConfig.Datatype> configMap = new HashMap<>();
private void inProgress(DataType type) {
var old = typesById.put(type.getId(), type);
@@ -109,15 +115,12 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub
.collect(Collectors.toUnmodifiableSet());
DocumentType type = new DocumentType(doc.name(), header, importedFields);
if (id != type.getId()) {
+ typesById.put(id, type);
// really old stuff, should rewrite tests using this:
int alt = (doc.name()+"."+doc.version()).hashCode();
- if (id == alt) {
- typesById.put(id, type);
- } else {
- throw new IllegalArgumentException("Document type "+doc.name()+
- " wanted id "+id+" but got "+
- type.getId()+", alternative id was: "+alt);
- }
+ log.warning("Document type "+doc.name()+
+ " wanted id "+id+" but got "+
+ type.getId()+", alternative id was: "+alt);
}
inProgress(type);
configMap.remove(id);
@@ -314,9 +317,256 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub
private final DocumentTypeManager manager;
}
+
+ private static class ApplyNewDoctypeConfig {
+
+ public ApplyNewDoctypeConfig(DocumentmanagerConfig config, DocumentTypeManager manager) {
+ this.manager = manager;
+ this.usev8geopositions = config.usev8geopositions();
+ apply(config);
+ }
+
+ Map<Integer, DataType> typesByIdx = new HashMap<>();
+
+ DataType addNewType(int id, DataType type) {
+ if (type == null) {
+ throw new IllegalArgumentException("Type to add for idx "+id+" cannot be null");
+ }
+ var old = typesByIdx.put(id, type);
+ if (old != null) {
+ throw new IllegalArgumentException("Type "+type+" for idx "+id+" conflict: "+old+" present");
+ }
+ return type;
+ }
+
+ Map<Integer, Supplier<DataType>> factoryByIdx = new HashMap<>();
+
+ ArrayList<Integer> proxyRefs = new ArrayList<>();
+
+ private DataType getOrCreateType(int id) {
+ if (typesByIdx.containsKey(id)) {
+ return typesByIdx.get(id);
+ }
+ var factory = factoryByIdx.remove(id);
+ if (factory != null) {
+ DataType type = factory.get();
+ return addNewType(id, type);
+ }
+ throw new IllegalArgumentException("No type or factory found for idx: "+id);
+ }
+
+ void createComplexTypes() {
+ var toCreate = new ArrayList<>(factoryByIdx.keySet());
+ for (var dataTypeId : toCreate) {
+ var type = getOrCreateType(dataTypeId);
+ assert(type != null);
+ }
+ }
+
+ class PerDocTypeData {
+
+ DocumentmanagerConfig.Doctype docTypeConfig;
+
+ DocumentType docType = null;
+
+ PerDocTypeData(DocumentmanagerConfig.Doctype config) {
+ this.docTypeConfig = config;
+ }
+
+ void createSimpleTypes() {
+ for (var typeconf : docTypeConfig.primitivetype()) {
+ DataType type = manager.getDataType(typeconf.name());
+ if (! (type instanceof PrimitiveDataType)) {
+ throw new IllegalArgumentException("Needed primitive type for idx "+typeconf.idx()+" but got: "+type);
+ }
+ addNewType(typeconf.idx(), type);
+ }
+ for (var typeconf : docTypeConfig.tensortype()) {
+ var type = new TensorDataType(TensorType.fromSpec(typeconf.detailedtype()));
+ addNewType(typeconf.idx(), type);
+ }
+ }
+
+ void createFactories() {
+ for (var typeconf : docTypeConfig.arraytype()) {
+ factoryByIdx.put(typeconf.idx(), () -> new ArrayDataType(getOrCreateType(typeconf.elementtype())));
+ }
+ for (var typeconf : docTypeConfig.maptype()) {
+ factoryByIdx.put(typeconf.idx(), () -> new MapDataType(getOrCreateType(typeconf.keytype()),
+ getOrCreateType(typeconf.valuetype())));
+ }
+ for (var typeconf : docTypeConfig.wsettype()) {
+ factoryByIdx.put(typeconf.idx(), () -> new WeightedSetDataType(getOrCreateType(typeconf.elementtype()),
+ typeconf.createifnonexistent(),
+ typeconf.removeifzero()));
+ }
+ for (var typeconf : docTypeConfig.documentref()) {
+ factoryByIdx.put(typeconf.idx(), () -> ReferenceDataType.createWithInferredId(inProgressById.get(typeconf.targettype()).docType));
+ }
+ for (var typeconf : docTypeConfig.annotationref()) {
+ factoryByIdx.put(typeconf.idx(), () -> new AnnotationReferenceDataType
+ (annTypeFromIdx(typeconf.annotationtype())));
+ }
+ }
+
+ void createEmptyStructs() {
+ String docName = docTypeConfig.name();
+ for (var typeconf : docTypeConfig.structtype()) {
+ addNewType(typeconf.idx(), new StructDataType(typeconf.name()));
+ }
+ }
+
+ void initializeDocType() {
+ Set<String> importedFields = new HashSet<>();
+ for (var imported : docTypeConfig.importedfield()) {
+ importedFields.add(imported.name());
+ }
+ int contentIdx = docTypeConfig.contentstruct();
+ DataType contentStruct = typesByIdx.get(contentIdx);
+ if (! (contentStruct instanceof StructDataType)) {
+ throw new IllegalArgumentException("Content struct for document type "+docTypeConfig.name()+
+ " should be a struct, but was: "+contentStruct);
+ }
+ if (docTypeConfig.name().equals(DataType.DOCUMENT.getName())) {
+ this.docType = DataType.DOCUMENT;
+ } else {
+ this.docType = new DocumentType(docTypeConfig.name(), (StructDataType)contentStruct, importedFields);
+ }
+ addNewType(docTypeConfig.idx(), docType);
+ }
+
+ void createEmptyAnnotationTypes() {
+ for (var typeconf : docTypeConfig.annotationtype()) {
+ AnnotationType annType = manager.getAnnotationTypeRegistry().getType(typeconf.name());
+ if (typeconf.internalid() != -1) {
+ if (annType == null) {
+ annType = new AnnotationType(typeconf.name(), typeconf.internalid());
+ } else {
+ if (annType.getId() != typeconf.internalid()) {
+ throw new IllegalArgumentException("Wrong internalid for annotation type "+annType+
+ " (wanted "+typeconf.internalid()+", got "+annType.getId()+")");
+ }
+ }
+ } else if (annType == null) {
+ annType = new AnnotationType(typeconf.name());
+ }
+ manager.getAnnotationTypeRegistry().register(annType);
+ // because AnnotationType is not a DataType, make a proxy
+ var proxy = new AnnotationReferenceDataType(annType);
+ proxyRefs.add(typeconf.idx());
+ addNewType(typeconf.idx(), proxy);
+ }
+ }
+
+ AnnotationType annTypeFromIdx(int idx) {
+ var proxy = (AnnotationReferenceDataType) typesByIdx.get(idx);
+ if (proxy == null) {
+ throw new IllegalArgumentException("Needed AnnotationType for idx "+idx+", found: "+typesByIdx.get(idx));
+ }
+ return proxy.getAnnotationType();
+ }
+
+ void fillAnnotationTypes() {
+ for (var typeConf : docTypeConfig.annotationtype()) {
+ var annType = annTypeFromIdx(typeConf.idx());
+ int pIdx = typeConf.datatype();
+ if (pIdx != -1) {
+ DataType payload = getOrCreateType(pIdx);
+ annType.setDataType(payload);
+ }
+ for (var inherit : typeConf.inherits()) {
+ var inheritedType = annTypeFromIdx(inherit.idx());
+ if (! annType.inherits(inheritedType)) {
+ annType.inherit(inheritedType);
+ }
+ }
+ }
+ }
+ void fillStructs() {
+ for (var structCfg : docTypeConfig.structtype()) {
+ int idx = structCfg.idx();
+ StructDataType type = (StructDataType) typesByIdx.get(idx);
+ for (var parent : structCfg.inherits()) {
+ var parentStruct = (StructDataType) typesByIdx.get(parent.type());
+ type.inherit(parentStruct);
+ }
+ for (var fieldCfg : structCfg.field()) {
+ if (fieldCfg.type() == idx) {
+ log.fine("Self-referencing struct "+structCfg.name()+" field: "+fieldCfg);
+ }
+ DataType fieldType = getOrCreateType(fieldCfg.type());
+ type.addField(new Field(fieldCfg.name(), fieldCfg.internalid(), fieldType));
+ }
+ }
+ }
+ void fillDocument() {
+ for (var inherit : docTypeConfig.inherits()) {
+ var data = inProgressById.get(inherit.idx());
+ if (data == null) {
+ throw new IllegalArgumentException("Missing doctype for inherit idx: "+inherit.idx());
+ } else {
+ docType.inherit(data.docType);
+ }
+ }
+ Map<String, Collection<String>> fieldSets = new HashMap<>();
+ for (var entry : docTypeConfig.fieldsets().entrySet()) {
+ fieldSets.put(entry.getKey(), entry.getValue().fields());
+ }
+ Set<String> importedFields = new HashSet<>();
+ for (var imported : docTypeConfig.importedfield()) {
+ importedFields.add(imported.name());
+ }
+ docType.addFieldSets(fieldSets);
+ }
+ }
+
+ private final Map<String, PerDocTypeData> inProgressByName = new HashMap<>();
+ private final Map<Integer, PerDocTypeData> inProgressById = new HashMap<>();
+
+ private void apply(DocumentmanagerConfig config) {
+ for (var docType : config.doctype()) {
+ int idx = docType.idx();
+ String name = docType.name();
+ var data = new PerDocTypeData(docType);
+ var old = inProgressById.put(idx, data);
+ if (old != null) {
+ throw new IllegalArgumentException("Multiple document types with id: "+idx);
+ }
+ old = inProgressByName.put(name, data);
+ if (old != null) {
+ throw new IllegalArgumentException("Multiple document types with name: "+name);
+ }
+ }
+ for (var docType : config.doctype()) {
+ var docTypeData = inProgressById.get(docType.idx());
+ docTypeData.createEmptyStructs();
+ docTypeData.initializeDocType();
+ docTypeData.createEmptyAnnotationTypes();
+ docTypeData.createFactories();
+ docTypeData.createSimpleTypes();
+ }
+ createComplexTypes();
+ for (var docType : config.doctype()) {
+ var docTypeData = inProgressById.get(docType.idx());
+ docTypeData.fillStructs();
+ docTypeData.fillDocument();
+ docTypeData.fillAnnotationTypes();
+ }
+ for (int idx : proxyRefs) {
+ typesByIdx.remove(idx);
+ }
+ for (DataType type : typesByIdx.values()) {
+ manager.register(type);
+ }
+ }
+
+ private final boolean usev8geopositions;
+ private final DocumentTypeManager manager;
+ }
+
public static DocumentTypeManager configureNewManager(DocumentmanagerConfig config) {
DocumentTypeManager manager = new DocumentTypeManager();
- new Apply(config, manager);
+ configureNewManager(config, manager);
return manager;
}
diff --git a/document/src/main/java/com/yahoo/document/annotation/Annotation.java b/document/src/main/java/com/yahoo/document/annotation/Annotation.java
index a5f70c2b9e3..2ee2d0baaa7 100644
--- a/document/src/main/java/com/yahoo/document/annotation/Annotation.java
+++ b/document/src/main/java/com/yahoo/document/annotation/Annotation.java
@@ -223,6 +223,7 @@ public class Annotation implements Comparable<Annotation> {
public String toString() {
String retval = "annotation of type " + type;
retval += ((value == null) ? " (no value)" : " (with value)");
+ retval += ((spanNode == null) ? " (no span)" : (" with span "+spanNode));
return retval;
}
diff --git a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java
index 58cc3c22199..9115a000e20 100644
--- a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java
+++ b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer6.java
@@ -714,6 +714,7 @@ public class VespaDocumentDeserializer6 extends BufferSerializer implements Docu
byte features = buf.get();
int length = buf.getInt1_2_4Bytes();
+ int skipToPos = buf.position() + length;
if ((features & (byte) 1) == (byte) 1) {
//we have a span node
@@ -728,15 +729,19 @@ public class VespaDocumentDeserializer6 extends BufferSerializer implements Docu
if ((features & (byte) 2) == (byte) 2) {
//we have a value:
int dataTypeId = buf.getInt();
-
- //if this data type ID the same as the one in our config?
- if (dataTypeId != type.getDataType().getId()) {
- //not the same, but we will handle it gracefully, and just skip past the data:
- buf.position(buf.position() + length - 4);
- } else {
+ try {
FieldValue value = type.getDataType().createFieldValue();
value.deserialize(this);
annotation.setFieldValue(value);
+ // could get buffer underflow or DeserializationException
+ } catch (RuntimeException rte) {
+ if (dataTypeId == type.getDataType().getId()) {
+ throw new DeserializationException("Could not deserialize annotation payload", rte);
+ }
+ // XXX: does this make sense? The annotation without its payload may be a problem.
+ // handle it gracefully, and just skip past the data
+ } finally {
+ buf.position(skipToPos);
}
}
}
diff --git a/document/src/test/document/documentmanager.cfg b/document/src/test/document/documentmanager.cfg
index e4c581304ce..6ceda63e606 100644
--- a/document/src/test/document/documentmanager.cfg
+++ b/document/src/test/document/documentmanager.cfg
@@ -1,105 +1,96 @@
-datatype[11]
-datatype[0].id -1365874599
-datatype[0].arraytype[0]
-datatype[0].weightedsettype[0]
-datatype[0].structtype[1]
-datatype[0].structtype[0].name foobar.header
-datatype[0].structtype[0].version 9
-datatype[0].structtype[0].field[2]
-datatype[0].structtype[0].field[0].name foobarfield1
-datatype[0].structtype[0].field[0].id[0]
-datatype[0].structtype[0].field[0].datatype 4
-datatype[0].structtype[0].field[1].name foobarfield0
-datatype[0].structtype[0].field[1].id[0]
-datatype[0].structtype[0].field[1].datatype 2
-datatype[0].documenttype[0]
-datatype[1].id 278604398
-datatype[1].arraytype[0]
-datatype[1].weightedsettype[0]
-datatype[1].structtype[1]
-datatype[1].structtype[0].name foobar.body
-datatype[1].structtype[0].version 9
-datatype[1].documenttype[0]
-datatype[2].id 378030104
-datatype[2].arraytype[0]
-datatype[2].weightedsettype[0]
-datatype[2].structtype[0]
-datatype[2].documenttype[1]
-datatype[2].documenttype[0].name foobar
-datatype[2].documenttype[0].version 9
-datatype[2].documenttype[0].inherits[0]
-datatype[2].documenttype[0].headerstruct -1365874599
-datatype[2].documenttype[0].bodystruct 278604398
-datatype[3].id 673066331
-datatype[3].arraytype[0]
-datatype[3].weightedsettype[0]
-datatype[3].structtype[1]
-datatype[3].structtype[0].name banana.header
-datatype[3].structtype[0].version 234
-datatype[3].structtype[0].field[1]
-datatype[3].structtype[0].field[0].name bananafield0
-datatype[3].structtype[0].field[0].id[0]
-datatype[3].structtype[0].field[0].datatype 16
-datatype[3].documenttype[0]
-datatype[4].id -176986064
-datatype[4].arraytype[0]
-datatype[4].weightedsettype[0]
-datatype[4].structtype[1]
-datatype[4].structtype[0].name banana.body
-datatype[4].structtype[0].version 234
-datatype[4].documenttype[0]
-datatype[5].id 556449802
-datatype[5].arraytype[0]
-datatype[5].weightedsettype[0]
-datatype[5].structtype[0]
-datatype[5].documenttype[1]
-datatype[5].documenttype[0].name banana
-datatype[5].documenttype[0].version 234
-datatype[5].documenttype[0].inherits[1]
-datatype[5].documenttype[0].inherits[0].name foobar
-datatype[5].documenttype[0].inherits[0].version 9
-datatype[5].documenttype[0].headerstruct 673066331
-datatype[5].documenttype[0].bodystruct -176986064
-datatype[6].id -858669928
-datatype[6].arraytype[0]
-datatype[6].weightedsettype[0]
-datatype[6].structtype[1]
-datatype[6].structtype[0].name customtypes.header
-datatype[6].structtype[0].version 3
-datatype[6].structtype[0].field[2]
-datatype[6].structtype[0].field[0].name arrayfloat
-datatype[6].structtype[0].field[0].id[0]
-datatype[6].structtype[0].field[0].datatype 99
-datatype[6].structtype[0].field[1].name arrayarrayfloat
-datatype[6].structtype[0].field[1].id[0]
-datatype[6].structtype[0].field[1].datatype 4003
-datatype[6].documenttype[0]
-datatype[7].id 99
-datatype[7].arraytype[1]
-datatype[7].arraytype[0].datatype 1
-datatype[7].weightedsettype[0]
-datatype[7].structtype[0]
-datatype[7].documenttype[0]
-datatype[8].id 4003
-datatype[8].arraytype[1]
-datatype[8].arraytype[0].datatype 99
-datatype[8].weightedsettype[0]
-datatype[8].structtype[0]
-datatype[8].documenttype[0]
-datatype[9].id 2142817261
-datatype[9].arraytype[0]
-datatype[9].weightedsettype[0]
-datatype[9].structtype[1]
-datatype[9].structtype[0].name customtypes.body
-datatype[9].structtype[0].version 3
-datatype[9].documenttype[0]
-datatype[10].id -1500313747
-datatype[10].arraytype[0]
-datatype[10].weightedsettype[0]
-datatype[10].structtype[0]
-datatype[10].documenttype[1]
-datatype[10].documenttype[0].name customtypes
-datatype[10].documenttype[0].version 3
-datatype[10].documenttype[0].inherits[0]
-datatype[10].documenttype[0].headerstruct -858669928
-datatype[10].documenttype[0].bodystruct 2142817261
+doctype[4]
+doctype[0].name "document"
+doctype[0].idx 1000
+doctype[0].contentstruct 1001
+doctype[0].primitivetype[0].idx 1002
+doctype[0].primitivetype[0].name "int"
+doctype[0].primitivetype[1].idx 1003
+doctype[0].primitivetype[1].name "double"
+doctype[0].primitivetype[2].idx 1004
+doctype[0].primitivetype[2].name "string"
+doctype[0].annotationtype[0].idx 1005
+doctype[0].annotationtype[0].name "proximity_break"
+doctype[0].annotationtype[0].internalid 8
+doctype[0].annotationtype[0].datatype 1003
+doctype[0].annotationtype[1].idx 1006
+doctype[0].annotationtype[1].name "normalized"
+doctype[0].annotationtype[1].internalid 4
+doctype[0].annotationtype[1].datatype 1004
+doctype[0].annotationtype[2].idx 1007
+doctype[0].annotationtype[2].name "reading"
+doctype[0].annotationtype[2].internalid 5
+doctype[0].annotationtype[2].datatype 1004
+doctype[0].annotationtype[3].idx 1008
+doctype[0].annotationtype[3].name "term"
+doctype[0].annotationtype[3].internalid 1
+doctype[0].annotationtype[3].datatype 1004
+doctype[0].annotationtype[4].idx 1009
+doctype[0].annotationtype[4].name "transformed"
+doctype[0].annotationtype[4].internalid 7
+doctype[0].annotationtype[4].datatype 1004
+doctype[0].annotationtype[5].idx 1010
+doctype[0].annotationtype[5].name "canonical"
+doctype[0].annotationtype[5].internalid 3
+doctype[0].annotationtype[5].datatype 1004
+doctype[0].annotationtype[6].idx 1011
+doctype[0].annotationtype[6].name "token_type"
+doctype[0].annotationtype[6].internalid 2
+doctype[0].annotationtype[6].datatype 1002
+doctype[0].annotationtype[7].idx 1012
+doctype[0].annotationtype[7].name "special_token"
+doctype[0].annotationtype[7].internalid 9
+doctype[0].annotationtype[8].idx 1013
+doctype[0].annotationtype[8].name "stem"
+doctype[0].annotationtype[8].internalid 6
+doctype[0].annotationtype[8].datatype 1004
+doctype[0].structtype[0].idx 1001
+doctype[0].structtype[0].name document.header
+doctype[1].name "foobar"
+doctype[1].idx 1014
+doctype[1].inherits[0].idx 1000
+doctype[1].contentstruct 1015
+doctype[1].primitivetype[0].idx 1016
+doctype[1].primitivetype[0].name "long"
+doctype[1].structtype[0].idx 1015
+doctype[1].structtype[0].name foobar.header
+doctype[1].structtype[0].field[0].name "foobarfield1"
+doctype[1].structtype[0].field[0].internalid 1707020592
+doctype[1].structtype[0].field[0].type 1016
+doctype[1].structtype[0].field[1].name "foobarfield0"
+doctype[1].structtype[0].field[1].internalid 1055920092
+doctype[1].structtype[0].field[1].type 1004
+doctype[2].name "banana"
+doctype[2].idx 1017
+doctype[2].inherits[0].idx 1014
+doctype[2].contentstruct 1018
+doctype[2].primitivetype[0].idx 1019
+doctype[2].primitivetype[0].name "byte"
+doctype[2].structtype[0].idx 1018
+doctype[2].structtype[0].name banana.header
+doctype[2].structtype[0].field[0].name "foobarfield1"
+doctype[2].structtype[0].field[0].internalid 1707020592
+doctype[2].structtype[0].field[0].type 1016
+doctype[2].structtype[0].field[1].name "foobarfield0"
+doctype[2].structtype[0].field[1].internalid 1055920092
+doctype[2].structtype[0].field[1].type 1004
+doctype[2].structtype[0].field[2].name "bananafield0"
+doctype[2].structtype[0].field[2].internalid 1294599520
+doctype[2].structtype[0].field[2].type 1019
+doctype[3].name "customtypes"
+doctype[3].idx 1020
+doctype[3].inherits[0].idx 1000
+doctype[3].contentstruct 1021
+doctype[3].primitivetype[0].idx 1023
+doctype[3].primitivetype[0].name "float"
+doctype[3].arraytype[0].idx 1022
+doctype[3].arraytype[0].elementtype 1024
+doctype[3].arraytype[1].idx 1024
+doctype[3].arraytype[1].elementtype 1023
+doctype[3].structtype[0].idx 1021
+doctype[3].structtype[0].name customtypes.header
+doctype[3].structtype[0].field[0].name "arrayfloat"
+doctype[3].structtype[0].field[0].internalid 1493411963
+doctype[3].structtype[0].field[0].type 1024
+doctype[3].structtype[0].field[1].name "arrayarrayfloat"
+doctype[3].structtype[0].field[1].internalid 890649191
+doctype[3].structtype[0].field[1].type 1022
diff --git a/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java b/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java
index 4040f3455da..eb5249227be 100644
--- a/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java
+++ b/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java
@@ -190,7 +190,7 @@ public class DocumentTypeManagerTestCase {
Field arrayfloat = type.getField("arrayfloat");
ArrayDataType dataType = (ArrayDataType) arrayfloat.getDataType();
- assertTrue(dataType.getCode() == 99);
+ // assertTrue(dataType.getCode() == 99);
assertTrue(dataType.getValueClass().equals(Array.class));
assertTrue(dataType.getNestedType().getCode() == 1);
assertTrue(dataType.getNestedType().getValueClass().equals(FloatFieldValue.class));
@@ -198,9 +198,9 @@ public class DocumentTypeManagerTestCase {
Field arrayarrayfloat = type.getField("arrayarrayfloat");
ArrayDataType subType = (ArrayDataType) arrayarrayfloat.getDataType();
- assertTrue(subType.getCode() == 4003);
+ // assertTrue(subType.getCode() == 4003);
assertTrue(subType.getValueClass().equals(Array.class));
- assertTrue(subType.getNestedType().getCode() == 99);
+ // assertTrue(subType.getNestedType().getCode() == 99);
assertTrue(subType.getNestedType().getValueClass().equals(Array.class));
ArrayDataType subSubType = (ArrayDataType) subType.getNestedType();
assertTrue(subSubType.getNestedType().getCode() == 1);
@@ -215,7 +215,7 @@ public class DocumentTypeManagerTestCase {
DocumentType customtypes = manager.getDocumentType(new DataTypeName("customtypes"));
assertNull(banana.getField("newfield"));
- assertEquals(new Field("arrayfloat", 9489, new ArrayDataType(DataType.FLOAT, 99)), customtypes.getField("arrayfloat"));
+ assertEquals(new Field("arrayfloat", 9489, new ArrayDataType(DataType.FLOAT)), customtypes.getField("arrayfloat"));
var sub = DocumentTypeManagerConfigurer.configure(manager, "file:src/test/document/documentmanager.updated.cfg");
sub.close();
diff --git a/document/src/test/vespaxmlparser/alltypes.cfg b/document/src/test/vespaxmlparser/alltypes.cfg
deleted file mode 100644
index 5d89611d24b..00000000000
--- a/document/src/test/vespaxmlparser/alltypes.cfg
+++ /dev/null
@@ -1,101 +0,0 @@
-datatype[5]
-datatype[0].id -240642363
-datatype[0].arraytype[0]
-datatype[0].weightedsettype[0]
-datatype[0].structtype[1]
-datatype[0].structtype[0].name alltypes.header
-datatype[0].structtype[0].version 0
-datatype[0].structtype[0].field[0]
-datatype[0].documenttype[0]
-datatype[1].id 1000002
-datatype[1].arraytype[1]
-datatype[1].arraytype[0].datatype 2
-datatype[1].weightedsettype[0]
-datatype[1].structtype[0]
-datatype[1].documenttype[0]
-datatype[2].id 2000001
-datatype[2].arraytype[0]
-datatype[2].weightedsettype[1]
-datatype[2].weightedsettype[0].datatype 2
-datatype[2].weightedsettype[0].createifnonexistant false
-datatype[2].weightedsettype[0].removeifzero false
-datatype[2].structtype[0]
-datatype[2].documenttype[0]
-datatype[3].id 163574298
-datatype[3].arraytype[0]
-datatype[3].weightedsettype[0]
-datatype[3].structtype[1]
-datatype[3].structtype[0].name alltypes.body
-datatype[3].structtype[0].version 0
-datatype[3].structtype[0].field[20]
-datatype[3].structtype[0].field[0].name stringval
-datatype[3].structtype[0].field[0].id[0]
-datatype[3].structtype[0].field[0].datatype 2
-datatype[3].structtype[0].field[1].name intval1
-datatype[3].structtype[0].field[1].id[0]
-datatype[3].structtype[0].field[1].datatype 0
-datatype[3].structtype[0].field[2].name intval2
-datatype[3].structtype[0].field[2].id[0]
-datatype[3].structtype[0].field[2].datatype 0
-datatype[3].structtype[0].field[3].name intval3
-datatype[3].structtype[0].field[3].id[0]
-datatype[3].structtype[0].field[3].datatype 0
-datatype[3].structtype[0].field[4].name longval1
-datatype[3].structtype[0].field[4].id[0]
-datatype[3].structtype[0].field[4].datatype 4
-datatype[3].structtype[0].field[5].name longval2
-datatype[3].structtype[0].field[5].id[0]
-datatype[3].structtype[0].field[5].datatype 4
-datatype[3].structtype[0].field[6].name longval3
-datatype[3].structtype[0].field[6].id[0]
-datatype[3].structtype[0].field[6].datatype 4
-datatype[3].structtype[0].field[7].name byteval1
-datatype[3].structtype[0].field[7].id[0]
-datatype[3].structtype[0].field[7].datatype 16
-datatype[3].structtype[0].field[8].name byteval2
-datatype[3].structtype[0].field[8].id[0]
-datatype[3].structtype[0].field[8].datatype 16
-datatype[3].structtype[0].field[9].name byteval3
-datatype[3].structtype[0].field[9].id[0]
-datatype[3].structtype[0].field[9].datatype 16
-datatype[3].structtype[0].field[10].name floatval
-datatype[3].structtype[0].field[10].id[0]
-datatype[3].structtype[0].field[10].datatype 1
-datatype[3].structtype[0].field[11].name doubleval
-datatype[3].structtype[0].field[11].id[0]
-datatype[3].structtype[0].field[11].datatype 5
-datatype[3].structtype[0].field[12].name rawval1
-datatype[3].structtype[0].field[12].id[0]
-datatype[3].structtype[0].field[12].datatype 3
-datatype[3].structtype[0].field[13].name rawval2
-datatype[3].structtype[0].field[13].id[0]
-datatype[3].structtype[0].field[13].datatype 3
-datatype[3].structtype[0].field[14].name urival
-datatype[3].structtype[0].field[14].id[0]
-datatype[3].structtype[0].field[14].datatype 10
-datatype[3].structtype[0].field[15].name contentval1
-datatype[3].structtype[0].field[15].id[0]
-datatype[3].structtype[0].field[15].datatype 12
-datatype[3].structtype[0].field[16].name contentval2
-datatype[3].structtype[0].field[16].id[0]
-datatype[3].structtype[0].field[16].datatype 12
-datatype[3].structtype[0].field[17].name arrayofstringval
-datatype[3].structtype[0].field[17].id[0]
-datatype[3].structtype[0].field[17].datatype 1000002
-datatype[3].structtype[0].field[18].name weightedsetofstringval
-datatype[3].structtype[0].field[18].id[0]
-datatype[3].structtype[0].field[18].datatype 2000001
-datatype[3].structtype[0].field[19].name tagval
-datatype[3].structtype[0].field[19].id[0]
-datatype[3].structtype[0].field[19].datatype 18
-datatype[3].documenttype[0]
-datatype[4].id -1126644934
-datatype[4].arraytype[0]
-datatype[4].weightedsettype[0]
-datatype[4].structtype[0]
-datatype[4].documenttype[1]
-datatype[4].documenttype[0].name alltypes
-datatype[4].documenttype[0].version 0
-datatype[4].documenttype[0].inherits[0]
-datatype[4].documenttype[0].headerstruct -240642363
-datatype[4].documenttype[0].bodystruct 163574298
diff --git a/document/src/test/vespaxmlparser/documentmanager.cfg b/document/src/test/vespaxmlparser/documentmanager.cfg
deleted file mode 100644
index 6662f5caab5..00000000000
--- a/document/src/test/vespaxmlparser/documentmanager.cfg
+++ /dev/null
@@ -1,109 +0,0 @@
-datatype[10]
-datatype[0].id 1002
-datatype[0].arraytype[1]
-datatype[0].arraytype[0].datatype 2
-datatype[0].weightedsettype[0]
-datatype[0].structtype[0]
-datatype[0].documenttype[0]
-datatype[1].id 1000
-datatype[1].arraytype[1]
-datatype[1].arraytype[0].datatype 0
-datatype[1].weightedsettype[0]
-datatype[1].structtype[0]
-datatype[1].documenttype[0]
-datatype[2].id 1004
-datatype[2].arraytype[1]
-datatype[2].arraytype[0].datatype 4
-datatype[2].weightedsettype[0]
-datatype[2].structtype[0]
-datatype[2].documenttype[0]
-datatype[3].id 1016
-datatype[3].arraytype[1]
-datatype[3].arraytype[0].datatype 16
-datatype[3].weightedsettype[0]
-datatype[3].structtype[0]
-datatype[3].documenttype[0]
-datatype[4].id 1001
-datatype[4].arraytype[1]
-datatype[4].arraytype[0].datatype 1
-datatype[4].weightedsettype[0]
-datatype[4].structtype[0]
-datatype[4].documenttype[0]
-datatype[5].id 2001
-datatype[5].arraytype[0]
-datatype[5].weightedsettype[1]
-datatype[5].weightedsettype[0].datatype 0
-datatype[5].weightedsettype[0].createifnonexistant false
-datatype[5].weightedsettype[0].removeifzero false
-datatype[5].structtype[0]
-datatype[5].documenttype[0]
-datatype[6].id 2002
-datatype[6].arraytype[0]
-datatype[6].weightedsettype[1]
-datatype[6].weightedsettype[0].datatype 2
-datatype[6].weightedsettype[0].createifnonexistant false
-datatype[6].weightedsettype[0].removeifzero false
-datatype[6].structtype[0]
-datatype[6].documenttype[0]
-datatype[7].id -628990518
-datatype[7].arraytype[0]
-datatype[7].weightedsettype[0]
-datatype[7].structtype[1]
-datatype[7].structtype[0].name news.header
-datatype[7].structtype[0].version 0
-datatype[7].structtype[0].field[12]
-datatype[7].structtype[0].field[0].name url
-datatype[7].structtype[0].field[0].id[0]
-datatype[7].structtype[0].field[0].datatype 10
-datatype[7].structtype[0].field[1].name title
-datatype[7].structtype[0].field[1].id[0]
-datatype[7].structtype[0].field[1].datatype 2
-datatype[7].structtype[0].field[2].name last_downloaded
-datatype[7].structtype[0].field[2].id[0]
-datatype[7].structtype[0].field[2].datatype 0
-datatype[7].structtype[0].field[3].name value_long
-datatype[7].structtype[0].field[3].id[0]
-datatype[7].structtype[0].field[3].datatype 4
-datatype[7].structtype[0].field[4].name value_content
-datatype[7].structtype[0].field[4].id[0]
-datatype[7].structtype[0].field[4].datatype 12
-datatype[7].structtype[0].field[5].name stringarr
-datatype[7].structtype[0].field[5].id[0]
-datatype[7].structtype[0].field[5].datatype 1002
-datatype[7].structtype[0].field[6].name intarr
-datatype[7].structtype[0].field[6].id[0]
-datatype[7].structtype[0].field[6].datatype 1000
-datatype[7].structtype[0].field[7].name longarr
-datatype[7].structtype[0].field[7].id[0]
-datatype[7].structtype[0].field[7].datatype 1004
-datatype[7].structtype[0].field[8].name bytearr
-datatype[7].structtype[0].field[8].id[0]
-datatype[7].structtype[0].field[8].datatype 1016
-datatype[7].structtype[0].field[9].name floatarr
-datatype[7].structtype[0].field[9].id[0]
-datatype[7].structtype[0].field[9].datatype 1001
-datatype[7].structtype[0].field[10].name weightedsetint
-datatype[7].structtype[0].field[10].id[0]
-datatype[7].structtype[0].field[10].datatype 2001
-datatype[7].structtype[0].field[11].name weightedsetstring
-datatype[7].structtype[0].field[11].id[0]
-datatype[7].structtype[0].field[11].datatype 2002
-datatype[7].documenttype[0]
-datatype[8].id 538588767
-datatype[8].arraytype[0]
-datatype[8].weightedsettype[0]
-datatype[8].structtype[1]
-datatype[8].structtype[0].name news.body
-datatype[8].structtype[0].version 0
-datatype[8].structtype[0].field[0]
-datatype[8].documenttype[0]
-datatype[9].id -1048827947
-datatype[9].arraytype[0]
-datatype[9].weightedsettype[0]
-datatype[9].structtype[0]
-datatype[9].documenttype[1]
-datatype[9].documenttype[0].name news
-datatype[9].documenttype[0].version 0
-datatype[9].documenttype[0].inherits[0]
-datatype[9].documenttype[0].headerstruct -628990518
-datatype[9].documenttype[0].bodystruct 538588767
diff --git a/document/src/tests/data/defaultdocument.cfg b/document/src/tests/data/defaultdocument.cfg
deleted file mode 100644
index 9780f43def6..00000000000
--- a/document/src/tests/data/defaultdocument.cfg
+++ /dev/null
@@ -1,94 +0,0 @@
-enablecompression false
-datatype[6]
-datatype[0].id 1000
-datatype[0].arraytype[1]
-datatype[0].arraytype[0].datatype 0
-datatype[0].weightedsettype[0]
-datatype[0].structtype[0]
-datatype[0].documenttype[0]
-datatype[1].id 1003
-datatype[1].arraytype[1]
-datatype[1].arraytype[0].datatype 3
-datatype[1].weightedsettype[0]
-datatype[1].structtype[0]
-datatype[1].documenttype[0]
-datatype[2].id 2002
-datatype[2].arraytype[0]
-datatype[2].weightedsettype[1]
-datatype[2].weightedsettype[0].datatype 2
-datatype[2].weightedsettype[0].createifnonexistant false
-datatype[2].weightedsettype[0].removeifzero false
-datatype[2].structtype[0]
-datatype[2].documenttype[0]
-datatype[3].id 5000
-datatype[3].arraytype[0]
-datatype[3].weightedsettype[0]
-datatype[3].structtype[1]
-datatype[3].structtype[0].name testdoc.header
-datatype[3].structtype[0].version 0
-datatype[3].structtype[0].field[3]
-datatype[3].structtype[0].field[0].name intattr
-datatype[3].structtype[0].field[0].id[0]
-datatype[3].structtype[0].field[0].datatype 0
-datatype[3].structtype[0].field[1].name doubleattr
-datatype[3].structtype[0].field[1].id[0]
-datatype[3].structtype[0].field[1].datatype 5
-datatype[3].structtype[0].field[2].name floatattr
-datatype[3].structtype[0].field[2].id[0]
-datatype[3].structtype[0].field[2].datatype 1
-datatype[3].documenttype[0]
-datatype[4].id 5001
-datatype[4].arraytype[0]
-datatype[4].weightedsettype[0]
-datatype[4].structtype[1]
-datatype[4].structtype[0].name testdoc.body
-datatype[4].structtype[0].version 0
-datatype[4].structtype[0].field[11]
-datatype[4].structtype[0].field[0].name stringattr
-datatype[4].structtype[0].field[0].id[0]
-datatype[4].structtype[0].field[0].datatype 2
-datatype[4].structtype[0].field[1].name stringattr2
-datatype[4].structtype[0].field[1].id[0]
-datatype[4].structtype[0].field[1].datatype 2
-datatype[4].structtype[0].field[2].name longattr
-datatype[4].structtype[0].field[2].id[0]
-datatype[4].structtype[0].field[2].datatype 4
-datatype[4].structtype[0].field[3].name byteattr
-datatype[4].structtype[0].field[3].id[0]
-datatype[4].structtype[0].field[3].datatype 16
-datatype[4].structtype[0].field[4].name rawattr
-datatype[4].structtype[0].field[4].id[0]
-datatype[4].structtype[0].field[4].datatype 3
-datatype[4].structtype[0].field[5].name minattr
-datatype[4].structtype[0].field[5].id[0]
-datatype[4].structtype[0].field[5].datatype 0
-datatype[4].structtype[0].field[6].name minattr2
-datatype[4].structtype[0].field[6].id[0]
-datatype[4].structtype[0].field[6].datatype 0
-datatype[4].structtype[0].field[7].name arrayattr
-datatype[4].structtype[0].field[7].id[0]
-datatype[4].structtype[0].field[7].datatype 1000
-datatype[4].structtype[0].field[8].name rawarrayattr
-datatype[4].structtype[0].field[8].id[0]
-datatype[4].structtype[0].field[8].datatype 1003
-datatype[4].structtype[0].field[9].name stringweightedsetattr
-datatype[4].structtype[0].field[9].id[0]
-datatype[4].structtype[0].field[9].datatype 2002
-datatype[4].structtype[0].field[10].name uri
-datatype[4].structtype[0].field[10].id[0]
-datatype[4].structtype[0].field[10].datatype 2
-datatype[4].structtype[0].field[11].name docfield
-datatype[4].structtype[0].field[11].id[0]
-datatype[4].structtype[0].field[11].datatype 8
-datatype[4].documenttype[0]
-datatype[5].id 5002
-datatype[5].arraytype[0]
-datatype[5].weightedsettype[0]
-datatype[5].structtype[0]
-datatype[5].documenttype[1]
-datatype[5].documenttype[0].name testdoc
-datatype[5].documenttype[0].version 0
-datatype[5].documenttype[0].inherits[0]
-datatype[5].documenttype[0].headerstruct 5000
-datatype[5].documenttype[0].bodystruct 5001
-
diff --git a/document/src/vespa/document/config/documentmanager.def b/document/src/vespa/document/config/documentmanager.def
index b9e7cc0f0d1..ec19ba8d802 100644
--- a/document/src/vespa/document/config/documentmanager.def
+++ b/document/src/vespa/document/config/documentmanager.def
@@ -107,3 +107,159 @@ annotationtype[].id int
annotationtype[].name string
annotationtype[].datatype int default=-1
annotationtype[].inherits[].id int
+
+
+# Here starts a new model for how datatypes are configured, where
+# everything is per document-type, and each documenttype contains the
+# datatypes it defines. Will be used (only?) if the arrays above
+# (datatype[] and annotationtype[]) are empty.
+
+
+# Note: we will include the built-in "document" document
+# type that all other doctypes inherit from also, in order
+# to get all the primitive and built-in types declared
+# with an idx we can refer to.
+
+## Name of the document type. Must be unique.
+doctype[].name string
+
+# Note: indexes are only meaningful as internal references in this
+# config; they will typically be sequential (1,2,3,...) in the order
+# that they are generated (but nothing should depend on that).
+
+## Index of this type (as a datatype which can be referred to).
+doctype[].idx int
+
+# Could also use name here?
+## Specify document types to inherit
+doctype[].inherits[].idx int
+
+## Index of struct defining document fields
+doctype[].contentstruct int
+
+## Field sets available for this document type
+doctype[].fieldsets{}.fields[] string
+
+## Imported fields (specified outside the document block in the schema)
+doctype[].importedfield[].name string
+
+# Everything below here is configuration of data types defined by
+# this document type.
+
+# Primitive types must be present as built-in static members.
+
+## Index of primitive type
+doctype[].primitivetype[].idx int
+
+## The name of this primitive type
+doctype[].primitivetype[].name string
+
+# Arrays are the simplest collection type:
+
+## Index of this array type
+doctype[].arraytype[].idx int
+
+## Index of the element type this array type contains
+doctype[].arraytype[].elementtype int
+
+
+# Maps are another collection type:
+
+## Index of this map type
+doctype[].maptype[].idx int
+
+## Index of the key type used by this map type
+doctype[].maptype[].keytype int
+
+## Index of the value type used by this map type
+doctype[].maptype[].valuetype int
+
+
+# Weighted sets are more complicated;
+# they can be considered as a collection
+# of unique elements where each element has
+# an associated weight:
+
+## Index of this weighted set type
+doctype[].wsettype[].idx int
+
+## Index of the element type contained in this weighted set type
+doctype[].wsettype[].elementtype int
+
+## Should an update to a nonexistent element cause it to be created
+doctype[].wsettype[].createifnonexistent bool default=false
+
+## Should an element in a weighted set be removed if an update changes the weight to 0
+doctype[].wsettype[].removeifzero bool default=false
+
+
+# Tensors have their own type system
+
+## Index of this tensor type
+doctype[].tensortype[].idx int
+
+## Description of the type of the actual tensors contained
+doctype[].tensortype[].detailedtype string
+
+
+# Document references refer to parent documents that a document can
+# import fields from:
+
+## Index of this reference data type:
+doctype[].documentref[].idx int
+
+# Could also use name?
+## Index of the document type this reference type refers to:
+doctype[].documentref[].targettype int
+
+
+# Annotation types are another world, but are modeled here
+# as if they were also datatypes contained inside document types:
+
+## Index of an annotation type.
+doctype[].annotationtype[].idx int
+
+## Name of the annotation type.
+doctype[].annotationtype[].name string
+
+# Could we somehow avoid this?
+## Internal id of this annotation type
+doctype[].annotationtype[].internalid int default=-1
+
+## Index of contained datatype of the annotation type, if any
+doctype[].annotationtype[].datatype int default=-1
+
+## Index of annotation type that this type inherits.
+doctype[].annotationtype[].inherits[].idx int
+
+
+# Annotation references are field values referring to
+# an annotation of a certain annotation type.
+
+## Index of this annotation reference type
+doctype[].annotationref[].idx int
+
+## Index of the annotation type this annotation reference type refers to
+doctype[].annotationref[].annotationtype int
+
+
+# A struct is just a named collections of fields:
+
+## Index of this struct type
+doctype[].structtype[].idx int
+
+## Name of the struct type. Must be unique within documenttype.
+doctype[].structtype[].name string
+
+## Index of another struct type to inherit
+doctype[].structtype[].inherits[].type int
+
+## Name of a struct field. Must be unique within the struct type.
+doctype[].structtype[].field[].name string
+
+## The "field id" - used in serialized format!
+doctype[].structtype[].field[].internalid int
+
+## Index of the type of this field
+doctype[].structtype[].field[].type int
+
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index 99c7e9c68b8..2e0af3acfa7 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -70,6 +70,7 @@ vespa_define_module(
src/tests/instruction/index_lookup_table
src/tests/instruction/inplace_map_function
src/tests/instruction/join_with_number
+ src/tests/instruction/l2_distance
src/tests/instruction/mixed_inner_product_function
src/tests/instruction/mixed_simple_join_function
src/tests/instruction/pow_as_map_optimizer
diff --git a/eval/src/tests/instruction/l2_distance/CMakeLists.txt b/eval/src/tests/instruction/l2_distance/CMakeLists.txt
new file mode 100644
index 00000000000..1e0fc69a3f9
--- /dev/null
+++ b/eval/src/tests/instruction/l2_distance/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+vespa_add_executable(eval_l2_distance_test_app TEST
+ SOURCES
+ l2_distance_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_l2_distance_test_app COMMAND eval_l2_distance_test_app)
diff --git a/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp b/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp
new file mode 100644
index 00000000000..2cba9dfb18e
--- /dev/null
+++ b/eval/src/tests/instruction/l2_distance/l2_distance_test.cpp
@@ -0,0 +1,96 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/test/eval_fixture.h>
+#include <vespa/eval/eval/test/gen_spec.h>
+#include <vespa/eval/instruction/l2_distance.h>
+#include <vespa/vespalib/util/stash.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+#include <vespa/vespalib/util/require.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+//-----------------------------------------------------------------------------
+
+void verify(const TensorSpec &a, const TensorSpec &b, const vespalib::string &expr, bool optimized = true) {
+ EvalFixture::ParamRepo param_repo;
+ param_repo.add("a", a).add("b", b);
+ EvalFixture fast_fixture(prod_factory, expr, param_repo, true);
+ EXPECT_EQ(fast_fixture.result(), EvalFixture::ref(expr, param_repo));
+ EXPECT_EQ(fast_fixture.find_all<L2Distance>().size(), optimized ? 1 : 0);
+}
+
+void verify_cell_types(GenSpec a, GenSpec b, const vespalib::string &expr, bool optimized = true) {
+ for (CellType act : CellTypeUtils::list_types()) {
+ for (CellType bct : CellTypeUtils::list_types()) {
+ if (optimized && (act == bct) && (act != CellType::BFLOAT16)) {
+ verify(a.cpy().cells(act), b.cpy().cells(bct), expr, true);
+ } else {
+ verify(a.cpy().cells(act), b.cpy().cells(bct), expr, false);
+ }
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
+GenSpec gen(const vespalib::string &desc, int bias) {
+ return GenSpec::from_desc(desc).cells(CellType::FLOAT).seq(N(bias));
+}
+
+//-----------------------------------------------------------------------------
+
+vespalib::string sq_l2 = "reduce((a-b)^2,sum)";
+vespalib::string alt_sq_l2 = "reduce(map((a-b),f(x)(x*x)),sum)";
+
+//-----------------------------------------------------------------------------
+
+TEST(L2DistanceTest, squared_l2_distance_can_be_optimized) {
+ verify_cell_types(gen("x5", 3), gen("x5", 7), sq_l2);
+ verify_cell_types(gen("x5", 3), gen("x5", 7), alt_sq_l2);
+}
+
+TEST(L2DistanceTest, trivial_dimensions_are_ignored) {
+ verify(gen("x5y1", 3), gen("x5", 7), sq_l2);
+ verify(gen("x5", 3), gen("x5y1", 7), sq_l2);
+}
+
+TEST(L2DistanceTest, multiple_dimensions_can_be_used) {
+ verify(gen("x5y3", 3), gen("x5y3", 7), sq_l2);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(L2DistanceTest, inputs_must_be_dense) {
+ verify(gen("x5_1", 3), gen("x5_1", 7), sq_l2, false);
+ verify(gen("x5_1y3", 3), gen("x5_1y3", 7), sq_l2, false);
+ verify(gen("x5", 3), GenSpec(7), sq_l2, false);
+ verify(GenSpec(3), gen("x5", 7), sq_l2, false);
+}
+
+TEST(L2DistanceTest, result_must_be_double) {
+ verify(gen("x5y1", 3), gen("x5y1", 7), "reduce((a-b)^2,sum,x)", false);
+ verify(gen("x5y1_1", 3), gen("x5y1_1", 7), "reduce((a-b)^2,sum,x)", false);
+}
+
+TEST(L2DistanceTest, dimensions_must_match) {
+ verify(gen("x5y3", 3), gen("x5", 7), sq_l2, false);
+ verify(gen("x5", 3), gen("x5y3", 7), sq_l2, false);
+}
+
+TEST(L2DistanceTest, similar_expressions_are_not_optimized) {
+ verify(gen("x5", 3), gen("x5", 7), "reduce((a-b)^2,prod)", false);
+ verify(gen("x5", 3), gen("x5", 7), "reduce((a-b)^3,sum)", false);
+ verify(gen("x5", 3), gen("x5", 7), "reduce((a+b)^2,sum)", false);
+}
+
+//-----------------------------------------------------------------------------
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
index 09814cc0b06..e1520d4deb2 100644
--- a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
@@ -30,6 +30,7 @@
#include <vespa/eval/instruction/dense_tensor_create_function.h>
#include <vespa/eval/instruction/dense_tensor_peek_function.h>
#include <vespa/eval/instruction/dense_hamming_distance.h>
+#include <vespa/eval/instruction/l2_distance.h>
#include <vespa/log/log.h>
LOG_SETUP(".eval.eval.optimize_tensor_function");
@@ -56,11 +57,16 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te
Child root(expr);
run_optimize_pass(root, [&stash](const Child &child)
{
+ child.set(PowAsMapOptimizer::optimize(child.get(), stash));
+ });
+ run_optimize_pass(root, [&stash](const Child &child)
+ {
child.set(SumMaxDotProductFunction::optimize(child.get(), stash));
});
run_optimize_pass(root, [&stash](const Child &child)
{
child.set(BestSimilarityFunction::optimize(child.get(), stash));
+ child.set(L2Distance::optimize(child.get(), stash));
});
run_optimize_pass(root, [&stash](const Child &child)
{
@@ -83,7 +89,6 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te
child.set(DenseLambdaPeekOptimizer::optimize(child.get(), stash));
child.set(UnpackBitsFunction::optimize(child.get(), stash));
child.set(FastRenameOptimizer::optimize(child.get(), stash));
- child.set(PowAsMapOptimizer::optimize(child.get(), stash));
child.set(InplaceMapFunction::optimize(child.get(), stash));
child.set(MixedSimpleJoinFunction::optimize(child.get(), stash));
child.set(JoinWithNumberFunction::optimize(child.get(), stash));
diff --git a/eval/src/vespa/eval/eval/typed_cells.h b/eval/src/vespa/eval/eval/typed_cells.h
index 872488527c2..b8640698d13 100644
--- a/eval/src/vespa/eval/eval/typed_cells.h
+++ b/eval/src/vespa/eval/eval/typed_cells.h
@@ -20,8 +20,8 @@ struct TypedCells {
explicit TypedCells(ConstArrayRef<BFloat16> cells) : data(cells.begin()), type(CellType::BFLOAT16), size(cells.size()) {}
explicit TypedCells(ConstArrayRef<Int8Float> cells) : data(cells.begin()), type(CellType::INT8), size(cells.size()) {}
- TypedCells() : data(nullptr), type(CellType::DOUBLE), size(0) {}
- TypedCells(const void *dp, CellType ct, size_t sz) : data(dp), type(ct), size(sz) {}
+ TypedCells() noexcept : data(nullptr), type(CellType::DOUBLE), size(0) {}
+ TypedCells(const void *dp, CellType ct, size_t sz) noexcept : data(dp), type(ct), size(sz) {}
template <typename T> bool check_type() const { return vespalib::eval::check_cell_type<T>(type); }
diff --git a/eval/src/vespa/eval/instruction/CMakeLists.txt b/eval/src/vespa/eval/instruction/CMakeLists.txt
index a462ece4734..56184c113d4 100644
--- a/eval/src/vespa/eval/instruction/CMakeLists.txt
+++ b/eval/src/vespa/eval/instruction/CMakeLists.txt
@@ -30,6 +30,7 @@ vespa_add_library(eval_instruction OBJECT
index_lookup_table.cpp
inplace_map_function.cpp
join_with_number_function.cpp
+ l2_distance.cpp
mixed_inner_product_function.cpp
mixed_simple_join_function.cpp
pow_as_map_optimizer.cpp
diff --git a/eval/src/vespa/eval/instruction/l2_distance.cpp b/eval/src/vespa/eval/instruction/l2_distance.cpp
new file mode 100644
index 00000000000..3f1e7632431
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/l2_distance.cpp
@@ -0,0 +1,96 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "l2_distance.h"
+#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/value.h>
+#include <vespa/vespalib/hwaccelrated/iaccelrated.h>
+#include <vespa/vespalib/util/require.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".eval.instruction.l2_distance");
+
+namespace vespalib::eval {
+
+using namespace tensor_function;
+
+namespace {
+
+static const auto &hw = hwaccelrated::IAccelrated::getAccelerator();
+
+template <typename T>
+double sq_l2(const Value &lhs, const Value &rhs, size_t len) {
+ return hw.squaredEuclideanDistance((const T *)lhs.cells().data, (const T *)rhs.cells().data, len);
+}
+
+template <>
+double sq_l2<Int8Float>(const Value &lhs, const Value &rhs, size_t len) {
+ return sq_l2<int8_t>(lhs, rhs, len);
+}
+
+template <typename CT>
+void my_squared_l2_distance_op(InterpretedFunction::State &state, uint64_t vector_size) {
+ double result = sq_l2<CT>(state.peek(1), state.peek(0), vector_size);
+ state.pop_pop_push(state.stash.create<DoubleValue>(result));
+}
+
+struct SelectOp {
+ template <typename CT>
+ static InterpretedFunction::op_function invoke() {
+ constexpr bool is_bfloat16 = std::is_same_v<CT, BFloat16>;
+ if constexpr (!is_bfloat16) {
+ return my_squared_l2_distance_op<CT>;
+ } else {
+ abort();
+ }
+ }
+};
+
+bool compatible_cell_types(CellType lhs, CellType rhs) {
+ return ((lhs == rhs) && ((lhs == CellType::INT8) ||
+ (lhs == CellType::FLOAT) ||
+ (lhs == CellType::DOUBLE)));
+}
+
+bool compatible_types(const ValueType &lhs, const ValueType &rhs) {
+ return (compatible_cell_types(lhs.cell_type(), rhs.cell_type()) &&
+ lhs.is_dense() && rhs.is_dense() &&
+ (lhs.nontrivial_indexed_dimensions() == rhs.nontrivial_indexed_dimensions()));
+}
+
+} // namespace <unnamed>
+
+L2Distance::L2Distance(const TensorFunction &lhs_in, const TensorFunction &rhs_in)
+ : tensor_function::Op2(ValueType::double_type(), lhs_in, rhs_in)
+{
+}
+
+InterpretedFunction::Instruction
+L2Distance::compile_self(const ValueBuilderFactory &, Stash &) const
+{
+ auto lhs_t = lhs().result_type();
+ auto rhs_t = rhs().result_type();
+ REQUIRE_EQ(lhs_t.cell_type(), rhs_t.cell_type());
+ REQUIRE_EQ(lhs_t.dense_subspace_size(), rhs_t.dense_subspace_size());
+ auto op = typify_invoke<1, TypifyCellType, SelectOp>(lhs_t.cell_type());
+ return InterpretedFunction::Instruction(op, lhs_t.dense_subspace_size());
+}
+
+const TensorFunction &
+L2Distance::optimize(const TensorFunction &expr, Stash &stash)
+{
+ auto reduce = as<Reduce>(expr);
+ if (reduce && (reduce->aggr() == Aggr::SUM) && expr.result_type().is_double()) {
+ auto map = as<Map>(reduce->child());
+ if (map && (map->function() == operation::Square::f)) {
+ auto join = as<Join>(map->child());
+ if (join && (join->function() == operation::Sub::f)) {
+ if (compatible_types(join->lhs().result_type(), join->rhs().result_type())) {
+ return stash.create<L2Distance>(join->lhs(), join->rhs());
+ }
+ }
+ }
+ }
+ return expr;
+}
+
+} // namespace
diff --git a/eval/src/vespa/eval/instruction/l2_distance.h b/eval/src/vespa/eval/instruction/l2_distance.h
new file mode 100644
index 00000000000..95b11b6c229
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/l2_distance.h
@@ -0,0 +1,21 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/tensor_function.h>
+
+namespace vespalib::eval {
+
+/**
+ * Tensor function for a squared euclidean distance producing a scalar result.
+ **/
+class L2Distance : public tensor_function::Op2
+{
+public:
+ L2Distance(const TensorFunction &lhs_in, const TensorFunction &rhs_in);
+ InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &factory, Stash &stash) const override;
+ bool result_is_mutable() const override { return true; }
+ static const TensorFunction &optimize(const TensorFunction &expr, Stash &stash);
+};
+
+} // namespace
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 0dc06270031..bbc7231d0dc 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -386,11 +386,12 @@ public class Flags {
"Takes effect on container reboot",
ZONE_ID, HOSTNAME);
- public static final UnboundBooleanFlag CHANGE_ROUTING_STATUS_OF_ALL_UPSTREAMS = defineFeatureFlag(
- "change-routing-status-of-all-upstreams", false,
- List.of("mpolden"), "2021-12-02", "2021-12-15",
- "Whether the controller should send all known upstreams to configserver when changing routing status of a deployment",
- "Takes effect on the next change in routing status");
+ public static final UnboundBooleanFlag USE_V8_DOC_MANAGER_CFG = defineFeatureFlag(
+ "use-v8-doc-manager-cfg", false,
+ List.of("arnej", "baldersheim"), "2021-12-09", "2022-12-31",
+ "Use new (preparing for Vespa 8) section in documentmanager.def",
+ "Takes effect at redeployment",
+ ZONE_ID, APPLICATION_ID);
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
diff --git a/jdisc_core/abi-spec.json b/jdisc_core/abi-spec.json
index 497fdfad501..569f45949c8 100644
--- a/jdisc_core/abi-spec.json
+++ b/jdisc_core/abi-spec.json
@@ -934,332 +934,5 @@
"public abstract void close()"
],
"fields": []
- },
- "com.yahoo.jdisc.test.MockMetric": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.jdisc.Metric"
- ],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>()",
- "public void set(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)",
- "public void add(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)",
- "public com.yahoo.jdisc.Metric$Context createContext(java.util.Map)",
- "public java.util.Map metrics()",
- "public java.lang.String toString()"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.NonWorkingClientProvider": {
- "superClass": "com.yahoo.jdisc.NoopSharedResource",
- "interfaces": [
- "com.yahoo.jdisc.service.ClientProvider"
- ],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public void <init>()",
- "public void start()",
- "public com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)",
- "public void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.NonWorkingCompletionHandler": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.jdisc.handler.CompletionHandler"
- ],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public void <init>()",
- "public void completed()",
- "public void failed(java.lang.Throwable)"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.NonWorkingContentChannel": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.jdisc.handler.ContentChannel"
- ],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public void <init>()",
- "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)",
- "public void close(com.yahoo.jdisc.handler.CompletionHandler)"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.NonWorkingOsgiFramework": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.jdisc.application.OsgiFramework"
- ],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>()",
- "public java.util.List installBundle(java.lang.String)",
- "public void startBundles(java.util.List, boolean)",
- "public void refreshPackages()",
- "public org.osgi.framework.BundleContext bundleContext()",
- "public java.util.List bundles()",
- "public java.util.List getBundles(org.osgi.framework.Bundle)",
- "public void allowDuplicateBundles(java.util.Collection)",
- "public void start()",
- "public void stop()"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.NonWorkingRequest": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public static varargs com.yahoo.jdisc.Request newInstance(java.lang.String, com.google.inject.Module[])"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.NonWorkingRequestHandler": {
- "superClass": "com.yahoo.jdisc.NoopSharedResource",
- "interfaces": [
- "com.yahoo.jdisc.handler.RequestHandler"
- ],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public void <init>()",
- "public com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)",
- "public void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.NonWorkingResponseHandler": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.jdisc.handler.ResponseHandler"
- ],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>()",
- "public com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.NonWorkingServerProvider": {
- "superClass": "com.yahoo.jdisc.NoopSharedResource",
- "interfaces": [
- "com.yahoo.jdisc.service.ServerProvider"
- ],
- "attributes": [
- "public",
- "final"
- ],
- "methods": [
- "public void <init>()",
- "public void start()",
- "public void close()"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.ServerProviderConformanceTest$Adapter": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public",
- "interface",
- "abstract"
- ],
- "methods": [
- "public abstract com.google.inject.Module newConfigModule()",
- "public abstract java.lang.Class getServerProviderClass()",
- "public abstract java.lang.Object newClient(com.yahoo.jdisc.service.ServerProvider)",
- "public abstract java.lang.Object executeRequest(java.lang.Object, boolean)",
- "public abstract java.lang.Iterable newResponseContent()",
- "public abstract void validateResponse(java.lang.Object)"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.ServerProviderConformanceTest$ConformanceException": {
- "superClass": "java.lang.RuntimeException",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>()",
- "public void <init>(com.yahoo.jdisc.test.ServerProviderConformanceTest$Event)",
- "public java.lang.String getMessage()"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.ServerProviderConformanceTest": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public",
- "abstract"
- ],
- "methods": [
- "public void <init>()",
- "public abstract void testContainerNotReadyException()",
- "public abstract void testBindingSetNotFoundException()",
- "public abstract void testNoBindingSetSelectedException()",
- "public abstract void testBindingNotFoundException()",
- "public abstract void testRequestHandlerWithSyncCloseResponse()",
- "public abstract void testRequestHandlerWithSyncWriteResponse()",
- "public abstract void testRequestHandlerWithSyncHandleResponse()",
- "public abstract void testRequestHandlerWithAsyncHandleResponse()",
- "public abstract void testRequestException()",
- "public abstract void testRequestExceptionWithSyncCloseResponse()",
- "public abstract void testRequestExceptionWithSyncWriteResponse()",
- "public abstract void testRequestNondeterministicExceptionWithSyncHandleResponse()",
- "public abstract void testRequestExceptionBeforeResponseWriteWithSyncHandleResponse()",
- "public abstract void testRequestExceptionAfterResponseWriteWithSyncHandleResponse()",
- "public abstract void testRequestNondeterministicExceptionWithAsyncHandleResponse()",
- "public abstract void testRequestExceptionBeforeResponseWriteWithAsyncHandleResponse()",
- "public abstract void testRequestExceptionAfterResponseCloseNoContentWithAsyncHandleResponse()",
- "public abstract void testRequestExceptionAfterResponseWriteWithAsyncHandleResponse()",
- "public abstract void testRequestContentWriteWithSyncCompletion()",
- "public abstract void testRequestContentWriteWithAsyncCompletion()",
- "public abstract void testRequestContentWriteWithNondeterministicSyncFailure()",
- "public abstract void testRequestContentWriteWithSyncFailureBeforeResponseWrite()",
- "public abstract void testRequestContentWriteWithSyncFailureAfterResponseWrite()",
- "public abstract void testRequestContentWriteWithNondeterministicAsyncFailure()",
- "public abstract void testRequestContentWriteWithAsyncFailureBeforeResponseWrite()",
- "public abstract void testRequestContentWriteWithAsyncFailureAfterResponseWrite()",
- "public abstract void testRequestContentWriteWithAsyncFailureAfterResponseCloseNoContent()",
- "public abstract void testRequestContentWriteNondeterministicException()",
- "public abstract void testRequestContentWriteExceptionBeforeResponseWrite()",
- "public abstract void testRequestContentWriteExceptionAfterResponseWrite()",
- "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContent()",
- "public abstract void testRequestContentWriteNondeterministicExceptionWithSyncCompletion()",
- "public abstract void testRequestContentWriteExceptionBeforeResponseWriteWithSyncCompletion()",
- "public abstract void testRequestContentWriteExceptionAfterResponseWriteWithSyncCompletion()",
- "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContentWithSyncCompletion()",
- "public abstract void testRequestContentWriteNondeterministicExceptionWithAsyncCompletion()",
- "public abstract void testRequestContentWriteExceptionBeforeResponseWriteWithAsyncCompletion()",
- "public abstract void testRequestContentWriteExceptionAfterResponseWriteWithAsyncCompletion()",
- "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContentWithAsyncCompletion()",
- "public abstract void testRequestContentWriteExceptionWithNondeterministicSyncFailure()",
- "public abstract void testRequestContentWriteExceptionWithSyncFailureBeforeResponseWrite()",
- "public abstract void testRequestContentWriteExceptionWithSyncFailureAfterResponseWrite()",
- "public abstract void testRequestContentWriteExceptionWithSyncFailureAfterResponseCloseNoContent()",
- "public abstract void testRequestContentWriteExceptionWithNondeterministicAsyncFailure()",
- "public abstract void testRequestContentWriteExceptionWithAsyncFailureBeforeResponseWrite()",
- "public abstract void testRequestContentWriteExceptionWithAsyncFailureAfterResponseWrite()",
- "public abstract void testRequestContentWriteExceptionWithAsyncFailureAfterResponseCloseNoContent()",
- "public abstract void testRequestContentCloseWithSyncCompletion()",
- "public abstract void testRequestContentCloseWithAsyncCompletion()",
- "public abstract void testRequestContentCloseWithNondeterministicSyncFailure()",
- "public abstract void testRequestContentCloseWithSyncFailureBeforeResponseWrite()",
- "public abstract void testRequestContentCloseWithSyncFailureAfterResponseWrite()",
- "public abstract void testRequestContentCloseWithSyncFailureAfterResponseCloseNoContent()",
- "public abstract void testRequestContentCloseWithNondeterministicAsyncFailure()",
- "public abstract void testRequestContentCloseWithAsyncFailureBeforeResponseWrite()",
- "public abstract void testRequestContentCloseWithAsyncFailureAfterResponseWrite()",
- "public abstract void testRequestContentCloseWithAsyncFailureAfterResponseCloseNoContent()",
- "public abstract void testRequestContentCloseNondeterministicException()",
- "public abstract void testRequestContentCloseExceptionBeforeResponseWrite()",
- "public abstract void testRequestContentCloseExceptionAfterResponseWrite()",
- "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContent()",
- "public abstract void testRequestContentCloseNondeterministicExceptionWithSyncCompletion()",
- "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithSyncCompletion()",
- "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithSyncCompletion()",
- "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithSyncCompletion()",
- "public abstract void testRequestContentCloseNondeterministicExceptionWithAsyncCompletion()",
- "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithAsyncCompletion()",
- "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithAsyncCompletion()",
- "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithAsyncCompletion()",
- "public abstract void testRequestContentCloseNondeterministicExceptionWithSyncFailure()",
- "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithSyncFailure()",
- "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithSyncFailure()",
- "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithSyncFailure()",
- "public abstract void testRequestContentCloseNondeterministicExceptionWithAsyncFailure()",
- "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithAsyncFailure()",
- "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithAsyncFailure()",
- "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithAsyncFailure()",
- "public abstract void testResponseWriteCompletionException()",
- "public abstract void testResponseCloseCompletionException()",
- "public abstract void testResponseCloseCompletionExceptionNoContent()",
- "protected varargs void runTest(com.yahoo.jdisc.test.ServerProviderConformanceTest$Adapter, com.google.inject.Module[])"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.TestDriver": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.jdisc.application.ContainerActivator",
- "com.yahoo.jdisc.service.CurrentContainer"
- ],
- "attributes": [
- "public"
- ],
- "methods": [
- "public com.yahoo.jdisc.application.ContainerBuilder newContainerBuilder()",
- "public com.yahoo.jdisc.application.DeactivatedContainer activateContainer(com.yahoo.jdisc.application.ContainerBuilder)",
- "public com.yahoo.jdisc.Container newReference(java.net.URI)",
- "public com.yahoo.jdisc.core.BootstrapLoader bootstrapLoader()",
- "public com.yahoo.jdisc.application.Application application()",
- "public com.yahoo.jdisc.application.OsgiFramework osgiFramework()",
- "public com.yahoo.jdisc.handler.ContentChannel connectRequest(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)",
- "public java.util.concurrent.Future dispatchRequest(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)",
- "public void scheduleClose()",
- "public boolean awaitClose(long, java.util.concurrent.TimeUnit)",
- "public boolean close()",
- "public com.yahoo.jdisc.handler.RequestDispatch newRequestDispatch(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)",
- "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstance(java.lang.Class, com.google.inject.Module[])",
- "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstanceWithoutOsgi(java.lang.Class, com.google.inject.Module[])",
- "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstance(com.yahoo.jdisc.application.Application, com.google.inject.Module[])",
- "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstanceWithoutOsgi(com.yahoo.jdisc.application.Application, com.google.inject.Module[])",
- "public static varargs com.yahoo.jdisc.test.TestDriver newSimpleApplicationInstance(com.google.inject.Module[])",
- "public static varargs com.yahoo.jdisc.test.TestDriver newSimpleApplicationInstanceWithoutOsgi(com.google.inject.Module[])",
- "public static varargs com.yahoo.jdisc.test.TestDriver newApplicationBundleInstance(java.lang.String, boolean, com.google.inject.Module[])",
- "public static varargs com.yahoo.jdisc.test.TestDriver newInstance(com.yahoo.jdisc.application.OsgiFramework, java.lang.String, boolean, com.google.inject.Module[])",
- "public static com.yahoo.jdisc.core.FelixFramework newOsgiFramework()",
- "public static com.yahoo.jdisc.application.OsgiFramework newNonWorkingOsgiFramework()"
- ],
- "fields": []
- },
- "com.yahoo.jdisc.test.TestTimer": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "com.yahoo.jdisc.Timer"
- ],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>()",
- "public void <init>(java.time.Instant)",
- "public void setMillis(long)",
- "public void advanceMillis(long)",
- "public void advanceSeconds(long)",
- "public void advanceMinutes(long)",
- "public void advance(java.time.Duration)",
- "public java.time.Instant currentTime()",
- "public long currentTimeMillis()"
- ],
- "fields": []
}
} \ No newline at end of file
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java b/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java
index 199a12216ad..bfb4088aa99 100644
--- a/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/test/package-info.java
@@ -4,5 +4,4 @@
*
* @see com.yahoo.jdisc.test.TestDriver
*/
-@com.yahoo.api.annotations.PublicApi
package com.yahoo.jdisc.test;
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
index 9336451d08d..38e725360a0 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
@@ -162,7 +162,7 @@ public class RealNodeRepository implements NodeRepository {
return new NodeSpec(
node.hostname,
- Optional.ofNullable(node.openStackId),
+ Optional.ofNullable(node.id),
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString),
nodeState,
@@ -244,7 +244,7 @@ public class RealNodeRepository implements NodeRepository {
private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
- node.openStackId = addNode.id.orElse("fake-" + addNode.hostname);
+ node.id = addNode.id.orElse("fake-" + addNode.hostname);
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
addNode.nodeFlavor.ifPresent(f -> node.flavor = f);
@@ -269,7 +269,7 @@ public class RealNodeRepository implements NodeRepository {
public static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
- node.openStackId = nodeAttributes.getHostId().orElse(null);
+ node.id = nodeAttributes.getHostId().orElse(null);
node.currentDockerImage = nodeAttributes.getDockerImage().map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration().orElse(null);
node.currentRebootGeneration = nodeAttributes.getRebootGeneration().orElse(null);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java
index 1e51fe279bb..f99fb3d8b76 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java
@@ -25,8 +25,8 @@ public class NodeRepositoryNode {
public Set<String> ipAddresses;
@JsonProperty("additionalIpAddresses")
public Set<String> additionalIpAddresses;
- @JsonProperty("openStackId")
- public String openStackId;
+ @JsonProperty("id")
+ public String id;
@JsonProperty("flavor")
public String flavor;
@JsonProperty("resources")
@@ -99,7 +99,7 @@ public class NodeRepositoryNode {
", hostname='" + hostname + '\'' +
", ipAddresses=" + ipAddresses +
", additionalIpAddresses=" + additionalIpAddresses +
- ", openStackId='" + openStackId + '\'' +
+ ", id='" + id + '\'' +
", modelName='" + modelName + '\'' +
", flavor='" + flavor + '\'' +
", resources=" + resources +
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index e0ccbe10b10..ad20f68ca33 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -26,7 +26,7 @@ public class Cluster {
private final ClusterSpec.Id id;
private final boolean exclusive;
private final ClusterResources min, max;
- private boolean required;
+ private final boolean required;
private final Optional<Suggestion> suggested;
private final Optional<ClusterResources> target;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
index 078b0621a99..849ea03665b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
@@ -13,6 +13,7 @@ import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits;
import java.util.List;
import java.util.Optional;
+import java.util.stream.Collectors;
/**
* @author bratseth
@@ -139,23 +140,21 @@ public class AllocatableClusterResources {
public static Optional<AllocatableClusterResources> from(ClusterResources wantedResources,
ClusterSpec clusterSpec,
Limits applicationLimits,
- boolean required,
NodeList hosts,
NodeRepository nodeRepository) {
- var capacityPolicies = new CapacityPolicies(nodeRepository);
var systemLimits = new NodeResourceLimits(nodeRepository);
boolean exclusive = clusterSpec.isExclusive();
- int actualNodes = capacityPolicies.decideSize(wantedResources.nodes(), required, true, false, clusterSpec);
if ( !clusterSpec.isExclusive() && !nodeRepository.zone().getCloud().dynamicProvisioning()) {
// We decide resources: Add overhead to what we'll request (advertised) to make sure real becomes (at least) cappedNodeResources
var advertisedResources = nodeRepository.resourcesCalculator().realToRequest(wantedResources.nodeResources(), exclusive);
advertisedResources = systemLimits.enlargeToLegal(advertisedResources, clusterSpec.type(), exclusive); // Ask for something legal
advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail
- advertisedResources = capacityPolicies.decideNodeResources(advertisedResources, required, clusterSpec); // Adjust to what we can request
var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive); // What we'll really get
- if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) return Optional.empty();
+ if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec.type()))
+ return Optional.empty();
+
if (matchesAny(hosts, advertisedResources))
- return Optional.of(new AllocatableClusterResources(wantedResources.withNodes(actualNodes).with(realResources),
+ return Optional.of(new AllocatableClusterResources(wantedResources.with(realResources),
advertisedResources,
wantedResources,
clusterSpec));
@@ -168,7 +167,6 @@ public class AllocatableClusterResources {
for (Flavor flavor : nodeRepository.flavors().getFlavors()) {
// Flavor decide resources: Real resources are the worst case real resources we'll get if we ask for these advertised resources
NodeResources advertisedResources = nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor);
- advertisedResources = capacityPolicies.decideNodeResources(advertisedResources, required, clusterSpec); // Adjust to what we can get
NodeResources realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive);
// Adjust where we don't need exact match to the flavor
@@ -184,7 +182,7 @@ public class AllocatableClusterResources {
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) continue;
- var candidate = new AllocatableClusterResources(wantedResources.withNodes(actualNodes).with(realResources),
+ var candidate = new AllocatableClusterResources(wantedResources.with(realResources),
advertisedResources,
wantedResources,
clusterSpec);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index b8a80a9bd2b..30432c1c078 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -66,15 +66,12 @@ public class AllocationOptimizer {
groupsAdjustedForRedundancy,
limits, target, current, clusterModel));
var allocatableResources = AllocatableClusterResources.from(next, current.clusterSpec(), limits,
- clusterModel.cluster().required(),
hosts, nodeRepository);
-
if (allocatableResources.isEmpty()) continue;
if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get()))
bestAllocation = allocatableResources;
}
}
-
return bestAllocation;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index e1e670c5b01..3c26eef41d9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -32,9 +32,11 @@ public class ClusterModel {
static final double idealQueryCpuLoad = 0.8;
static final double idealWriteCpuLoad = 0.95;
static final double idealMemoryLoad = 0.65;
- static final double idealDiskLoad = 0.6;
+ static final double idealContainerDiskLoad = 0.95;
+ static final double idealContentDiskLoad = 0.6;
private final Application application;
+ private final ClusterSpec clusterSpec;
private final Cluster cluster;
/** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */
private final NodeList nodes;
@@ -54,6 +56,7 @@ public class ClusterModel {
MetricsDb metricsDb,
Clock clock) {
this.application = application;
+ this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = clusterNodes;
this.clock = clock;
@@ -64,12 +67,14 @@ public class ClusterModel {
/** For testing */
ClusterModel(Application application,
+ ClusterSpec clusterSpec,
Cluster cluster,
Clock clock,
Duration scalingDuration,
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
this.application = application;
+ this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = null;
this.clock = clock;
@@ -79,6 +84,8 @@ public class ClusterModel {
this.nodeTimeseries = nodeTimeseries;
}
+ public Application application() { return application; }
+ public ClusterSpec clusterSpec() { return clusterSpec; }
public Cluster cluster() { return cluster; }
/** Returns the predicted duration of a rescaling of this cluster */
@@ -110,7 +117,7 @@ public class ClusterModel {
public Load averageLoad() { return nodeTimeseries().averageLoad(clock.instant().minus(scalingDuration())); }
public Load idealLoad() {
- return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad);
+ return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad());
}
/** Ideal cpu load must take the application traffic fraction into account */
@@ -193,6 +200,12 @@ public class ClusterModel {
return duration;
}
+ private double idealDiskLoad() {
+ // Stateless clusters are not expected to consume more disk over time -
+ // if they do it is due to logs which will be rotated away right before the disk is full
+ return clusterSpec.isStateful() ? idealContentDiskLoad : idealContainerDiskLoad;
+ }
+
/**
* Create a cluster model if possible and logs a warning and returns empty otherwise.
     * This is useful in cases where it's possible to continue without the cluster model,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
index 3b74533772b..fbc3d236421 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
@@ -79,7 +79,7 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
allocatedNodes(lb.id()).isEmpty(), lb -> {
try {
attempts.add(1);
- log.log(Level.INFO, () -> "Removing expired inactive load balancer " + lb.id());
+ log.log(Level.INFO, () -> "Removing expired inactive " + lb.id());
service.remove(lb.id().application(), lb.id().cluster());
db.removeLoadBalancer(lb.id());
} catch (Exception e){
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java
index 6c103627ad4..57db874fb84 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMover.java
@@ -59,7 +59,7 @@ public abstract class NodeMover<MOVE> extends NodeRepositoryMaintainer {
protected final MOVE findBestMove(NodesAndHosts<? extends NodeList> allNodes) {
HostCapacity capacity = new HostCapacity(allNodes, nodeRepository().resourcesCalculator());
MOVE bestMove = emptyMove;
- // Shuffle nodes so we did not get stuck if the chosen move is consistently discarded. Node moves happen through
+ // Shuffle nodes to not get stuck if the chosen move is consistently discarded. Node moves happen through
// a soft request to retire (preferToRetire), which node allocation can disregard
NodeList activeNodes = allNodes.nodes().nodeType(NodeType.tenant)
.state(Node.State.active)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java
index 7bea671fbac..f01e8ecd301 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java
@@ -16,6 +16,7 @@ import java.time.Duration;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import java.util.logging.Logger;
/**
  * Ensure that nodes within a cluster are spread across hosts on exclusive network switches.
@@ -24,6 +25,8 @@ import java.util.Set;
*/
public class SwitchRebalancer extends NodeMover<Move> {
+ private static final Logger LOG = Logger.getLogger(SwitchRebalancer.class.getName());
+
private final Metric metric;
private final Deployer deployer;
@@ -40,7 +43,12 @@ public class SwitchRebalancer extends NodeMover<Move> {
NodesAndHosts<NodeList> allNodes = NodesAndHosts.create(nodeRepository().nodes().list()); // Lockless as strong consistency is not needed
if (!zoneIsStable(allNodes.nodes())) return 1.0;
- findBestMove(allNodes).execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository());
+ Move bestMove = findBestMove(allNodes);
+ if (!bestMove.isEmpty()) {
+ LOG.info("Trying " + bestMove + " (" + bestMove.fromHost().switchHostname().orElse("<none>") +
+ " -> " + bestMove.toHost().switchHostname().orElse("<none>") + ")");
+ }
+ bestMove.execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository());
return 1.0;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java
index 8c421443a65..1c3d3f5c489 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeResourcesSerializer.java
@@ -20,6 +20,7 @@ public class NodeResourcesSerializer {
private static final String storageTypeKey = "storageType";
static void toSlime(NodeResources resources, Cursor resourcesObject) {
+ if (resources.isUnspecified()) return;
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
@@ -29,6 +30,7 @@ public class NodeResourcesSerializer {
}
static NodeResources resourcesFromSlime(Inspector resources) {
+ if ( ! resources.field(vcpuKey).valid()) return NodeResources.unspecified();
return new NodeResources(resources.field(vcpuKey).asDouble(),
resources.field(memoryKey).asDouble(),
resources.field(diskKey).asDouble(),
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index 0d32b21016c..8c358301b85 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -113,7 +113,8 @@ class Activator {
var cluster = modified.cluster(clusterEntry.getKey()).get();
var previousResources = oldNodes.cluster(clusterEntry.getKey()).toResources();
var currentResources = clusterEntry.getValue().toResources();
- if ( ! previousResources.justNumbers().equals(currentResources.justNumbers())) {
+ if ( previousResources.nodeResources().isUnspecified()
+ || ! previousResources.justNumbers().equals(currentResources.justNumbers())) {
cluster = cluster.with(ScalingEvent.create(previousResources, currentResources, generation, at));
}
if (cluster.targetResources().isPresent()
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 0c2c3c48df1..4088d717a67 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
@@ -29,10 +30,21 @@ public class CapacityPolicies {
this.sharedHosts = type -> PermanentFlags.SHARED_HOST.bindTo(nodeRepository.flagSource()).value().isEnabled(type.name());
}
- public int decideSize(int requested, boolean required, boolean canFail, boolean isTester, ClusterSpec cluster) {
+ public Capacity applyOn(Capacity capacity, ApplicationId application) {
+ return capacity.withLimits(applyOn(capacity.minResources(), capacity, application),
+ applyOn(capacity.maxResources(), capacity, application));
+ }
+
+ private ClusterResources applyOn(ClusterResources resources, Capacity capacity, ApplicationId application) {
+ int nodes = decideSize(resources.nodes(), capacity.isRequired(), application.instance().isTester());
+ int groups = Math.min(resources.groups(), nodes); // cannot have more groups than nodes
+ var nodeResources = decideNodeResources(resources.nodeResources(), capacity.isRequired());
+ return new ClusterResources(nodes, groups, nodeResources);
+ }
+
+ private int decideSize(int requested, boolean required, boolean isTester) {
if (isTester) return 1;
- ensureRedundancy(requested, cluster, canFail);
if (required) return requested;
switch(zone.environment()) {
case dev : case test : return 1;
@@ -43,11 +55,9 @@ public class CapacityPolicies {
}
}
- public NodeResources decideNodeResources(NodeResources target, boolean required, ClusterSpec cluster) {
- if (target.isUnspecified())
- target = defaultNodeResources(cluster.type());
-
+ private NodeResources decideNodeResources(NodeResources target, boolean required) {
if (required) return target;
+ if (target.isUnspecified()) return target; // Cannot be modified
// Dev does not cap the cpu or network of containers since usage is spotty: Allocate just a small amount exclusively
if (zone.environment() == Environment.dev && !zone.getCloud().dynamicProvisioning())
@@ -77,28 +87,11 @@ public class CapacityPolicies {
}
/**
- * Whether or not the nodes requested can share physical host with other applications.
+ * Returns whether the nodes requested can share physical host with other applications.
* A security feature which only makes sense for prod.
*/
public boolean decideExclusivity(Capacity capacity, boolean requestedExclusivity) {
return requestedExclusivity && (capacity.isRequired() || zone.environment() == Environment.prod);
}
- /**
- * Throw if the node count is 1 for container and content clusters and we're in a production zone
- *
- * @throws IllegalArgumentException if only one node is requested and we can fail
- */
- private void ensureRedundancy(int nodeCount, ClusterSpec cluster, boolean canFail) {
- if (canFail &&
- nodeCount == 1 &&
- requiresRedundancy(cluster.type()) &&
- zone.environment().isProduction())
- throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy. Not fulfilled for " + cluster);
- }
-
- private static boolean requiresRedundancy(ClusterSpec.Type clusterType) {
- return clusterType.isContent() || clusterType.isContainer();
- }
-
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index ba46f0a9535..2d93763c631 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -71,7 +71,7 @@ public class GroupPreparer {
// Try preparing in memory without global unallocated lock. Most of the time there should be no changes and we
// can return nodes previously allocated.
NodeAllocation probeAllocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes,
- indices::probeNext, wantedGroups, allNodesAndHosts);
+ indices::probeNext, wantedGroups, allNodesAndHosts);
if (probeAllocation.fulfilledAndNoChanges()) {
List<Node> acceptedNodes = probeAllocation.finalNodes();
surplusActiveNodes.removeAll(acceptedNodes);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index 7cc4acc20b0..6c22a26d88a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -99,12 +99,12 @@ class NodeAllocation {
* Note that if unallocated nodes are offered before allocated nodes, this will unnecessarily
* reject allocated nodes due to index duplicates.
*
- * @param nodesPrioritized the nodes which are potentially on offer. These may belong to a different application etc.
+ * @param candidates the nodes which are potentially on offer. These may belong to a different application etc.
* @return the subset of offeredNodes which was accepted, with the correct allocation assigned
*/
- List<Node> offer(List<NodeCandidate> nodesPrioritized) {
+ List<Node> offer(List<NodeCandidate> candidates) {
List<Node> accepted = new ArrayList<>();
- for (NodeCandidate candidate : nodesPrioritized) {
+ for (NodeCandidate candidate : candidates) {
if (candidate.allocation().isPresent()) {
Allocation allocation = candidate.allocation().get();
ClusterMembership membership = allocation.membership();
@@ -121,7 +121,7 @@ class NodeAllocation {
if ((! saturated() && hasCompatibleFlavor(candidate) && requestedNodes.acceptable(candidate)) || acceptToRetire) {
candidate = candidate.withNode();
if (candidate.isValid())
- accepted.add(acceptNode(candidate, shouldRetire(candidate, nodesPrioritized), resizeable));
+ accepted.add(acceptNode(candidate, shouldRetire(candidate, candidates), resizeable));
}
}
else if (! saturated() && hasCompatibleFlavor(candidate)) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
index 4f0ae688b1c..62ac1f0d0e6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
@@ -238,7 +238,6 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
private double skewWith(NodeResources resources) {
if (parent.isEmpty()) return 0;
-
NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers());
return Node.skew(parent.get().flavor().resources(), free);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index b35b0a5e301..7d15a2b30b1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -84,8 +84,8 @@ public class NodeRepositoryProvisioner implements Provisioner {
@Override
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested,
ProvisionLogger logger) {
- log.log(Level.FINE, () -> "Received deploy prepare request for " + requested +
- " for application " + application + ", cluster " + cluster);
+ log.log(Level.FINE, "Received deploy prepare request for " + requested +
+ " for application " + application + ", cluster " + cluster);
if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
@@ -96,21 +96,21 @@ public class NodeRepositoryProvisioner implements Provisioner {
NodeResources resources;
NodeSpec nodeSpec;
if (requested.type() == NodeType.tenant) {
- ClusterResources target = decideTargetResources(application, cluster, requested);
- int nodeCount = capacityPolicies.decideSize(target.nodes(),
- requested.isRequired(),
- requested.canFail(),
- application.instance().isTester(),
- cluster);
- groups = Math.min(target.groups(), nodeCount); // cannot have more groups than nodes
- resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested.isRequired(), cluster);
- boolean exclusive = capacityPolicies.decideExclusivity(requested, cluster.isExclusive());
- nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail());
- logIfDownscaled(target.nodes(), nodeCount, cluster, logger);
+ var actual = capacityPolicies.applyOn(requested, application);
+ ClusterResources target = decideTargetResources(application, cluster, actual);
+ boolean exclusive = capacityPolicies.decideExclusivity(actual, cluster.isExclusive());
+ ensureRedundancy(target.nodes(), cluster, actual.canFail(), application);
+ logIfDownscaled(requested.minResources().nodes(), actual.minResources().nodes(), cluster, logger);
+
+ groups = target.groups();
+ resources = target.nodeResources().isUnspecified() ? capacityPolicies.defaultNodeResources(cluster.type())
+ : target.nodeResources();
+ nodeSpec = NodeSpec.from(target.nodes(), resources, exclusive, actual.canFail());
}
else {
groups = 1; // type request with multiple groups is not supported
- resources = requested.minResources().nodeResources();
+ resources = requested.minResources().nodeResources().isUnspecified() ? capacityPolicies.defaultNodeResources(cluster.type())
+ : requested.minResources().nodeResources();
nodeSpec = NodeSpec.from(requested.type());
}
return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources);
@@ -164,12 +164,20 @@ public class NodeRepositoryProvisioner implements Provisioner {
boolean firstDeployment = nodes.isEmpty();
AllocatableClusterResources currentResources =
firstDeployment // start at min, preserve current resources otherwise
- ? new AllocatableClusterResources(requested.minResources(), clusterSpec, nodeRepository)
+ ? new AllocatableClusterResources(initialResourcesFrom(requested, clusterSpec), clusterSpec, nodeRepository)
: new AllocatableClusterResources(nodes.asList(), nodeRepository);
var clusterModel = new ClusterModel(application, cluster, clusterSpec, nodes, nodeRepository.metricsDb(), nodeRepository.clock());
return within(Limits.of(requested), currentResources, firstDeployment, clusterModel);
}
+ private ClusterResources initialResourcesFrom(Capacity requested, ClusterSpec clusterSpec) {
+ var initial = requested.minResources();
+ if (initial.nodeResources().isUnspecified())
+ initial = initial.with(capacityPolicies.defaultNodeResources(clusterSpec.type()));
+ return initial;
+ }
+
+
/** Make the minimal adjustments needed to the current resources to stay within the limits */
private ClusterResources within(Limits limits,
AllocatableClusterResources current,
@@ -190,10 +198,28 @@ public class NodeRepositoryProvisioner implements Provisioner {
.advertisedResources();
}
- private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) {
- if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes)
- logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster +
- ", downscaling to " + actualNodes + " nodes in " + zone.environment());
+ /**
+ * Throw if the node count is 1 for container and content clusters and we're in a production zone
+ *
+ * @throws IllegalArgumentException if only one node is requested and we can fail
+ */
+ private void ensureRedundancy(int nodeCount, ClusterSpec cluster, boolean canFail, ApplicationId application) {
+ if (! application.instance().isTester() &&
+ canFail &&
+ nodeCount == 1 &&
+ requiresRedundancy(cluster.type()) &&
+ zone.environment().isProduction())
+ throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy. Not fulfilled for " + cluster);
+ }
+
+ private static boolean requiresRedundancy(ClusterSpec.Type clusterType) {
+ return clusterType.isContent() || clusterType.isContainer();
+ }
+
+ private void logIfDownscaled(int requestedMinNodes, int actualMinNodes, ClusterSpec cluster, ProvisionLogger logger) {
+ if (zone.environment().isManuallyDeployed() && actualMinNodes < requestedMinNodes)
+ logger.log(Level.INFO, "Requested " + requestedMinNodes + " nodes for " + cluster +
+ ", downscaling to " + actualMinNodes + " nodes in " + zone.environment());
}
private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 282b0d96cf4..b12368b2834 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -10,7 +10,6 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.NodesAndHosts;
-import com.yahoo.vespa.hosted.provision.node.Nodes;
import java.util.ArrayList;
import java.util.List;
@@ -25,13 +24,11 @@ import java.util.stream.Collectors;
*/
class Preparer {
- private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
public Preparer(NodeRepository nodeRepository, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
- this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner);
}
@@ -69,9 +66,10 @@ class Preparer {
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
- GroupPreparer.PrepareResult result = groupPreparer.prepare(
- application, clusterGroup, requestedNodes.fraction(wantedGroups),
- surplusNodes, indices, wantedGroups, allNodesAndHosts);
+ GroupPreparer.PrepareResult result = groupPreparer.prepare(application, clusterGroup,
+ requestedNodes.fraction(wantedGroups),
+ surplusNodes, indices, wantedGroups,
+ allNodesAndHosts);
allNodesAndHosts = result.allNodesAndHosts; // Might have changed
List<Node> accepted = result.prepared;
if (requestedNodes.rejectNonActiveParent()) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 230278878b4..a04a3828f13 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -15,6 +15,7 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.Nodelike;
import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
@@ -51,10 +52,10 @@ public class AutoscalingTest {
tester.deploy(application1, cluster1, 5, 1, hostResources);
tester.clock().advance(Duration.ofDays(1));
- assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
+ assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1, capacity).isEmpty());
tester.addCpuMeasurements(0.25f, 1f, 59, application1);
- assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
+ assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1, capacity).isEmpty());
tester.clock().advance(Duration.ofDays(1));
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
@@ -62,10 +63,10 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high",
15, 1, 1.2, 28.6, 28.6,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.deploy(application1, cluster1, scaledResources);
- assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
+ assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1, capacity).isEmpty());
tester.deactivateRetired(application1, cluster1, scaledResources);
@@ -74,19 +75,19 @@ public class AutoscalingTest {
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
assertTrue("Load change is large, but insufficient measurements for new config -> No change",
- tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
+ tester.autoscale(application1, cluster1, capacity).isEmpty());
tester.addCpuMeasurements(0.19f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
- assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1.id(), capacity).target());
+ assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1, capacity).target());
tester.addCpuMeasurements(0.1f, 1f, 120, application1);
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling down to minimum since usage has gone down significantly",
7, 1, 1.0, 66.7, 66.7,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
var events = tester.nodeRepository().applications().get(application1).get().cluster(cluster1.id()).get().scalingEvents();
}
@@ -109,8 +110,8 @@ public class AutoscalingTest {
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
ClusterResources scaledResources = tester.assertResources("Scaling up since cpu usage is too high",
- 7, 1, 2.5, 80.0, 80.0,
- tester.autoscale(application1, cluster1.id(), capacity));
+ 7, 1, 2.5, 80.0, 50.5,
+ tester.autoscale(application1, cluster1, capacity));
tester.deploy(application1, cluster1, scaledResources);
tester.deactivateRetired(application1, cluster1, scaledResources);
@@ -119,8 +120,8 @@ public class AutoscalingTest {
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling down since cpu usage has gone down",
- 4, 1, 2.5, 68.6, 68.6,
- tester.autoscale(application1, cluster1.id(), capacity));
+ 4, 1, 2.5, 68.6, 27.4,
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -147,7 +148,7 @@ public class AutoscalingTest {
var capacity = Capacity.from(min, max);
ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high",
14, 1, 1.4, 30.8, 30.8,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
assertEquals("Disk speed from min/max is used",
NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
tester.deploy(application1, cluster1, scaledResources);
@@ -180,7 +181,7 @@ public class AutoscalingTest {
// Autoscaling: Uses disk-speed any as well
tester.clock().advance(Duration.ofDays(2));
tester.addCpuMeasurements(0.8f, 1f, 120, application1);
- Autoscaler.Advice advice = tester.autoscale(application1, cluster1.id(), capacity);
+ Autoscaler.Advice advice = tester.autoscale(application1, cluster1, capacity);
assertEquals(NodeResources.DiskSpeed.any, advice.target().get().nodeResources().diskSpeed());
@@ -204,8 +205,8 @@ public class AutoscalingTest {
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up to limit since resource usage is too high",
- 6, 1, 2.4, 78.0, 79.0,
- tester.autoscale(application1, cluster1.id(), capacity));
+ 6, 1, 2.4, 78.0, 70.0,
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -224,7 +225,7 @@ public class AutoscalingTest {
tester.addMeasurements(0.05f, 0.05f, 0.05f, 0, 120, application1);
tester.assertResources("Scaling down to limit since resource usage is low",
4, 1, 1.8, 7.7, 10.0,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -252,7 +253,7 @@ public class AutoscalingTest {
tester.assertResources("Scaling up to limit since resource usage is too high",
4, 1,
defaultResources.vcpu(), defaultResources.memoryGb(), defaultResources.diskGb(),
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -273,7 +274,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up since resource usage is too high",
6, 6, 3.6, 8.0, 10.0,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -291,7 +292,7 @@ public class AutoscalingTest {
tester.deploy(application1, cluster1, 5, 1, resources);
tester.clock().advance(Duration.ofDays(1));
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
- assertTrue(tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
+ assertTrue(tester.autoscale(application1, cluster1, capacity).isEmpty());
}
@Test
@@ -342,7 +343,7 @@ public class AutoscalingTest {
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up since resource usage is too high",
- 7, 1, 2.5, 80.0, 80.0,
+ 7, 1, 2.5, 80.0, 50.5,
tester.suggest(application1, cluster1.id(), min, max));
}
@@ -361,7 +362,7 @@ public class AutoscalingTest {
tester.deploy(application1, cluster1, 2, 1, resources);
tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, false, true, 120, application1);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
- tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
+ tester.autoscale(application1, cluster1, capacity).isEmpty());
}
@Test
@@ -379,7 +380,7 @@ public class AutoscalingTest {
tester.deploy(application1, cluster1, 2, 1, resources);
tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, true, false, 120, application1);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
- tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
+ tester.autoscale(application1, cluster1, capacity).isEmpty());
}
@Test
@@ -399,8 +400,8 @@ public class AutoscalingTest {
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up since resource usage is too high",
- 7, 7, 2.5, 80.0, 80.0,
- tester.autoscale(application1, cluster1.id(), capacity));
+ 7, 7, 2.5, 80.0, 50.5,
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -422,8 +423,8 @@ public class AutoscalingTest {
t -> t == 0 ? 20.0 : 10.0,
t -> 1.0);
tester.assertResources("Scaling up since resource usage is too high, changing to 1 group is cheaper",
- 8, 1, 2.6, 83.3, 83.3,
- tester.autoscale(application1, cluster1.id(), capacity));
+ 8, 1, 2.6, 83.3, 52.6,
+ tester.autoscale(application1, cluster1, capacity));
}
/** Same as above but mostly write traffic, which favors smaller groups */
@@ -446,8 +447,8 @@ public class AutoscalingTest {
t -> t == 0 ? 20.0 : 10.0,
t -> 100.0);
tester.assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
- 4, 1, 2.1, 83.3, 83.3,
- tester.autoscale(application1, cluster1.id(), capacity));
+ 4, 1, 2.1, 83.3, 52.6,
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -469,7 +470,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Increase group size to reduce memory load",
8, 2, 12.4, 96.2, 62.5,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -490,7 +491,7 @@ public class AutoscalingTest {
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
tester.assertResources("Scaling down",
6, 1, 2.9, 4.0, 95.0,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -510,7 +511,7 @@ public class AutoscalingTest {
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
- assertTrue(tester.autoscale(application1, cluster1.id(), capacity).target().isEmpty());
+ assertTrue(tester.autoscale(application1, cluster1, capacity).target().isEmpty());
// Trying the same later causes autoscaling
tester.clock().advance(Duration.ofDays(2));
@@ -519,7 +520,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling down",
6, 1, 1.4, 4.0, 95.0,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -530,7 +531,8 @@ public class AutoscalingTest {
var capacity = Capacity.from(min, max);
{ // No memory tax
- AutoscalingTester tester = new AutoscalingTester(Environment.prod, hostResources,
+ AutoscalingTester tester = new AutoscalingTester(new Zone(Environment.prod, RegionName.from("us-east")),
+ hostResources,
new OnlySubtractingWhenForecastingCalculator(0));
ApplicationId application1 = tester.applicationId("app1");
@@ -542,11 +544,12 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up",
4, 1, 6.7, 20.5, 200,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
{ // 15 Gb memory tax
- AutoscalingTester tester = new AutoscalingTester(Environment.prod, hostResources,
+ AutoscalingTester tester = new AutoscalingTester(new Zone(Environment.prod, RegionName.from("us-east")),
+ hostResources,
new OnlySubtractingWhenForecastingCalculator(15));
ApplicationId application1 = tester.applicationId("app1");
@@ -558,7 +561,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up",
4, 1, 6.7, 35.5, 200,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
}
@@ -589,7 +592,7 @@ public class AutoscalingTest {
tester.addMemMeasurements(0.9f, 0.6f, 120, application1);
ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high.",
8, 1, 3, 83, 34.3,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.deploy(application1, cluster1, scaledResources);
tester.deactivateRetired(application1, cluster1, scaledResources);
@@ -600,7 +603,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling down since resource usage has gone down",
5, 1, 3, 83, 36.0,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -622,17 +625,17 @@ public class AutoscalingTest {
// (no read share stored)
tester.assertResources("Advice to scale up since we set aside for bcp by default",
7, 1, 3, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.storeReadShare(0.25, 0.5, application1);
tester.assertResources("Half of global share is the same as the default assumption used above",
7, 1, 3, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.storeReadShare(0.5, 0.5, application1);
tester.assertResources("Advice to scale down since we don't need room for bcp",
4, 1, 3, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -656,7 +659,7 @@ public class AutoscalingTest {
// (no query rate data)
tester.assertResources("Scale up since we assume we need 2x cpu for growth when no data scaling time data",
5, 1, 6.3, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(5));
timeAdded = tester.addQueryRateMeasurements(application1, cluster1.id(),
@@ -666,7 +669,7 @@ public class AutoscalingTest {
tester.addCpuMeasurements(0.25f, 1f, 200, application1);
tester.assertResources("Scale down since observed growth is slower than scaling time",
5, 1, 3.4, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.clearQueryRateMeasurements(application1, cluster1.id());
@@ -678,7 +681,7 @@ public class AutoscalingTest {
tester.addCpuMeasurements(0.25f, 1f, 200, application1);
tester.assertResources("Scale up since observed growth is faster than scaling time",
5, 1, 10.0, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
@@ -705,63 +708,63 @@ public class AutoscalingTest {
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t -> 10.0);
tester.assertResources("Query and write load is equal -> scale up somewhat",
5, 1, 7.3, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.addCpuMeasurements(0.4f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-100 * 5));
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 80.0 : 40.0, t -> 10.0);
tester.assertResources("Query load is 4x write load -> scale up more",
5, 1, 9.5, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.addCpuMeasurements(0.3f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-100 * 5));
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
tester.assertResources("Write load is 10x query load -> scale down",
5, 1, 2.9, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.addCpuMeasurements(0.4f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-100 * 5));
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t-> 0.0);
tester.assertResources("Query only -> largest possible",
5, 1, 10.0, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
tester.addCpuMeasurements(0.4f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-100 * 5));
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> 0.0, t -> 10.0);
tester.assertResources("Write only -> smallest possible",
5, 1, 2.1, 100, 100,
- tester.autoscale(application1, cluster1.id(), capacity));
+ tester.autoscale(application1, cluster1, capacity));
}
@Test
- public void test_cd_autoscaling_test() {
+ public void test_autoscaling_in_dev() {
NodeResources resources = new NodeResources(1, 4, 50, 1);
- ClusterResources min = new ClusterResources( 2, 1, resources);
+ ClusterResources min = new ClusterResources( 1, 1, resources);
ClusterResources max = new ClusterResources(3, 1, resources);
- var capacity = Capacity.from(min, max);
- AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
+ Capacity capacity = Capacity.from(min, max, false, true);
+
+ AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
- tester.deploy(application1, cluster1, 2, 1, resources);
+ tester.deploy(application1, cluster1, capacity);
tester.addQueryRateMeasurements(application1, cluster1.id(),
- 500, t -> 0.0);
- tester.addCpuMeasurements(0.5f, 1f, 10, application1);
-
- tester.assertResources("Advice to scale up since observed growth is much faster than scaling time",
- 3, 1, 1, 4, 50,
- tester.autoscale(application1, cluster1.id(), capacity));
+ 500, t -> 100.0);
+ tester.addCpuMeasurements(1.0f, 1f, 10, application1);
+ assertTrue("Not attempting to scale up because policies dictate we'll only get one node",
+ tester.autoscale(application1, cluster1, capacity).target().isEmpty());
}
+ /** Same setup as test_autoscaling_in_dev(), just with required = true */
@Test
- public void test_autoscaling_in_dev() {
+ public void test_autoscaling_in_dev_with_required_resources() {
NodeResources resources = new NodeResources(1, 4, 50, 1);
ClusterResources min = new ClusterResources( 1, 1, resources);
ClusterResources max = new ClusterResources(3, 1, resources);
- Capacity capacity = Capacity.from(min, max, false, true);
+ Capacity capacity = Capacity.from(min, max, true, true);
AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -771,19 +774,20 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(),
500, t -> 100.0);
tester.addCpuMeasurements(1.0f, 1f, 10, application1);
- assertTrue("Not attempting to scale up because policies dictate we'll only get one node",
- tester.autoscale(application1, cluster1.id(), capacity).target().isEmpty());
+ tester.assertResources("We scale up even in dev because resources are required",
+ 3, 1, 1.0, 4, 50,
+ tester.autoscale(application1, cluster1, capacity));
}
- /** Same setup as test_autoscaling_in_dev(), just with required = true */
@Test
- public void test_autoscaling_in_dev_with_required_resources() {
- NodeResources resources = new NodeResources(1, 4, 50, 1);
+ public void test_autoscaling_in_dev_with_required_unspecified_resources() {
+ NodeResources resources = NodeResources.unspecified();
ClusterResources min = new ClusterResources( 1, 1, resources);
ClusterResources max = new ClusterResources(3, 1, resources);
Capacity capacity = Capacity.from(min, max, true, true);
- AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2));
+ AutoscalingTester tester = new AutoscalingTester(Environment.dev,
+ new NodeResources(10, 16, 100, 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
@@ -792,8 +796,8 @@ public class AutoscalingTest {
500, t -> 100.0);
tester.addCpuMeasurements(1.0f, 1f, 10, application1);
tester.assertResources("We scale up even in dev because resources are required",
- 3, 1, 1.0, 4, 50,
- tester.autoscale(application1, cluster1.id(), capacity));
+ 3, 1, 1.5, 8, 50,
+ tester.autoscale(application1, cluster1, capacity));
}
/**
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index 998a1e86c3e..8586704a426 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -24,6 +24,7 @@ import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
+import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
@@ -46,6 +47,7 @@ class AutoscalingTester {
private final ProvisioningTester provisioningTester;
private final Autoscaler autoscaler;
private final MockHostResourcesCalculator hostResourcesCalculator;
+ private final CapacityPolicies capacityPolicies;
/** Creates an autoscaling tester with a single host type ready */
public AutoscalingTester(NodeResources hostResources) {
@@ -53,11 +55,15 @@ class AutoscalingTester {
}
public AutoscalingTester(Environment environment, NodeResources hostResources) {
- this(environment, hostResources, null);
+ this(new Zone(environment, RegionName.from("us-east")), hostResources, null);
}
- public AutoscalingTester(Environment environment, NodeResources hostResources, HostResourcesCalculator resourcesCalculator) {
- this(new Zone(environment, RegionName.from("us-east")), List.of(new Flavor("hostFlavor", hostResources)), resourcesCalculator);
+ public AutoscalingTester(Zone zone, NodeResources hostResources) {
+ this(zone, hostResources, null);
+ }
+
+ public AutoscalingTester(Zone zone, NodeResources hostResources, HostResourcesCalculator resourcesCalculator) {
+ this(zone, List.of(new Flavor("hostFlavor", hostResources)), resourcesCalculator);
provisioningTester.makeReadyNodes(20, "hostFlavor", NodeType.host, 8);
provisioningTester.activateTenantHosts();
}
@@ -76,6 +82,7 @@ class AutoscalingTester {
hostResourcesCalculator = new MockHostResourcesCalculator(zone);
autoscaler = new Autoscaler(nodeRepository());
+ capacityPolicies = new CapacityPolicies(provisioningTester.nodeRepository());
}
public ProvisioningTester provisioning() { return provisioningTester; }
@@ -143,7 +150,7 @@ class AutoscalingTester {
for (Node node : nodes) {
Load load = new Load(value,
ClusterModel.idealMemoryLoad * otherResourcesLoad,
- ClusterModel.idealDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor);
+ ClusterModel.idealContentDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor);
nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
new NodeMetricSnapshot(clock().instant(),
load,
@@ -175,7 +182,7 @@ class AutoscalingTester {
clock().advance(Duration.ofSeconds(150));
for (Node node : nodes) {
Load load = new Load(ClusterModel.idealQueryCpuLoad * otherResourcesLoad,
- ClusterModel.idealDiskLoad * otherResourcesLoad,
+ ClusterModel.idealContentDiskLoad * otherResourcesLoad,
value).multiply(oneExtraNodeFactor);
nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
new NodeMetricSnapshot(clock().instant(),
@@ -208,10 +215,10 @@ class AutoscalingTester {
for (Node node : nodes) {
float cpu = (float) 0.2 * otherResourcesLoad * oneExtraNodeFactor;
float memory = value * oneExtraNodeFactor;
- float disk = (float) ClusterModel.idealDiskLoad * otherResourcesLoad * oneExtraNodeFactor;
+ float disk = (float) ClusterModel.idealContentDiskLoad * otherResourcesLoad * oneExtraNodeFactor;
Load load = new Load(0.2 * otherResourcesLoad,
value,
- ClusterModel.idealDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor);
+ ClusterModel.idealContentDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor);
nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
new NodeMetricSnapshot(clock().instant(),
load,
@@ -306,13 +313,14 @@ class AutoscalingTester {
((MemoryMetricsDb)nodeMetricsDb()).clearClusterMetrics(application, cluster);
}
- public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, Capacity capacity) {
+ public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
+ capacity = capacityPolicies.applyOn(capacity, applicationId);
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
- .withCluster(clusterId, false, capacity);
+ .withCluster(cluster.id(), false, capacity);
try (Mutex lock = nodeRepository().nodes().lock(applicationId)) {
nodeRepository().applications().put(application, lock);
}
- return autoscaler.autoscale(application, application.clusters().get(clusterId),
+ return autoscaler.autoscale(application, application.clusters().get(cluster.id()),
nodeRepository().nodes().list(Node.State.active).owner(applicationId));
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index bd7300ad6bf..516a7a92d04 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -30,19 +30,20 @@ public class ClusterModelTest {
public void test_traffic_headroom() {
ManualClock clock = new ManualClock();
Application application = Application.empty(ApplicationId.from("t1", "a1", "i1"));
+ ClusterSpec clusterSpec = clusterSpec();
Cluster cluster = cluster(new NodeResources(1, 10, 100, 1));
application = application.with(cluster);
// No current traffic share: Ideal load is low but capped
var model1 = new ClusterModel(application.with(new Status(0.0, 1.0)),
- cluster, clock, Duration.ofMinutes(10),
+ clusterSpec, cluster, clock, Duration.ofMinutes(10),
timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0, t -> 0.0, clock),
ClusterNodesTimeseries.empty());
assertEquals(0.131, model1.idealLoad().cpu(), delta);
// Almost no current traffic share: Ideal load is low but capped
var model2 = new ClusterModel(application.with(new Status(0.0001, 1.0)),
- cluster, clock, Duration.ofMinutes(10),
+ clusterSpec, cluster, clock, Duration.ofMinutes(10),
timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0, t -> 0.0, clock),
ClusterNodesTimeseries.empty());
assertEquals(0.131, model2.idealLoad().cpu(), delta);
@@ -53,24 +54,32 @@ public class ClusterModelTest {
ManualClock clock = new ManualClock();
Application application = Application.empty(ApplicationId.from("t1", "a1", "i1"));
+ ClusterSpec clusterSpec = clusterSpec();
Cluster cluster = cluster(new NodeResources(1, 10, 100, 1));
application = application.with(cluster);
// No current traffic: Ideal load is low but capped
var model1 = new ClusterModel(application,
- cluster, clock, Duration.ofMinutes(10),
+ clusterSpec, cluster, clock, Duration.ofMinutes(10),
timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0, t -> 0.0, clock),
ClusterNodesTimeseries.empty());
assertEquals(0.275, model1.idealLoad().cpu(), delta);
// Almost no current traffic: Ideal load is low but capped
var model2 = new ClusterModel(application.with(new Status(0.0001, 1.0)),
- cluster, clock, Duration.ofMinutes(10),
+ clusterSpec, cluster, clock, Duration.ofMinutes(10),
timeseries(cluster,100, t -> t == 0 ? 10000.0 : 0.0001, t -> 0.0, clock),
ClusterNodesTimeseries.empty());
assertEquals(0.040, model2.idealLoad().cpu(), delta);
}
+ private ClusterSpec clusterSpec() {
+ return ClusterSpec.specification(ClusterSpec.Type.content, ClusterSpec.Id.from("test"))
+ .group(ClusterSpec.Group.from(0))
+ .vespaVersion("7.1.1")
+ .build();
+ }
+
private Cluster cluster(NodeResources resources) {
return Cluster.create(ClusterSpec.Id.from("test"),
false,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
index 6a5b45db8ff..d9037181f59 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java
@@ -5,8 +5,13 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
@@ -186,7 +191,6 @@ public class AutoscalingMaintainerTest {
var tester = new AutoscalingMaintainerTester(new MockDeployer.ApplicationContext(app1, cluster1, app1Capacity));
ManualClock clock = tester.clock();
- // deploy
tester.deploy(app1, cluster1, app1Capacity);
autoscale(false, Duration.ofMinutes( 1), Duration.ofMinutes( 5), clock, app1, cluster1, tester);
@@ -222,6 +226,49 @@ public class AutoscalingMaintainerTest {
tester.cluster(app1, cluster1).lastScalingEvent().get().generation());
}
+ @Test
+ public void test_cd_autoscaling_test() {
+ ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1");
+ ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec();
+ NodeResources resources = new NodeResources(1, 4, 50, 1);
+ ClusterResources min = new ClusterResources( 2, 1, resources);
+ ClusterResources max = new ClusterResources(3, 1, resources);
+ var capacity = Capacity.from(min, max);
+ var tester = new AutoscalingMaintainerTester(new Zone(SystemName.cd, Environment.prod, RegionName.from("us-east3")),
+ new MockDeployer.ApplicationContext(app1, cluster1, capacity));
+ ManualClock clock = tester.clock();
+
+ tester.deploy(app1, cluster1, capacity);
+ assertEquals(2,
+ tester.nodeRepository().nodes().list(Node.State.active)
+ .owner(app1)
+ .cluster(cluster1.id())
+ .size());
+
+ autoscale(false, Duration.ofMinutes( 1), Duration.ofMinutes( 5), clock, app1, cluster1, tester);
+ assertEquals(3,
+ tester.nodeRepository().nodes().list(Node.State.active)
+ .owner(app1)
+ .cluster(cluster1.id())
+ .size());
+ }
+
+ @Test
+ public void test_cd_test_not_specifying_node_resources() {
+ ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1");
+ ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec();
+ ClusterResources resources = new ClusterResources( 2, 1, NodeResources.unspecified());
+ var capacity = Capacity.from(resources);
+ var tester = new AutoscalingMaintainerTester(new Zone(SystemName.cd, Environment.prod, RegionName.from("us-east3")),
+ new MockDeployer.ApplicationContext(app1, cluster1, capacity));
+ tester.deploy(app1, cluster1, capacity); // Deploy should succeed and allocate the nodes
+ assertEquals(2,
+ tester.nodeRepository().nodes().list(Node.State.active)
+ .owner(app1)
+ .cluster(cluster1.id())
+ .size());
+ }
+
private void autoscale(boolean down, Duration completionTime, Duration expectedWindow,
ManualClock clock, ApplicationId application, ClusterSpec cluster,
AutoscalingMaintainerTester tester) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
index a47fb983d21..e1a1a2af5fb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
@@ -42,9 +42,11 @@ public class AutoscalingMaintainerTester {
private final MockDeployer deployer;
public AutoscalingMaintainerTester(MockDeployer.ApplicationContext ... appContexts) {
- provisioningTester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east3")))
- .flavorsConfig(flavorsConfig())
- .build();
+ this(new Zone(Environment.prod, RegionName.from("us-east3")), appContexts);
+ }
+
+ public AutoscalingMaintainerTester(Zone zone, MockDeployer.ApplicationContext ... appContexts) {
+ provisioningTester = new ProvisioningTester.Builder().zone(zone).flavorsConfig(flavorsConfig()).build();
provisioningTester.clock().setInstant(Instant.ofEpochMilli(0));
Map<ApplicationId, MockDeployer.ApplicationContext> apps = Arrays.stream(appContexts)
.collect(Collectors.toMap(c -> c.id(), c -> c));
@@ -105,7 +107,7 @@ public class AutoscalingMaintainerTester {
private FlavorsConfig flavorsConfig() {
FlavorConfigBuilder b = new FlavorConfigBuilder();
- b.addFlavor("flt", 30, 30, 40, 3, Flavor.Type.BARE_METAL);
+ b.addFlavor("flt", 30, 30, 50, 3, Flavor.Type.BARE_METAL);
b.addFlavor("cpu", 40, 20, 40, 3, Flavor.Type.BARE_METAL);
b.addFlavor("mem", 20, 40, 40, 3, Flavor.Type.BARE_METAL);
return b.build();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
index 316655e11fb..7ce26354739 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
@@ -458,7 +458,7 @@ public class DynamicProvisioningMaintainerTest {
// Provision config servers
for (int i = 0; i < provisionedHosts.size(); i++) {
- tester.makeReadyChildren(1, i + 1, NodeResources.unspecified(), hostType.childNodeType(),
+ tester.makeReadyChildren(1, i + 1, new NodeResources(1.5, 8, 50, 0.3), hostType.childNodeType(),
provisionedHosts.get(i).hostname(), (nodeIndex) -> "cfg" + nodeIndex);
}
tester.prepareAndActivateInfraApplication(configSrvApp, hostType.childNodeType());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 4c0395a0c7e..b51f4403756 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -70,7 +70,7 @@ public class ScalingSuggestionsMaintainerTest {
new TestMetric());
maintainer.maintain();
- assertEquals("11 nodes with [vcpu: 6.5, memory: 5.5 Gb, disk 15.0 Gb, bandwidth: 0.1 Gbps]",
+ assertEquals("12 nodes with [vcpu: 6.0, memory: 5.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps]",
suggestionOf(app1, cluster1, tester).get().resources().toString());
assertEquals("8 nodes with [vcpu: 11.0, memory: 4.4 Gb, disk 11.8 Gb, bandwidth: 0.1 Gbps]",
suggestionOf(app2, cluster2, tester).get().resources().toString());
@@ -80,7 +80,7 @@ public class ScalingSuggestionsMaintainerTest {
addMeasurements(0.10f, 0.10f, 0.10f, 0, 500, app1, tester.nodeRepository());
maintainer.maintain();
assertEquals("Suggestion stays at the peak value observed",
- "11 nodes with [vcpu: 6.5, memory: 5.5 Gb, disk 15.0 Gb, bandwidth: 0.1 Gbps]",
+ "12 nodes with [vcpu: 6.0, memory: 5.5 Gb, disk 10.0 Gb, bandwidth: 0.1 Gbps]",
suggestionOf(app1, cluster1, tester).get().resources().toString());
// Utilization is still way down and a week has passed
tester.clock().advance(Duration.ofDays(7));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index db165aae919..95f25612dd7 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -523,7 +523,7 @@ public class ProvisioningTest {
ApplicationId application = ProvisioningTester.applicationId();
tester.makeReadyHosts(10, defaultResources).activateTenantHosts();
- prepare(application, 1, 2, 3, 3, defaultResources, tester);
+ prepare(application, 1, 1, 1, 1, defaultResources, tester);
}
@Test
@@ -1015,10 +1015,10 @@ public class ProvisioningTest {
allHosts.addAll(content1);
Function<Integer, Capacity> capacity = count -> Capacity.from(new ClusterResources(count, 1, NodeResources.unspecified()), required, true);
- int expectedContainer0Size = tester.decideSize(container0Size, capacity.apply(container0Size), containerCluster0, application);
- int expectedContainer1Size = tester.decideSize(container1Size, capacity.apply(container1Size), containerCluster1, application);
- int expectedContent0Size = tester.decideSize(content0Size, capacity.apply(content0Size), contentCluster0, application);
- int expectedContent1Size = tester.decideSize(content1Size, capacity.apply(content1Size), contentCluster1, application);
+ int expectedContainer0Size = tester.decideSize(capacity.apply(container0Size), application);
+ int expectedContainer1Size = tester.decideSize(capacity.apply(container1Size), application);
+ int expectedContent0Size = tester.decideSize(capacity.apply(content0Size), application);
+ int expectedContent1Size = tester.decideSize(capacity.apply(content1Size), application);
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 6ca93671087..c478840780f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -152,8 +152,8 @@ public class ProvisioningTester {
public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); }
public InMemoryFlagSource flagSource() { return (InMemoryFlagSource) nodeRepository.flagSource(); }
- public int decideSize(int size, Capacity capacity, ClusterSpec cluster, ApplicationId application) {
- return capacityPolicies.decideSize(size, capacity.isRequired(), capacity.canFail(), application.instance().isTester(), cluster);
+ public int decideSize(Capacity capacity, ApplicationId application) {
+ return capacityPolicies.applyOn(capacity, application).minResources().nodes();
}
public Node patchNode(Node node, UnaryOperator<Node> patcher) {
@@ -493,6 +493,7 @@ public class ProvisioningTester {
public List<Node> makeReadyNodes(int n, Flavor flavor, Optional<TenantName> reservedTo, NodeType type, int ipAddressPoolSize, boolean dualStack) {
List<Node> nodes = makeProvisionedNodes(n, flavor, reservedTo, type, ipAddressPoolSize, dualStack);
nodes = nodeRepository.nodes().deallocate(nodes, Agent.system, getClass().getSimpleName());
+ nodes.forEach(node -> { if (node.resources().isUnspecified()) throw new IllegalArgumentException(); });
return nodeRepository.nodes().setReady(nodes, Agent.system, getClass().getSimpleName());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
index 689b6f3816b..fcdcdf1a8ca 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
@@ -72,7 +72,7 @@
"idealMemory": 0.65,
"currentMemory": 0.0,
"disk" : 0.0,
- "idealDisk": 0.6,
+ "idealDisk": 0.95,
"currentDisk": 0.0
},
"scalingEvents" : [
diff --git a/pom.xml b/pom.xml
index 9cf3630e463..8dbdc6f6732 100644
--- a/pom.xml
+++ b/pom.xml
@@ -133,6 +133,7 @@
<module>vespa-athenz</module>
<module>vespa-documentgen-plugin</module>
<module>vespa-feed-client</module>
+ <module>vespa-feed-client-api</module>
<module>vespa-feed-client-cli</module>
<module>vespa-hadoop</module>
<module>vespa-http-client</module>
diff --git a/screwdriver.yaml b/screwdriver.yaml
index 6ea74ae8a1d..14d9902d335 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -60,7 +60,7 @@ jobs:
environment:
LOCAL_MVN_REPO: "/tmp/vespa/mvnrepo"
- VESPA_MAVEN_EXTRA_OPTS: "-Dmaven.repo.local=/tmp/vespa/mvnrepo -Dmaven.javadoc.skip=true -Dmaven.source.skip=true"
+ VESPA_MAVEN_EXTRA_OPTS: "-Dmaven.repo.local=/tmp/vespa/mvnrepo -Dmaven.source.skip=true"
CCACHE_TMP_DIR: "/tmp/ccache_tmp"
CCACHE_DATA_DIR: "/tmp/vespa/ccache"
MAIN_CACHE_FILE: "/main_job_cache/vespa.tar"
diff --git a/searchcommon/src/vespa/searchcommon/attribute/config.h b/searchcommon/src/vespa/searchcommon/attribute/config.h
index e6a428e5843..f572f5038fc 100644
--- a/searchcommon/src/vespa/searchcommon/attribute/config.h
+++ b/searchcommon/src/vespa/searchcommon/attribute/config.h
@@ -6,10 +6,10 @@
#include "collectiontype.h"
#include "hnsw_index_params.h"
#include "predicate_params.h"
-#include <vespa/searchcommon/common/compaction_strategy.h>
#include <vespa/searchcommon/common/growstrategy.h>
#include <vespa/searchcommon/common/dictionary_config.h>
#include <vespa/eval/eval/value_type.h>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <cassert>
#include <optional>
@@ -23,6 +23,7 @@ namespace search::attribute {
class Config {
public:
enum class Match { CASED, UNCASED };
+ using CompactionStrategy = vespalib::datastore::CompactionStrategy;
Config() noexcept;
Config(BasicType bt) noexcept : Config(bt, CollectionType::SINGLE) { }
Config(BasicType bt, CollectionType ct) noexcept : Config(bt, ct, false) { }
diff --git a/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt b/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt
index 77e638d7193..6cc02ae7884 100644
--- a/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt
+++ b/searchcommon/src/vespa/searchcommon/common/CMakeLists.txt
@@ -1,7 +1,6 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(searchcommon_searchcommon_common OBJECT
SOURCES
- compaction_strategy.cpp
datatype.cpp
dictionary_config.cpp
growstrategy.cpp
diff --git a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp b/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp
deleted file mode 100644
index 22f50ba3049..00000000000
--- a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "compaction_strategy.h"
-#include <iostream>
-namespace search {
-
-std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy)
-{
- os << "{maxDeadBytesRatio=" << compaction_strategy.getMaxDeadBytesRatio() <<
- ", maxDeadAddressSpaceRatio=" << compaction_strategy.getMaxDeadAddressSpaceRatio() <<
- "}";
- return os;
-}
-
-}
diff --git a/searchcore/src/tests/proton/attribute/attribute_test.cpp b/searchcore/src/tests/proton/attribute/attribute_test.cpp
index 3397b424ed0..1de56802484 100644
--- a/searchcore/src/tests/proton/attribute/attribute_test.cpp
+++ b/searchcore/src/tests/proton/attribute/attribute_test.cpp
@@ -82,6 +82,7 @@ using std::string;
using vespalib::ForegroundTaskExecutor;
using vespalib::ForegroundThreadExecutor;
using vespalib::SequencedTaskExecutorObserver;
+using vespalib::datastore::CompactionStrategy;
using vespalib::eval::SimpleValue;
using vespalib::eval::TensorSpec;
using vespalib::eval::Value;
@@ -541,7 +542,7 @@ public:
AttributeCollectionSpecFactory _factory;
AttributeCollectionSpecTest(bool fastAccessOnly)
: _builder(),
- _factory(AllocStrategy(search::GrowStrategy(), search::CompactionStrategy(), 100), fastAccessOnly)
+ _factory(AllocStrategy(search::GrowStrategy(), CompactionStrategy(), 100), fastAccessOnly)
{
addAttribute("a1", false);
addAttribute("a2", true);
diff --git a/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp b/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp
index 1483a0bd653..59503464222 100644
--- a/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp
+++ b/searchcore/src/tests/proton/common/alloc_config/alloc_config_test.cpp
@@ -7,8 +7,8 @@
using proton::AllocConfig;
using proton::AllocStrategy;
using proton::SubDbType;
-using search::CompactionStrategy;
using search::GrowStrategy;
+using vespalib::datastore::CompactionStrategy;
namespace {
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
index 7f28ccd0737..1851455e321 100644
--- a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
@@ -60,6 +60,7 @@ using storage::spi::Timestamp;
using vespa::config::search::core::ProtonConfig;
using vespa::config::content::core::BucketspacesConfig;
using vespalib::mkdir;
+using vespalib::datastore::CompactionStrategy;
using proton::index::IndexConfig;
typedef StoreOnlyDocSubDB::Config StoreOnlyConfig;
@@ -564,7 +565,7 @@ TEST_F("require that attribute manager can be reconfigured", SearchableFixture)
TEST_F("require that subdb reflect retirement", FastAccessFixture)
{
- search::CompactionStrategy cfg(0.1, 0.3);
+ CompactionStrategy cfg(0.1, 0.3);
EXPECT_FALSE(f._subDb.isNodeRetired());
auto unretired_cfg = f._subDb.computeCompactionStrategy(cfg);
@@ -576,7 +577,7 @@ TEST_F("require that subdb reflect retirement", FastAccessFixture)
EXPECT_TRUE(f._subDb.isNodeRetired());
auto retired_cfg = f._subDb.computeCompactionStrategy(cfg);
EXPECT_TRUE(cfg != retired_cfg);
- EXPECT_TRUE(search::CompactionStrategy(0.5, 0.5) == retired_cfg);
+ EXPECT_TRUE(CompactionStrategy(0.5, 0.5) == retired_cfg);
calc->setNodeRetired(false);
f.setBucketStateCalculator(calc);
@@ -586,8 +587,8 @@ TEST_F("require that subdb reflect retirement", FastAccessFixture)
}
TEST_F("require that attribute compaction config reflect retirement", FastAccessFixture) {
- search::CompactionStrategy default_cfg(0.05, 0.2);
- search::CompactionStrategy retired_cfg(0.5, 0.5);
+ CompactionStrategy default_cfg(0.05, 0.2);
+ CompactionStrategy retired_cfg(0.5, 0.5);
auto guard = f._subDb.getAttributeManager()->getAttribute("attr1");
EXPECT_EQUAL(default_cfg, (*guard)->getConfig().getCompactionStrategy());
@@ -702,29 +703,31 @@ assertTarget(const vespalib::string &name,
TEST_F("require that flush targets can be retrieved", FastAccessFixture)
{
IFlushTarget::List targets = getFlushTargets(f);
- EXPECT_EQUAL(7u, targets.size());
+ EXPECT_EQUAL(8u, targets.size());
EXPECT_EQUAL("subdb.attribute.flush.attr1", targets[0]->getName());
EXPECT_EQUAL("subdb.attribute.shrink.attr1", targets[1]->getName());
EXPECT_EQUAL("subdb.documentmetastore.flush", targets[2]->getName());
EXPECT_EQUAL("subdb.documentmetastore.shrink", targets[3]->getName());
- EXPECT_EQUAL("subdb.summary.compact", targets[4]->getName());
- EXPECT_EQUAL("subdb.summary.flush", targets[5]->getName());
- EXPECT_EQUAL("subdb.summary.shrink", targets[6]->getName());
+ EXPECT_EQUAL("subdb.summary.compact_bloat", targets[4]->getName());
+ EXPECT_EQUAL("subdb.summary.compact_spread", targets[5]->getName());
+ EXPECT_EQUAL("subdb.summary.flush", targets[6]->getName());
+ EXPECT_EQUAL("subdb.summary.shrink", targets[7]->getName());
}
TEST_F("require that flush targets can be retrieved", SearchableFixture)
{
IFlushTarget::List targets = getFlushTargets(f);
- EXPECT_EQUAL(9u, targets.size());
+ EXPECT_EQUAL(10u, targets.size());
EXPECT_TRUE(assertTarget("subdb.attribute.flush.attr1", FType::SYNC, FComponent::ATTRIBUTE, *targets[0]));
EXPECT_TRUE(assertTarget("subdb.attribute.shrink.attr1", FType::GC, FComponent::ATTRIBUTE, *targets[1]));
EXPECT_TRUE(assertTarget("subdb.documentmetastore.flush", FType::SYNC, FComponent::ATTRIBUTE, *targets[2]));
EXPECT_TRUE(assertTarget("subdb.documentmetastore.shrink", FType::GC, FComponent::ATTRIBUTE, *targets[3]));
EXPECT_TRUE(assertTarget("subdb.memoryindex.flush", FType::FLUSH, FComponent::INDEX, *targets[4]));
EXPECT_TRUE(assertTarget("subdb.memoryindex.fusion", FType::GC, FComponent::INDEX, *targets[5]));
- EXPECT_TRUE(assertTarget("subdb.summary.compact", FType::GC, FComponent::DOCUMENT_STORE, *targets[6]));
- EXPECT_TRUE(assertTarget("subdb.summary.flush", FType::SYNC, FComponent::DOCUMENT_STORE, *targets[7]));
- EXPECT_TRUE(assertTarget("subdb.summary.shrink", FType::GC, FComponent::DOCUMENT_STORE, *targets[8]));
+ EXPECT_TRUE(assertTarget("subdb.summary.compact_bloat", FType::GC, FComponent::DOCUMENT_STORE, *targets[6]));
+ EXPECT_TRUE(assertTarget("subdb.summary.compact_spread", FType::GC, FComponent::DOCUMENT_STORE, *targets[7]));
+ EXPECT_TRUE(assertTarget("subdb.summary.flush", FType::SYNC, FComponent::DOCUMENT_STORE, *targets[8]));
+ EXPECT_TRUE(assertTarget("subdb.summary.shrink", FType::GC, FComponent::DOCUMENT_STORE, *targets[9]));
}
TEST_F("require that only fast-access attributes are instantiated", FastAccessOnlyFixture)
diff --git a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h
index 42976104836..8e1b23eba67 100644
--- a/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h
+++ b/searchcore/src/tests/proton/documentdb/lid_space_compaction/lid_space_common.h
@@ -15,6 +15,7 @@
#include <vespa/searchcore/proton/test/clusterstatehandler.h>
#include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h>
#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/searchcore/proton/test/dummy_document_store.h>
#include <vespa/vespalib/util/idestructorcallback.h>
#include <vespa/searchlib/index/docbuilder.h>
diff --git a/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp b/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp
index 9a02331787e..6d3eaa30263 100644
--- a/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp
+++ b/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp
@@ -45,7 +45,7 @@ using search::TuneFileDocumentDB;
using std::map;
using vespalib::VarHolder;
using search::GrowStrategy;
-using search::CompactionStrategy;
+using vespalib::datastore::CompactionStrategy;
struct DoctypeFixture {
using UP = std::unique_ptr<DoctypeFixture>;
diff --git a/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp b/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp
index 3474a4297c7..516c31cb232 100644
--- a/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp
+++ b/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp
@@ -30,6 +30,10 @@ vespalib::string doc1("id:test:music::1");
}
+TEST("control sizeof(PendingGidToLidChange)") {
+ EXPECT_EQUAL(48u, sizeof(PendingGidToLidChange));
+}
+
class ListenerStats {
using lock_guard = std::lock_guard<std::mutex>;
std::mutex _lock;
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp
index 5c695f7b0f2..66be0737fe9 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_initializer.cpp
@@ -119,7 +119,7 @@ extractHeader(const vespalib::string &attrFileName)
auto df = search::FileUtil::openFile(attrFileName + ".dat");
vespalib::FileHeader datHeader;
datHeader.readFile(*df);
- return AttributeHeader::extractTags(datHeader);
+ return AttributeHeader::extractTags(datHeader, attrFileName);
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp
index 1c730b063f8..59804517d26 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_manager_initializer.cpp
@@ -7,10 +7,10 @@
#include <future>
using search::AttributeVector;
-using search::CompactionStrategy;
using search::GrowStrategy;
using search::SerialNum;
using vespa::config::search::AttributesConfig;
+using vespalib::datastore::CompactionStrategy;
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp b/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp
index 3664f67f8fb..69a2d4f3ea9 100644
--- a/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp
+++ b/searchcore/src/vespa/searchcore/proton/common/alloc_config.cpp
@@ -4,8 +4,8 @@
#include <vespa/searchcore/proton/common/subdbtype.h>
#include <algorithm>
-using search::CompactionStrategy;
using search::GrowStrategy;
+using vespalib::datastore::CompactionStrategy;
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp
index cbe8309b031..32ac249f7e1 100644
--- a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp
+++ b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.cpp
@@ -3,7 +3,6 @@
#include "alloc_strategy.h"
#include <iostream>
-using search::CompactionStrategy;
using search::GrowStrategy;
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h
index 4771a8637cd..9c6e24e2bfe 100644
--- a/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h
+++ b/searchcore/src/vespa/searchcore/proton/common/alloc_strategy.h
@@ -2,8 +2,8 @@
#pragma once
-#include <vespa/searchcommon/common/compaction_strategy.h>
#include <vespa/searchcommon/common/growstrategy.h>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <iosfwd>
namespace proton {
@@ -14,14 +14,16 @@ namespace proton {
*/
class AllocStrategy
{
+public:
+ using CompactionStrategy = vespalib::datastore::CompactionStrategy;
protected:
const search::GrowStrategy _grow_strategy;
- const search::CompactionStrategy _compaction_strategy;
+ const CompactionStrategy _compaction_strategy;
const uint32_t _amortize_count;
public:
AllocStrategy(const search::GrowStrategy& grow_strategy,
- const search::CompactionStrategy& compaction_strategy,
+ const CompactionStrategy& compaction_strategy,
uint32_t amortize_count);
AllocStrategy();
@@ -32,7 +34,7 @@ public:
return !operator==(rhs);
}
const search::GrowStrategy& get_grow_strategy() const noexcept { return _grow_strategy; }
- const search::CompactionStrategy& get_compaction_strategy() const noexcept { return _compaction_strategy; }
+ const CompactionStrategy& get_compaction_strategy() const noexcept { return _compaction_strategy; }
uint32_t get_amortize_count() const noexcept { return _amortize_count; }
};
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp
index 4e0cf3f9059..06bf8d0a8a6 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp
@@ -9,21 +9,26 @@ using search::SerialNum;
using vespalib::makeLambdaTask;
using searchcorespi::FlushStats;
using searchcorespi::IFlushTarget;
+using searchcorespi::FlushTask;
namespace proton {
namespace {
-class Compacter : public searchcorespi::FlushTask {
+class Compacter : public FlushTask {
private:
IDocumentStore & _docStore;
FlushStats & _stats;
SerialNum _currSerial;
+ virtual void compact(IDocumentStore & docStore, SerialNum currSerial) const = 0;
public:
- Compacter(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) :
- _docStore(docStore), _stats(stats), _currSerial(currSerial) {}
+ Compacter(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial)
+ : _docStore(docStore),
+ _stats(stats),
+ _currSerial(currSerial)
+ {}
void run() override {
- _docStore.compact(_currSerial);
+ compact(_docStore, _currSerial);
updateStats();
}
void updateStats() {
@@ -36,10 +41,32 @@ public:
}
};
+class CompactBloat : public Compacter {
+public:
+ CompactBloat(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial)
+ : Compacter(docStore, stats, currSerial)
+ {}
+private:
+ void compact(IDocumentStore & docStore, SerialNum currSerial) const override {
+ docStore.compactBloat(currSerial);
+ }
+};
+
+class CompactSpread : public Compacter {
+public:
+ CompactSpread(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial)
+ : Compacter(docStore, stats, currSerial)
+ {}
+private:
+ void compact(IDocumentStore & docStore, SerialNum currSerial) const override {
+ docStore.compactSpread(currSerial);
+ }
+};
+
}
-SummaryCompactTarget::SummaryCompactTarget(vespalib::Executor & summaryService, IDocumentStore & docStore)
- : IFlushTarget("summary.compact", Type::GC, Component::DOCUMENT_STORE),
+SummaryGCTarget::SummaryGCTarget(const vespalib::string & name, vespalib::Executor & summaryService, IDocumentStore & docStore)
+ : IFlushTarget(name, Type::GC, Component::DOCUMENT_STORE),
_summaryService(summaryService),
_docStore(docStore),
_lastStats()
@@ -48,37 +75,69 @@ SummaryCompactTarget::SummaryCompactTarget(vespalib::Executor & summaryService,
}
IFlushTarget::MemoryGain
-SummaryCompactTarget::getApproxMemoryGain() const
+SummaryGCTarget::getApproxMemoryGain() const
{
return MemoryGain::noGain(_docStore.memoryUsed());
}
IFlushTarget::DiskGain
-SummaryCompactTarget::getApproxDiskGain() const
+SummaryGCTarget::getApproxDiskGain() const
{
size_t total(_docStore.getDiskFootprint());
- return DiskGain(total, total - std::min(total, _docStore.getMaxCompactGain()));
+ return DiskGain(total, total - std::min(total, getBloat(_docStore)));
}
IFlushTarget::Time
-SummaryCompactTarget::getLastFlushTime() const
+SummaryGCTarget::getLastFlushTime() const
{
return vespalib::system_clock::now();
}
SerialNum
-SummaryCompactTarget::getFlushedSerialNum() const
+SummaryGCTarget::getFlushedSerialNum() const
{
return _docStore.tentativeLastSyncToken();
}
IFlushTarget::Task::UP
-SummaryCompactTarget::initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken>)
+SummaryGCTarget::initFlush(SerialNum currentSerial, std::shared_ptr<search::IFlushToken>)
{
std::promise<Task::UP> promise;
std::future<Task::UP> future = promise.get_future();
- _summaryService.execute(makeLambdaTask([&]() { promise.set_value(std::make_unique<Compacter>(_docStore, _lastStats, currentSerial)); }));
+ _summaryService.execute(makeLambdaTask([this, &promise,currentSerial]() {
+ promise.set_value(create(_docStore, _lastStats, currentSerial));
+ }));
return future.get();
}
+SummaryCompactBloatTarget::SummaryCompactBloatTarget(vespalib::Executor & summaryService, IDocumentStore & docStore)
+ : SummaryGCTarget("summary.compact_bloat", summaryService, docStore)
+{
+}
+
+size_t
+SummaryCompactBloatTarget::getBloat(const search::IDocumentStore & docStore) const {
+ return docStore.getDiskBloat();
+}
+
+FlushTask::UP
+SummaryCompactBloatTarget::create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) {
+ return std::make_unique<CompactBloat>(docStore, stats, currSerial);
+}
+
+SummaryCompactSpreadTarget::SummaryCompactSpreadTarget(vespalib::Executor & summaryService, IDocumentStore & docStore)
+ : SummaryGCTarget("summary.compact_spread", summaryService, docStore)
+{
+}
+
+size_t
+SummaryCompactSpreadTarget::getBloat(const search::IDocumentStore & docStore) const {
+ return docStore.getMaxSpreadAsBloat();
+}
+
+FlushTask::UP
+SummaryCompactSpreadTarget::create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) {
+ return std::make_unique<CompactSpread>(docStore, stats, currSerial);
+}
+
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h
index c8035a544f2..083f763d8e6 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.h
@@ -12,16 +12,10 @@ namespace proton {
/**
* This class implements the IFlushTarget interface to proxy a summary manager.
*/
-class SummaryCompactTarget : public searchcorespi::IFlushTarget {
-private:
- using FlushStats = searchcorespi::FlushStats;
- vespalib::Executor &_summaryService;
- search::IDocumentStore & _docStore;
- FlushStats _lastStats;
-
+class SummaryGCTarget : public searchcorespi::IFlushTarget {
public:
- SummaryCompactTarget(vespalib::Executor & summaryService, search::IDocumentStore & docStore);
-
+ using FlushStats = searchcorespi::FlushStats;
+ using IDocumentStore = search::IDocumentStore;
MemoryGain getApproxMemoryGain() const override;
DiskGain getApproxDiskGain() const override;
SerialNum getFlushedSerialNum() const override;
@@ -31,6 +25,39 @@ public:
FlushStats getLastFlushStats() const override { return _lastStats; }
uint64_t getApproxBytesToWriteToDisk() const override { return 0; }
+protected:
+ SummaryGCTarget(const vespalib::string &, vespalib::Executor & summaryService, IDocumentStore & docStore);
+private:
+
+ virtual size_t getBloat(const IDocumentStore & docStore) const = 0;
+ virtual Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) = 0;
+
+ vespalib::Executor &_summaryService;
+ IDocumentStore & _docStore;
+ FlushStats _lastStats;
+};
+
+/**
+ * Implements target to compact away removed documents. Wasted disk space is cost factor used for prioritizing.
+ */
+class SummaryCompactBloatTarget : public SummaryGCTarget {
+private:
+ size_t getBloat(const search::IDocumentStore & docStore) const override;
+ Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) override;
+public:
+ SummaryCompactBloatTarget(vespalib::Executor & summaryService, IDocumentStore & docStore);
+};
+
+/**
+ * Target to ensure bucket spread is kept low. The cost is reported as a potential gain in disk space as
+ * we do not have a concept for bucket spread.
+ */
+class SummaryCompactSpreadTarget : public SummaryGCTarget {
+private:
+ size_t getBloat(const search::IDocumentStore & docStore) const override;
+ Task::UP create(IDocumentStore & docStore, FlushStats & stats, SerialNum currSerial) override;
+public:
+ SummaryCompactSpreadTarget(vespalib::Executor & summaryService, IDocumentStore & docStore);
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp
index eaf5a907808..28a91e1444d 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp
@@ -200,7 +200,8 @@ SummaryManager::getFlushTargets(vespalib::Executor & summaryService)
IFlushTarget::List ret;
ret.push_back(std::make_shared<SummaryFlushTarget>(getBackingStore(), summaryService));
if (dynamic_cast<LogDocumentStore *>(_docStore.get()) != nullptr) {
- ret.push_back(std::make_shared<SummaryCompactTarget>(summaryService, getBackingStore()));
+ ret.push_back(std::make_shared<SummaryCompactBloatTarget>(summaryService, getBackingStore()));
+ ret.push_back(std::make_shared<SummaryCompactSpreadTarget>(summaryService, getBackingStore()));
}
ret.push_back(createShrinkLidSpaceFlushTarget(summaryService, _docStore));
return ret;
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp
index 3170654409b..641108ea46b 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp
@@ -198,9 +198,7 @@ DocumentMetaStore::consider_compact_gid_to_lid_map()
return false;
}
auto &compaction_strategy = getConfig().getCompactionStrategy();
- size_t used_bytes = _cached_gid_to_lid_map_memory_usage.usedBytes();
- size_t dead_bytes = _cached_gid_to_lid_map_memory_usage.deadBytes();
- return compaction_strategy.should_compact_memory(used_bytes, dead_bytes);
+ return compaction_strategy.should_compact_memory(_cached_gid_to_lid_map_memory_usage);
}
void
@@ -209,7 +207,7 @@ DocumentMetaStore::onCommit()
if (consider_compact_gid_to_lid_map()) {
incGeneration();
_changesSinceCommit = 0;
- _gidToLidMap.compact_worst();
+ _gidToLidMap.compact_worst(getConfig().getCompactionStrategy());
_gid_to_lid_map_write_itr_prepare_serial_num = 0u;
_gid_to_lid_map_write_itr.begin(_gidToLidMap.getRoot());
incGeneration();
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
index a9873a80d0e..2eb6b1b92f0 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
@@ -48,6 +48,7 @@ using search::DocumentStore;
using search::WriteableFileChunk;
using std::make_shared;
using std::make_unique;
+using vespalib::datastore::CompactionStrategy;
using vespalib::make_string_short::fmt;
@@ -197,7 +198,7 @@ getStoreConfig(const ProtonConfig::Summary::Cache & cache, const HwInfo & hwInfo
}
LogDocumentStore::Config
-deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::Memory & flush, const HwInfo & hwInfo) {
+deriveConfig(const ProtonConfig::Summary & summary, const HwInfo & hwInfo) {
DocumentStore::Config config(getStoreConfig(summary.cache, hwInfo));
const ProtonConfig::Summary::Log & log(summary.log);
const ProtonConfig::Summary::Log::Chunk & chunk(log.chunk);
@@ -205,7 +206,6 @@ deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::M
LogDataStore::Config logConfig;
logConfig.setMaxFileSize(log.maxfilesize)
.setMaxNumLids(log.maxnumlids)
- .setMaxDiskBloatFactor(std::min(flush.diskbloatfactor, flush.each.diskbloatfactor))
.setMaxBucketSpread(log.maxbucketspread).setMinFileSizeFactor(log.minfilesizefactor)
.compactCompression(deriveCompression(log.compact.compression))
.setFileConfig(fileConfig).disableCrcOnRead(chunk.skipcrconread);
@@ -213,7 +213,7 @@ deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::M
}
search::LogDocumentStore::Config buildStoreConfig(const ProtonConfig & proton, const HwInfo & hwInfo) {
- return deriveConfig(proton.summary, proton.flush.memory, hwInfo);
+ return deriveConfig(proton.summary, hwInfo);
}
using AttributesConfigSP = DocumentDBConfig::AttributesConfigSP;
@@ -264,7 +264,7 @@ build_alloc_config(const ProtonConfig& proton_config, const vespalib::string& do
auto& alloc_config = document_db_config_entry.allocation;
auto& distribution_config = proton_config.distribution;
search::GrowStrategy grow_strategy(alloc_config.initialnumdocs, alloc_config.growfactor, alloc_config.growbias, alloc_config.multivaluegrowfactor);
- search::CompactionStrategy compaction_strategy(alloc_config.maxDeadBytesRatio, alloc_config.maxDeadAddressSpaceRatio);
+ CompactionStrategy compaction_strategy(alloc_config.maxDeadBytesRatio, alloc_config.maxDeadAddressSpaceRatio);
return std::make_shared<const AllocConfig>
(AllocStrategy(grow_strategy, compaction_strategy, alloc_config.amortizecount),
distribution_config.redundancy, distribution_config.searchablecopies);
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
index 06d174497b3..6b1356da50e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
@@ -30,7 +30,6 @@
#include <vespa/log/log.h>
LOG_SETUP(".proton.server.storeonlydocsubdb");
-using search::CompactionStrategy;
using search::GrowStrategy;
using vespalib::makeLambdaTask;
using search::index::Schema;
@@ -43,6 +42,7 @@ using vespalib::GenericHeader;
using search::common::FileHeaderContext;
using proton::initializer::InitializerTask;
using searchcorespi::IFlushTarget;
+using vespalib::datastore::CompactionStrategy;
namespace proton {
@@ -422,7 +422,7 @@ namespace {
constexpr double RETIRED_DEAD_RATIO = 0.5;
struct UpdateConfig : public search::attribute::IAttributeFunctor {
- UpdateConfig(search::CompactionStrategy compactionStrategy) noexcept
+ UpdateConfig(CompactionStrategy compactionStrategy) noexcept
: _compactionStrategy(compactionStrategy)
{}
void operator()(search::attribute::IAttributeVector &iAttributeVector) override {
@@ -433,15 +433,15 @@ struct UpdateConfig : public search::attribute::IAttributeFunctor {
attributeVector->update_config(cfg);
}
}
- search::CompactionStrategy _compactionStrategy;
+ CompactionStrategy _compactionStrategy;
};
}
-search::CompactionStrategy
-StoreOnlyDocSubDB::computeCompactionStrategy(search::CompactionStrategy strategy) const {
+CompactionStrategy
+StoreOnlyDocSubDB::computeCompactionStrategy(CompactionStrategy strategy) const {
return isNodeRetired()
- ? search::CompactionStrategy(RETIRED_DEAD_RATIO, RETIRED_DEAD_RATIO)
+ ? CompactionStrategy(RETIRED_DEAD_RATIO, RETIRED_DEAD_RATIO)
: strategy;
}
@@ -464,7 +464,7 @@ StoreOnlyDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCa
bool wasNodeRetired = isNodeRetired();
_nodeRetired = calc->nodeRetired();
if (wasNodeRetired != isNodeRetired()) {
- search::CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy);
+ CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy);
auto cfg = _dms->getConfig();
cfg.setCompactionStrategy(compactionStrategy);
_dms->update_config(cfg);
@@ -474,7 +474,7 @@ StoreOnlyDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCa
void
StoreOnlyDocSubDB::reconfigureAttributesConsideringNodeState(OnDone onDone) {
- search::CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy);
+ CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy);
auto attrMan = getAttributeManager();
if (attrMan) {
attrMan->asyncForEachAttribute(std::make_shared<UpdateConfig>(compactionStrategy), std::move(onDone));
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
index b53dfe89f59..d43b865c000 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
@@ -153,7 +153,7 @@ private:
std::shared_ptr<ShrinkLidSpaceFlushTarget> _dmsShrinkTarget;
std::shared_ptr<PendingLidTrackerBase> _pendingLidsForCommit;
bool _nodeRetired;
- search::CompactionStrategy _lastConfiguredCompactionStrategy;
+ vespalib::datastore::CompactionStrategy _lastConfiguredCompactionStrategy;
IFlushTargetList getFlushTargets() override;
protected:
@@ -234,7 +234,7 @@ public:
std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override;
void tearDownReferences(IDocumentDBReferenceResolver &resolver) override;
PendingLidTrackerBase & getUncommittedLidsTracker() override { return *_pendingLidsForCommit; }
- search::CompactionStrategy computeCompactionStrategy(search::CompactionStrategy strategy) const;
+ vespalib::datastore::CompactionStrategy computeCompactionStrategy(vespalib::datastore::CompactionStrategy strategy) const;
bool isNodeRetired() const { return _nodeRetired; }
};
diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h b/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h
index d9b83bfc3a8..7194cc4d403 100644
--- a/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h
+++ b/searchcore/src/vespa/searchcore/proton/test/dummy_document_store.h
@@ -10,13 +10,11 @@ struct DummyDocumentStore : public search::IDocumentStore
{
vespalib::string _baseDir;
- DummyDocumentStore()
- : _baseDir("")
- {}
+ DummyDocumentStore() = default;
DummyDocumentStore(const vespalib::string &baseDir)
: _baseDir(baseDir)
{}
- ~DummyDocumentStore() {}
+ ~DummyDocumentStore() = default;
DocumentUP read(search::DocumentIdT, const document::DocumentTypeRepo &) const override {
return DocumentUP();
}
@@ -25,7 +23,8 @@ struct DummyDocumentStore : public search::IDocumentStore
void remove(uint64_t, search::DocumentIdT) override {}
void flush(uint64_t) override {}
uint64_t initFlush(uint64_t) override { return 0; }
- void compact(uint64_t) override {}
+ void compactBloat(uint64_t) override {}
+ void compactSpread(uint64_t) override {}
uint64_t lastSyncToken() const override { return 0; }
uint64_t tentativeLastSyncToken() const override { return 0; }
vespalib::system_time getLastFlushTime() const override { return vespalib::system_time(); }
@@ -34,7 +33,7 @@ struct DummyDocumentStore : public search::IDocumentStore
size_t memoryMeta() const override { return 0; }
size_t getDiskFootprint() const override { return 0; }
size_t getDiskBloat() const override { return 0; }
- size_t getMaxCompactGain() const override { return getDiskBloat(); }
+ size_t getMaxSpreadAsBloat() const override { return getDiskBloat(); }
search::CacheStats getCacheStats() const override { return search::CacheStats(); }
const vespalib::string &getBaseDir() const override { return _baseDir; }
void accept(search::IDocumentStoreReadVisitor &,
diff --git a/searchcore/src/vespa/searchcore/proton/test/test.h b/searchcore/src/vespa/searchcore/proton/test/test.h
index 1494823e899..4231d5e7717 100644
--- a/searchcore/src/vespa/searchcore/proton/test/test.h
+++ b/searchcore/src/vespa/searchcore/proton/test/test.h
@@ -5,7 +5,6 @@
#include "bucketdocuments.h"
#include "bucketstatecalculator.h"
#include "document.h"
-#include "dummy_document_store.h"
#include "dummy_feed_view.h"
#include "dummy_summary_manager.h"
#include "resulthandler.h"
diff --git a/searchlib/abi-spec.json b/searchlib/abi-spec.json
index e5611324254..2d7daf2300e 100644
--- a/searchlib/abi-spec.json
+++ b/searchlib/abi-spec.json
@@ -1457,6 +1457,7 @@
"protected void <init>(com.google.common.collect.ImmutableMap, java.util.Map)",
"public com.yahoo.searchlib.rankingexpression.ExpressionFunction getFunction(java.lang.String)",
"protected com.google.common.collect.ImmutableMap functions()",
+ "protected java.util.Map getFunctions()",
"public java.lang.String getBinding(java.lang.String)",
"public com.yahoo.searchlib.rankingexpression.rule.FunctionReferenceContext withBindings(java.util.Map)",
"public com.yahoo.searchlib.rankingexpression.rule.FunctionReferenceContext withoutBindings()"
@@ -1611,6 +1612,7 @@
"public void <init>(java.util.Map)",
"public void <init>(java.util.Collection, java.util.Map)",
"public void <init>(java.util.Collection, java.util.Map, java.util.Map)",
+ "public void <init>(java.util.Map, java.util.Map, java.util.Map)",
"public void <init>(com.google.common.collect.ImmutableMap, java.util.Map, java.util.Map)",
"public void addFunctionSerialization(java.lang.String, java.lang.String)",
"public void addArgumentTypeSerialization(java.lang.String, java.lang.String, com.yahoo.tensor.TensorType)",
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java
index f0586297b0d..287bc2655f5 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/FunctionReferenceContext.java
@@ -17,7 +17,7 @@ import java.util.Map;
public class FunctionReferenceContext {
/** Expression functions indexed by name */
- private final ImmutableMap<String, ExpressionFunction> functions;
+ private final Map<String, ExpressionFunction> functions;
/** Mapping from argument names to the expressions they resolve to */
private final Map<String, String> bindings = new HashMap<>();
@@ -43,26 +43,32 @@ public class FunctionReferenceContext {
/** Create a context for a single serialization task */
public FunctionReferenceContext(Map<String, ExpressionFunction> functions, Map<String, String> bindings) {
- this(ImmutableMap.copyOf(functions), bindings);
+ this.functions = Map.copyOf(functions);
+ if (bindings != null)
+ this.bindings.putAll(bindings);
}
+ /** @deprecated Use {@link #FunctionReferenceContext(Map, Map)} instead */
+ @Deprecated(forRemoval = true, since = "7")
protected FunctionReferenceContext(ImmutableMap<String, ExpressionFunction> functions, Map<String, String> bindings) {
- this.functions = functions;
- if (bindings != null)
- this.bindings.putAll(bindings);
+ this((Map<String, ExpressionFunction>)functions, bindings);
}
- private static ImmutableMap<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) {
- ImmutableMap.Builder<String,ExpressionFunction> mapBuilder = new ImmutableMap.Builder<>();
+ private static Map<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) {
+ Map<String, ExpressionFunction> mapBuilder = new HashMap<>();
for (ExpressionFunction function : list)
mapBuilder.put(function.getName(), function);
- return mapBuilder.build();
+ return Map.copyOf(mapBuilder);
}
/** Returns a function or null if it isn't defined in this context */
public ExpressionFunction getFunction(String name) { return functions.get(name); }
- protected ImmutableMap<String, ExpressionFunction> functions() { return functions; }
+ /** @deprecated Use {@link #getFunctions()} instead */
+ @Deprecated(forRemoval = true, since = "7")
+ protected ImmutableMap<String, ExpressionFunction> functions() { return ImmutableMap.copyOf(functions); }
+
+ protected Map<String, ExpressionFunction> getFunctions() { return functions; }
/** Returns the resolution of an identifier, or null if it isn't defined in this context */
public String getBinding(String name) { return bindings.get(name); }
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java
index cd2f966cc22..535ad013caf 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/SerializationContext.java
@@ -8,6 +8,7 @@ import com.yahoo.tensor.TensorType;
import java.util.Collection;
import java.util.Collections;
+import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
@@ -54,11 +55,11 @@ public class SerializationContext extends FunctionReferenceContext {
this(toMap(functions), bindings, serializedFunctions);
}
- private static ImmutableMap<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) {
- ImmutableMap.Builder<String,ExpressionFunction> mapBuilder = new ImmutableMap.Builder<>();
+ private static Map<String, ExpressionFunction> toMap(Collection<ExpressionFunction> list) {
+ Map<String,ExpressionFunction> mapBuilder = new HashMap<>();
for (ExpressionFunction function : list)
mapBuilder.put(function.getName(), function);
- return mapBuilder.build();
+ return Map.copyOf(mapBuilder);
}
/**
@@ -69,12 +70,19 @@ public class SerializationContext extends FunctionReferenceContext {
* @param serializedFunctions a cache of serializedFunctions - the ownership of this map
* is <b>transferred</b> to this and will be modified in it
*/
- public SerializationContext(ImmutableMap<String,ExpressionFunction> functions, Map<String, String> bindings,
+ public SerializationContext(Map<String,ExpressionFunction> functions, Map<String, String> bindings,
Map<String, String> serializedFunctions) {
super(functions, bindings);
this.serializedFunctions = serializedFunctions;
}
+ /** @deprecated Use {@link #SerializationContext(Map, Map, Map) instead}*/
+ @Deprecated(forRemoval = true, since = "7")
+ public SerializationContext(ImmutableMap<String,ExpressionFunction> functions, Map<String, String> bindings,
+ Map<String, String> serializedFunctions) {
+ this((Map<String, ExpressionFunction>)functions, bindings, serializedFunctions);
+ }
+
/** Adds the serialization of a function */
public void addFunctionSerialization(String name, String expressionString) {
serializedFunctions.put(name, expressionString);
@@ -93,13 +101,13 @@ public class SerializationContext extends FunctionReferenceContext {
@Override
public SerializationContext withBindings(Map<String, String> bindings) {
- return new SerializationContext(functions(), bindings, this.serializedFunctions);
+ return new SerializationContext(getFunctions(), bindings, this.serializedFunctions);
}
/** Returns a fresh context without bindings */
@Override
public SerializationContext withoutBindings() {
- return new SerializationContext(functions(), null, this.serializedFunctions);
+ return new SerializationContext(getFunctions(), null, this.serializedFunctions);
}
public Map<String, String> serializedFunctions() { return serializedFunctions; }
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java
index ba5a243464e..d873963bb6e 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/TensorFunctionNode.java
@@ -328,7 +328,14 @@ public class TensorFunctionNode extends CompositeNode {
/** Returns a function or null if it isn't defined in this context */
public ExpressionFunction getFunction(String name) { return wrappedSerializationContext.getFunction(name); }
- protected ImmutableMap<String, ExpressionFunction> functions() { return wrappedSerializationContext.functions(); }
+ /** @deprecated Use {@link #getFunctions()} instead */
+ @SuppressWarnings("removal")
+ @Deprecated(forRemoval = true, since = "7")
+ protected ImmutableMap<String, ExpressionFunction> functions() {
+ return ImmutableMap.copyOf(wrappedSerializationContext.getFunctions());
+ }
+
+ @Override protected Map<String, ExpressionFunction> getFunctions() { return wrappedSerializationContext.getFunctions(); }
public ToStringContext parent() { return wrappedToStringContext; }
@@ -344,14 +351,14 @@ public class TensorFunctionNode extends CompositeNode {
/** Returns a new context with the bindings replaced by the given bindings */
@Override
public ExpressionToStringContext withBindings(Map<String, String> bindings) {
- SerializationContext serializationContext = new SerializationContext(functions(), bindings, serializedFunctions());
+ SerializationContext serializationContext = new SerializationContext(getFunctions(), bindings, serializedFunctions());
return new ExpressionToStringContext(serializationContext, wrappedToStringContext, path, parent);
}
/** Returns a fresh context without bindings */
@Override
public SerializationContext withoutBindings() {
- SerializationContext serializationContext = new SerializationContext(functions(), null, serializedFunctions());
+ SerializationContext serializationContext = new SerializationContext(getFunctions(), null, serializedFunctions());
return new ExpressionToStringContext(serializationContext, null, path, parent);
}
}
diff --git a/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp b/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp
index 3c8c9ff17e0..16a04a746f3 100644
--- a/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp
+++ b/searchlib/src/tests/attribute/attribute_header/attribute_header_test.cpp
@@ -49,7 +49,7 @@ void
verify_roundtrip_serialization(const HnswIPO& hnsw_params_in)
{
auto gen_header = populate_header(hnsw_params_in);
- auto attr_header = AttributeHeader::extractTags(gen_header);
+ auto attr_header = AttributeHeader::extractTags(gen_header, file_name);
EXPECT_EQ(tensor_cfg.basicType(), attr_header.getBasicType());
EXPECT_EQ(tensor_cfg.collectionType(), attr_header.getCollectionType());
diff --git a/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp b/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp
index 801aa9341fb..fbec89d27eb 100644
--- a/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp
+++ b/searchlib/src/tests/attribute/compaction/attribute_compaction_test.cpp
@@ -16,6 +16,7 @@ using search::attribute::Config;
using search::attribute::BasicType;
using search::attribute::CollectionType;
using vespalib::AddressSpace;
+using vespalib::datastore::CompactionStrategy;
using AttributePtr = AttributeVector::SP;
using AttributeStatus = search::attribute::Status;
@@ -237,7 +238,7 @@ TEST_F("Compaction limits address space usage (dead) when free lists are NOT use
{
populate_and_hammer(f, true);
AddressSpace afterSpace = f.getMultiValueAddressSpaceUsage("after");
- EXPECT_GREATER(search::CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, afterSpace.dead());
+ EXPECT_GREATER(CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, afterSpace.dead());
}
TEST_F("Compaction is not executed when free lists are used",
@@ -266,7 +267,7 @@ TEST_F("Compaction is peformed when compaction strategy is changed to enable com
f._v->commit(); // new commit might trigger further compaction
after2 = f.getMultiValueAddressSpaceUsage("after2");
}
- EXPECT_GREATER(search::CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, after2.dead());
+ EXPECT_GREATER(CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK, after2.dead());
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp b/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp
index 2c8fc2966b0..b30b3e4eb71 100644
--- a/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp
+++ b/searchlib/src/tests/attribute/enum_attribute_compaction/enum_attribute_compaction_test.cpp
@@ -175,7 +175,7 @@ void
CompactionTest<VectorType>::test_enum_store_compaction()
{
constexpr uint32_t canary_stride = 256;
- uint32_t dead_limit = search::CompactionStrategy::DEAD_BYTES_SLACK / 8;
+ uint32_t dead_limit = vespalib::datastore::CompactionStrategy::DEAD_BYTES_SLACK / 8;
uint32_t doc_count = dead_limit * 3;
if (_v->hasMultiValue() || std::is_same_v<VectorType,StringAttribute>) {
doc_count /= 2;
diff --git a/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp b/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp
index 9c25429932b..33477e015d6 100644
--- a/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp
+++ b/searchlib/src/tests/attribute/enumstore/enumstore_test.cpp
@@ -7,7 +7,23 @@
LOG_SETUP("enumstore_test");
using Type = search::DictionaryConfig::Type;
+using vespalib::datastore::CompactionStrategy;
using vespalib::datastore::EntryRef;
+using vespalib::datastore::EntryRefFilter;
+using RefT = vespalib::datastore::EntryRefT<22>;
+
+namespace vespalib::datastore {
+
+/*
+ * Print EntryRef as RefT which is used by test_normalize_posting_lists and
+ * test_foreach_posting_list to differentiate between buffers
+ */
+void PrintTo(const EntryRef &ref, std::ostream* os) {
+ RefT iref(ref);
+ *os << "RefT(" << iref.offset() << "," << iref.bufferId() << ")";
+}
+
+}
namespace search {
@@ -346,16 +362,16 @@ TEST(EnumStoreTest, address_space_usage_is_reported)
NumericEnumStore store(false, DictionaryConfig::Type::BTREE);
using vespalib::AddressSpace;
- EXPECT_EQ(AddressSpace(1, 1, ADDRESS_LIMIT), store.get_address_space_usage());
+ EXPECT_EQ(AddressSpace(1, 1, ADDRESS_LIMIT), store.get_values_address_space_usage());
EnumIndex idx1 = store.insert(10);
- EXPECT_EQ(AddressSpace(2, 1, ADDRESS_LIMIT), store.get_address_space_usage());
+ EXPECT_EQ(AddressSpace(2, 1, ADDRESS_LIMIT), store.get_values_address_space_usage());
EnumIndex idx2 = store.insert(20);
// Address limit increases because buffer is re-sized.
- EXPECT_EQ(AddressSpace(3, 1, ADDRESS_LIMIT + 2), store.get_address_space_usage());
+ EXPECT_EQ(AddressSpace(3, 1, ADDRESS_LIMIT + 2), store.get_values_address_space_usage());
dec_ref_count(store, idx1);
- EXPECT_EQ(AddressSpace(3, 2, ADDRESS_LIMIT + 2), store.get_address_space_usage());
+ EXPECT_EQ(AddressSpace(3, 2, ADDRESS_LIMIT + 2), store.get_values_address_space_usage());
dec_ref_count(store, idx2);
- EXPECT_EQ(AddressSpace(3, 3, ADDRESS_LIMIT + 2), store.get_address_space_usage());
+ EXPECT_EQ(AddressSpace(3, 3, ADDRESS_LIMIT + 2), store.get_values_address_space_usage());
}
class BatchUpdaterTest : public ::testing::Test {
@@ -597,6 +613,11 @@ public:
void update_posting_idx(EnumIndex enum_idx, EntryRef old_posting_idx, EntryRef new_posting_idx);
EnumIndex insert_value(size_t value_idx);
+ void populate_sample_data(uint32_t cnt);
+ std::vector<EntryRef> get_sample_values(uint32_t cnt);
+ void clear_sample_values(uint32_t cnt);
+ void test_normalize_posting_lists(bool use_filter, bool one_filter);
+ void test_foreach_posting_list(bool one_filter);
static EntryRef fake_pidx() { return EntryRef(42); }
};
@@ -620,6 +641,149 @@ EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::insert_value(size_t val
return enum_idx;
}
+namespace {
+/*
+ * large_population should trigger multiple callbacks from normalize_values
+ * and foreach_value
+ */
+constexpr uint32_t large_population = 1200;
+
+uint32_t select_buffer(uint32_t i) {
+ if ((i % 2) == 0) {
+ return 0;
+ }
+ if ((i % 3) == 0) {
+ return 1;
+ }
+ if ((i % 5) == 0) {
+ return 2;
+ }
+ return 3;
+}
+
+EntryRef make_fake_pidx(uint32_t i) { return RefT(i + 200, select_buffer(i)); }
+EntryRef make_fake_adjusted_pidx(uint32_t i) { return RefT(i + 500, select_buffer(i)); }
+EntryRef adjust_fake_pidx(EntryRef ref) { RefT iref(ref); return RefT(iref.offset() + 300, iref.bufferId()); }
+
+}
+
+
+template <typename EnumStoreTypeAndDictionaryType>
+void
+EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::populate_sample_data(uint32_t cnt)
+{
+ auto& dict = store.get_dictionary();
+ for (uint32_t i = 0; i < cnt; ++i) {
+ auto enum_idx = store.insert(i);
+ EXPECT_TRUE(enum_idx.valid());
+ EntryRef posting_idx(make_fake_pidx(i));
+ dict.update_posting_list(enum_idx, store.get_comparator(), [posting_idx](EntryRef) noexcept -> EntryRef { return posting_idx; });
+ }
+}
+
+template <typename EnumStoreTypeAndDictionaryType>
+std::vector<EntryRef>
+EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::get_sample_values(uint32_t cnt)
+{
+ std::vector<EntryRef> result;
+ result.reserve(cnt);
+ store.freeze_dictionary();
+ auto& dict = store.get_dictionary();
+ for (uint32_t i = 0; i < cnt; ++i) {
+ auto compare = store.make_comparator(i);
+ auto enum_idx = dict.find(compare);
+ EXPECT_TRUE(enum_idx.valid());
+ EntryRef posting_idx;
+ dict.update_posting_list(enum_idx, compare, [&posting_idx](EntryRef ref) noexcept { posting_idx = ref; return ref; });;
+ auto find_result = dict.find_posting_list(compare, dict.get_frozen_root());
+ EXPECT_EQ(enum_idx, find_result.first);
+ EXPECT_EQ(posting_idx, find_result.second);
+ result.emplace_back(find_result.second);
+ }
+ return result;
+}
+
+template <typename EnumStoreTypeAndDictionaryType>
+void
+EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::clear_sample_values(uint32_t cnt)
+{
+ auto& dict = store.get_dictionary();
+ for (uint32_t i = 0; i < cnt; ++i) {
+ auto comparator = store.make_comparator(i);
+ auto enum_idx = dict.find(comparator);
+ EXPECT_TRUE(enum_idx.valid());
+ dict.update_posting_list(enum_idx, comparator, [](EntryRef) noexcept -> EntryRef { return EntryRef(); });
+ }
+}
+
+namespace {
+
+EntryRefFilter make_entry_ref_filter(bool one_filter)
+{
+ if (one_filter) {
+ EntryRefFilter filter(RefT::numBuffers(), RefT::offset_bits);
+ filter.add_buffer(3);
+ return filter;
+ }
+ return EntryRefFilter::create_all_filter(RefT::numBuffers(), RefT::offset_bits);
+}
+
+}
+
+template <typename EnumStoreTypeAndDictionaryType>
+void
+EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::test_normalize_posting_lists(bool use_filter, bool one_filter)
+{
+ populate_sample_data(large_population);
+ auto& dict = store.get_dictionary();
+ std::vector<EntryRef> exp_refs;
+ std::vector<EntryRef> exp_adjusted_refs;
+ exp_refs.reserve(large_population);
+ exp_adjusted_refs.reserve(large_population);
+ for (uint32_t i = 0; i < large_population; ++i) {
+ exp_refs.emplace_back(make_fake_pidx(i));
+ if (!use_filter || !one_filter || select_buffer(i) == 3) {
+ exp_adjusted_refs.emplace_back(make_fake_adjusted_pidx(i));
+ } else {
+ exp_adjusted_refs.emplace_back(make_fake_pidx(i));
+ }
+ }
+ EXPECT_EQ(exp_refs, get_sample_values(large_population));
+ if (use_filter) {
+ auto filter = make_entry_ref_filter(one_filter);
+ auto dummy = [](std::vector<EntryRef>&) noexcept { };
+ auto adjust_refs = [](std::vector<EntryRef> &refs) noexcept { for (auto &ref : refs) { ref = adjust_fake_pidx(ref); } };
+ EXPECT_FALSE(dict.normalize_posting_lists(dummy, filter));
+ EXPECT_EQ(exp_refs, get_sample_values(large_population));
+ EXPECT_TRUE(dict.normalize_posting_lists(adjust_refs, filter));
+ } else {
+ auto dummy = [](EntryRef posting_idx) noexcept { return posting_idx; };
+ auto adjust_refs = [](EntryRef ref) noexcept { return adjust_fake_pidx(ref); };
+ EXPECT_FALSE(dict.normalize_posting_lists(dummy));
+ EXPECT_EQ(exp_refs, get_sample_values(large_population));
+ EXPECT_TRUE(dict.normalize_posting_lists(adjust_refs));
+ }
+ EXPECT_EQ(exp_adjusted_refs, get_sample_values(large_population));
+ clear_sample_values(large_population);
+}
+
+template <typename EnumStoreTypeAndDictionaryType>
+void
+EnumStoreDictionaryTest<EnumStoreTypeAndDictionaryType>::test_foreach_posting_list(bool one_filter)
+{
+ auto filter = make_entry_ref_filter(one_filter);
+ populate_sample_data(large_population);
+ auto& dict = store.get_dictionary();
+ std::vector<EntryRef> exp_refs;
+ auto save_exp_refs = [&exp_refs](std::vector<EntryRef>& refs) { exp_refs.insert(exp_refs.end(), refs.begin(), refs.end()); };
+ EXPECT_FALSE(dict.normalize_posting_lists(save_exp_refs, filter));
+ std::vector<EntryRef> act_refs;
+ auto save_act_refs = [&act_refs](const std::vector<EntryRef>& refs) { act_refs.insert(act_refs.end(), refs.begin(), refs.end()); };
+ dict.foreach_posting_list(save_act_refs, filter);
+ EXPECT_EQ(exp_refs, act_refs);
+ clear_sample_values(large_population);
+}
+
// Disable warnings emitted by gtest generated files when using typed tests
#pragma GCC diagnostic push
#ifndef __clang__
@@ -678,26 +842,27 @@ TYPED_TEST(EnumStoreDictionaryTest, find_posting_list_works)
TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_works)
{
- auto value_0_idx = this->insert_value(0);
- this->update_posting_idx(value_0_idx, EntryRef(), this->fake_pidx());
- this->store.freeze_dictionary();
- auto& dict = this->store.get_dictionary();
- auto root = dict.get_frozen_root();
- auto find_result = dict.find_posting_list(this->make_bound_comparator(0), root);
- EXPECT_EQ(value_0_idx, find_result.first);
- EXPECT_EQ(this->fake_pidx(), find_result.second);
- auto dummy = [](EntryRef posting_idx) noexcept { return posting_idx; };
- std::vector<EntryRef> saved_refs;
- auto save_refs_and_clear = [&saved_refs](EntryRef posting_idx) { saved_refs.push_back(posting_idx); return EntryRef(); };
- EXPECT_FALSE(dict.normalize_posting_lists(dummy));
- EXPECT_TRUE(dict.normalize_posting_lists(save_refs_and_clear));
- EXPECT_FALSE(dict.normalize_posting_lists(save_refs_and_clear));
- EXPECT_EQ((std::vector<EntryRef>{ this->fake_pidx(), EntryRef() }), saved_refs);
- this->store.freeze_dictionary();
- root = dict.get_frozen_root();
- find_result = dict.find_posting_list(this->make_bound_comparator(0), root);
- EXPECT_EQ(value_0_idx, find_result.first);
- EXPECT_EQ(EntryRef(), find_result.second);
+ this->test_normalize_posting_lists(false, false);
+}
+
+TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_with_all_filter_works)
+{
+ this->test_normalize_posting_lists(true, false);
+}
+
+TYPED_TEST(EnumStoreDictionaryTest, normalize_posting_lists_with_one_filter_works)
+{
+ this->test_normalize_posting_lists(true, true);
+}
+
+TYPED_TEST(EnumStoreDictionaryTest, foreach_posting_list_with_all_filter_works)
+{
+ this->test_foreach_posting_list(false);
+}
+
+TYPED_TEST(EnumStoreDictionaryTest, foreach_posting_list_with_one_filter_works)
+{
+ this->test_foreach_posting_list(true);
}
namespace {
@@ -714,7 +879,7 @@ void inc_generation(generation_t &gen, NumericEnumStore &store)
TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works)
{
- size_t entry_count = (search::CompactionStrategy::DEAD_BYTES_SLACK / 8) + 40;
+ size_t entry_count = (CompactionStrategy::DEAD_BYTES_SLACK / 8) + 40;
auto updater = this->store.make_batch_updater();
for (int32_t i = 0; (size_t) i < entry_count; ++i) {
auto idx = updater.insert(i);
@@ -727,13 +892,13 @@ TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works)
inc_generation(gen, this->store);
auto& dict = this->store.get_dictionary();
if (dict.get_has_btree_dictionary()) {
- EXPECT_LT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes());
+ EXPECT_LT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes());
}
if (dict.get_has_hash_dictionary()) {
- EXPECT_LT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes());
+ EXPECT_LT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes());
}
int compact_count = 0;
- search::CompactionStrategy compaction_strategy;
+ CompactionStrategy compaction_strategy;
for (uint32_t i = 0; i < 15; ++i) {
this->store.update_stat();
if (this->store.consider_compact_dictionary(compaction_strategy)) {
@@ -747,10 +912,10 @@ TYPED_TEST(EnumStoreDictionaryTest, compact_worst_works)
EXPECT_LT((TypeParam::type == Type::BTREE_AND_HASH) ? 1 : 0, compact_count);
EXPECT_GT(15, compact_count);
if (dict.get_has_btree_dictionary()) {
- EXPECT_GT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes());
+ EXPECT_GT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_btree_memory_usage().deadBytes());
}
if (dict.get_has_hash_dictionary()) {
- EXPECT_GT(search::CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes());
+ EXPECT_GT(CompactionStrategy::DEAD_BYTES_SLACK, dict.get_hash_memory_usage().deadBytes());
}
std::vector<int32_t> exp_values;
std::vector<int32_t> values;
diff --git a/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp b/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp
index 8b1906573d4..bddaa4f4e31 100644
--- a/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp
+++ b/searchlib/src/tests/attribute/multi_value_mapping/multi_value_mapping_test.cpp
@@ -14,6 +14,8 @@
LOG_SETUP("multivaluemapping_test");
using vespalib::datastore::ArrayStoreConfig;
+using vespalib::datastore::CompactionSpec;
+using vespalib::datastore::CompactionStrategy;
template <typename EntryT>
void
@@ -142,7 +144,9 @@ public:
}
void compactWorst() {
- _mvMapping->compactWorst(true, false);
+ CompactionSpec compaction_spec(true, false);
+ CompactionStrategy compaction_strategy;
+ _mvMapping->compactWorst(compaction_spec, compaction_strategy);
_attr->commit();
_attr->incGeneration();
}
diff --git a/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp b/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp
index cd78332cacd..34b8603c63c 100644
--- a/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp
+++ b/searchlib/src/tests/attribute/posting_store/posting_store_test.cpp
@@ -13,6 +13,7 @@
#include <ostream>
using vespalib::GenerationHandler;
+using vespalib::datastore::CompactionStrategy;
using vespalib::datastore::EntryRef;
namespace search::attribute {
@@ -160,7 +161,7 @@ PostingStoreTest::test_compact_sequence(uint32_t sequence_length)
EntryRef old_ref2 = get_posting_ref(2);
auto usage_before = store.getMemoryUsage();
bool compaction_done = false;
- search::CompactionStrategy compaction_strategy(0.05, 0.2);
+ CompactionStrategy compaction_strategy(0.05, 0.2);
for (uint32_t pass = 0; pass < 45; ++pass) {
store.update_stat();
auto guard = _gen_handler.takeGuard();
@@ -193,7 +194,7 @@ PostingStoreTest::test_compact_btree_nodes(uint32_t sequence_length)
EntryRef old_ref2 = get_posting_ref(2);
auto usage_before = store.getMemoryUsage();
bool compaction_done = false;
- search::CompactionStrategy compaction_strategy(0.05, 0.2);
+ CompactionStrategy compaction_strategy(0.05, 0.2);
for (uint32_t pass = 0; pass < 55; ++pass) {
store.update_stat();
auto guard = _gen_handler.takeGuard();
diff --git a/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp b/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp
index c077ab83a6e..1a8eda40f52 100644
--- a/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp
+++ b/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp
@@ -176,7 +176,7 @@ struct ReferenceAttributeTest : public ::testing::Test {
search::attribute::Status newStatus = oldStatus;
uint64_t iter = 0;
AttributeGuard guard(_attr);
- uint64_t dropCount = search::CompactionStrategy::DEAD_BYTES_SLACK / sizeof(Reference);
+ uint64_t dropCount = vespalib::datastore::CompactionStrategy::DEAD_BYTES_SLACK / sizeof(Reference);
for (; iter < iterLimit; ++iter) {
clear(2);
set(2, toGid(doc2));
diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
index 922b0d4fb3e..f47e392c047 100644
--- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
+++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
@@ -38,7 +38,6 @@ using document::WrongTensorTypeException;
using search::AddressSpaceUsage;
using search::AttributeGuard;
using search::AttributeVector;
-using search::CompactionStrategy;
using search::attribute::DistanceMetric;
using search::attribute::HnswIndexParams;
using search::queryeval::GlobalFilter;
@@ -56,6 +55,7 @@ using search::tensor::NearestNeighborIndexLoader;
using search::tensor::NearestNeighborIndexSaver;
using search::tensor::PrepareResult;
using search::tensor::TensorAttribute;
+using vespalib::datastore::CompactionStrategy;
using vespalib::eval::TensorSpec;
using vespalib::eval::CellType;
using vespalib::eval::ValueType;
diff --git a/searchlib/src/tests/docstore/document_store/document_store_test.cpp b/searchlib/src/tests/docstore/document_store/document_store_test.cpp
index dec7b911f65..f2bec30a349 100644
--- a/searchlib/src/tests/docstore/document_store/document_store_test.cpp
+++ b/searchlib/src/tests/docstore/document_store/document_store_test.cpp
@@ -25,6 +25,7 @@ struct NullDataStore : IDataStore {
size_t memoryMeta() const override { return 0; }
size_t getDiskFootprint() const override { return 0; }
size_t getDiskBloat() const override { return 0; }
+ size_t getMaxSpreadAsBloat() const override { return 0; }
uint64_t lastSyncToken() const override { return 0; }
uint64_t tentativeLastSyncToken() const override { return 0; }
vespalib::system_time getLastFlushTime() const override { return vespalib::system_time(); }
diff --git a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
index 07652dfd336..378babb6ee1 100644
--- a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
+++ b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
@@ -236,7 +236,7 @@ void verifyGrowing(const LogDataStore::Config & config, uint32_t minFiles, uint3
datastore.remove(i + 20000, i);
}
datastore.flush(datastore.initFlush(lastSyncToken));
- datastore.compact(30000);
+ datastore.compactBloat(30000);
datastore.remove(31000, 0);
checkStats(datastore, 31000, 30000);
EXPECT_LESS_EQUAL(minFiles, datastore.getAllActiveFiles().size());
@@ -252,7 +252,7 @@ void verifyGrowing(const LogDataStore::Config & config, uint32_t minFiles, uint3
}
TEST("testGrowingChunkedBySize") {
LogDataStore::Config config;
- config.setMaxFileSize(100000).setMaxDiskBloatFactor(0.1).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2)
+ config.setMaxFileSize(100000).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2)
.compactCompression({CompressionConfig::LZ4})
.setFileConfig({{CompressionConfig::LZ4, 9, 60}, 1000});
verifyGrowing(config, 40, 120);
@@ -260,7 +260,7 @@ TEST("testGrowingChunkedBySize") {
TEST("testGrowingChunkedByNumLids") {
LogDataStore::Config config;
- config.setMaxNumLids(1000).setMaxDiskBloatFactor(0.1).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2)
+ config.setMaxNumLids(1000).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2)
.compactCompression({CompressionConfig::LZ4})
.setFileConfig({{CompressionConfig::LZ4, 9, 60}, 1000});
verifyGrowing(config,10, 10);
@@ -679,7 +679,7 @@ TEST("testWriteRead") {
EXPECT_LESS(0u, headerFootprint);
EXPECT_EQUAL(datastore.getDiskFootprint(), headerFootprint);
EXPECT_EQUAL(datastore.getDiskBloat(), 0ul);
- EXPECT_EQUAL(datastore.getMaxCompactGain(), 0ul);
+ EXPECT_EQUAL(datastore.getMaxSpreadAsBloat(), 0ul);
datastore.write(1, 0, a[0].c_str(), a[0].size());
fetchAndTest(datastore, 0, a[0].c_str(), a[0].size());
datastore.write(2, 0, a[1].c_str(), a[1].size());
@@ -701,7 +701,7 @@ TEST("testWriteRead") {
EXPECT_EQUAL(datastore.getDiskFootprint(),
2711ul + headerFootprint);
EXPECT_EQUAL(datastore.getDiskBloat(), 0ul);
- EXPECT_EQUAL(datastore.getMaxCompactGain(), 0ul);
+ EXPECT_EQUAL(datastore.getMaxSpreadAsBloat(), 0ul);
datastore.flush(datastore.initFlush(lastSyncToken));
}
{
@@ -715,7 +715,7 @@ TEST("testWriteRead") {
EXPECT_LESS(0u, headerFootprint);
EXPECT_EQUAL(4944ul + headerFootprint, datastore.getDiskFootprint());
EXPECT_EQUAL(0ul, datastore.getDiskBloat());
- EXPECT_EQUAL(0ul, datastore.getMaxCompactGain());
+ EXPECT_EQUAL(0ul, datastore.getMaxSpreadAsBloat());
for(size_t i=0; i < 100; i++) {
fetchAndTest(datastore, i, a[i%2].c_str(), a[i%2].size());
@@ -730,7 +730,7 @@ TEST("testWriteRead") {
EXPECT_EQUAL(7594ul + headerFootprint, datastore.getDiskFootprint());
EXPECT_EQUAL(0ul, datastore.getDiskBloat());
- EXPECT_EQUAL(0ul, datastore.getMaxCompactGain());
+ EXPECT_EQUAL(0ul, datastore.getMaxSpreadAsBloat());
}
FastOS_File::EmptyAndRemoveDirectory("empty");
}
@@ -1050,7 +1050,6 @@ TEST("require that config equality operator detects inequality") {
using C = LogDataStore::Config;
EXPECT_TRUE(C() == C());
EXPECT_FALSE(C() == C().setMaxFileSize(1));
- EXPECT_FALSE(C() == C().setMaxDiskBloatFactor(0.3));
EXPECT_FALSE(C() == C().setMaxBucketSpread(0.3));
EXPECT_FALSE(C() == C().setMinFileSizeFactor(0.3));
EXPECT_FALSE(C() == C().setFileConfig(WriteableFileChunk::Config({}, 70)));
diff --git a/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp b/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp
index 032960c3799..149662cd266 100644
--- a/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp
+++ b/searchlib/src/tests/tensor/dense_tensor_store/dense_tensor_store_test.cpp
@@ -75,10 +75,19 @@ assertArraySize(const vespalib::string &tensorType, uint32_t expArraySize) {
TEST("require that array size is calculated correctly")
{
- TEST_DO(assertArraySize("tensor(x[1])", 32));
+ TEST_DO(assertArraySize("tensor(x[1])", 8));
TEST_DO(assertArraySize("tensor(x[10])", 96));
TEST_DO(assertArraySize("tensor(x[3])", 32));
TEST_DO(assertArraySize("tensor(x[10],y[10])", 800));
+ TEST_DO(assertArraySize("tensor<int8>(x[1])", 8));
+ TEST_DO(assertArraySize("tensor<int8>(x[8])", 8));
+ TEST_DO(assertArraySize("tensor<int8>(x[9])", 16));
+ TEST_DO(assertArraySize("tensor<int8>(x[16])", 16));
+ TEST_DO(assertArraySize("tensor<int8>(x[17])", 32));
+ TEST_DO(assertArraySize("tensor<int8>(x[32])", 32));
+ TEST_DO(assertArraySize("tensor<int8>(x[33])", 64));
+ TEST_DO(assertArraySize("tensor<int8>(x[64])", 64));
+ TEST_DO(assertArraySize("tensor<int8>(x[65])", 96));
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
index 7acd3cf8b57..bb2d750eade 100644
--- a/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
+++ b/searchlib/src/tests/tensor/hnsw_index/hnsw_index_test.cpp
@@ -1,12 +1,13 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/searchcommon/common/compaction_strategy.h>
#include <vespa/searchlib/common/bitvector.h>
#include <vespa/searchlib/tensor/distance_functions.h>
#include <vespa/searchlib/tensor/doc_vector_access.h>
#include <vespa/searchlib/tensor/hnsw_index.h>
#include <vespa/searchlib/tensor/random_level_generator.h>
#include <vespa/searchlib/tensor/inv_log_level_generator.h>
+#include <vespa/vespalib/datastore/compaction_spec.h>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/util/generationhandler.h>
#include <vespa/vespalib/data/slime/slime.h>
@@ -21,7 +22,8 @@ using namespace search::tensor;
using namespace vespalib::slime;
using vespalib::Slime;
using search::BitVector;
-using search::CompactionStrategy;
+using vespalib::datastore::CompactionSpec;
+using vespalib::datastore::CompactionStrategy;
template <typename FloatType>
class MyDocVectorAccess : public DocVectorAccess {
@@ -628,8 +630,10 @@ TEST_F(HnswIndexTest, hnsw_graph_is_compacted)
for (uint32_t i = 0; i < 10; ++i) {
mem_1 = mem_2;
// Forced compaction to move things around
- index->compact_link_arrays(true, false);
- index->compact_level_arrays(true, false);
+ CompactionSpec compaction_spec(true, false);
+ CompactionStrategy compaction_strategy;
+ index->compact_link_arrays(compaction_spec, compaction_strategy);
+ index->compact_level_arrays(compaction_spec, compaction_strategy);
commit();
index->update_stat();
mem_2 = commit_and_update_stat();
diff --git a/searchlib/src/tests/transactionlog/translogclient_test.cpp b/searchlib/src/tests/transactionlog/translogclient_test.cpp
index 5740eeb610d..d3c3af3a9ca 100644
--- a/searchlib/src/tests/transactionlog/translogclient_test.cpp
+++ b/searchlib/src/tests/transactionlog/translogclient_test.cpp
@@ -7,6 +7,7 @@
#include <vespa/searchlib/index/dummyfileheadercontext.h>
#include <vespa/document/util/bytebuffer.h>
#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/destructor_callbacks.h>
#include <vespa/fastos/file.h>
#include <thread>
@@ -316,43 +317,33 @@ fillDomainTest(Session * s1, size_t numPackets, size_t numEntries)
}
}
-using Counter = std::atomic<size_t>;
-
-class CountDone : public IDestructorCallback {
-public:
- explicit CountDone(Counter & inFlight) noexcept : _inFlight(inFlight) { ++_inFlight; }
- ~CountDone() override { --_inFlight; }
-private:
- Counter & _inFlight;
-};
-
void
-fillDomainTest(TransLogServer & s1, const vespalib::string & domain, size_t numPackets, size_t numEntries)
+fillDomainTest(IDestructorCallback::SP onDone, TransLogServer & tls, const vespalib::string & domain, size_t numPackets, size_t numEntries)
{
size_t value(0);
- Counter inFlight(0);
- auto domainWriter = s1.getWriter(domain);
- for(size_t i=0; i < numPackets; i++) {
- std::unique_ptr<Packet> p(new Packet(DEFAULT_PACKET_SIZE));
- for(size_t j=0; j < numEntries; j++, value++) {
- Packet::Entry e(value+1, j+1, vespalib::ConstBufferRef((const char *)&value, sizeof(value)));
+ auto domainWriter = tls.getWriter(domain);
+
+ for (size_t i = 0; i < numPackets; i++) {
+ auto p = std::make_unique<Packet>(DEFAULT_PACKET_SIZE);
+ for (size_t j = 0; j < numEntries; j++, value++) {
+ Packet::Entry e(value + 1, j + 1, vespalib::ConstBufferRef((const char *) &value, sizeof(value)));
p->add(e);
- if ( p->sizeBytes() > DEFAULT_PACKET_SIZE ) {
- domainWriter->append(*p, std::make_shared<CountDone>(inFlight));
+ if (p->sizeBytes() > DEFAULT_PACKET_SIZE) {
+ domainWriter->append(*p, onDone);
p = std::make_unique<Packet>(DEFAULT_PACKET_SIZE);
}
}
- domainWriter->append(*p, std::make_shared<CountDone>(inFlight));
- auto keep = domainWriter->startCommit(Writer::DoneCallback());
- LOG(info, "Inflight %ld", inFlight.load());
- }
- while (inFlight.load() != 0) {
- std::this_thread::sleep_for(10ms);
- LOG(info, "Waiting for inflight %ld to reach zero", inFlight.load());
+ domainWriter->append(*p, onDone);
+ auto keep = domainWriter->startCommit(onDone);
}
-
}
+void
+fillDomainTest(TransLogServer & tls, const vespalib::string & domain, size_t numPackets, size_t numEntries) {
+ vespalib::Gate gate;
+ fillDomainTest(std::make_shared<vespalib::GateCallback>(gate), tls, domain, numPackets, numEntries);
+ gate.await();
+}
void
fillDomainTest(Session * s1, size_t numPackets, size_t numEntries, size_t entrySize)
@@ -545,7 +536,7 @@ partialUpdateTest(const vespalib::string & testDir) {
ASSERT_TRUE( visitor->visit(5, 7) );
for (size_t i(0); ! ca._eof && (i < 1000); i++ ) { std::this_thread::sleep_for(10ms); }
ASSERT_TRUE( ca._eof );
- ASSERT_TRUE( ca.map().size() == 1);
+ ASSERT_EQUAL(1u, ca.map().size());
ASSERT_TRUE( ca.hasSerial(7) );
CallBackUpdate ca1;
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp
index b68923b90bf..e40717e6375 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_header.cpp
@@ -191,9 +191,9 @@ AttributeHeader::internalExtractTags(const vespalib::GenericHeader &header)
}
AttributeHeader
-AttributeHeader::extractTags(const vespalib::GenericHeader &header)
+AttributeHeader::extractTags(const vespalib::GenericHeader &header, const vespalib::string &file_name)
{
- AttributeHeader result;
+ AttributeHeader result(file_name);
result.internalExtractTags(header);
return result;
}
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_header.h b/searchlib/src/vespa/searchlib/attribute/attribute_header.h
index 00da28baf80..7c0b8f3084b 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_header.h
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_header.h
@@ -69,7 +69,7 @@ public:
bool getPredicateParamsSet() const { return _predicateParamsSet; }
bool getCollectionTypeParamsSet() const { return _collectionTypeParamsSet; }
const std::optional<HnswIndexParams>& get_hnsw_index_params() const { return _hnsw_index_params; }
- static AttributeHeader extractTags(const vespalib::GenericHeader &header);
+ static AttributeHeader extractTags(const vespalib::GenericHeader &header, const vespalib::string &file_name);
void addTags(vespalib::GenericHeader &header) const;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
index 6c929ad5981..8bc28abc238 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
@@ -311,6 +311,165 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::normalize_posting_lists(
}
template <>
+bool
+EnumStoreDictionary<EnumTree>::normalize_posting_lists(std::function<void(std::vector<EntryRef>&)>, const EntryRefFilter&)
+{
+ LOG_ABORT("should not be reached");
+}
+
+namespace {
+
+template <typename HashDictionaryT>
+class ChangeWriterBase
+{
+protected:
+ HashDictionaryT* _hash_dict;
+ static constexpr bool has_hash_dictionary = true;
+ ChangeWriterBase()
+ : _hash_dict(nullptr)
+ {
+ }
+public:
+ void set_hash_dict(HashDictionaryT &hash_dict) { _hash_dict = &hash_dict; }
+};
+
+template <>
+class ChangeWriterBase<vespalib::datastore::NoHashDictionary>
+{
+protected:
+ static constexpr bool has_hash_dictionary = false;
+ ChangeWriterBase() = default;
+};
+
+template <typename HashDictionaryT>
+class ChangeWriter : public ChangeWriterBase<HashDictionaryT> {
+ using Parent = ChangeWriterBase<HashDictionaryT>;
+ using Parent::has_hash_dictionary;
+ std::vector<std::pair<EntryRef,uint32_t*>> _tree_refs;
+public:
+ ChangeWriter(uint32_t capacity);
+ ~ChangeWriter();
+ bool write(const std::vector<EntryRef>& refs);
+ void emplace_back(EntryRef key, uint32_t& tree_ref) { _tree_refs.emplace_back(std::make_pair(key, &tree_ref)); }
+};
+
+template <typename HashDictionaryT>
+ChangeWriter<HashDictionaryT>::ChangeWriter(uint32_t capacity)
+ : ChangeWriterBase<HashDictionaryT>(),
+ _tree_refs()
+{
+ _tree_refs.reserve(capacity);
+}
+
+template <typename HashDictionaryT>
+ChangeWriter<HashDictionaryT>::~ChangeWriter() = default;
+
+template <typename HashDictionaryT>
+bool
+ChangeWriter<HashDictionaryT>::write(const std::vector<EntryRef> &refs)
+{
+ bool changed = false;
+ assert(refs.size() == _tree_refs.size());
+ auto tree_ref = _tree_refs.begin();
+ for (auto ref : refs) {
+ EntryRef old_ref(*tree_ref->second);
+ if (ref != old_ref) {
+ if (!changed) {
+ // Note: Needs review when porting to other platforms
+ // Assumes that other CPUs observes stores from this CPU in order
+ std::atomic_thread_fence(std::memory_order_release);
+ changed = true;
+ }
+ *tree_ref->second = ref.ref();
+ if constexpr (has_hash_dictionary) {
+ auto find_result = this->_hash_dict->find(this->_hash_dict->get_default_comparator(), tree_ref->first);
+ assert(find_result != nullptr && find_result->first.load_relaxed() == tree_ref->first);
+ assert(find_result->second.load_relaxed() == old_ref);
+ find_result->second.store_release(ref);
+ }
+ }
+ ++tree_ref;
+ }
+ assert(tree_ref == _tree_refs.end());
+ _tree_refs.clear();
+ return changed;
+}
+
+}
+
+template <typename BTreeDictionaryT, typename HashDictionaryT>
+bool
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter)
+{
+ if constexpr (has_btree_dictionary) {
+ std::vector<EntryRef> refs;
+ refs.reserve(1024);
+ bool changed = false;
+ ChangeWriter<HashDictionaryT> change_writer(refs.capacity());
+ if constexpr (has_hash_dictionary) {
+ change_writer.set_hash_dict(this->_hash_dict);
+ }
+ auto& dict = this->_btree_dict;
+ for (auto itr = dict.begin(); itr.valid(); ++itr) {
+ EntryRef ref(itr.getData());
+ if (ref.valid()) {
+ if (filter.has(ref)) {
+ refs.emplace_back(ref);
+ change_writer.emplace_back(itr.getKey(), itr.getWData());
+ if (refs.size() >= refs.capacity()) {
+ normalize(refs);
+ changed |= change_writer.write(refs);
+ refs.clear();
+ }
+ }
+ }
+ }
+ if (!refs.empty()) {
+ normalize(refs);
+ changed |= change_writer.write(refs);
+ }
+ return changed;
+ } else {
+ return this->_hash_dict.normalize_values(normalize, filter);
+ }
+}
+
+template <>
+void
+EnumStoreDictionary<EnumTree>::foreach_posting_list(std::function<void(const std::vector<EntryRef>&)>, const EntryRefFilter&)
+{
+ LOG_ABORT("should not be reached");
+}
+
+template <typename BTreeDictionaryT, typename HashDictionaryT>
+void
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter)
+{
+ if constexpr (has_btree_dictionary) {
+ std::vector<EntryRef> refs;
+ refs.reserve(1024);
+ auto& dict = this->_btree_dict;
+ for (auto itr = dict.begin(); itr.valid(); ++itr) {
+ EntryRef ref(itr.getData());
+ if (ref.valid()) {
+ if (filter.has(ref)) {
+ refs.emplace_back(ref);
+ if (refs.size() >= refs.capacity()) {
+ callback(refs);
+ refs.clear();
+ }
+ }
+ }
+ }
+ if (!refs.empty()) {
+ callback(refs);
+ }
+ } else {
+ this->_hash_dict.foreach_value(callback, filter);
+ }
+}
+
+template <>
const EnumPostingTree &
EnumStoreDictionary<EnumTree>::get_posting_dictionary() const
{
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
index 4d0509c0eb1..db1176c5484 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
@@ -16,6 +16,7 @@ template <typename BTreeDictionaryT, typename HashDictionaryT = vespalib::datast
class EnumStoreDictionary : public vespalib::datastore::UniqueStoreDictionary<BTreeDictionaryT, IEnumStoreDictionary, HashDictionaryT> {
protected:
using EntryRef = IEnumStoreDictionary::EntryRef;
+ using EntryRefFilter = IEnumStoreDictionary::EntryRefFilter;
using Index = IEnumStoreDictionary::Index;
using BTreeDictionaryType = BTreeDictionaryT;
using EntryComparator = IEnumStoreDictionary::EntryComparator;
@@ -54,6 +55,8 @@ public:
void clear_all_posting_lists(std::function<void(EntryRef)> clearer) override;
void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) override;
bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) override;
+ bool normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) override;
+ void foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) override;
const EnumPostingTree& get_posting_dictionary() const override;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
index 2c6ac521b30..3e578856c2b 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
@@ -81,7 +81,7 @@ void
EnumAttribute<B>::populate_address_space_usage(AddressSpaceUsage& usage) const
{
B::populate_address_space_usage(usage);
- usage.set(AddressSpaceComponents::enum_store, _enumStore.get_address_space_usage());
+ usage.set(AddressSpaceComponents::enum_store, _enumStore.get_values_address_space_usage());
}
} // namespace search
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h b/searchlib/src/vespa/searchlib/attribute/enumstore.h
index a140a529c7d..9dba988fb6a 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h
@@ -96,7 +96,7 @@ public:
vespalib::MemoryUsage get_values_memory_usage() const override { return _store.get_allocator().get_data_store().getMemoryUsage(); }
vespalib::MemoryUsage get_dictionary_memory_usage() const override { return _dict->get_memory_usage(); }
- vespalib::AddressSpace get_address_space_usage() const;
+ vespalib::AddressSpace get_values_address_space_usage() const override;
void transfer_hold_lists(generation_t generation);
void trim_hold_lists(generation_t first_used);
@@ -201,7 +201,7 @@ public:
void free_unused_values(IndexList to_remove);
vespalib::MemoryUsage update_stat() override;
std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) override;
- std::unique_ptr<EnumIndexRemapper> compact_worst_values(bool compact_memory, bool compact_address_space) override;
+ std::unique_ptr<EnumIndexRemapper> compact_worst_values(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) override;
bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy) override;
uint64_t get_compaction_count() const override {
return _store.get_data_store().get_compaction_count();
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
index c202d780659..ef080775dbc 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
@@ -18,10 +18,11 @@
#include <vespa/vespalib/datastore/unique_store_string_allocator.hpp>
#include <vespa/vespalib/util/array.hpp>
#include <vespa/searchlib/util/bufferwriter.h>
-#include <vespa/searchcommon/common/compaction_strategy.h>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
namespace search {
+using vespalib::datastore::CompactionStrategy;
using vespalib::datastore::EntryComparator;
std::unique_ptr<vespalib::datastore::IUniqueStoreDictionary>
@@ -91,9 +92,9 @@ EnumStoreT<EntryT>::~EnumStoreT() = default;
template <typename EntryT>
vespalib::AddressSpace
-EnumStoreT<EntryT>::get_address_space_usage() const
+EnumStoreT<EntryT>::get_values_address_space_usage() const
{
- return _store.get_address_space_usage();
+ return _store.get_values_address_space_usage();
}
template <typename EntryT>
@@ -228,23 +229,18 @@ template <typename EntryT>
std::unique_ptr<IEnumStore::EnumIndexRemapper>
EnumStoreT<EntryT>::consider_compact_values(const CompactionStrategy& compaction_strategy)
{
- size_t used_bytes = _cached_values_memory_usage.usedBytes();
- size_t dead_bytes = _cached_values_memory_usage.deadBytes();
- size_t used_address_space = _cached_values_address_space_usage.used();
- size_t dead_address_space = _cached_values_address_space_usage.dead();
- bool compact_memory = compaction_strategy.should_compact_memory(used_bytes, dead_bytes);
- bool compact_address_space = compaction_strategy.should_compact_address_space(used_address_space, dead_address_space);
- if (compact_memory || compact_address_space) {
- return compact_worst_values(compact_memory, compact_address_space);
+ auto compaction_spec = compaction_strategy.should_compact(_cached_values_memory_usage, _cached_values_address_space_usage);
+ if (compaction_spec.compact()) {
+ return compact_worst_values(compaction_spec, compaction_strategy);
}
return std::unique_ptr<IEnumStore::EnumIndexRemapper>();
}
template <typename EntryT>
std::unique_ptr<IEnumStore::EnumIndexRemapper>
-EnumStoreT<EntryT>::compact_worst_values(bool compact_memory, bool compact_address_space)
+EnumStoreT<EntryT>::compact_worst_values(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
- return _store.compact_worst(compact_memory, compact_address_space);
+ return _store.compact_worst(compaction_spec, compaction_strategy);
}
template <typename EntryT>
@@ -254,16 +250,14 @@ EnumStoreT<EntryT>::consider_compact_dictionary(const CompactionStrategy& compac
if (_dict->has_held_buffers()) {
return false;
}
- if (compaction_strategy.should_compact_memory(_cached_dictionary_btree_usage.usedBytes(),
- _cached_dictionary_btree_usage.deadBytes()))
+ if (compaction_strategy.should_compact_memory(_cached_dictionary_btree_usage))
{
- _dict->compact_worst(true, false);
+ _dict->compact_worst(true, false, compaction_strategy);
return true;
}
- if (compaction_strategy.should_compact_memory(_cached_dictionary_hash_usage.usedBytes(),
- _cached_dictionary_hash_usage.deadBytes()))
+ if (compaction_strategy.should_compact_memory(_cached_dictionary_hash_usage))
{
- _dict->compact_worst(false, true);
+ _dict->compact_worst(false, true, compaction_strategy);
return true;
}
return false;
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
index 1f3165828bc..cfd7a330d2c 100644
--- a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
+++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
@@ -6,10 +6,18 @@
#include "enum_store_types.h"
#include <vespa/vespalib/datastore/entryref.h>
#include <vespa/vespalib/datastore/unique_store_enumerator.h>
-#include <vespa/vespalib/util/memoryusage.h>
+
+namespace vespalib {
+
+class AddressSpace;
+class MemoryUsage;
+
+}
namespace vespalib::datastore {
+class CompactionSpec;
+class CompactionStrategy;
class DataStoreBase;
template <typename> class UniqueStoreRemapper;
@@ -19,7 +27,6 @@ template <typename> class UniqueStoreRemapper;
namespace search {
class BufferWriter;
-class CompactionStrategy;
class IEnumStoreDictionary;
/**
@@ -30,6 +37,8 @@ public:
using Index = enumstore::Index;
using InternalIndex = enumstore::InternalIndex;
using IndexVector = enumstore::IndexVector;
+ using CompactionSpec = vespalib::datastore::CompactionSpec;
+ using CompactionStrategy = vespalib::datastore::CompactionStrategy;
using EnumHandle = enumstore::EnumHandle;
using EnumVector = enumstore::EnumVector;
using EnumIndexRemapper = vespalib::datastore::UniqueStoreRemapper<InternalIndex>;
@@ -49,10 +58,11 @@ public:
virtual const IEnumStoreDictionary& get_dictionary() const = 0;
virtual uint32_t get_num_uniques() const = 0;
virtual vespalib::MemoryUsage get_values_memory_usage() const = 0;
+ virtual vespalib::AddressSpace get_values_address_space_usage() const = 0;
virtual vespalib::MemoryUsage get_dictionary_memory_usage() const = 0;
virtual vespalib::MemoryUsage update_stat() = 0;
virtual std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) = 0;
- virtual std::unique_ptr<EnumIndexRemapper> compact_worst_values(bool compact_memory, bool compact_address_space) = 0;
+ virtual std::unique_ptr<EnumIndexRemapper> compact_worst_values(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) = 0;
virtual bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy) = 0;
virtual uint64_t get_compaction_count() const = 0;
// Should only be used by unit tests.
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
index a8cf6881b86..a9716ec5d05 100644
--- a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
+++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
@@ -30,6 +30,7 @@ class IEnumStoreDictionary : public vespalib::datastore::IUniqueStoreDictionary
public:
using EntryRef = vespalib::datastore::EntryRef;
using EntryComparator = vespalib::datastore::EntryComparator;
+ using EntryRefFilter = vespalib::datastore::EntryRefFilter;
using EnumVector = IEnumStore::EnumVector;
using Index = IEnumStore::Index;
using IndexList = IEnumStore::IndexList;
@@ -52,7 +53,25 @@ public:
virtual Index remap_index(Index idx) = 0;
virtual void clear_all_posting_lists(std::function<void(EntryRef)> clearer) = 0;
virtual void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) = 0;
+ /*
+ * Scan dictionary and call normalize function for each value. If
+ * returned value is different then write back the modified value to
+ * the dictionary. Only used by unit tests.
+ */
virtual bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) = 0;
+ /*
+ * Scan dictionary and call normalize function for batches of values
+ * that pass the filter. Write back modified values to the dictionary.
+ * Used by compaction of posting lists when moving short arrays,
+ * bitvectors or btree roots.
+ */
+ virtual bool normalize_posting_lists(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter) = 0;
+ /*
+ * Scan dictionary and call callback function for batches of values
+ * that pass the filter. Used by compaction of posting lists when
+ * moving btree nodes.
+ */
+ virtual void foreach_posting_list(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter) = 0;
virtual const EnumPostingTree& get_posting_dictionary() const = 0;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h b/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h
index 135870e29a5..20cec9a31c2 100644
--- a/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h
+++ b/searchlib/src/vespa/searchlib/attribute/ipostinglistattributebase.h
@@ -4,7 +4,7 @@
#include <vespa/searchcommon/attribute/iattributevector.h>
-namespace search { class CompactionStrategy; }
+namespace vespalib::datastore { class CompactionStrategy; }
namespace vespalib { class MemoryUsage; }
@@ -13,16 +13,9 @@ namespace search::attribute {
class IPostingListAttributeBase
{
public:
- virtual
- ~IPostingListAttributeBase()
- {
- }
-
- virtual void
- clearPostings(IAttributeVector::EnumHandle eidx,
- uint32_t fromLid,
- uint32_t toLid) = 0;
-
+ using CompactionStrategy = vespalib::datastore::CompactionStrategy;
+ virtual ~IPostingListAttributeBase() = default;
+ virtual void clearPostings(IAttributeVector::EnumHandle eidx, uint32_t fromLid, uint32_t toLid) = 0;
virtual void forwardedShrinkLidSpace(uint32_t newSize) = 0;
virtual vespalib::MemoryUsage getMemoryUsage() const = 0;
virtual bool consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy) = 0;
diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h
index 9720e88543d..81abaa05a45 100644
--- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h
+++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.h
@@ -44,7 +44,7 @@ public:
void doneLoadFromMultiValue() { _store.setInitializing(false); }
- void compactWorst(bool compactMemory, bool compactAddressSpace) override;
+ void compactWorst(CompactionSpec compactionSpec, const CompactionStrategy& compaction_strategy) override;
vespalib::AddressSpace getAddressSpaceUsage() const override;
vespalib::MemoryUsage getArrayStoreMemoryUsage() const override;
diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp
index 25065a200e9..fb81a60cb13 100644
--- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping.hpp
@@ -53,9 +53,9 @@ MultiValueMapping<EntryT,RefT>::replace(uint32_t docId, ConstArrayRef values)
template <typename EntryT, typename RefT>
void
-MultiValueMapping<EntryT,RefT>::compactWorst(bool compactMemory, bool compactAddressSpace)
+MultiValueMapping<EntryT,RefT>::compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
- vespalib::datastore::ICompactionContext::UP compactionContext(_store.compactWorst(compactMemory, compactAddressSpace));
+ vespalib::datastore::ICompactionContext::UP compactionContext(_store.compactWorst(compaction_spec, compaction_strategy));
if (compactionContext) {
compactionContext->compact(vespalib::ArrayRef<EntryRef>(&_indices[0], _indices.size()));
}
diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp
index 2edc30cc2c4..19dd4495dc6 100644
--- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.cpp
@@ -1,11 +1,14 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "multi_value_mapping_base.h"
-#include <vespa/searchcommon/common/compaction_strategy.h>
+#include <vespa/vespalib/datastore/compaction_spec.h>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <cassert>
namespace search::attribute {
+using vespalib::datastore::CompactionStrategy;
+
MultiValueMappingBase::MultiValueMappingBase(const vespalib::GrowStrategy &gs,
vespalib::GenerationHolder &genHolder)
: _indices(gs, genHolder),
@@ -77,14 +80,9 @@ MultiValueMappingBase::updateStat()
bool
MultiValueMappingBase::considerCompact(const CompactionStrategy &compactionStrategy)
{
- size_t usedBytes = _cachedArrayStoreMemoryUsage.usedBytes();
- size_t deadBytes = _cachedArrayStoreMemoryUsage.deadBytes();
- size_t usedArrays = _cachedArrayStoreAddressSpaceUsage.used();
- size_t deadArrays = _cachedArrayStoreAddressSpaceUsage.dead();
- bool compactMemory = compactionStrategy.should_compact_memory(usedBytes, deadBytes);
- bool compactAddressSpace = compactionStrategy.should_compact_address_space(usedArrays, deadArrays);
- if (compactMemory || compactAddressSpace) {
- compactWorst(compactMemory, compactAddressSpace);
+ auto compaction_spec = compactionStrategy.should_compact(_cachedArrayStoreMemoryUsage, _cachedArrayStoreAddressSpaceUsage);
+ if (compaction_spec.compact()) {
+ compactWorst(compaction_spec, compactionStrategy);
return true;
}
return false;
diff --git a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h
index 952e9dbbe56..0034878fea6 100644
--- a/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h
+++ b/searchlib/src/vespa/searchlib/attribute/multi_value_mapping_base.h
@@ -7,7 +7,10 @@
#include <vespa/vespalib/util/rcuvector.h>
#include <functional>
-namespace search { class CompactionStrategy; }
+namespace vespalib::datastore {
+class CompactionSpec;
+class CompactionStrategy;
+}
namespace search::attribute {
@@ -17,6 +20,8 @@ namespace search::attribute {
class MultiValueMappingBase
{
public:
+ using CompactionSpec = vespalib::datastore::CompactionSpec;
+ using CompactionStrategy = vespalib::datastore::CompactionStrategy;
using EntryRef = vespalib::datastore::EntryRef;
using RefVector = vespalib::RcuVectorBase<EntryRef>;
@@ -51,7 +56,7 @@ public:
uint32_t getNumKeys() const { return _indices.size(); }
uint32_t getCapacityKeys() const { return _indices.capacity(); }
- virtual void compactWorst(bool compatMemory, bool compactAddressSpace) = 0;
+ virtual void compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy) = 0;
bool considerCompact(const CompactionStrategy &compactionStrategy);
};
diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp b/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp
index b114a355bb4..8790bdd9885 100644
--- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.cpp
@@ -30,13 +30,17 @@ remap_enum_store_refs(const EnumIndexRemapper& remapper, AttributeVector& v, att
v.logEnumStoreEvent("compactfixup", "drain");
{
AttributeVector::EnumModifier enum_guard(v.getEnumModifier());
+ auto& filter = remapper.get_entry_ref_filter();
v.logEnumStoreEvent("compactfixup", "start");
for (uint32_t doc = 0; doc < v.getNumDocs(); ++doc) {
vespalib::ConstArrayRef<WeightedIndex> indicesRef(multi_value_mapping.get(doc));
WeightedIndexVector indices(indicesRef.cbegin(), indicesRef.cend());
for (uint32_t i = 0; i < indices.size(); ++i) {
- EnumIndex oldIndex = indices[i].value();
- indices[i] = WeightedIndex(remapper.remap(oldIndex), indices[i].weight());
+ EnumIndex ref = indices[i].value();
+ if (ref.valid() && filter.has(ref)) {
+ ref = remapper.remap(ref);
+ }
+ indices[i] = WeightedIndex(ref, indices[i].weight());
}
std::atomic_thread_fence(std::memory_order_release);
multi_value_mapping.replace(doc, indices);
diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
index 3451c2b0456..55aa1b2490b 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
@@ -7,11 +7,14 @@
#include <vespa/vespalib/btree/btreeiterator.hpp>
#include <vespa/vespalib/btree/btreerootbase.cpp>
#include <vespa/vespalib/datastore/datastore.hpp>
+#include <vespa/vespalib/datastore/compaction_spec.h>
+#include <vespa/vespalib/datastore/entry_ref_filter.h>
#include <vespa/vespalib/datastore/buffer_type.hpp>
namespace search::attribute {
using vespalib::btree::BTreeNoLeafData;
+using vespalib::datastore::EntryRefFilter;
// #define FORCE_BITVECTORS
@@ -127,45 +130,47 @@ PostingStore<DataT>::removeSparseBitVectors()
}
}
if (needscan) {
- res = _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef
- { return consider_remove_sparse_bitvector(posting_idx); });
+ EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits);
+ filter.add_buffers(_bvType.get_active_buffers());
+ res = _dictionary.normalize_posting_lists([this](std::vector<EntryRef>& refs)
+ { consider_remove_sparse_bitvector(refs); },
+ filter);
}
return res;
}
template <typename DataT>
-typename PostingStore<DataT>::EntryRef
-PostingStore<DataT>::consider_remove_sparse_bitvector(EntryRef ref)
+void
+PostingStore<DataT>::consider_remove_sparse_bitvector(std::vector<EntryRef>& refs)
{
- if (!ref.valid() || !isBitVector(getTypeId(EntryRef(ref)))) {
- return ref;
- }
- RefType iRef(ref);
- uint32_t typeId = getTypeId(iRef);
- assert(isBitVector(typeId));
- assert(_bvs.find(ref.ref() )!= _bvs.end());
- BitVectorEntry *bve = getWBitVectorEntry(iRef);
- BitVector &bv = *bve->_bv.get();
- uint32_t docFreq = bv.countTrueBits();
- if (bve->_tree.valid()) {
- RefType iRef2(bve->_tree);
- assert(isBTree(iRef2));
- const BTreeType *tree = getTreeEntry(iRef2);
- assert(tree->size(_allocator) == docFreq);
- (void) tree;
- }
- if (docFreq < _minBvDocFreq) {
- dropBitVector(ref);
- if (ref.valid()) {
+ for (auto& ref : refs) {
+ RefType iRef(ref);
+ assert(iRef.valid());
+ uint32_t typeId = getTypeId(iRef);
+ assert(isBitVector(typeId));
+ assert(_bvs.find(iRef.ref()) != _bvs.end());
+ BitVectorEntry *bve = getWBitVectorEntry(iRef);
+ BitVector &bv = *bve->_bv.get();
+ uint32_t docFreq = bv.countTrueBits();
+ if (bve->_tree.valid()) {
+ RefType iRef2(bve->_tree);
+ assert(isBTree(iRef2));
+ const BTreeType *tree = getTreeEntry(iRef2);
+ assert(tree->size(_allocator) == docFreq);
+ (void) tree;
+ }
+ if (docFreq < _minBvDocFreq) {
+ dropBitVector(ref);
iRef = ref;
- typeId = getTypeId(iRef);
- if (isBTree(typeId)) {
- BTreeType *tree = getWTreeEntry(iRef);
- normalizeTree(ref, tree, false);
+ if (iRef.valid()) {
+ typeId = getTypeId(iRef);
+ if (isBTree(typeId)) {
+ BTreeType *tree = getWTreeEntry(iRef);
+ normalizeTree(ref, tree, false);
+ }
}
}
}
- return ref;
}
template <typename DataT>
@@ -647,96 +652,114 @@ PostingStore<DataT>::update_stat()
template <typename DataT>
void
-PostingStore<DataT>::move_btree_nodes(EntryRef ref)
+PostingStore<DataT>::move_btree_nodes(const std::vector<EntryRef>& refs)
{
- if (ref.valid()) {
+ for (auto ref : refs) {
RefType iRef(ref);
+ assert(iRef.valid());
uint32_t typeId = getTypeId(iRef);
uint32_t clusterSize = getClusterSize(typeId);
- if (clusterSize == 0) {
- if (isBitVector(typeId)) {
- BitVectorEntry *bve = getWBitVectorEntry(iRef);
- RefType iRef2(bve->_tree);
- if (iRef2.valid()) {
- assert(isBTree(iRef2));
- BTreeType *tree = getWTreeEntry(iRef2);
- tree->move_nodes(_allocator);
- }
- } else {
- BTreeType *tree = getWTreeEntry(iRef);
+ assert(clusterSize == 0);
+ if (isBitVector(typeId)) {
+ BitVectorEntry *bve = getWBitVectorEntry(iRef);
+ RefType iRef2(bve->_tree);
+ if (iRef2.valid()) {
+ assert(isBTree(iRef2));
+ BTreeType *tree = getWTreeEntry(iRef2);
tree->move_nodes(_allocator);
}
+ } else {
+ assert(isBTree(typeId));
+ BTreeType *tree = getWTreeEntry(iRef);
+ tree->move_nodes(_allocator);
}
}
}
template <typename DataT>
-typename PostingStore<DataT>::EntryRef
-PostingStore<DataT>::move(EntryRef ref)
+void
+PostingStore<DataT>::move(std::vector<EntryRef>& refs)
{
- if (!ref.valid()) {
- return EntryRef();
- }
- RefType iRef(ref);
- uint32_t typeId = getTypeId(iRef);
- uint32_t clusterSize = getClusterSize(typeId);
- if (clusterSize == 0) {
- if (isBitVector(typeId)) {
- BitVectorEntry *bve = getWBitVectorEntry(iRef);
- RefType iRef2(bve->_tree);
- if (iRef2.valid()) {
- assert(isBTree(iRef2));
- if (_store.getCompacting(iRef2)) {
- BTreeType *tree = getWTreeEntry(iRef2);
- auto ref_and_ptr = allocBTreeCopy(*tree);
- tree->prepare_hold();
- bve->_tree = ref_and_ptr.ref;
+ for (auto& ref : refs) {
+ RefType iRef(ref);
+ assert(iRef.valid());
+ uint32_t typeId = getTypeId(iRef);
+ uint32_t clusterSize = getClusterSize(typeId);
+ if (clusterSize == 0) {
+ if (isBitVector(typeId)) {
+ BitVectorEntry *bve = getWBitVectorEntry(iRef);
+ RefType iRef2(bve->_tree);
+ if (iRef2.valid()) {
+ assert(isBTree(iRef2));
+ if (_store.getCompacting(iRef2)) {
+ BTreeType *tree = getWTreeEntry(iRef2);
+ auto ref_and_ptr = allocBTreeCopy(*tree);
+ tree->prepare_hold();
+ // Note: Needs review when porting to other platforms
+ // Assumes that other CPUs observe stores from this CPU in order
+ std::atomic_thread_fence(std::memory_order_release);
+ bve->_tree = ref_and_ptr.ref;
+ }
}
+ if (_store.getCompacting(iRef)) {
+ auto new_ref = allocBitVectorCopy(*bve).ref;
+ _bvs.erase(iRef.ref());
+ _bvs.insert(new_ref.ref());
+ ref = new_ref;
+ }
+ } else {
+ assert(isBTree(typeId));
+ assert(_store.getCompacting(iRef));
+ BTreeType *tree = getWTreeEntry(iRef);
+ auto ref_and_ptr = allocBTreeCopy(*tree);
+ tree->prepare_hold();
+ ref = ref_and_ptr.ref;
}
- if (!_store.getCompacting(ref)) {
- return ref;
- }
- auto new_ref = allocBitVectorCopy(*bve).ref;
- _bvs.erase(ref.ref());
- _bvs.insert(new_ref.ref());
- return new_ref;
} else {
- if (!_store.getCompacting(ref)) {
- return ref;
- }
- BTreeType *tree = getWTreeEntry(iRef);
- auto ref_and_ptr = allocBTreeCopy(*tree);
- tree->prepare_hold();
- return ref_and_ptr.ref;
+ assert(_store.getCompacting(iRef));
+ const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize);
+ ref = allocKeyDataCopy(shortArray, clusterSize).ref;
}
}
- if (!_store.getCompacting(ref)) {
- return ref;
- }
- const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize);
- return allocKeyDataCopy(shortArray, clusterSize).ref;
}
template <typename DataT>
void
-PostingStore<DataT>::compact_worst_btree_nodes()
+PostingStore<DataT>::compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy)
{
- auto to_hold = this->start_compact_worst_btree_nodes();
- _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef
- {
- move_btree_nodes(posting_idx);
- return posting_idx;
- });
+ auto to_hold = this->start_compact_worst_btree_nodes(compaction_strategy);
+ EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits);
+ // Only look at buffers containing bitvectors and btree roots
+ filter.add_buffers(this->_treeType.get_active_buffers());
+ filter.add_buffers(_bvType.get_active_buffers());
+ _dictionary.foreach_posting_list([this](const std::vector<EntryRef>& refs)
+ { move_btree_nodes(refs); }, filter);
this->finish_compact_worst_btree_nodes(to_hold);
}
template <typename DataT>
void
-PostingStore<DataT>::compact_worst_buffers()
+PostingStore<DataT>::compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
- auto to_hold = this->start_compact_worst_buffers();
- _dictionary.normalize_posting_lists([this](EntryRef posting_idx) -> EntryRef
- { return move(posting_idx); });
+
+ auto to_hold = this->start_compact_worst_buffers(compaction_spec, compaction_strategy);
+ bool compact_btree_roots = false;
+ EntryRefFilter filter(RefType::numBuffers(), RefType::offset_bits);
+ filter.add_buffers(to_hold);
+ // Start with looking at buffers being compacted
+ for (uint32_t buffer_id : to_hold) {
+ if (isBTree(_store.getBufferState(buffer_id).getTypeId())) {
+ compact_btree_roots = true;
+ }
+ }
+ if (compact_btree_roots) {
+ // If we are compacting btree roots then we also have to look at bitvector
+ // buffers
+ filter.add_buffers(_bvType.get_active_buffers());
+ }
+ _dictionary.normalize_posting_lists([this](std::vector<EntryRef>& refs)
+ { return move(refs); },
+ filter);
this->finishCompact(to_hold);
}
@@ -747,8 +770,8 @@ PostingStore<DataT>::consider_compact_worst_btree_nodes(const CompactionStrategy
if (_allocator.getNodeStore().has_held_buffers()) {
return false;
}
- if (compaction_strategy.should_compact_memory(_cached_allocator_memory_usage.usedBytes(), _cached_allocator_memory_usage.deadBytes())) {
- compact_worst_btree_nodes();
+ if (compaction_strategy.should_compact_memory(_cached_allocator_memory_usage)) {
+ compact_worst_btree_nodes(compaction_strategy);
return true;
}
return false;
@@ -761,8 +784,9 @@ PostingStore<DataT>::consider_compact_worst_buffers(const CompactionStrategy& co
if (_store.has_held_buffers()) {
return false;
}
- if (compaction_strategy.should_compact_memory(_cached_store_memory_usage.usedBytes(), _cached_store_memory_usage.deadBytes())) {
- compact_worst_buffers();
+ if (compaction_strategy.should_compact_memory(_cached_store_memory_usage)) {
+ CompactionSpec compaction_spec(true, false);
+ compact_worst_buffers(compaction_spec, compaction_strategy);
return true;
}
return false;
diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.h b/searchlib/src/vespa/searchlib/attribute/postingstore.h
index a0f0be1c430..58097194f50 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingstore.h
+++ b/searchlib/src/vespa/searchlib/attribute/postingstore.h
@@ -77,6 +77,8 @@ public:
typedef typename Parent::AggregatedType AggregatedType;
typedef typename Parent::BTreeTypeRefPair BTreeTypeRefPair;
typedef typename Parent::Builder Builder;
+ using CompactionSpec = vespalib::datastore::CompactionSpec;
+ using CompactionStrategy = vespalib::datastore::CompactionStrategy;
typedef vespalib::datastore::EntryRef EntryRef;
typedef std::less<uint32_t> CompareT;
using Parent::applyNewArray;
@@ -89,6 +91,7 @@ public:
using Parent::getWTreeEntry;
using Parent::getTreeEntry;
using Parent::getKeyDataEntry;
+ using Parent::isBTree;
using Parent::clusterLimit;
using Parent::allocBTree;
using Parent::allocBTreeCopy;
@@ -105,10 +108,8 @@ public:
~PostingStore();
bool removeSparseBitVectors() override;
- EntryRef consider_remove_sparse_bitvector(EntryRef ref);
+ void consider_remove_sparse_bitvector(std::vector<EntryRef> &refs);
static bool isBitVector(uint32_t typeId) { return typeId == BUFFERTYPE_BITVECTOR; }
- static bool isBTree(uint32_t typeId) { return typeId == BUFFERTYPE_BTREE; }
- bool isBTree(RefType ref) const { return isBTree(getTypeId(ref)); }
void applyNew(EntryRef &ref, AddIter a, AddIter ae);
@@ -188,11 +189,11 @@ public:
vespalib::MemoryUsage getMemoryUsage() const;
vespalib::MemoryUsage update_stat();
- void move_btree_nodes(EntryRef ref);
- EntryRef move(EntryRef ref);
+ void move_btree_nodes(const std::vector<EntryRef> &refs);
+ void move(std::vector<EntryRef>& refs);
- void compact_worst_btree_nodes();
- void compact_worst_buffers();
+ void compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy);
+ void compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
bool consider_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy);
bool consider_compact_worst_buffers(const CompactionStrategy& compaction_strategy);
private:
diff --git a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
index d9024af724b..6268a6da701 100644
--- a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
@@ -194,7 +194,7 @@ PredicateAttribute::onLoad(vespalib::Executor *)
buffer.moveFreeToData(size);
const GenericHeader &header = loaded_buffer->getHeader();
- auto attributeHeader = attribute::AttributeHeader::extractTags(header);
+ auto attributeHeader = attribute::AttributeHeader::extractTags(header, getBaseFileName());
uint32_t version = attributeHeader.getVersion();
setCreateSerialNum(attributeHeader.getCreateSerialNum());
diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp
index eb822313d61..4ecac63f9db 100644
--- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.cpp
@@ -24,6 +24,7 @@ namespace search::attribute {
using document::DocumentId;
using document::GlobalId;
using document::IdParseException;
+using vespalib::datastore::CompactionSpec;
namespace {
@@ -291,20 +292,19 @@ ReferenceAttribute::getReference(DocId doc) const
bool
ReferenceAttribute::consider_compact_values(const CompactionStrategy &compactionStrategy)
{
- size_t used_bytes = _cached_unique_store_values_memory_usage.usedBytes();
- size_t dead_bytes = _cached_unique_store_values_memory_usage.deadBytes();
- bool compact_memory = compactionStrategy.should_compact_memory(used_bytes, dead_bytes);
+ bool compact_memory = compactionStrategy.should_compact_memory(_cached_unique_store_values_memory_usage);
if (compact_memory) {
- compact_worst_values();
+ compact_worst_values(compactionStrategy);
return true;
}
return false;
}
void
-ReferenceAttribute::compact_worst_values()
+ReferenceAttribute::compact_worst_values(const CompactionStrategy& compaction_strategy)
{
- auto remapper(_store.compact_worst(true, true));
+ CompactionSpec compaction_spec(true, true);
+ auto remapper(_store.compact_worst(compaction_spec, compaction_strategy));
if (remapper) {
remapper->remap(vespalib::ArrayRef<EntryRef>(&_indices[0], _indices.size()));
remapper->done();
@@ -318,10 +318,9 @@ ReferenceAttribute::consider_compact_dictionary(const CompactionStrategy &compac
if (dictionary.has_held_buffers()) {
return false;
}
- if (compaction_strategy.should_compact_memory(_cached_unique_store_dictionary_memory_usage.usedBytes(),
- _cached_unique_store_dictionary_memory_usage.deadBytes()))
+ if (compaction_strategy.should_compact_memory(_cached_unique_store_dictionary_memory_usage))
{
- dictionary.compact_worst(true, true);
+ dictionary.compact_worst(true, true, compaction_strategy);
return true;
}
return false;
diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h
index 4016230ef89..237a0f1ddd7 100644
--- a/searchlib/src/vespa/searchlib/attribute/reference_attribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute.h
@@ -25,6 +25,7 @@ namespace search::attribute {
class ReferenceAttribute : public NotImplementedAttribute
{
public:
+ using CompactionStrategy = vespalib::datastore::CompactionStrategy;
using EntryRef = vespalib::datastore::EntryRef;
using GlobalId = document::GlobalId;
using ReferenceStore = vespalib::datastore::UniqueStore<Reference>;
@@ -57,7 +58,7 @@ private:
uint64_t getUniqueValueCount() const override;
bool consider_compact_values(const CompactionStrategy &compactionStrategy);
- void compact_worst_values();
+ void compact_worst_values(const CompactionStrategy& compaction_strategy);
bool consider_compact_dictionary(const CompactionStrategy& compaction_strategy);
IndicesCopyVector getIndicesCopy(uint32_t size) const;
void removeReverseMapping(EntryRef oldRef, uint32_t lid);
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp
index 4323e57f6b1..18805a7b20f 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.cpp
@@ -49,13 +49,16 @@ SingleValueEnumAttributeBase::remap_enum_store_refs(const EnumIndexRemapper& rem
{
// update _enumIndices with new EnumIndex values after enum store has been compacted.
v.logEnumStoreEvent("reenumerate", "reserved");
- auto new_indexes = std::make_unique<vespalib::Array<EnumIndex>>();
- new_indexes->reserve(_enumIndices.capacity());
+ vespalib::Array<EnumIndex> new_indexes;
+ new_indexes.reserve(_enumIndices.capacity());
v.logEnumStoreEvent("reenumerate", "start");
+ auto& filter = remapper.get_entry_ref_filter();
for (uint32_t i = 0; i < _enumIndices.size(); ++i) {
- EnumIndex old_index = _enumIndices[i];
- EnumIndex new_index = remapper.remap(old_index);
- new_indexes->push_back_fast(new_index);
+ EnumIndex ref = _enumIndices[i];
+ if (ref.valid() && filter.has(ref)) {
+ ref = remapper.remap(ref);
+ }
+ new_indexes.push_back_fast(ref);
}
v.logEnumStoreEvent("compactfixup", "drain");
{
diff --git a/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h b/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h
index bb404f27709..cea251272dc 100644
--- a/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h
+++ b/searchlib/src/vespa/searchlib/common/i_compactable_lid_space.h
@@ -11,7 +11,7 @@ namespace search::common {
* Interface for a component that has a lid space that can be compacted and shrunk.
*/
struct ICompactableLidSpace {
- virtual ~ICompactableLidSpace() {}
+ virtual ~ICompactableLidSpace() = default;
/**
* Compacts the lid space down to the wanted given doc id limit.
diff --git a/searchlib/src/vespa/searchlib/docstore/compacter.cpp b/searchlib/src/vespa/searchlib/docstore/compacter.cpp
index 38f3fbef0b0..26fb79f8a4e 100644
--- a/searchlib/src/vespa/searchlib/docstore/compacter.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/compacter.cpp
@@ -26,7 +26,7 @@ BucketCompacter::BucketCompacter(size_t maxSignificantBucketBits, const Compress
_bucketizer(bucketizer),
_writeCount(0),
_maxBucketGuardDuration(vespalib::duration::zero()),
- _lastSample(),
+ _lastSample(vespalib::steady_clock::now()),
_lock(),
_backingMemory(Alloc::alloc(0x40000000), &_lock),
_tmpStore(),
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
index 7aaee7180df..b4ff050c0f6 100644
--- a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
@@ -112,7 +112,6 @@ public:
}
-using VisitCache = docstore::VisitCache;
using docstore::Value;
bool
@@ -239,7 +238,14 @@ DocumentStore::remove(uint64_t syncToken, DocumentIdT lid)
}
void
-DocumentStore::compact(uint64_t syncToken)
+DocumentStore::compactBloat(uint64_t syncToken)
+{
+ (void) syncToken;
+ // Most implementations do not offer compaction.
+}
+
+void
+DocumentStore::compactSpread(uint64_t syncToken)
{
(void) syncToken;
// Most implementations does not offer compact.
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.h b/searchlib/src/vespa/searchlib/docstore/documentstore.h
index b6021d34bef..6402c16cd5e 100644
--- a/searchlib/src/vespa/searchlib/docstore/documentstore.h
+++ b/searchlib/src/vespa/searchlib/docstore/documentstore.h
@@ -72,7 +72,8 @@ public:
void remove(uint64_t syncToken, DocumentIdT lid) override;
void flush(uint64_t syncToken) override;
uint64_t initFlush(uint64_t synctoken) override;
- void compact(uint64_t syncToken) override;
+ void compactBloat(uint64_t syncToken) override;
+ void compactSpread(uint64_t syncToken) override;
uint64_t lastSyncToken() const override;
uint64_t tentativeLastSyncToken() const override;
vespalib::system_time getLastFlushTime() const override;
@@ -80,7 +81,7 @@ public:
size_t memoryUsed() const override { return _backingStore.memoryUsed(); }
size_t getDiskFootprint() const override { return _backingStore.getDiskFootprint(); }
size_t getDiskBloat() const override { return _backingStore.getDiskBloat(); }
- size_t getMaxCompactGain() const override { return _backingStore.getMaxCompactGain(); }
+ size_t getMaxSpreadAsBloat() const override { return _backingStore.getMaxSpreadAsBloat(); }
CacheStats getCacheStats() const override;
size_t memoryMeta() const override { return _backingStore.memoryMeta(); }
const vespalib::string & getBaseDir() const override { return _backingStore.getBaseDir(); }
diff --git a/searchlib/src/vespa/searchlib/docstore/idatastore.h b/searchlib/src/vespa/searchlib/docstore/idatastore.h
index b18bb0a3827..fc0eae1d15e 100644
--- a/searchlib/src/vespa/searchlib/docstore/idatastore.h
+++ b/searchlib/src/vespa/searchlib/docstore/idatastore.h
@@ -17,14 +17,14 @@ class IBufferVisitor;
class IDataStoreVisitor
{
public:
- virtual ~IDataStoreVisitor() { }
+ virtual ~IDataStoreVisitor() = default;
virtual void visit(uint32_t lid, const void *buffer, size_t sz) = 0;
};
class IDataStoreVisitorProgress
{
public:
- virtual ~IDataStoreVisitorProgress() { }
+ virtual ~IDataStoreVisitorProgress() = default;
virtual void updateProgress(double progress) = 0;
};
@@ -46,11 +46,7 @@ public:
* @param dirName The directory that will contain the data file.
**/
IDataStore(const vespalib::string & dirName);
-
- /**
- * Allow inhertitance.
- **/
- virtual ~IDataStore();
+ ~IDataStore() override;
/**
* Read data from the data store into a buffer.
@@ -125,7 +121,7 @@ public:
* to avoid misuse we let the report a more conservative number here if necessary.
* @return diskspace to be gained.
*/
- virtual size_t getMaxCompactGain() const { return getDiskBloat(); }
+ virtual size_t getMaxSpreadAsBloat() const = 0;
/**
diff --git a/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp b/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp
index e1558f2238b..4f9b91f3e15 100644
--- a/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/idocumentstore.cpp
@@ -5,10 +5,6 @@
namespace search {
-IDocumentStore::IDocumentStore() = default;
-
-IDocumentStore::~IDocumentStore() = default;
-
void IDocumentStore::visit(const LidVector & lids, const document::DocumentTypeRepo &repo, IDocumentVisitor & visitor) const {
for (uint32_t lid : lids) {
visitor.visit(lid, read(lid, repo));
diff --git a/searchlib/src/vespa/searchlib/docstore/idocumentstore.h b/searchlib/src/vespa/searchlib/docstore/idocumentstore.h
index 2a7864a6f47..d84a5ad7e7e 100644
--- a/searchlib/src/vespa/searchlib/docstore/idocumentstore.h
+++ b/searchlib/src/vespa/searchlib/docstore/idocumentstore.h
@@ -22,7 +22,7 @@ class IDocumentStoreReadVisitor
{
public:
using DocumentSP = std::shared_ptr<document::Document>;
- virtual ~IDocumentStoreReadVisitor() { }
+ virtual ~IDocumentStoreReadVisitor() = default;
virtual void visit(uint32_t lid, const DocumentSP &doc) = 0;
virtual void visit(uint32_t lid) = 0;
};
@@ -31,14 +31,14 @@ class IDocumentStoreRewriteVisitor
{
public:
using DocumentSP = std::shared_ptr<document::Document>;
- virtual ~IDocumentStoreRewriteVisitor() { }
+ virtual ~IDocumentStoreRewriteVisitor() = default;
virtual void visit(uint32_t lid, const DocumentSP &doc) = 0;
};
class IDocumentStoreVisitorProgress
{
public:
- virtual ~IDocumentStoreVisitorProgress() { }
+ virtual ~IDocumentStoreVisitorProgress() = default;
virtual void updateProgress(double progress) = 0;
};
@@ -47,7 +47,7 @@ class IDocumentVisitor
{
public:
using DocumentUP = std::unique_ptr<document::Document>;
- virtual ~IDocumentVisitor() { }
+ virtual ~IDocumentVisitor() = default;
virtual void visit(uint32_t lid, DocumentUP doc) = 0;
virtual bool allowVisitCaching() const = 0;
private:
@@ -68,17 +68,6 @@ public:
using LidVector = std::vector<uint32_t>;
using DocumentUP = std::unique_ptr<document::Document>;
-
- /**
- * Construct a document store.
- *
- * @throws vespalib::IoException if the file is corrupt or other IO problems occur.
- * @param docMan The document type manager to use when deserializing.
- * @param baseDir The path to a directory where the implementaion specific files will reside.
- **/
- IDocumentStore();
- virtual ~IDocumentStore();
-
/**
* Make a Document from a stored serialized data blob.
* @param lid The local ID associated with the document.
@@ -111,7 +100,8 @@ public:
/**
* If possible compact the disk.
**/
- virtual void compact(uint64_t syncToken) = 0;
+ virtual void compactBloat(uint64_t syncToken) = 0;
+ virtual void compactSpread(uint64_t syncToken) = 0;
/**
* The sync token used for the last successful flush() operation,
@@ -164,12 +154,11 @@ public:
virtual size_t getDiskBloat() const = 0;
/**
- * Calculates how much diskspace can be compacted during a flush.
- * default is to return th ebloat limit, but as some targets have some internal limits
- * to avoid misuse we let the report a more conservative number here if necessary.
- * @return diskspace to be gained.
+     * Calculates the gain from keeping buckets close. It is converted to disk bloat
+ * so it can be prioritized accordingly.
+ * @return spread as disk bloat.
*/
- virtual size_t getMaxCompactGain() const { return getDiskBloat(); }
+ virtual size_t getMaxSpreadAsBloat() const = 0;
/**
* Returns statistics about the cache.
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
index fd25dd56235..6a9ae40cc93 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
@@ -36,7 +36,6 @@ using namespace std::literals;
LogDataStore::Config::Config()
: _maxFileSize(DEFAULT_MAX_FILESIZE),
- _maxDiskBloatFactor(0.2),
_maxBucketSpread(2.5),
_minFileSizeFactor(0.2),
_maxNumLids(DEFAULT_MAX_LIDS_PER_FILE),
@@ -48,7 +47,6 @@ LogDataStore::Config::Config()
bool
LogDataStore::Config::operator == (const Config & rhs) const {
return (_maxBucketSpread == rhs._maxBucketSpread) &&
- (_maxDiskBloatFactor == rhs._maxDiskBloatFactor) &&
(_maxFileSize == rhs._maxFileSize) &&
(_minFileSizeFactor == rhs._minFileSizeFactor) &&
(_skipCrcOnRead == rhs._skipCrcOnRead) &&
@@ -294,46 +292,14 @@ vespalib::string bloatMsg(size_t bloat, size_t usage) {
}
-void
-LogDataStore::compact(uint64_t syncToken)
-{
- uint64_t usage = getDiskFootprint();
- uint64_t bloat = getDiskBloat();
- LOG(debug, "%s", bloatMsg(bloat, usage).c_str());
- const bool doCompact = (_fileChunks.size() > 1);
- if (doCompact) {
- LOG(info, "%s. Will compact", bloatMsg(bloat, usage).c_str());
- compactWorst(_config.getMaxDiskBloatFactor(), _config.getMaxBucketSpread(), isTotalDiskBloatExceeded(usage, bloat));
- }
- flushActiveAndWait(syncToken);
- if (doCompact) {
- usage = getDiskFootprint();
- bloat = getDiskBloat();
- LOG(info, "Done compacting. %s", bloatMsg(bloat, usage).c_str());
- }
-}
-
-bool
-LogDataStore::isTotalDiskBloatExceeded(size_t diskFootPrint, size_t bloat) const {
- const size_t maxConfiguredDiskBloat = diskFootPrint * _config.getMaxDiskBloatFactor();
- return bloat > maxConfiguredDiskBloat;
-}
-
size_t
-LogDataStore::getMaxCompactGain() const
+LogDataStore::getMaxSpreadAsBloat() const
{
- size_t bloat = getDiskBloat();
const size_t diskFootPrint = getDiskFootprint();
- if ( ! isTotalDiskBloatExceeded(diskFootPrint, bloat) ) {
- bloat = 0;
- }
-
const double maxSpread = getMaxBucketSpread();
- size_t spreadAsBloat = diskFootPrint * (1.0 - 1.0/maxSpread);
- if ( maxSpread < _config.getMaxBucketSpread()) {
- spreadAsBloat = 0;
- }
- return (bloat + spreadAsBloat);
+ return (maxSpread > _config.getMaxBucketSpread())
+ ? diskFootPrint * (1.0 - 1.0/maxSpread)
+ : 0;
}
void
@@ -380,40 +346,34 @@ LogDataStore::getMaxBucketSpread() const
}
std::pair<bool, LogDataStore::FileId>
-LogDataStore::findNextToCompact(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat)
+LogDataStore::findNextToCompact(bool dueToBloat)
{
typedef std::multimap<double, FileId, std::greater<double>> CostMap;
- CostMap worstBloat;
- CostMap worstSpread;
+ CostMap worst;
MonitorGuard guard(_updateLock);
for (size_t i(0); i < _fileChunks.size(); i++) {
const auto & fc(_fileChunks[i]);
if (fc && fc->frozen() && (_currentlyCompacting.find(fc->getNameId()) == _currentlyCompacting.end())) {
uint64_t usage = fc->getDiskFootprint();
- uint64_t bloat = fc->getDiskBloat();
- if (_bucketizer) {
- worstSpread.emplace(fc->getBucketSpread(), FileId(i));
- }
- if (usage > 0) {
- double tmp(double(bloat)/usage);
- worstBloat.emplace(tmp, FileId(i));
+ if ( ! dueToBloat && _bucketizer) {
+ worst.emplace(fc->getBucketSpread(), FileId(i));
+ } else if (dueToBloat && usage > 0) {
+ double tmp(double(fc->getDiskBloat())/usage);
+ worst.emplace(tmp, FileId(i));
}
}
}
if (LOG_WOULD_LOG(debug)) {
- for (const auto & it : worstBloat) {
+ for (const auto & it : worst) {
const FileChunk & fc = *_fileChunks[it.second.getId()];
LOG(debug, "File '%s' has bloat '%2.2f' and bucket-spread '%1.4f numChunks=%d , numBuckets=%ld, numUniqueBuckets=%ld",
fc.getName().c_str(), it.first * 100, fc.getBucketSpread(), fc.getNumChunks(), fc.getNumBuckets(), fc.getNumUniqueBuckets());
}
}
std::pair<bool, FileId> retval(false, FileId(-1));
- if ( ! worstBloat.empty() && (worstBloat.begin()->first > bloatLimit) && prioritizeDiskBloat) {
- retval.first = true;
- retval.second = worstBloat.begin()->second;
- } else if ( ! worstSpread.empty() && (worstSpread.begin()->first > spreadLimit)) {
+ if ( ! worst.empty()) {
retval.first = true;
- retval.second = worstSpread.begin()->second;
+ retval.second = worst.begin()->second;
}
if (retval.first) {
_currentlyCompacting.insert(_fileChunks[retval.second.getId()]->getNameId());
@@ -422,10 +382,24 @@ LogDataStore::findNextToCompact(double bloatLimit, double spreadLimit, bool prio
}
void
-LogDataStore::compactWorst(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat) {
- auto worst = findNextToCompact(bloatLimit, spreadLimit, prioritizeDiskBloat);
- if (worst.first) {
- compactFile(worst.second);
+LogDataStore::compactWorst(uint64_t syncToken, bool compactDiskBloat) {
+ uint64_t usage = getDiskFootprint();
+ uint64_t bloat = getDiskBloat();
+ const char * reason = compactDiskBloat ? "bloat" : "spread";
+ LOG(debug, "%s", bloatMsg(bloat, usage).c_str());
+ const bool doCompact = (_fileChunks.size() > 1);
+ if (doCompact) {
+ LOG(debug, "Will compact due to %s: %s", reason, bloatMsg(bloat, usage).c_str());
+ auto worst = findNextToCompact(compactDiskBloat);
+ if (worst.first) {
+ compactFile(worst.second);
+ }
+ flushActiveAndWait(syncToken);
+ usage = getDiskFootprint();
+ bloat = getDiskBloat();
+ LOG(info, "Done compacting due to %s: %s", reason, bloatMsg(bloat, usage).c_str());
+ } else {
+ flushActiveAndWait(syncToken);
}
}
@@ -1001,7 +975,7 @@ LogDataStore::computeNumberOfSignificantBucketIdBits(const IBucketizer & bucketi
while ((msb > 0) && (msbHistogram[msb - 1] == 0)) {
msb--;
}
- LOG(info, "computeNumberOfSignificantBucketIdBits(file=%d) = %ld = %ld took %1.3f", fileId.getId(), msb, msbHistogram[msb-1], timer.min_time());
+ LOG(debug, "computeNumberOfSignificantBucketIdBits(file=%d) = %ld = %ld took %1.3f", fileId.getId(), msb, msbHistogram[msb-1], timer.min_time());
return msb;
}
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.h b/searchlib/src/vespa/searchlib/docstore/logdatastore.h
index 0e11b88a178..62f87076759 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.h
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.h
@@ -41,7 +41,6 @@ public:
Config & setMaxFileSize(size_t v) { _maxFileSize = v; return *this; }
Config & setMaxNumLids(size_t v) { _maxNumLids = v; return *this; }
- Config & setMaxDiskBloatFactor(double v) { _maxDiskBloatFactor = v; return *this; }
Config & setMaxBucketSpread(double v) { _maxBucketSpread = v; return *this; }
Config & setMinFileSizeFactor(double v) { _minFileSizeFactor = v; return *this; }
@@ -49,7 +48,6 @@ public:
Config & setFileConfig(WriteableFileChunk::Config v) { _fileConfig = v; return *this; }
size_t getMaxFileSize() const { return _maxFileSize; }
- double getMaxDiskBloatFactor() const { return _maxDiskBloatFactor; }
double getMaxBucketSpread() const { return _maxBucketSpread; }
double getMinFileSizeFactor() const { return _minFileSizeFactor; }
uint32_t getMaxNumLids() const { return _maxNumLids; }
@@ -63,7 +61,6 @@ public:
bool operator == (const Config &) const;
private:
size_t _maxFileSize;
- double _maxDiskBloatFactor;
double _maxBucketSpread;
double _minFileSizeFactor;
uint32_t _maxNumLids;
@@ -109,12 +106,10 @@ public:
size_t getDiskFootprint() const override;
size_t getDiskHeaderFootprint() const override;
size_t getDiskBloat() const override;
- size_t getMaxCompactGain() const override;
+ size_t getMaxSpreadAsBloat() const override;
- /**
- * Will compact the docsummary up to a lower limit of 5% bloat.
- */
- void compact(uint64_t syncToken);
+ void compactBloat(uint64_t syncToken) { compactWorst(syncToken, true); }
+ void compactSpread(uint64_t syncToken) { compactWorst(syncToken, false);}
const Config & getConfig() const { return _config; }
Config & getConfig() { return _config; }
@@ -183,10 +178,9 @@ private:
class WrapVisitorProgress;
class FileChunkHolder;
- // Implements ISetLid API
void setLid(const ISetLid::unique_lock & guard, uint32_t lid, const LidInfo & lm) override;
- void compactWorst(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat);
+ void compactWorst(uint64_t syncToken, bool compactDiskBloat);
void compactFile(FileId chunkId);
typedef vespalib::RcuVector<uint64_t> LidInfoVector;
@@ -202,8 +196,6 @@ private:
NameIdSet eraseIncompleteCompactedFiles(NameIdSet partList);
void internalFlushAll();
- bool isTotalDiskBloatExceeded(size_t diskFootPrint, size_t bloat) const;
-
NameIdSet scanDir(const vespalib::string &dir, const vespalib::string &suffix);
FileId allocateFileId(const MonitorGuard & guard);
void setNewFileChunk(const MonitorGuard & guard, FileChunk::UP fileChunk);
@@ -248,7 +240,7 @@ private:
return (_fileChunks.empty() ? 0 : _fileChunks.back()->getLastPersistedSerialNum());
}
bool shouldCompactToActiveFile(size_t compactedSize) const;
- std::pair<bool, FileId> findNextToCompact(double bloatLimit, double spreadLimit, bool prioritizeDiskBloat);
+ std::pair<bool, FileId> findNextToCompact(bool compactDiskBloat);
void incGeneration();
bool canShrinkLidSpace(const MonitorGuard &guard) const;
diff --git a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h
index de36155bedb..2931f8bce2d 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h
+++ b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h
@@ -51,7 +51,8 @@ public:
~LogDocumentStore() override;
void reconfigure(const Config & config);
private:
- void compact(uint64_t syncToken) override { _backingStore.compact(syncToken); }
+ void compactBloat(uint64_t syncToken) override { _backingStore.compactBloat(syncToken); }
+ void compactSpread(uint64_t syncToken) override { _backingStore.compactSpread(syncToken); }
LogDataStore _backingStore;
};
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp
index 0f4326aac40..5217c44df97 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp
@@ -75,7 +75,7 @@ BlobSequenceReader::BlobSequenceReader(AttributeVector& attr, bool has_index)
: ReaderBase(attr),
_use_index_file(has_index && has_index_file(attr) &&
can_use_index_save_file(attr.getConfig(),
- search::attribute::AttributeHeader::extractTags(getDatHeader()))),
+ search::attribute::AttributeHeader::extractTags(getDatHeader(), attr.getBaseFileName()))),
_index_file(_use_index_file ?
attribute::LoadUtils::openFile(attr, DenseTensorAttributeSaver::index_file_suffix()) :
std::unique_ptr<Fast_BufferedFile>())
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
index d3c2998333a..ed3fb737b7d 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.cpp
@@ -17,6 +17,8 @@ namespace {
constexpr size_t MIN_BUFFER_ARRAYS = 1024;
constexpr size_t DENSE_TENSOR_ALIGNMENT = 32;
+constexpr size_t DENSE_TENSOR_ALIGNMENT_SMALL = 16;
+constexpr size_t DENSE_TENSOR_ALIGNMENT_MIN = 8;
size_t my_align(size_t size, size_t alignment) {
size += alignment - 1;
@@ -27,17 +29,20 @@ size_t my_align(size_t size, size_t alignment) {
DenseTensorStore::TensorSizeCalc::TensorSizeCalc(const ValueType &type)
: _numCells(1u),
- _cell_type(type.cell_type())
+ _cell_type(type.cell_type()),
+ _aligned_size(0u)
{
for (const auto &dim: type.dimensions()) {
_numCells *= dim.size;
}
-}
-
-size_t
-DenseTensorStore::TensorSizeCalc::alignedSize() const
-{
- return my_align(bufSize(), DENSE_TENSOR_ALIGNMENT);
+ auto buf_size = bufSize();
+ size_t alignment = DENSE_TENSOR_ALIGNMENT;
+ if (buf_size <= DENSE_TENSOR_ALIGNMENT_MIN) {
+ alignment = DENSE_TENSOR_ALIGNMENT_MIN;
+ } else if (buf_size <= DENSE_TENSOR_ALIGNMENT_SMALL) {
+ alignment = DENSE_TENSOR_ALIGNMENT_SMALL;
+ }
+ _aligned_size = my_align(buf_size, alignment);
}
DenseTensorStore::BufferType::BufferType(const TensorSizeCalc &tensorSizeCalc, std::unique_ptr<vespalib::alloc::MemoryAllocator> allocator)
@@ -79,12 +84,6 @@ DenseTensorStore::~DenseTensorStore()
_store.dropBuffers();
}
-const void *
-DenseTensorStore::getRawBuffer(RefType ref) const
-{
- return _store.getEntryArray<char>(ref, _bufferType.getArraySize());
-}
-
namespace {
void clearPadAreaAfterBuffer(char *buffer, size_t bufSize, size_t alignedBufSize) {
@@ -136,15 +135,6 @@ DenseTensorStore::getTensor(EntryRef ref) const
return std::make_unique<vespalib::eval::DenseValueView>(_type, cells_ref);
}
-vespalib::eval::TypedCells
-DenseTensorStore::get_typed_cells(EntryRef ref) const
-{
- if (!ref.valid()) {
- return vespalib::eval::TypedCells(&_emptySpace[0], _type.cell_type(), getNumCells());
- }
- return vespalib::eval::TypedCells(getRawBuffer(ref), _type.cell_type(), getNumCells());
-}
-
template <class TensorType>
TensorStore::EntryRef
DenseTensorStore::setDenseTensor(const TensorType &tensor)
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
index 3b7cb71863e..47932fbff7e 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_store.h
@@ -25,12 +25,13 @@ public:
{
size_t _numCells; // product of dimension sizes
vespalib::eval::CellType _cell_type;
+ size_t _aligned_size;
TensorSizeCalc(const ValueType &type);
size_t bufSize() const {
return vespalib::eval::CellTypeUtils::mem_size(_cell_type, _numCells);
}
- size_t alignedSize() const;
+ size_t alignedSize() const noexcept { return _aligned_size; }
};
class BufferType : public vespalib::datastore::BufferType<char>
@@ -50,12 +51,9 @@ private:
ValueType _type; // type of dense tensor
std::vector<char> _emptySpace;
- size_t unboundCells(const void *buffer) const;
-
template <class TensorType>
TensorStore::EntryRef
setDenseTensor(const TensorType &tensor);
-
public:
DenseTensorStore(const ValueType &type, std::unique_ptr<vespalib::alloc::MemoryAllocator> allocator);
~DenseTensorStore() override;
@@ -63,12 +61,17 @@ public:
const ValueType &type() const { return _type; }
size_t getNumCells() const { return _tensorSizeCalc._numCells; }
size_t getBufSize() const { return _tensorSizeCalc.bufSize(); }
- const void *getRawBuffer(RefType ref) const;
+ const void *getRawBuffer(RefType ref) const {
+ return _store.getEntryArray<char>(ref, _bufferType.getArraySize());
+ }
vespalib::datastore::Handle<char> allocRawBuffer();
void holdTensor(EntryRef ref) override;
EntryRef move(EntryRef ref) override;
std::unique_ptr<vespalib::eval::Value> getTensor(EntryRef ref) const;
- vespalib::eval::TypedCells get_typed_cells(EntryRef ref) const;
+ vespalib::eval::TypedCells get_typed_cells(EntryRef ref) const {
+ return vespalib::eval::TypedCells(ref.valid() ? getRawBuffer(ref) : &_emptySpace[0],
+ _type.cell_type(), getNumCells());
+ }
EntryRef setTensor(const vespalib::eval::Value &tensor);
// The following method is meant to be used only for unit tests.
uint32_t getArraySize() const { return _bufferType.getArraySize(); }
diff --git a/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp b/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp
index 7f9f20e07c4..43596478a6f 100644
--- a/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/hamming_distance.cpp
@@ -43,4 +43,13 @@ HammingDistance::calc(const vespalib::eval::TypedCells& lhs,
}
}
+double
+HammingDistance::calc_with_limit(const vespalib::eval::TypedCells& lhs,
+ const vespalib::eval::TypedCells& rhs,
+ double) const
+{
+ // consider optimizing:
+ return calc(lhs, rhs);
+}
+
}
diff --git a/searchlib/src/vespa/searchlib/tensor/hamming_distance.h b/searchlib/src/vespa/searchlib/tensor/hamming_distance.h
index f0b7b159b90..c64fc5b532d 100644
--- a/searchlib/src/vespa/searchlib/tensor/hamming_distance.h
+++ b/searchlib/src/vespa/searchlib/tensor/hamming_distance.h
@@ -15,7 +15,7 @@ namespace search::tensor {
* or (for int8 cells, aka binary data only)
* "number of bits that are different"
*/
-class HammingDistance : public DistanceFunction {
+class HammingDistance final : public DistanceFunction {
public:
HammingDistance(vespalib::eval::CellType expected) : DistanceFunction(expected) {}
double calc(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs) const override;
@@ -26,13 +26,7 @@ public:
double score = 1.0 / (1.0 + distance);
return score;
}
- double calc_with_limit(const vespalib::eval::TypedCells& lhs,
- const vespalib::eval::TypedCells& rhs,
- double) const override
- {
- // consider optimizing:
- return calc(lhs, rhs);
- }
+ double calc_with_limit(const vespalib::eval::TypedCells& lhs, const vespalib::eval::TypedCells& rhs, double) const override;
};
}
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
index 2889dc425db..185f1038e39 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
@@ -8,7 +8,6 @@
#include "hnsw_index_saver.h"
#include "random_level_generator.h"
#include "reusable_set_visited_tracker.h"
-#include <vespa/searchcommon/common/compaction_strategy.h>
#include <vespa/searchlib/attribute/address_space_components.h>
#include <vespa/searchlib/attribute/address_space_usage.h>
#include <vespa/searchlib/util/fileutil.h>
@@ -16,6 +15,7 @@
#include <vespa/vespalib/data/slime/cursor.h>
#include <vespa/vespalib/data/slime/inserter.h>
#include <vespa/vespalib/datastore/array_store.hpp>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/util/memory_allocator.h>
#include <vespa/vespalib/util/rcuvector.hpp>
#include <vespa/vespalib/util/size_literals.h>
@@ -30,6 +30,7 @@ namespace search::tensor {
using search::AddressSpaceComponents;
using search::StateExplorerUtils;
+using vespalib::datastore::CompactionStrategy;
using vespalib::datastore::EntryRef;
namespace {
@@ -531,18 +532,18 @@ HnswIndex::trim_hold_lists(generation_t first_used_gen)
}
void
-HnswIndex::compact_level_arrays(bool compact_memory, bool compact_address_space)
+HnswIndex::compact_level_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
- auto context = _graph.nodes.compactWorst(compact_memory, compact_address_space);
+ auto context = _graph.nodes.compactWorst(compaction_spec, compaction_strategy);
uint32_t doc_id_limit = _graph.node_refs.size();
vespalib::ArrayRef<AtomicEntryRef> refs(&_graph.node_refs[0], doc_id_limit);
context->compact(refs);
}
void
-HnswIndex::compact_link_arrays(bool compact_memory, bool compact_address_space)
+HnswIndex::compact_link_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
- auto context = _graph.links.compactWorst(compact_memory, compact_address_space);
+ auto context = _graph.links.compactWorst(compaction_spec, compaction_strategy);
uint32_t doc_id_limit = _graph.node_refs.size();
for (uint32_t doc_id = 1; doc_id < doc_id_limit; ++doc_id) {
EntryRef level_ref = _graph.node_refs[doc_id].load_relaxed();
@@ -556,16 +557,11 @@ HnswIndex::compact_link_arrays(bool compact_memory, bool compact_address_space)
namespace {
bool
-consider_compact_arrays(const CompactionStrategy& compaction_strategy, vespalib::MemoryUsage& memory_usage, vespalib::AddressSpace& address_space_usage, std::function<void(bool,bool)> compact_arrays)
-{
- size_t used_bytes = memory_usage.usedBytes();
- size_t dead_bytes = memory_usage.deadBytes();
- bool compact_memory = compaction_strategy.should_compact_memory(used_bytes, dead_bytes);
- size_t used_address_space = address_space_usage.used();
- size_t dead_address_space = address_space_usage.dead();
- bool compact_address_space = compaction_strategy.should_compact_address_space(used_address_space, dead_address_space);
- if (compact_memory || compact_address_space) {
- compact_arrays(compact_memory, compact_address_space);
+consider_compact_arrays(const CompactionStrategy& compaction_strategy, vespalib::MemoryUsage& memory_usage, vespalib::AddressSpace& address_space_usage, std::function<void(vespalib::datastore::CompactionSpec, const CompactionStrategy&)> compact_arrays)
+{
+ auto compaction_spec = compaction_strategy.should_compact(memory_usage, address_space_usage);
+ if (compaction_spec.compact()) {
+ compact_arrays(compaction_spec, compaction_strategy);
return true;
}
return false;
@@ -577,16 +573,16 @@ bool
HnswIndex::consider_compact_level_arrays(const CompactionStrategy& compaction_strategy)
{
return consider_compact_arrays(compaction_strategy, _cached_level_arrays_memory_usage, _cached_level_arrays_address_space_usage,
- [this](bool compact_memory, bool compact_address_space)
- { compact_level_arrays(compact_memory, compact_address_space); });
+ [this](CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy_fwd)
+ { compact_level_arrays(compaction_spec, compaction_strategy_fwd); });
}
bool
HnswIndex::consider_compact_link_arrays(const CompactionStrategy& compaction_strategy)
{
return consider_compact_arrays(compaction_strategy, _cached_link_arrays_memory_usage, _cached_link_arrays_address_space_usage,
- [this](bool compact_memory, bool compact_address_space)
- { compact_link_arrays(compact_memory, compact_address_space); });
+ [this](CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy_fwd)
+ { compact_link_arrays(compaction_spec, compaction_strategy_fwd); });
}
bool
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
index d8f3c4c97fa..5b5f9382517 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
@@ -171,8 +171,8 @@ public:
void remove_document(uint32_t docid) override;
void transfer_hold_lists(generation_t current_gen) override;
void trim_hold_lists(generation_t first_used_gen) override;
- void compact_level_arrays(bool compact_memory, bool compact_addreess_space);
- void compact_link_arrays(bool compact_memory, bool compact_address_space);
+ void compact_level_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
+ void compact_link_arrays(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
bool consider_compact_level_arrays(const CompactionStrategy& compaction_strategy);
bool consider_compact_link_arrays(const CompactionStrategy& compaction_strategy);
bool consider_compact(const CompactionStrategy& compaction_strategy) override;
diff --git a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
index 411d09cd2d3..c1fa4da05d1 100644
--- a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
+++ b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
@@ -12,6 +12,10 @@
class FastOS_FileInterface;
+namespace vespalib::datastore {
+class CompactionSpec;
+class CompactionStrategy;
+}
namespace vespalib::slime { struct Inserter; }
namespace search::fileutil { class LoadedBuffer; }
@@ -19,7 +23,6 @@ namespace search::fileutil { class LoadedBuffer; }
namespace search {
class AddressSpaceUsage;
class BitVector;
-class CompactionStrategy;
}
namespace search::tensor {
@@ -32,6 +35,8 @@ class NearestNeighborIndexSaver;
*/
class NearestNeighborIndex {
public:
+ using CompactionSpec = vespalib::datastore::CompactionSpec;
+ using CompactionStrategy = vespalib::datastore::CompactionStrategy;
using generation_t = vespalib::GenerationHandler::generation_t;
struct Neighbor {
uint32_t docid;
diff --git a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp
index a19541072da..5bd14d2c234 100644
--- a/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/tensor_attribute.cpp
@@ -89,9 +89,7 @@ TensorAttribute::onCommit()
incGeneration();
if (getFirstUsedGeneration() > _compactGeneration) {
// No data held from previous compact operation
- size_t used = _cached_tensor_store_memory_usage.usedBytes();
- size_t dead = _cached_tensor_store_memory_usage.deadBytes();
- if (getConfig().getCompactionStrategy().should_compact_memory(used, dead)) {
+ if (getConfig().getCompactionStrategy().should_compact_memory(_cached_tensor_store_memory_usage)) {
compactWorst();
}
}
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
index 24943b53e6d..96b94955570 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
@@ -12,6 +12,7 @@
#include <algorithm>
#include <thread>
#include <cassert>
+#include <future>
#include <vespa/log/log.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
@@ -56,11 +57,13 @@ Domain::Domain(const string &domainName, const string & baseDir, Executor & exec
_fileHeaderContext(fileHeaderContext),
_markedDeleted(false)
{
- int retval(0);
- if ((retval = makeDirectory(_baseDir.c_str())) != 0) {
+ assert(_config.getEncoding().getCompression() != Encoding::Compression::none);
+ int retval = makeDirectory(_baseDir.c_str());
+ if (retval != 0) {
throw runtime_error(fmt("Failed creating basedirectory %s r(%d), e(%d)", _baseDir.c_str(), retval, errno));
}
- if ((retval = makeDirectory(dir().c_str())) != 0) {
+ retval = makeDirectory(dir().c_str());
+ if (retval != 0) {
throw runtime_error(fmt("Failed creating domaindir %s r(%d), e(%d)", dir().c_str(), retval, errno));
}
SerialNumList partIdVector = scanDir();
@@ -76,8 +79,7 @@ Domain::Domain(const string &domainName, const string & baseDir, Executor & exec
}
pending.waitForZeroRefCount();
if (_parts.empty() || _parts.crbegin()->second->isClosed()) {
- _parts[lastPart] = std::make_shared<DomainPart>(_name, dir(), lastPart, _config.getEncoding(),
- _config.getCompressionlevel(), _fileHeaderContext, false);
+ _parts[lastPart] = std::make_shared<DomainPart>(_name, dir(), lastPart, _fileHeaderContext, false);
vespalib::File::sync(dir());
}
_lastSerial = end();
@@ -86,13 +88,13 @@ Domain::Domain(const string &domainName, const string & baseDir, Executor & exec
Domain &
Domain::setConfig(const DomainConfig & cfg) {
_config = cfg;
+ assert(_config.getEncoding().getCompression() != Encoding::Compression::none);
return *this;
}
void
Domain::addPart(SerialNum partId, bool isLastPart) {
- auto dp = std::make_shared<DomainPart>(_name, dir(), partId, _config.getEncoding(),
- _config.getCompressionlevel(), _fileHeaderContext, isLastPart);
+ auto dp = std::make_shared<DomainPart>(_name, dir(), partId, _fileHeaderContext, isLastPart);
if (dp->size() == 0) {
// Only last domain part is allowed to be truncated down to
// empty size.
@@ -331,8 +333,7 @@ Domain::optionallyRotateFile(SerialNum serialNum) {
triggerSyncNow({});
waitPendingSync(_syncMonitor, _syncCond, _pendingSync);
dp->close();
- dp = std::make_shared<DomainPart>(_name, dir(), serialNum, _config.getEncoding(),
- _config.getCompressionlevel(), _fileHeaderContext, false);
+ dp = std::make_shared<DomainPart>(_name, dir(), serialNum, _fileHeaderContext, false);
{
std::lock_guard guard(_lock);
_parts[serialNum] = dp;
@@ -394,25 +395,32 @@ Domain::grabCurrentChunk(const UniqueLock & guard) {
void
Domain::commitChunk(std::unique_ptr<CommitChunk> chunk, const UniqueLock & chunkOrderGuard) {
assert(chunkOrderGuard.mutex() == &_currentChunkMonitor && chunkOrderGuard.owns_lock());
- _singleCommitter->execute( makeLambdaTask([this, chunk = std::move(chunk)]() mutable {
- doCommit(std::move(chunk));
+ if (chunk->getPacket().empty()) return;
+ std::promise<SerializedChunk> promise;
+ std::future<SerializedChunk> future = promise.get_future();
+ _executor.execute(makeLambdaTask([promise=std::move(promise), chunk = std::move(chunk),
+ encoding=_config.getEncoding(), compressionLevel=_config.getCompressionlevel()]() mutable {
+ promise.set_value(SerializedChunk(std::move(chunk), encoding, compressionLevel));
+ }));
+ _singleCommitter->execute( makeLambdaTask([this, future = std::move(future)]() mutable {
+ doCommit(future.get());
}));
}
+
+
void
-Domain::doCommit(std::unique_ptr<CommitChunk> chunk) {
- const Packet & packet = chunk->getPacket();
- if (packet.empty()) return;
+Domain::doCommit(const SerializedChunk & serialized) {
- SerialNum firstSerial = packet.range().from();
- DomainPart::SP dp = optionallyRotateFile(firstSerial);
- dp->commit(firstSerial, packet);
+ SerialNumRange range = serialized.range();
+ DomainPart::SP dp = optionallyRotateFile(range.from());
+ dp->commit(serialized);
if (_config.getFSyncOnCommit()) {
dp->sync();
}
cleanSessions();
LOG(debug, "Releasing %zu acks and %zu entries and %zu bytes.",
- chunk->getNumCallBacks(), chunk->getPacket().size(), chunk->sizeBytes());
+ serialized.commitChunk().getNumCallBacks(), serialized.getNumEntries(), serialized.getData().size());
}
bool
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.h b/searchlib/src/vespa/searchlib/transactionlog/domain.h
index 2e912ad6201..eb3d0b6b10b 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domain.h
+++ b/searchlib/src/vespa/searchlib/transactionlog/domain.h
@@ -64,7 +64,7 @@ private:
std::unique_ptr<CommitChunk> grabCurrentChunk(const UniqueLock & guard);
void commitChunk(std::unique_ptr<CommitChunk> chunk, const UniqueLock & chunkOrderGuard);
- void doCommit(std::unique_ptr<CommitChunk> chunk);
+ void doCommit(const SerializedChunk & serialized);
SerialNum begin(const UniqueLock & guard) const;
SerialNum end(const UniqueLock & guard) const;
size_t byteSize(const UniqueLock & guard) const;
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp
index 3dad67df177..2ca2f15545d 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp
@@ -247,11 +247,9 @@ DomainPart::buildPacketMapping(bool allowTruncate)
return currPos;
}
-DomainPart::DomainPart(const string & name, const string & baseDir, SerialNum s, Encoding encoding,
- uint8_t compressionLevel, const FileHeaderContext &fileHeaderContext, bool allowTruncate)
- : _encoding(encoding),
- _compressionLevel(compressionLevel),
- _lock(),
+DomainPart::DomainPart(const string & name, const string & baseDir, SerialNum s,
+ const FileHeaderContext &fileHeaderContext, bool allowTruncate)
+ : _lock(),
_fileLock(),
_range(s),
_sz(0),
@@ -379,35 +377,21 @@ DomainPart::erase(SerialNum to)
}
void
-DomainPart::commit(SerialNum firstSerial, const Packet &packet)
+DomainPart::commit(const SerializedChunk & serialized)
{
+ SerialNumRange range = serialized.range();
+
int64_t firstPos(byteSize());
- nbostream_longlivedbuf h(packet.getHandle().data(), packet.getHandle().size());
+ assert(_range.to() < range.to());
+ _sz += serialized.getNumEntries();
+ _range.to(range.to());
if (_range.from() == 0) {
- _range.from(firstSerial);
- }
- IChunk::UP chunk = IChunk::create(_encoding, _compressionLevel);
- for (size_t i(0); h.size() > 0; i++) {
- //LOG(spam,
- //"Pos(%d) Len(%d), Lim(%d), Remaining(%d)",
- //h.getPos(), h.getLength(), h.getLimit(), h.getRemaining());
- Packet::Entry entry;
- entry.deserialize(h);
- if (_range.to() < entry.serial()) {
- chunk->add(entry);
- assert(_encoding.getCompression() != Encoding::Compression::none);
- _sz++;
- _range.to(entry.serial());
- } else {
- throw runtime_error(fmt("Incoming serial number(%" PRIu64 ") must be bigger than the last one (%" PRIu64 ").",
- entry.serial(), _range.to()));
- }
- }
- if ( ! chunk->getEntries().empty()) {
- write(*_transLog, *chunk);
+ _range.from(range.from());
}
+
+ write(*_transLog, range, serialized.getData());
std::lock_guard guard(_lock);
- _skipList.emplace_back(firstSerial, firstPos);
+ _skipList.emplace_back(range.from(), firstPos);
}
void
@@ -442,26 +426,15 @@ DomainPart::visit(FastOS_FileInterface &file, SerialNumRange &r, Packet &packet)
}
void
-DomainPart::write(FastOS_FileInterface &file, const IChunk & chunk)
+DomainPart::write(FastOS_FileInterface &file, SerialNumRange range, vespalib::ConstBufferRef buf)
{
- nbostream os;
- size_t begin = os.wp();
- os << _encoding.getRaw(); // Placeholder for encoding
- os << uint32_t(0); // Placeholder for size
- Encoding realEncoding = chunk.encode(os);
- size_t end = os.wp();
- os.wp(0);
- os << realEncoding.getRaw(); //Patching real encoding
- os << uint32_t(end - (begin + sizeof(uint32_t) + sizeof(uint8_t))); // Patching actual size.
- os.wp(end);
std::lock_guard guard(_writeLock);
- if ( ! file.CheckedWrite(os.data(), os.size()) ) {
- throw runtime_error(handleWriteError("Failed writing the entry.", file, byteSize(), chunk.range(), os.size()));
+ if ( ! file.CheckedWrite(buf.data(), buf.size()) ) {
+ throw runtime_error(handleWriteError("Failed writing the entry.", file, byteSize(), range, buf.size()));
}
- LOG(debug, "Wrote chunk with %zu entries and %zu bytes, range[%" PRIu64 ", %" PRIu64 "] encoding(wanted=%x, real=%x)",
- chunk.getEntries().size(), os.size(), chunk.range().from(), chunk.range().to(), _encoding.getRaw(), realEncoding.getRaw());
- _writtenSerial = chunk.range().to();
- _byteSize.fetch_add(os.size(), std::memory_order_release);
+    LOG(debug, "Wrote chunk with %zu bytes, range[%" PRIu64 ", %" PRIu64 "]", buf.size(), range.from(), range.to());
+ _writtenSerial = range.to();
+ _byteSize.fetch_add(buf.size(), std::memory_order_release);
}
bool
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h
index 9ab0db54391..ea5290c433b 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h
+++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h
@@ -19,13 +19,13 @@ public:
using SP = std::shared_ptr<DomainPart>;
DomainPart(const DomainPart &) = delete;
DomainPart& operator=(const DomainPart &) = delete;
- DomainPart(const vespalib::string &name, const vespalib::string &baseDir, SerialNum s, Encoding defaultEncoding,
- uint8_t compressionLevel, const common::FileHeaderContext &FileHeaderContext, bool allowTruncate);
+ DomainPart(const vespalib::string &name, const vespalib::string &baseDir, SerialNum s,
+ const common::FileHeaderContext &FileHeaderContext, bool allowTruncate);
~DomainPart();
const vespalib::string &fileName() const { return _fileName; }
- void commit(SerialNum firstSerial, const Packet &packet);
+ void commit(const SerializedChunk & serialized);
bool erase(SerialNum to);
bool visit(FastOS_FileInterface &file, SerialNumRange &r, Packet &packet);
bool close();
@@ -49,7 +49,7 @@ private:
static Packet readPacket(FastOS_FileInterface & file, SerialNumRange wanted, size_t targetSize, bool allowTruncate);
static bool read(FastOS_FileInterface &file, IChunk::UP & chunk, Alloc &buf, bool allowTruncate);
- void write(FastOS_FileInterface &file, const IChunk & entry);
+ void write(FastOS_FileInterface &file, SerialNumRange range, vespalib::ConstBufferRef buf);
void writeHeader(const common::FileHeaderContext &fileHeaderContext);
class SkipInfo
@@ -69,8 +69,6 @@ private:
SerialNum _id;
uint64_t _pos;
};
- const Encoding _encoding;
- const uint8_t _compressionLevel;
std::mutex _lock;
std::mutex _fileLock;
SerialNumRange _range;
diff --git a/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp b/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp
index ee1631ea8c2..99370d263ec 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/ichunk.cpp
@@ -8,6 +8,9 @@
#include <cassert>
#include <ostream>
+#include <vespa/log/log.h>
+LOG_SETUP(".searchlib.transactionlog.ichunk");
+
using std::make_unique;
using vespalib::make_string_short::fmt;
using vespalib::nbostream_longlivedbuf;
@@ -115,4 +118,48 @@ std::ostream &
operator << (std::ostream & os, Encoding e) {
return os << "crc=" << e.getCrc() << " compression=" << e.getCompression();
}
+
+void
+encode(vespalib::nbostream & os, const IChunk & chunk, Encoding encoding) {
+ size_t begin = os.wp();
+ os << encoding.getRaw(); // Placeholder for encoding
+ os << uint32_t(0); // Placeholder for size
+ Encoding realEncoding = chunk.encode(os);
+ size_t end = os.wp();
+ os.wp(0);
+ os << realEncoding.getRaw(); //Patching real encoding
+ os << uint32_t(end - (begin + sizeof(uint32_t) + sizeof(uint8_t))); // Patching actual size.
+ os.wp(end);
+ SerialNumRange range = chunk.range();
+ LOG(spam, "Encoded chunk with %zu entries and %zu bytes, range[%" PRIu64 ", %" PRIu64 "] encoding(wanted=%x, real=%x)",
+ chunk.getEntries().size(), os.size(), range.from(), range.to(), encoding.getRaw(), realEncoding.getRaw());
+}
+
+SerializedChunk::SerializedChunk(std::unique_ptr<CommitChunk> commitChunk, Encoding encoding, uint8_t compressionLevel)
+ : _commitChunk(std::move(commitChunk)),
+ _os(),
+ _range(_commitChunk->getPacket().range()),
+ _numEntries(_commitChunk->getPacket().size())
+{
+ const Packet & packet = _commitChunk->getPacket();
+ nbostream_longlivedbuf h(packet.getHandle().data(), packet.getHandle().size());
+
+ IChunk::UP chunk = IChunk::create(encoding, compressionLevel);
+ SerialNum prev = 0;
+ for (size_t i(0); h.size() > 0; i++) {
+ //LOG(spam,
+ //"Pos(%d) Len(%d), Lim(%d), Remaining(%d)",
+ //h.getPos(), h.getLength(), h.getLimit(), h.getRemaining());
+ Packet::Entry entry;
+ entry.deserialize(h);
+ assert (prev < entry.serial());
+ chunk->add(entry);
+ prev = entry.serial();
+ }
+ assert(! chunk->getEntries().empty());
+ encode(_os, *chunk, encoding);
+}
+vespalib::ConstBufferRef SerializedChunk::getData() const {
+ return vespalib::ConstBufferRef(_os.data(), _os.size());
+}
}
diff --git a/searchlib/src/vespa/searchlib/transactionlog/ichunk.h b/searchlib/src/vespa/searchlib/transactionlog/ichunk.h
index 02bd0ce9426..e5daeb810f4 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/ichunk.h
+++ b/searchlib/src/vespa/searchlib/transactionlog/ichunk.h
@@ -33,6 +33,29 @@ private:
std::ostream & operator << (std::ostream & os, Encoding e);
/**
+ * Represents a completely encoded chunk with a buffer ready to be persisted,
+ * and the range and number of entries it covers.
+ */
+class SerializedChunk {
+public:
+ SerializedChunk(std::unique_ptr<CommitChunk> chunk, Encoding encoding, uint8_t compressionLevel);
+ SerializedChunk(SerializedChunk &&) = default;
+ SerializedChunk & operator=(SerializedChunk &&) = default;
+ SerializedChunk(const SerializedChunk &) = delete;
+ SerializedChunk & operator=(const SerializedChunk &) = delete;
+ vespalib::ConstBufferRef getData() const;
+ SerialNumRange range() const { return _range; }
+ size_t getNumEntries() const { return _numEntries; }
+ const CommitChunk & commitChunk() const { return *_commitChunk; }
+private:
+ // CommitChunk is required to ensure we do not reply until committed to the TLS.
+ std::unique_ptr<CommitChunk> _commitChunk;
+ vespalib::nbostream _os;
+ SerialNumRange _range;
+ size_t _numEntries;
+};
+
+/**
* Interface for different chunk formats.
* Format specifies both crc type, and compression type.
*/
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
index ce190d2c093..db2cf2a255d 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
@@ -578,9 +578,10 @@ TransLogServer::domainCommit(FRT_RPCRequest *req)
try {
vespalib::Gate gate;
{
+ auto onDone = make_shared<vespalib::GateCallback>(gate);
// Need to scope in order to drain out all the callbacks.
- domain->append(packet, make_shared<vespalib::GateCallback>(gate));
- auto keep = domain->startCommit(make_shared<vespalib::IgnoreCallback>());
+ domain->append(packet, onDone);
+ auto keep = domain->startCommit(onDone);
}
gate.await();
ret.AddInt32(0);
diff --git a/slobrok/src/vespa/slobrok/server/slobrokserver.cpp b/slobrok/src/vespa/slobrok/server/slobrokserver.cpp
index b962ecf611e..5601336fdfd 100644
--- a/slobrok/src/vespa/slobrok/server/slobrokserver.cpp
+++ b/slobrok/src/vespa/slobrok/server/slobrokserver.cpp
@@ -2,21 +2,20 @@
#include "slobrokserver.h"
-#include <vespa/log/log.h>
-LOG_SETUP(".slobrok.server");
-
namespace slobrok {
+VESPA_THREAD_STACK_TAG(slobrok_server_thread);
+
SlobrokServer::SlobrokServer(ConfigShim &shim)
: _env(shim),
- _thread(*this)
+ _thread(*this, slobrok_server_thread)
{
_thread.start();
}
SlobrokServer::SlobrokServer(uint32_t port)
: _env(ConfigShim(port)),
- _thread(*this)
+ _thread(*this, slobrok_server_thread)
{
_thread.start();
}
diff --git a/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp b/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp
index 732ab122546..dd71380f64a 100644
--- a/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp
+++ b/staging_vespalib/src/tests/singleexecutor/singleexecutor_test.cpp
@@ -35,13 +35,18 @@ void verifyResizeTaskLimit(bool up) {
std::condition_variable cond;
std::atomic<uint64_t> started(0);
std::atomic<uint64_t> allowed(0);
- SingleExecutor executor(sequenced_executor, 10);
+ constexpr uint32_t INITIAL = 20;
+ const uint32_t INITIAL_2inN = roundUp2inN(INITIAL);
+ double waterMarkRatio = 0.5;
+ SingleExecutor executor(sequenced_executor, INITIAL, INITIAL*waterMarkRatio, 10ms);
+ EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit());
+ EXPECT_EQUAL(uint32_t(INITIAL_2inN*waterMarkRatio), executor.get_watermark());
- uint32_t targetTaskLimit = up ? 20 : 5;
+ uint32_t targetTaskLimit = up ? 40 : 5;
uint32_t roundedTaskLimit = roundUp2inN(targetTaskLimit);
- EXPECT_NOT_EQUAL(16u, roundedTaskLimit);
+ EXPECT_NOT_EQUAL(INITIAL_2inN, roundedTaskLimit);
- for (uint64_t i(0); i < 10; i++) {
+ for (uint64_t i(0); i < INITIAL; i++) {
executor.execute(makeLambdaTask([&lock, &cond, &started, &allowed] {
started++;
std::unique_lock guard(lock);
@@ -53,15 +58,16 @@ void verifyResizeTaskLimit(bool up) {
while (started < 1);
EXPECT_EQUAL(1u, started);
executor.setTaskLimit(targetTaskLimit);
- EXPECT_EQUAL(16u, executor.getTaskLimit());
+ EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit());
+ EXPECT_EQUAL(INITIAL_2inN*waterMarkRatio, executor.get_watermark());
allowed = 5;
while (started < 6);
EXPECT_EQUAL(6u, started);
- EXPECT_EQUAL(16u, executor.getTaskLimit());
- allowed = 10;
- while (started < 10);
- EXPECT_EQUAL(10u, started);
- EXPECT_EQUAL(16u, executor.getTaskLimit());
+ EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit());
+ allowed = INITIAL;
+ while (started < INITIAL);
+ EXPECT_EQUAL(INITIAL, started);
+ EXPECT_EQUAL(INITIAL_2inN, executor.getTaskLimit());
executor.execute(makeLambdaTask([&lock, &cond, &started, &allowed] {
started++;
std::unique_lock guard(lock);
@@ -69,11 +75,13 @@ void verifyResizeTaskLimit(bool up) {
cond.wait_for(guard, 1ms);
}
}));
- while (started < 11);
- EXPECT_EQUAL(11u, started);
+ while (started < INITIAL + 1);
+ EXPECT_EQUAL(INITIAL + 1, started);
EXPECT_EQUAL(roundedTaskLimit, executor.getTaskLimit());
- allowed = 11;
+ EXPECT_EQUAL(roundedTaskLimit*waterMarkRatio, executor.get_watermark());
+ allowed = INITIAL + 1;
}
+
TEST("test that resizing up and down works") {
TEST_DO(verifyResizeTaskLimit(true));
TEST_DO(verifyResizeTaskLimit(false));
diff --git a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
index db27c13463f..76b0235301b 100644
--- a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
+++ b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
@@ -66,7 +66,7 @@ SequencedTaskExecutor::create(Runnable::init_fun_t func, uint32_t threads, uint3
executors.reserve(threads);
for (uint32_t id = 0; id < threads; ++id) {
if (optimize == OptimizeFor::THROUGHPUT) {
- uint32_t watermark = kindOfWatermark == 0 ? taskLimit / 2 : kindOfWatermark;
+ uint32_t watermark = (kindOfWatermark == 0) ? taskLimit / 10 : kindOfWatermark;
executors.push_back(std::make_unique<SingleExecutor>(func, taskLimit, watermark, 100ms));
} else {
executors.push_back(std::make_unique<BlockingThreadStackExecutor>(1, stackSize, taskLimit, func));
diff --git a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp
index a2962c6ea84..a99bce0a705 100644
--- a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp
+++ b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.cpp
@@ -11,14 +11,15 @@ SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit)
{ }
SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit, uint32_t watermark, duration reactionTime)
- : _taskLimit(vespalib::roundUp2inN(taskLimit)),
+ : _watermarkRatio(watermark < taskLimit ? double(watermark) / taskLimit : 1.0),
+ _taskLimit(vespalib::roundUp2inN(taskLimit)),
_wantedTaskLimit(_taskLimit.load()),
_rp(0),
_tasks(std::make_unique<Task::UP[]>(_taskLimit)),
_mutex(),
_consumerCondition(),
_producerCondition(),
- _thread(*this),
+ _thread(*this, func),
_idleTracker(steady_clock::now()),
_threadIdleTracker(),
_wakeupCount(0),
@@ -27,11 +28,10 @@ SingleExecutor::SingleExecutor(init_fun_t func, uint32_t taskLimit, uint32_t wat
_wakeupConsumerAt(0),
_producerNeedWakeupAt(0),
_wp(0),
- _watermark(std::min(_taskLimit.load(), watermark)),
+ _watermark(_taskLimit.load()*_watermarkRatio),
_reactionTime(reactionTime),
_closed(false)
{
- (void) func; //TODO implement similar to ThreadStackExecutor
assert(taskLimit >= watermark);
_thread.start();
}
@@ -75,7 +75,7 @@ SingleExecutor::execute(Task::UP task) {
void
SingleExecutor::setTaskLimit(uint32_t taskLimit) {
- _wantedTaskLimit = std::max(vespalib::roundUp2inN(taskLimit), size_t(_watermark));
+ _wantedTaskLimit = vespalib::roundUp2inN(taskLimit);
}
void
@@ -117,7 +117,7 @@ SingleExecutor::run() {
while (!_thread.stopped()) {
drain_tasks();
_producerCondition.notify_all();
- _wakeupConsumerAt.store(_wp.load(std::memory_order_relaxed) + _watermark, std::memory_order_relaxed);
+ _wakeupConsumerAt.store(_wp.load(std::memory_order_relaxed) + get_watermark(), std::memory_order_relaxed);
Lock lock(_mutex);
if (numTasks() <= 0) {
steady_time now = steady_clock::now();
@@ -159,10 +159,11 @@ SingleExecutor::wait_for_room(Lock & lock) {
drain(lock);
_tasks = std::make_unique<Task::UP[]>(_wantedTaskLimit);
_taskLimit = _wantedTaskLimit.load();
+ _watermark = _taskLimit * _watermarkRatio;
}
_queueSize.add(numTasks());
while (numTasks() >= _taskLimit.load(std::memory_order_relaxed)) {
- sleepProducer(lock, _reactionTime, wp - _watermark);
+ sleepProducer(lock, _reactionTime, wp - get_watermark());
}
}
diff --git a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h
index 7d868322558..e76e3f17a41 100644
--- a/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h
+++ b/staging_vespalib/src/vespa/vespalib/util/singleexecutor.h
@@ -28,7 +28,7 @@ public:
void wakeup() override;
size_t getNumThreads() const override;
uint32_t getTaskLimit() const override { return _taskLimit.load(std::memory_order_relaxed); }
- uint32_t get_watermark() const { return _watermark; }
+ uint32_t get_watermark() const { return _watermark.load(std::memory_order_relaxed); }
duration get_reaction_time() const { return _reactionTime; }
ExecutorStats getStats() override;
SingleExecutor & shutdown() override;
@@ -47,6 +47,7 @@ private:
uint64_t numTasks() const {
return _wp.load(std::memory_order_relaxed) - _rp.load(std::memory_order_acquire);
}
+ const double _watermarkRatio;
std::atomic<uint32_t> _taskLimit;
std::atomic<uint32_t> _wantedTaskLimit;
std::atomic<uint64_t> _rp;
@@ -63,7 +64,7 @@ private:
std::atomic<uint64_t> _wakeupConsumerAt;
std::atomic<uint64_t> _producerNeedWakeupAt;
std::atomic<uint64_t> _wp;
- const uint32_t _watermark;
+ std::atomic<uint32_t> _watermark;
const duration _reactionTime;
bool _closed;
};
diff --git a/standalone-container/src/main/sh/standalone-container.sh b/standalone-container/src/main/sh/standalone-container.sh
index f3048690eec..b34535c6867 100755
--- a/standalone-container/src/main/sh/standalone-container.sh
+++ b/standalone-container/src/main/sh/standalone-container.sh
@@ -175,7 +175,6 @@ StartCommand() {
--add-opens=java.base/java.nio=ALL-UNNAMED \
--add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
- --add-opens=java.base/sun.security.util=ALL-UNNAMED \
-Djava.library.path="$VESPA_HOME/lib64" \
-Djava.awt.headless=true \
-Dsun.rmi.dgc.client.gcInterval=3600000 \
diff --git a/storage/src/tests/distributor/getoperationtest.cpp b/storage/src/tests/distributor/getoperationtest.cpp
index dfe4f09de3f..9fecb005659 100644
--- a/storage/src/tests/distributor/getoperationtest.cpp
+++ b/storage/src/tests/distributor/getoperationtest.cpp
@@ -267,6 +267,26 @@ TEST_F(GetOperationTest, send_to_all_invalid_nodes_when_inconsistent) {
EXPECT_EQ("newauthor", getLastReplyAuthor());
}
+// GetOperation document-level consistency checks are used by the multi-phase update
+// logic to see if we can fall back to a fast path even though not all replicas are in sync.
+// Empty replicas are not considered part of the send-set, so only looking at replies from
+// replicas _sent_ to will not detect this case.
+// If we haphazardly treat empty replicas as implicitly being in sync, we risk triggering
+// undetectable inconsistencies at the document level. This can happen if we send create-if-missing
+// updates to an empty replica as well as a non-empty replica, and the document exists in the
+// latter replica. The document would then be implicitly created on the empty replica with the
+// same timestamp as that of the non-empty one, even though their contents would almost
+// certainly differ.
+TEST_F(GetOperationTest, get_not_sent_to_empty_replicas_but_bucket_tagged_as_inconsistent) {
+ setClusterState("distributor:1 storage:4");
+ addNodesToBucketDB(bucketId, "2=0/0/0,3=1/2/3");
+ sendGet();
+ ASSERT_EQ("Get => 3", _sender.getCommands(true));
+ ASSERT_NO_FATAL_FAILURE(sendReply(0, api::ReturnCode::OK, "newauthor", 2));
+ EXPECT_FALSE(op->any_replicas_failed());
+ EXPECT_FALSE(last_reply_had_consistent_replicas());
+}
+
TEST_F(GetOperationTest, inconsistent_split) {
setClusterState("distributor:1 storage:4");
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index a047fb7d79c..b02395717e0 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -51,9 +51,8 @@ public:
document::BucketId createAndSendSampleDocument(vespalib::duration timeout);
void sendReply(int idx = -1,
- api::ReturnCode::Result result
- = api::ReturnCode::OK,
- api::BucketInfo info = api::BucketInfo(1,2,3,4,5))
+ api::ReturnCode::Result result = api::ReturnCode::OK,
+ api::BucketInfo info = api::BucketInfo(1,2,3,4,5))
{
ASSERT_FALSE(_sender.commands().empty());
if (idx == -1) {
@@ -152,6 +151,33 @@ TEST_F(PutOperationTest, bucket_database_gets_special_entry_when_CreateBucket_se
ASSERT_EQ("Create bucket => 0,Put => 0", _sender.getCommands(true));
}
+TEST_F(PutOperationTest, failed_CreateBucket_removes_replica_from_db_and_sends_RequestBucketInfo) {
+ setup_stripe(2, 2, "distributor:1 storage:2");
+
+ auto doc = createDummyDocument("test", "test");
+ sendPut(createPut(doc));
+
+ ASSERT_EQ("Create bucket => 1,Create bucket => 0,Put => 1,Put => 0", _sender.getCommands(true));
+
+ // Simulate timeouts on node 1. Replica existence is in a Schrödinger's cat state until we send
+ // a RequestBucketInfo to the node and open the box to find out for sure.
+ sendReply(0, api::ReturnCode::TIMEOUT, api::BucketInfo()); // CreateBucket
+ sendReply(2, api::ReturnCode::TIMEOUT, api::BucketInfo()); // Put
+ // Pretend everything went fine on node 0
+ sendReply(1); // CreateBucket
+ sendReply(3); // Put
+
+ ASSERT_EQ("BucketId(0x4000000000008f09) : "
+ "node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)",
+ dumpBucket(operation_context().make_split_bit_constrained_bucket_id(doc->getId())));
+
+ // TODO remove revert concept; does not make sense with Proton (since it's not a multi-version store and
+ // therefore does not have anything to revert back to) and is config-disabled by default for this provider.
+ ASSERT_EQ("RequestBucketInfoCommand(1 buckets, super bucket BucketId(0x4000000000008f09). ) => 1,"
+ "Revert(BucketId(0x4000000000008f09)) => 0",
+ _sender.getCommands(true, true, 4));
+}
+
TEST_F(PutOperationTest, send_inline_split_before_put_if_bucket_too_large) {
setup_stripe(1, 1, "storage:1 distributor:1");
auto cfg = make_config();
diff --git a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
index a3f0182ba30..1752de5fb80 100644
--- a/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
+++ b/storage/src/tests/persistence/filestorage/operationabortingtest.cpp
@@ -22,6 +22,8 @@ namespace storage {
namespace {
+VESPA_THREAD_STACK_TAG(test_thread);
+
// Exploit the fact that PersistenceProviderWrapper already provides a forwarding
// implementation of all SPI calls, so we can selectively override.
class BlockingMockProvider : public PersistenceProviderWrapper
@@ -294,7 +296,7 @@ TEST_F(OperationAbortingTest, wait_for_current_operation_completion_for_aborted_
auto abortCmd = makeAbortCmd(abortSet);
SendTask sendTask(abortCmd, *_queueBarrier, c.top);
- vespalib::Thread thread(sendTask);
+ vespalib::Thread thread(sendTask, test_thread);
thread.start();
LOG(debug, "waiting for threads to reach barriers");
diff --git a/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp
index 06872cadde6..868de8d0ae2 100644
--- a/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/getoperation.cpp
@@ -267,6 +267,11 @@ GetOperation::assignTargetNodeGroups(const BucketDatabase::ReadGuard& read_guard
_responses[GroupId(e.getBucketId(), copy.getChecksum(), copy.getNode())].emplace_back(copy);
} else if (!copy.empty()) {
_responses[GroupId(e.getBucketId(), copy.getChecksum(), -1)].emplace_back(copy);
+ } else { // empty replica
+ // We must treat a bucket with empty replicas as inherently inconsistent.
+ // See GetOperationTest::get_not_sent_to_empty_replicas_but_bucket_tagged_as_inconsistent for
+ // rationale as to why this is the case.
+ _has_replica_inconsistency = true;
}
}
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
index a16eef0ab6f..5233e5678fa 100644
--- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
@@ -90,6 +90,7 @@ UpdateOperation::onStart(DistributorStripeMessageSender& sender)
// An UpdateOperation should only be started iff all replicas are consistent
// with each other, so sampling a single replica should be equal to sampling them all.
+ // FIXME this no longer holds when replicas are consistent at the _document_ level but not at the _bucket_ level.
assert(_entries[0].getBucketInfo().getNodeCount() > 0); // Empty buckets are not allowed
_infoAtSendTime = _entries[0].getBucketInfo().getNodeRef(0).getBucketInfo();
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
index 8cacbb0bf5a..45129f7be04 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
@@ -259,7 +259,14 @@ PersistenceMessageTrackerImpl::handleCreateBucketReply(
&& reply.getResult().getResult() != api::ReturnCode::EXISTS)
{
LOG(spam, "Create bucket reply failed, so deleting it from bucket db");
+ // We don't know if the bucket exists at this point, so we remove it from the DB.
+ // If we get subsequent write load the bucket will be implicitly created again
+ // (which is an idempotent operation) and all is well. But since we don't know _if_
+ // we'll get any further write load we send a RequestBucketInfo to bring the bucket
+ // back into the DB if it _was_ successfully created. We have to do the latter to
+ // avoid the risk of introducing an orphaned bucket replica on the content node.
_op_ctx.remove_node_from_bucket_database(reply.getBucket(), node);
+ _op_ctx.recheck_bucket_info(node, reply.getBucket());
}
}
diff --git a/vbench/src/apps/vbench/vbench.cpp b/vbench/src/apps/vbench/vbench.cpp
index 00499519dcc..edaa68b8838 100644
--- a/vbench/src/apps/vbench/vbench.cpp
+++ b/vbench/src/apps/vbench/vbench.cpp
@@ -8,6 +8,8 @@
using namespace vbench;
+VESPA_THREAD_STACK_TAG(vbench_thread);
+
typedef vespalib::SignalHandler SIG;
struct NotifyDone : public vespalib::Runnable {
@@ -31,8 +33,7 @@ int run(const std::string &cfg_name) {
return 1;
}
vespalib::Slime cfg;
- vespalib::Memory mapped_cfg(cfg_file.get().data,
- cfg_file.get().size);
+ vespalib::Memory mapped_cfg(cfg_file.get().data, cfg_file.get().size);
if (!vespalib::slime::JsonFormat::decode(mapped_cfg, cfg)) {
fprintf(stderr, "unable to parse config file: %s\n",
cfg.toString().c_str());
@@ -43,7 +44,7 @@ int run(const std::string &cfg_name) {
VBench vbench(cfg);
NotifyDone notify(done);
vespalib::RunnablePair runBoth(vbench, notify);
- vespalib::Thread thread(runBoth);
+ vespalib::Thread thread(runBoth, vbench_thread);
thread.start();
while (!SIG::INT.check() && !SIG::TERM.check() && !done.await(1s)) {}
if (!done.await(vespalib::duration::zero())) {
diff --git a/vbench/src/tests/dispatcher/dispatcher_test.cpp b/vbench/src/tests/dispatcher/dispatcher_test.cpp
index b2c002e3e50..618940aab57 100644
--- a/vbench/src/tests/dispatcher/dispatcher_test.cpp
+++ b/vbench/src/tests/dispatcher/dispatcher_test.cpp
@@ -17,6 +17,9 @@ struct Fetcher : public vespalib::Runnable {
void run() override { handler.handle(provider.provide()); }
};
+VESPA_THREAD_STACK_TAG(fetcher1_thread);
+VESPA_THREAD_STACK_TAG(fetcher2_thread);
+
TEST("dispatcher") {
MyHandler dropped;
MyHandler handler1;
@@ -24,8 +27,8 @@ TEST("dispatcher") {
Dispatcher<int> dispatcher(dropped);
Fetcher fetcher1(dispatcher, handler1);
Fetcher fetcher2(dispatcher, handler2);
- vespalib::Thread thread1(fetcher1);
- vespalib::Thread thread2(fetcher2);
+ vespalib::Thread thread1(fetcher1, fetcher1_thread);
+ vespalib::Thread thread2(fetcher2, fetcher2_thread);
thread1.start();
EXPECT_TRUE(dispatcher.waitForThreads(1, 512));
thread2.start();
diff --git a/vbench/src/tests/handler_thread/handler_thread_test.cpp b/vbench/src/tests/handler_thread/handler_thread_test.cpp
index fd7d630f705..97a12e82ac8 100644
--- a/vbench/src/tests/handler_thread/handler_thread_test.cpp
+++ b/vbench/src/tests/handler_thread/handler_thread_test.cpp
@@ -15,9 +15,11 @@ struct MyHandler : Handler<int> {
MyHandler::~MyHandler() = default;
+VESPA_THREAD_STACK_TAG(test_thread);
+
TEST("handler thread") {
MyHandler handler;
- HandlerThread<int> th(handler);
+ HandlerThread<int> th(handler, test_thread);
th.handle(std::unique_ptr<int>(new int(1)));
th.handle(std::unique_ptr<int>(new int(2)));
th.handle(std::unique_ptr<int>(new int(3)));
diff --git a/vbench/src/vbench/core/handler_thread.h b/vbench/src/vbench/core/handler_thread.h
index b4aaf08eee8..402ecbeb0dc 100644
--- a/vbench/src/vbench/core/handler_thread.h
+++ b/vbench/src/vbench/core/handler_thread.h
@@ -33,7 +33,7 @@ private:
void run() override;
public:
- HandlerThread(Handler<T> &next);
+ HandlerThread(Handler<T> &next, init_fun_t init_fun);
~HandlerThread();
void handle(std::unique_ptr<T> obj) override;
void join() override;
diff --git a/vbench/src/vbench/core/handler_thread.hpp b/vbench/src/vbench/core/handler_thread.hpp
index 3d1dc423411..56cc0a7771d 100644
--- a/vbench/src/vbench/core/handler_thread.hpp
+++ b/vbench/src/vbench/core/handler_thread.hpp
@@ -23,12 +23,12 @@ HandlerThread<T>::run()
}
template <typename T>
-HandlerThread<T>::HandlerThread(Handler<T> &next)
+HandlerThread<T>::HandlerThread(Handler<T> &next, init_fun_t init_fun)
: _lock(),
_cond(),
_queue(),
_next(next),
- _thread(*this),
+ _thread(*this, init_fun),
_done(false)
{
_thread.start();
diff --git a/vbench/src/vbench/vbench/request_scheduler.cpp b/vbench/src/vbench/vbench/request_scheduler.cpp
index 80aec6c308e..95d29181b1f 100644
--- a/vbench/src/vbench/vbench/request_scheduler.cpp
+++ b/vbench/src/vbench/vbench/request_scheduler.cpp
@@ -1,11 +1,13 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "request_scheduler.h"
-
#include <vbench/core/timer.h>
namespace vbench {
+VESPA_THREAD_STACK_TAG(vbench_request_scheduler_thread);
+VESPA_THREAD_STACK_TAG(vbench_handler_thread);
+
void
RequestScheduler::run()
{
@@ -24,16 +26,16 @@ RequestScheduler::run()
RequestScheduler::RequestScheduler(CryptoEngine::SP crypto, Handler<Request> &next, size_t numWorkers)
: _timer(),
- _proxy(next),
+ _proxy(next, vbench_handler_thread),
_queue(10.0, 0.020),
_droppedTagger(_proxy),
_dispatcher(_droppedTagger),
- _thread(*this),
+ _thread(*this, vbench_request_scheduler_thread),
_connectionPool(std::move(crypto), _timer),
_workers()
{
for (size_t i = 0; i < numWorkers; ++i) {
- _workers.push_back(std::unique_ptr<Worker>(new Worker(_dispatcher, _proxy, _connectionPool, _timer)));
+ _workers.push_back(std::make_unique<Worker>(_dispatcher, _proxy, _connectionPool, _timer));
}
_dispatcher.waitForThreads(numWorkers, 256);
}
diff --git a/vbench/src/vbench/vbench/vbench.cpp b/vbench/src/vbench/vbench/vbench.cpp
index d636f7a1cd7..9a5adad262e 100644
--- a/vbench/src/vbench/vbench/vbench.cpp
+++ b/vbench/src/vbench/vbench/vbench.cpp
@@ -40,6 +40,8 @@ CryptoEngine::SP setup_crypto(const vespalib::slime::Inspector &tls) {
} // namespace vbench::<unnamed>
+VESPA_THREAD_STACK_TAG(vbench_inputchain_generator);
+
VBench::VBench(const vespalib::Slime &cfg)
: _factory(),
_analyzers(),
@@ -76,7 +78,7 @@ VBench::VBench(const vespalib::Slime &cfg)
}
inputChain->generator = _factory.createGenerator(generator, *inputChain->taggers.back());
if (inputChain->generator.get() != 0) {
- inputChain->thread.reset(new vespalib::Thread(*inputChain->generator));
+ inputChain->thread.reset(new vespalib::Thread(*inputChain->generator, vbench_inputchain_generator));
_inputs.push_back(std::move(inputChain));
}
}
diff --git a/vbench/src/vbench/vbench/worker.cpp b/vbench/src/vbench/vbench/worker.cpp
index a64956f710b..afccc7de39f 100644
--- a/vbench/src/vbench/vbench/worker.cpp
+++ b/vbench/src/vbench/vbench/worker.cpp
@@ -5,6 +5,8 @@
namespace vbench {
+VESPA_THREAD_STACK_TAG(vbench_worker_thread);
+
void
Worker::run()
{
@@ -22,7 +24,7 @@ Worker::run()
Worker::Worker(Provider<Request> &provider, Handler<Request> &next,
HttpConnectionPool &pool, Timer &timer)
- : _thread(*this),
+ : _thread(*this, vbench_worker_thread),
_provider(provider),
_next(next),
_pool(pool),
diff --git a/vespa-feed-client/abi-spec.json b/vespa-feed-client-api/abi-spec.json
index 7f78e81b447..cabe9afde20 100644
--- a/vespa-feed-client/abi-spec.json
+++ b/vespa-feed-client-api/abi-spec.json
@@ -1,20 +1,4 @@
{
- "ai.vespa.feed.client.BenchmarkingCluster": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "ai.vespa.feed.client.Cluster"
- ],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(ai.vespa.feed.client.Cluster)",
- "public void dispatch(ai.vespa.feed.client.HttpRequest, java.util.concurrent.CompletableFuture)",
- "public ai.vespa.feed.client.OperationStats stats()",
- "public void close()"
- ],
- "fields": []
- },
"ai.vespa.feed.client.DocumentId": {
"superClass": "java.lang.Object",
"interfaces": [],
@@ -37,21 +21,6 @@
],
"fields": []
},
- "ai.vespa.feed.client.DynamicThrottler": {
- "superClass": "ai.vespa.feed.client.StaticThrottler",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(ai.vespa.feed.client.FeedClientBuilder)",
- "public void sent(long, java.util.concurrent.CompletableFuture)",
- "public void success()",
- "public void throttled(long)",
- "public long targetInflight()"
- ],
- "fields": []
- },
"ai.vespa.feed.client.FeedClient$CircuitBreaker$State": {
"superClass": "java.lang.Enum",
"interfaces": [],
@@ -145,27 +114,30 @@
"superClass": "java.lang.Object",
"interfaces": [],
"attributes": [
- "public"
+ "public",
+ "interface",
+ "abstract"
],
"methods": [
"public static ai.vespa.feed.client.FeedClientBuilder create(java.net.URI)",
"public static ai.vespa.feed.client.FeedClientBuilder create(java.util.List)",
- "public ai.vespa.feed.client.FeedClientBuilder setConnectionsPerEndpoint(int)",
- "public ai.vespa.feed.client.FeedClientBuilder setMaxStreamPerConnection(int)",
- "public ai.vespa.feed.client.FeedClientBuilder setSslContext(javax.net.ssl.SSLContext)",
- "public ai.vespa.feed.client.FeedClientBuilder setHostnameVerifier(javax.net.ssl.HostnameVerifier)",
- "public ai.vespa.feed.client.FeedClientBuilder noBenchmarking()",
- "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.lang.String)",
- "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.util.function.Supplier)",
- "public ai.vespa.feed.client.FeedClientBuilder setRetryStrategy(ai.vespa.feed.client.FeedClient$RetryStrategy)",
- "public ai.vespa.feed.client.FeedClientBuilder setCircuitBreaker(ai.vespa.feed.client.FeedClient$CircuitBreaker)",
- "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.nio.file.Path, java.nio.file.Path)",
- "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.util.Collection, java.security.PrivateKey)",
- "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.security.cert.X509Certificate, java.security.PrivateKey)",
- "public ai.vespa.feed.client.FeedClientBuilder setDryrun(boolean)",
- "public ai.vespa.feed.client.FeedClientBuilder setCaCertificatesFile(java.nio.file.Path)",
- "public ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)",
- "public ai.vespa.feed.client.FeedClient build()"
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setConnectionsPerEndpoint(int)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setMaxStreamPerConnection(int)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setSslContext(javax.net.ssl.SSLContext)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setHostnameVerifier(javax.net.ssl.HostnameVerifier)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder noBenchmarking()",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.lang.String)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.util.function.Supplier)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setRetryStrategy(ai.vespa.feed.client.FeedClient$RetryStrategy)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setCircuitBreaker(ai.vespa.feed.client.FeedClient$CircuitBreaker)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.nio.file.Path, java.nio.file.Path)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.util.Collection, java.security.PrivateKey)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setCertificate(java.security.cert.X509Certificate, java.security.PrivateKey)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setDryrun(boolean)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setCaCertificatesFile(java.nio.file.Path)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)",
+ "public abstract ai.vespa.feed.client.FeedClientBuilder setEndpointUris(java.util.List)",
+ "public abstract ai.vespa.feed.client.FeedClient build()"
],
"fields": []
},
@@ -186,21 +158,18 @@
],
"fields": []
},
- "ai.vespa.feed.client.GracePeriodCircuitBreaker": {
+ "ai.vespa.feed.client.HttpResponse": {
"superClass": "java.lang.Object",
- "interfaces": [
- "ai.vespa.feed.client.FeedClient$CircuitBreaker"
- ],
+ "interfaces": [],
"attributes": [
- "public"
+ "public",
+ "interface",
+ "abstract"
],
"methods": [
- "public void <init>(java.time.Duration)",
- "public void <init>(java.time.Duration, java.time.Duration)",
- "public void success()",
- "public void failure(ai.vespa.feed.client.HttpResponse)",
- "public void failure(java.lang.Throwable)",
- "public ai.vespa.feed.client.FeedClient$CircuitBreaker$State state()"
+ "public abstract int code()",
+ "public abstract byte[] body()",
+ "public static ai.vespa.feed.client.HttpResponse of(int, byte[])"
],
"fields": []
},
@@ -333,14 +302,15 @@
"superClass": "java.lang.Object",
"interfaces": [],
"attributes": [
- "public"
+ "public",
+ "interface",
+ "abstract"
],
"methods": [
- "public ai.vespa.feed.client.Result$Type type()",
- "public ai.vespa.feed.client.DocumentId documentId()",
- "public java.util.Optional resultMessage()",
- "public java.util.Optional traceMessage()",
- "public java.lang.String toString()"
+ "public abstract ai.vespa.feed.client.Result$Type type()",
+ "public abstract ai.vespa.feed.client.DocumentId documentId()",
+ "public abstract java.util.Optional resultMessage()",
+ "public abstract java.util.Optional traceMessage()"
],
"fields": []
},
@@ -367,25 +337,5 @@
"public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)"
],
"fields": []
- },
- "ai.vespa.feed.client.StaticThrottler": {
- "superClass": "java.lang.Object",
- "interfaces": [
- "ai.vespa.feed.client.Throttler"
- ],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(ai.vespa.feed.client.FeedClientBuilder)",
- "public void sent(long, java.util.concurrent.CompletableFuture)",
- "public void success()",
- "public void throttled(long)",
- "public long targetInflight()"
- ],
- "fields": [
- "protected final long maxInflight",
- "protected final long minInflight"
- ]
}
} \ No newline at end of file
diff --git a/vespa-feed-client-api/pom.xml b/vespa-feed-client-api/pom.xml
new file mode 100644
index 00000000000..df5fd531f06
--- /dev/null
+++ b/vespa-feed-client-api/pom.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!-- Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <parent>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>parent</artifactId>
+ <version>7-SNAPSHOT</version>
+ <relativePath>../parent/pom.xml</relativePath>
+ </parent>
+ <artifactId>vespa-feed-client-api</artifactId>
+ <packaging>jar</packaging>
+ <version>7-SNAPSHOT</version>
+
+ <dependencies>
+ <!-- compile scope -->
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>annotations</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.fasterxml.jackson.core</groupId>
+ <artifactId>jackson-core</artifactId>
+ <scope>compile</scope>
+ </dependency>
+
+ <!-- test scope -->
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter</artifactId>
+ <scope>test</scope>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <release>${vespaClients.jdk.releaseVersion}</release>
+ <showDeprecation>true</showDeprecation>
+ <compilerArgs>
+ <arg>-Xlint:all</arg>
+ <arg>-Xlint:-serial</arg>
+ <arg>-Werror</arg>
+ </compilerArgs>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>abi-check-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+</project>
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/DocumentId.java
index 5474bcfda01..5474bcfda01 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/DocumentId.java
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClient.java
index d463c611d6a..d463c611d6a 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClient.java
diff --git a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
new file mode 100644
index 00000000000..daf3f62dac1
--- /dev/null
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
@@ -0,0 +1,128 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.SSLContext;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.net.URI;
+import java.nio.file.Path;
+import java.security.PrivateKey;
+import java.security.cert.X509Certificate;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ServiceLoader;
+import java.util.function.Supplier;
+
+/**
+ * Builder for creating a {@link FeedClient} instance.
+ *
+ * @author bjorncs
+ * @author jonmv
+ */
+public interface FeedClientBuilder {
+
+ /** Creates a builder for a single container endpoint */
+ static FeedClientBuilder create(URI endpoint) { return create(Collections.singletonList(endpoint)); }
+
+ /** Creates a builder for multiple container endpoints */
+ static FeedClientBuilder create(List<URI> endpoints) {
+ Iterator<FeedClientBuilder> iterator = ServiceLoader.load(FeedClientBuilder.class).iterator();
+ if (iterator.hasNext()) {
+ return iterator.next().setEndpointUris(endpoints);
+ } else {
+ try {
+ Class<?> aClass = Class.forName("ai.vespa.feed.client.impl.FeedClientBuilderImpl");
+ for (Constructor<?> constructor : aClass.getConstructors()) {
+ if (constructor.getParameterTypes().length==0) {
+ return ((FeedClientBuilder)constructor.newInstance()).setEndpointUris(endpoints);
+ }
+ }
+ throw new RuntimeException("Could not find Feed client builder implementation");
+ } catch (ClassNotFoundException | InvocationTargetException | InstantiationException | IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ /**
+ * Sets the number of connections this client will use per endpoint.
+ *
+ * A reasonable value here is a value that lets all feed clients (if more than one)
+ * collectively have a number of connections which is a small multiple of the numbers
+ * of containers in the cluster to feed, so load can be balanced across these containers.
+ * In general, this value should be kept as low as possible, but poor connectivity
+ * between feeder and cluster may also warrant a higher number of connections.
+ */
+ FeedClientBuilder setConnectionsPerEndpoint(int max);
+
+ /**
+ * Sets the maximum number of streams per HTTP/2 connection for this client.
+ *
+ * This determines the maximum number of concurrent, inflight requests for this client,
+ * which is {@code maxConnections * maxStreamsPerConnection}. Prefer more streams over
+ * more connections, when possible.
+ * The feed client automatically throttles load to achieve the best throughput, and the
+ * actual number of streams per connection is usually lower than the maximum.
+ */
+ FeedClientBuilder setMaxStreamPerConnection(int max);
+
+ /** Sets {@link SSLContext} instance. */
+ FeedClientBuilder setSslContext(SSLContext context);
+
+ /** Sets {@link HostnameVerifier} instance (e.g for disabling default SSL hostname verification). */
+ FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier);
+
+ /** Turns off benchmarking. Attempting to get {@link FeedClient#stats()} will result in an exception. */
+ FeedClientBuilder noBenchmarking();
+
+ /** Adds HTTP request header to all client requests. */
+ FeedClientBuilder addRequestHeader(String name, String value);
+
+ /**
+ * Adds HTTP request header to all client requests. Value {@link Supplier} is invoked for each HTTP request,
+ * i.e. value can be dynamically updated during a feed.
+ */
+ FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier);
+
+ /**
+ * Overrides default retry strategy.
+ * @see FeedClient.RetryStrategy
+ */
+ FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy);
+
+ /**
+ * Overrides default circuit breaker.
+ * @see FeedClient.CircuitBreaker
+ */
+ FeedClientBuilder setCircuitBreaker(FeedClient.CircuitBreaker breaker);
+
+ /** Sets path to client SSL certificate/key PEM files */
+ FeedClientBuilder setCertificate(Path certificatePemFile, Path privateKeyPemFile);
+
+ /** Sets client SSL certificates/key */
+ FeedClientBuilder setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey);
+
+ /** Sets client SSL certificate/key */
+ FeedClientBuilder setCertificate(X509Certificate certificate, PrivateKey privateKey);
+
+ FeedClientBuilder setDryrun(boolean enabled);
+
+ /**
+ * Overrides JVM default SSL truststore
+ * @param caCertificatesFile Path to PEM encoded file containing trusted certificates
+ */
+ FeedClientBuilder setCaCertificatesFile(Path caCertificatesFile);
+
+ /** Overrides JVM default SSL truststore */
+ FeedClientBuilder setCaCertificates(Collection<X509Certificate> caCertificates);
+
+ /** Overrides endpoint URIs for this client */
+ FeedClientBuilder setEndpointUris(List<URI> endpoints);
+
+ /** Constructs instance of {@link FeedClient} from builder configuration */
+ FeedClient build();
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedException.java
index 1936eb09418..1936eb09418 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/FeedException.java
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/HttpResponse.java
index 07fdb2d7257..62850fef32d 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/HttpResponse.java
@@ -1,7 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
-interface HttpResponse {
+public interface HttpResponse {
int code();
byte[] body();
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java
index 2d7caea9f26..41b432449df 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/JsonFeeder.java
@@ -387,13 +387,13 @@ public class JsonFeeder implements Closeable {
CompletableFuture<Result> next() throws IOException {
JsonToken token = parser.nextToken();
- if (multipleOperations && ! arrayPrefixParsed && token == START_ARRAY) {
+ if (multipleOperations && ! arrayPrefixParsed && token == JsonToken.START_ARRAY) {
arrayPrefixParsed = true;
token = parser.nextToken();
}
- if (token == END_ARRAY && multipleOperations) return null;
+ if (token == JsonToken.END_ARRAY && multipleOperations) return null;
else if (token == null && ! arrayPrefixParsed) return null;
- else if (token != START_OBJECT) throw parseException("Unexpected token '" + parser.currentToken() + "'");
+ else if (token != JsonToken.START_OBJECT) throw parseException("Unexpected token '" + parser.currentToken() + "'");
long start = 0, end = -1;
OperationType type = null;
DocumentId id = null;
@@ -459,8 +459,8 @@ public class JsonFeeder implements Closeable {
private String readString() throws IOException {
String value = parser.nextTextValue();
if (value == null)
- throw new OperationParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
- ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
+ throw new OperationParseException("Expected '" + JsonToken.VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
return value;
}
@@ -468,8 +468,8 @@ public class JsonFeeder implements Closeable {
private boolean readBoolean() throws IOException {
Boolean value = parser.nextBooleanValue();
if (value == null)
- throw new OperationParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
- ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
+ throw new OperationParseException("Expected '" + JsonToken.VALUE_FALSE + "' or '" + JsonToken.VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
return value;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParameters.java
index 0ec40e114df..0ec40e114df 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParameters.java
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParseException.java
index f60368dd67f..4404462be2e 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationParseException.java
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
+import ai.vespa.feed.client.FeedException;
+
/**
* Signals that supplied JSON for a document/operation is invalid
*
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationStats.java
index ab2faf245d8..ab2faf245d8 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/OperationStats.java
diff --git a/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java
new file mode 100644
index 00000000000..fa114f6a183
--- /dev/null
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/Result.java
@@ -0,0 +1,23 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.Optional;
+
+/**
+ * Result for a document operation which completed normally.
+ *
+ * @author bjorncs
+ * @author jonmv
+ */
+public interface Result {
+
+ enum Type {
+ success,
+ conditionNotMet
+ }
+
+ Type type();
+ DocumentId documentId();
+ Optional<String> resultMessage();
+ Optional<String> traceMessage();
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultException.java
index d9eaff40d74..27803898c01 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultException.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultException.java
@@ -1,6 +1,10 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedException;
+import ai.vespa.feed.client.OperationParameters;
+
import java.util.Optional;
/**
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultParseException.java
index 947ab9f0560..f149b13196b 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/ResultParseException.java
@@ -1,6 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedException;
+
/**
* Signals that the client was unable to obtain a proper response/result from container
*
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/package-info.java
index daab16a9ff2..daab16a9ff2 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java
+++ b/vespa-feed-client-api/src/main/java/ai/vespa/feed/client/package-info.java
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
index e4fb5cb5bef..d795678db39 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
+++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
+import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayInputStream;
@@ -14,6 +15,7 @@ import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -148,7 +150,7 @@ class JsonFeederTest {
" }\n" +
" }\n";
Result result = feeder.feedSingle(json).get();
- assertEquals(DocumentId.of("id:ns:type::abc1"), result.documentId());
+ Assertions.assertEquals(DocumentId.of("id:ns:type::abc1"), result.documentId());
assertEquals(Result.Type.success, result.type());
assertEquals("success", result.resultMessage().get());
client.assertPutOperation("abc1", "{\"fields\":{\n \"lul\":\"lal\"\n }}");
@@ -188,7 +190,12 @@ class JsonFeederTest {
public void close(boolean graceful) { }
private CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
- return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null));
+ return CompletableFuture.completedFuture(new Result(){
+ @Override public Type type() { return Type.success; }
+ @Override public DocumentId documentId() { return documentId; }
+ @Override public Optional<String> resultMessage() { return Optional.of("success"); }
+ @Override public Optional<String> traceMessage() { return Optional.empty(); }
+ });
}
void assertDocumentIds(Collection<DocumentId> keys, String... expectedUserSpecificIds) {
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
index b951fb62fb5..b951fb62fb5 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
+++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java
index 3d4ce150fcf..3d4ce150fcf 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java
+++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java
index 4e6473a6568..4e6473a6568 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java
+++ b/vespa-feed-client-api/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java
diff --git a/vespa-feed-client-cli/pom.xml b/vespa-feed-client-cli/pom.xml
index aff625fe3a4..16d6f8827f2 100644
--- a/vespa-feed-client-cli/pom.xml
+++ b/vespa-feed-client-cli/pom.xml
@@ -74,7 +74,7 @@
<attach>false</attach>
<archive>
<manifest>
- <mainClass>ai.vespa.feed.client.CliClient</mainClass>
+ <mainClass>ai.vespa.feed.client.impl.CliClient</mainClass>
</manifest>
</archive>
<descriptorRefs>
diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliArguments.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java
index 0de81d2de36..2fc7e5af7b4 100644
--- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliArguments.java
+++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java
@@ -1,5 +1,5 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java
index e40b543f26a..7e036b8dec3 100644
--- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java
+++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliClient.java
@@ -1,7 +1,14 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.FeedException;
+import ai.vespa.feed.client.JsonFeeder;
import ai.vespa.feed.client.JsonFeeder.ResultCallback;
+import ai.vespa.feed.client.OperationStats;
+import ai.vespa.feed.client.Result;
+import ai.vespa.feed.client.ResultException;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh
index b236a516691..c4e70c362b0 100755
--- a/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh
+++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh
@@ -6,4 +6,4 @@ exec java \
-Xms128m -Xmx2048m \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
-Djava.util.logging.config.file=`dirname $0`/logging.properties \
--cp `dirname $0`/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@"
+-cp `dirname $0`/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.impl.CliClient "$@"
diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
index fbd172e7423..7dbdc056524 100755
--- a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
+++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
@@ -81,4 +81,4 @@ exec java \
-Xms128m -Xmx2048m $(getJavaOptionsIPV46) \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
-Djava.util.logging.config.file=${VESPA_HOME}/conf/vespa-feed-client/logging.properties \
--cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@"
+-cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.impl.CliClient "$@"
diff --git a/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java b/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java
index 622956db530..19b93c3172b 100644
--- a/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/CliArgumentsTest.java
+++ b/vespa-feed-client-cli/src/test/java/ai/vespa/feed/client/impl/CliArgumentsTest.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.impl.CliArguments;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
diff --git a/vespa-feed-client/pom.xml b/vespa-feed-client/pom.xml
index 68c9e4b4b7c..a53e7f78b20 100644
--- a/vespa-feed-client/pom.xml
+++ b/vespa-feed-client/pom.xml
@@ -34,6 +34,11 @@
<artifactId>jackson-core</artifactId>
<scope>compile</scope>
</dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>vespa-feed-client-api</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<!-- test scope -->
<dependency>
@@ -72,17 +77,13 @@
<executable>src/main/sh/vespa-version-generator.sh</executable>
<arguments>
<argument>${project.basedir}/../dist/vtag.map</argument>
- <argument>${project.build.directory}/generated-sources/vespa-version/ai/vespa/feed/client/Vespa.java</argument>
+ <argument>${project.build.directory}/generated-sources/vespa-version/ai/vespa/feed/client/impl/Vespa.java</argument>
</arguments>
<sourceRoot>${project.build.directory}/generated-sources/vespa-version</sourceRoot>
</configuration>
</execution>
</executions>
</plugin>
- <plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>abi-check-plugin</artifactId>
- </plugin>
</plugins>
</build>
</project>
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java
index 52d7af2fb31..6dc9ec4efb1 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.HttpResponse;
import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
import org.apache.hc.client5.http.config.RequestConfig;
@@ -18,7 +19,6 @@ import org.apache.hc.core5.util.Timeout;
import javax.net.ssl.SSLContext;
import java.io.IOException;
import java.net.URI;
-import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -43,7 +43,7 @@ class ApacheCluster implements Cluster {
.setResponseTimeout(Timeout.ofMinutes(5))
.build();
- ApacheCluster(FeedClientBuilder builder) throws IOException {
+ ApacheCluster(FeedClientBuilderImpl builder) throws IOException {
for (URI endpoint : builder.endpoints)
for (int i = 0; i < builder.connectionsPerEndpoint; i++)
endpoints.add(new Endpoint(createHttpClient(builder), endpoint));
@@ -114,7 +114,7 @@ class ApacheCluster implements Cluster {
}
- private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilder builder) throws IOException {
+ private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException {
SSLContext sslContext = builder.constructSslContext();
String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites()));
if (allowedCiphers.length == 0)
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/BenchmarkingCluster.java
index 05ff6e99308..40049bad217 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/BenchmarkingCluster.java
@@ -1,5 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.HttpResponse;
+import ai.vespa.feed.client.OperationStats;
import java.util.HashMap;
import java.util.Map;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Cluster.java
index 57c028426fe..ee9188fdc2b 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Cluster.java
@@ -1,8 +1,10 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.HttpResponse;
+import ai.vespa.feed.client.OperationStats;
import java.io.Closeable;
-import java.util.Collections;
import java.util.concurrent.CompletableFuture;
/**
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DryrunCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DryrunCluster.java
index 282e4e14285..96cf7998681 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DryrunCluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DryrunCluster.java
@@ -1,5 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java
index a379a8b066b..5969fe267c0 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/DynamicThrottler.java
@@ -1,7 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.HttpResponse;
-import java.util.Arrays;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;
@@ -25,7 +26,7 @@ public class DynamicThrottler extends StaticThrottler {
private long startNanos = System.nanoTime();
private long sent = 0;
- public DynamicThrottler(FeedClientBuilder builder) {
+ public DynamicThrottler(FeedClientBuilderImpl builder) {
super(builder);
targetInflight = new AtomicLong(8 * minInflight);
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
index 3b79d47b494..7dafeb0b541 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
@@ -1,5 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;
@@ -16,6 +19,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.function.Supplier;
import static java.util.Objects.requireNonNull;
@@ -26,11 +30,11 @@ import static java.util.Objects.requireNonNull;
* @author bjorncs
* @author jonmv
*/
-public class FeedClientBuilder {
+public class FeedClientBuilderImpl implements FeedClientBuilder {
static final FeedClient.RetryStrategy defaultRetryStrategy = new FeedClient.RetryStrategy() { };
- final List<URI> endpoints;
+ List<URI> endpoints;
final Map<String, Supplier<String>> requestHeaders = new HashMap<>();
SSLContext sslContext;
HostnameVerifier hostnameVerifier;
@@ -47,72 +51,65 @@ public class FeedClientBuilder {
boolean benchmark = true;
boolean dryrun = false;
- /** Creates a builder for a single container endpoint **/
- public static FeedClientBuilder create(URI endpoint) { return new FeedClientBuilder(Collections.singletonList(endpoint)); }
- /** Creates a builder for multiple container endpoints **/
- public static FeedClientBuilder create(List<URI> endpoints) { return new FeedClientBuilder(endpoints); }
- private FeedClientBuilder(List<URI> endpoints) {
+ public FeedClientBuilderImpl() {
+ }
+
+ FeedClientBuilderImpl(List<URI> endpoints) {
+ this();
+ setEndpointUris(endpoints);
+ }
+
+ @Override
+ public FeedClientBuilder setEndpointUris(List<URI> endpoints) {
if (endpoints.isEmpty())
throw new IllegalArgumentException("At least one endpoint must be provided");
for (URI endpoint : endpoints)
requireNonNull(endpoint.getHost());
-
this.endpoints = new ArrayList<>(endpoints);
+ return this;
}
- /**
- * Sets the number of connections this client will use per endpoint.
- *
- * A reasonable value here is a value that lets all feed clients (if more than one)
- * collectively have a number of connections which is a small multiple of the numbers
- * of containers in the cluster to feed, so load can be balanced across these containers.
- * In general, this value should be kept as low as possible, but poor connectivity
- * between feeder and cluster may also warrant a higher number of connections.
- */
- public FeedClientBuilder setConnectionsPerEndpoint(int max) {
+ @Override
+ public FeedClientBuilderImpl setConnectionsPerEndpoint(int max) {
if (max < 1) throw new IllegalArgumentException("Max connections must be at least 1, but was " + max);
this.connectionsPerEndpoint = max;
return this;
}
- /**
- * Sets the maximum number of streams per HTTP/2 connection for this client.
- *
- * This determines the maximum number of concurrent, inflight requests for this client,
- * which is {@code maxConnections * maxStreamsPerConnection}. Prefer more streams over
- * more connections, when possible.
- * The feed client automatically throttles load to achieve the best throughput, and the
- * actual number of streams per connection is usually lower than the maximum.
- */
- public FeedClientBuilder setMaxStreamPerConnection(int max) {
+ @Override
+ public FeedClientBuilderImpl setMaxStreamPerConnection(int max) {
if (max < 1) throw new IllegalArgumentException("Max streams per connection must be at least 1, but was " + max);
this.maxStreamsPerConnection = max;
return this;
}
/** Sets {@link SSLContext} instance. */
- public FeedClientBuilder setSslContext(SSLContext context) {
+ @Override
+ public FeedClientBuilderImpl setSslContext(SSLContext context) {
this.sslContext = requireNonNull(context);
return this;
}
/** Sets {@link HostnameVerifier} instance (e.g for disabling default SSL hostname verification). */
- public FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier) {
+ @Override
+ public FeedClientBuilderImpl setHostnameVerifier(HostnameVerifier verifier) {
this.hostnameVerifier = requireNonNull(verifier);
return this;
}
/** Turns off benchmarking. Attempting to get {@link FeedClient#stats()} will result in an exception. */
- public FeedClientBuilder noBenchmarking() {
+ @Override
+ public FeedClientBuilderImpl noBenchmarking() {
this.benchmark = false;
return this;
}
/** Adds HTTP request header to all client requests. */
- public FeedClientBuilder addRequestHeader(String name, String value) {
+ @Override
+ public FeedClientBuilderImpl addRequestHeader(String name, String value) {
return addRequestHeader(name, () -> requireNonNull(value));
}
@@ -120,7 +117,8 @@ public class FeedClientBuilder {
* Adds HTTP request header to all client requests. Value {@link Supplier} is invoked for each HTTP request,
* i.e. value can be dynamically updated during a feed.
*/
- public FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier) {
+ @Override
+ public FeedClientBuilderImpl addRequestHeader(String name, Supplier<String> valueSupplier) {
this.requestHeaders.put(requireNonNull(name), requireNonNull(valueSupplier));
return this;
}
@@ -129,7 +127,8 @@ public class FeedClientBuilder {
* Overrides default retry strategy.
* @see FeedClient.RetryStrategy
*/
- public FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy) {
+ @Override
+ public FeedClientBuilderImpl setRetryStrategy(FeedClient.RetryStrategy strategy) {
this.retryStrategy = requireNonNull(strategy);
return this;
}
@@ -138,31 +137,36 @@ public class FeedClientBuilder {
* Overrides default circuit breaker.
* @see FeedClient.CircuitBreaker
*/
- public FeedClientBuilder setCircuitBreaker(FeedClient.CircuitBreaker breaker) {
+ @Override
+ public FeedClientBuilderImpl setCircuitBreaker(FeedClient.CircuitBreaker breaker) {
this.circuitBreaker = requireNonNull(breaker);
return this;
}
/** Sets path to client SSL certificate/key PEM files */
- public FeedClientBuilder setCertificate(Path certificatePemFile, Path privateKeyPemFile) {
+ @Override
+ public FeedClientBuilderImpl setCertificate(Path certificatePemFile, Path privateKeyPemFile) {
this.certificateFile = certificatePemFile;
this.privateKeyFile = privateKeyPemFile;
return this;
}
/** Sets client SSL certificates/key */
- public FeedClientBuilder setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey) {
+ @Override
+ public FeedClientBuilderImpl setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey) {
this.certificate = certificate;
this.privateKey = privateKey;
return this;
}
/** Sets client SSL certificate/key */
- public FeedClientBuilder setCertificate(X509Certificate certificate, PrivateKey privateKey) {
+ @Override
+ public FeedClientBuilderImpl setCertificate(X509Certificate certificate, PrivateKey privateKey) {
return setCertificate(Collections.singletonList(certificate), privateKey);
}
- public FeedClientBuilder setDryrun(boolean enabled) {
+ @Override
+ public FeedClientBuilderImpl setDryrun(boolean enabled) {
this.dryrun = enabled;
return this;
}
@@ -171,18 +175,21 @@ public class FeedClientBuilder {
* Overrides JVM default SSL truststore
* @param caCertificatesFile Path to PEM encoded file containing trusted certificates
*/
- public FeedClientBuilder setCaCertificatesFile(Path caCertificatesFile) {
+ @Override
+ public FeedClientBuilderImpl setCaCertificatesFile(Path caCertificatesFile) {
this.caCertificatesFile = caCertificatesFile;
return this;
}
/** Overrides JVM default SSL truststore */
- public FeedClientBuilder setCaCertificates(Collection<X509Certificate> caCertificates) {
+ @Override
+ public FeedClientBuilderImpl setCaCertificates(Collection<X509Certificate> caCertificates) {
this.caCertificates = caCertificates;
return this;
}
/** Constructs instance of {@link ai.vespa.feed.client.FeedClient} from builder configuration */
+ @Override
public FeedClient build() {
try {
validateConfiguration();
@@ -209,6 +216,9 @@ public class FeedClientBuilder {
}
private void validateConfiguration() {
+ if (endpoints == null) {
+ throw new IllegalArgumentException("At least one endpoint must be provided");
+ }
if (sslContext != null && (
certificateFile != null || caCertificatesFile != null || privateKeyFile != null ||
certificate != null || caCertificates != null || privateKey != null)) {
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreaker.java
index cb5e35c79a5..b223fce7cab 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreaker.java
@@ -1,5 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.HttpResponse;
import java.time.Duration;
import java.util.concurrent.atomic.AtomicBoolean;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
index eb818ba1d48..3fd44596d63 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
@@ -1,6 +1,15 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
-
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedException;
+import ai.vespa.feed.client.HttpResponse;
+import ai.vespa.feed.client.OperationParameters;
+import ai.vespa.feed.client.OperationStats;
+import ai.vespa.feed.client.Result;
+import ai.vespa.feed.client.ResultException;
+import ai.vespa.feed.client.ResultParseException;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
@@ -33,11 +42,11 @@ class HttpFeedClient implements FeedClient {
private final RequestStrategy requestStrategy;
private final AtomicBoolean closed = new AtomicBoolean();
- HttpFeedClient(FeedClientBuilder builder) throws IOException {
+ HttpFeedClient(FeedClientBuilderImpl builder) throws IOException {
this(builder, new HttpRequestStrategy(builder));
}
- HttpFeedClient(FeedClientBuilder builder, RequestStrategy requestStrategy) {
+ HttpFeedClient(FeedClientBuilderImpl builder, RequestStrategy requestStrategy) {
this.requestHeaders = new HashMap<>(builder.requestHeaders);
this.requestStrategy = requestStrategy;
}
@@ -173,7 +182,7 @@ class HttpFeedClient implements FeedClient {
if (outcome == Outcome.vespaFailure)
throw new ResultException(documentId, message, trace);
- return new Result(toResultType(outcome), documentId, message, trace);
+ return new ResultImpl(toResultType(outcome), documentId, message, trace);
}
static String getPath(DocumentId documentId) {
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequest.java
index 48defd71ea8..08b8ca08c61 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequest.java
@@ -1,5 +1,5 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
import java.util.Map;
import java.util.function.Supplier;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java
index cf65a874f3b..6fec0029bc3 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java
@@ -1,8 +1,13 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
import ai.vespa.feed.client.FeedClient.CircuitBreaker;
import ai.vespa.feed.client.FeedClient.RetryStrategy;
+import ai.vespa.feed.client.FeedException;
+import ai.vespa.feed.client.HttpResponse ;
+import ai.vespa.feed.client.OperationStats;
import java.io.IOException;
import java.nio.channels.CancelledKeyException;
@@ -62,11 +67,11 @@ class HttpRequestStrategy implements RequestStrategy {
return thread;
});
- HttpRequestStrategy(FeedClientBuilder builder) throws IOException {
+ HttpRequestStrategy(FeedClientBuilderImpl builder) throws IOException {
this(builder, builder.dryrun ? new DryrunCluster() : new ApacheCluster(builder));
}
- HttpRequestStrategy(FeedClientBuilder builder, Cluster cluster) {
+ HttpRequestStrategy(FeedClientBuilderImpl builder, Cluster cluster) {
this.cluster = builder.benchmark ? new BenchmarkingCluster(cluster) : cluster;
this.strategy = builder.retryStrategy;
this.breaker = builder.circuitBreaker;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/RequestStrategy.java
index 9a97f7daa66..e3b6b594593 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/RequestStrategy.java
@@ -1,7 +1,10 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.DocumentId;
import ai.vespa.feed.client.FeedClient.CircuitBreaker.State;
+import ai.vespa.feed.client.HttpResponse;
+import ai.vespa.feed.client.OperationStats;
import java.util.concurrent.CompletableFuture;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ResultImpl.java
index 5ff3fd0a219..dabf76cba34 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ResultImpl.java
@@ -1,5 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.Result;
import java.util.Optional;
@@ -9,29 +12,24 @@ import java.util.Optional;
* @author bjorncs
* @author jonmv
*/
-public class Result {
+public class ResultImpl implements Result {
private final Type type;
private final DocumentId documentId;
private final String resultMessage;
private final String traceMessage;
- Result(Type type, DocumentId documentId, String resultMessage, String traceMessage) {
+ ResultImpl(Type type, DocumentId documentId, String resultMessage, String traceMessage) {
this.type = type;
this.documentId = documentId;
this.resultMessage = resultMessage;
this.traceMessage = traceMessage;
}
- public enum Type {
- success,
- conditionNotMet
- }
-
- public Type type() { return type; }
- public DocumentId documentId() { return documentId; }
- public Optional<String> resultMessage() { return Optional.ofNullable(resultMessage); }
- public Optional<String> traceMessage() { return Optional.ofNullable(traceMessage); }
+ @Override public Type type() { return type; }
+ @Override public DocumentId documentId() { return documentId; }
+ @Override public Optional<String> resultMessage() { return Optional.ofNullable(resultMessage); }
+ @Override public Optional<String> traceMessage() { return Optional.ofNullable(traceMessage); }
@Override
public String toString() {
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/SslContextBuilder.java
index f5e13eccd56..2ca4577abe6 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/SslContextBuilder.java
@@ -1,5 +1,5 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
import org.bouncycastle.asn1.ASN1ObjectIdentifier;
import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java
index 5137a18d923..1f9cf8e5155 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/StaticThrottler.java
@@ -1,5 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.HttpResponse;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;
@@ -18,7 +20,7 @@ public class StaticThrottler implements Throttler {
protected final long minInflight;
private final AtomicLong targetX10;
- public StaticThrottler(FeedClientBuilder builder) {
+ public StaticThrottler(FeedClientBuilderImpl builder) {
minInflight = 16L * builder.connectionsPerEndpoint * builder.endpoints.size();
maxInflight = 256 * minInflight; // 4096 max streams per connection on the server side.
targetX10 = new AtomicLong(10 * maxInflight); // 10x the actual value to allow for smaller updates.
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Throttler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Throttler.java
index f2453c27879..700a6f6f805 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Throttler.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/Throttler.java
@@ -1,5 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+
+import ai.vespa.feed.client.HttpResponse;
import java.util.concurrent.CompletableFuture;
diff --git a/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder b/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder
new file mode 100644
index 00000000000..b6e28b1806c
--- /dev/null
+++ b/vespa-feed-client/src/main/resources/META-INF.services/ai.vespa.feed.client.FeedClientBuilder
@@ -0,0 +1,2 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+ai.vespa.feed.client.impl.FeedClientBuilderImpl \ No newline at end of file
diff --git a/vespa-feed-client/src/main/sh/vespa-version-generator.sh b/vespa-feed-client/src/main/sh/vespa-version-generator.sh
index 5aafb3e2bf7..44fb7d167db 100755
--- a/vespa-feed-client/src/main/sh/vespa-version-generator.sh
+++ b/vespa-feed-client/src/main/sh/vespa-version-generator.sh
@@ -16,7 +16,7 @@ mkdir -p $destinationDir
versionNumber=$(cat $source | grep V_TAG_COMPONENT | awk '{print $2}' )
cat > $destination <<- END
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
class Vespa {
static final String VERSION = "$versionNumber";
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/DocumentIdTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DocumentIdTest.java
index df790056309..61526b80fe7 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/DocumentIdTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/DocumentIdTest.java
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.DocumentId;
+import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -14,8 +16,8 @@ class DocumentIdTest {
@Test
void testParsing() {
- assertEquals("id:ns:type::user",
- DocumentId.of("id:ns:type::user").toString());
+ Assertions.assertEquals("id:ns:type::user",
+ DocumentId.of("id:ns:type::user").toString());
assertEquals("id:ns:type:n=123:user",
DocumentId.of("id:ns:type:n=123:user").toString());
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreakerTest.java
index 8eaffc3e9be..b7dac5ce52e 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/GracePeriodCircuitBreakerTest.java
@@ -1,5 +1,5 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
import ai.vespa.feed.client.FeedClient.CircuitBreaker;
import org.junit.jupiter.api.Test;
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java
index d92958a5838..5353ab92fb6 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpFeedClientTest.java
@@ -1,10 +1,19 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.HttpResponse;
+import ai.vespa.feed.client.OperationParameters;
+import ai.vespa.feed.client.OperationStats;
+import ai.vespa.feed.client.Result;
+import ai.vespa.feed.client.ResultException;
import org.junit.jupiter.api.Test;
import java.net.URI;
import java.time.Duration;
+import java.util.Collections;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
@@ -33,7 +42,7 @@ class HttpFeedClientTest {
@Override public void await() { throw new UnsupportedOperationException(); }
@Override public CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request) { return dispatch.get().apply(documentId, request); }
}
- FeedClient client = new HttpFeedClient(FeedClientBuilder.create(URI.create("https://dummy:123")), new MockRequestStrategy());
+ FeedClient client = new HttpFeedClient(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy:123"))), new MockRequestStrategy());
// Update is a PUT, and 200 OK is a success.
dispatch.set((documentId, request) -> {
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java
index 0f840201ca8..d293abf4f3e 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java
@@ -1,19 +1,23 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
import ai.vespa.feed.client.FeedClient.CircuitBreaker;
-import org.apache.hc.core5.http.ContentType;
+import ai.vespa.feed.client.FeedException;
+import ai.vespa.feed.client.HttpResponse;
+import ai.vespa.feed.client.OperationStats;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.net.URI;
import java.time.Duration;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Phaser;
import java.util.concurrent.ScheduledExecutorService;
@@ -42,7 +46,7 @@ class HttpRequestStrategyTest {
ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
Cluster cluster = new BenchmarkingCluster((__, vessel) -> executor.schedule(() -> vessel.complete(response), (int) (Math.random() * 2 * 10), TimeUnit.MILLISECONDS));
- HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123"))
+ HttpRequestStrategy strategy = new HttpRequestStrategy( new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123")))
.setConnectionsPerEndpoint(1 << 10)
.setMaxStreamPerConnection(1 << 12),
cluster);
@@ -82,7 +86,7 @@ class HttpRequestStrategyTest {
MockCluster cluster = new MockCluster();
AtomicLong now = new AtomicLong(0);
CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(10));
- HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123"))
+ HttpRequestStrategy strategy = new HttpRequestStrategy(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123")))
.setRetryStrategy(new FeedClient.RetryStrategy() {
@Override public boolean retry(FeedClient.OperationType type) { return type == FeedClient.OperationType.PUT; }
@Override public int retries() { return 1; }
@@ -189,7 +193,7 @@ class HttpRequestStrategyTest {
MockCluster cluster = new MockCluster();
AtomicLong now = new AtomicLong(0);
CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(10));
- HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123"))
+ HttpRequestStrategy strategy = new HttpRequestStrategy(new FeedClientBuilderImpl(Collections.singletonList(URI.create("https://dummy.com:123")))
.setRetryStrategy(new FeedClient.RetryStrategy() {
@Override public int retries() { return 1; }
})
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/SslContextBuilderTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/SslContextBuilderTest.java
index a74f63f5cd2..f7c1b4d2b03 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/SslContextBuilderTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/SslContextBuilderTest.java
@@ -1,5 +1,5 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
+package ai.vespa.feed.client.impl;
import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
@@ -8,6 +8,7 @@ import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.OperatorCreationException;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
import org.bouncycastle.util.io.pem.PemObject;
+import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -52,7 +53,7 @@ class SslContextBuilderTest {
@Test
void successfully_constructs_sslcontext_from_pem_files() {
- SSLContext sslContext = assertDoesNotThrow(() ->
+ SSLContext sslContext = Assertions.assertDoesNotThrow(() ->
new SslContextBuilder()
.withCaCertificates(certificateFile)
.withCertificateAndKey(certificateFile, privateKeyFile)
@@ -62,13 +63,13 @@ class SslContextBuilderTest {
@Test
void successfully_constructs_sslcontext_when_no_builder_parameter_given() {
- SSLContext sslContext = assertDoesNotThrow(() -> new SslContextBuilder().build());
+ SSLContext sslContext = Assertions.assertDoesNotThrow(() -> new SslContextBuilder().build());
assertEquals("TLS", sslContext.getProtocol());
}
@Test
void successfully_constructs_sslcontext_with_only_certificate_file() {
- SSLContext sslContext = assertDoesNotThrow(() ->
+ SSLContext sslContext = Assertions.assertDoesNotThrow(() ->
new SslContextBuilder()
.withCertificateAndKey(certificateFile, privateKeyFile)
.build());
@@ -77,7 +78,7 @@ class SslContextBuilderTest {
@Test
void successfully_constructs_sslcontext_with_only_ca_certificate_file() {
- SSLContext sslContext = assertDoesNotThrow(() ->
+ SSLContext sslContext = Assertions.assertDoesNotThrow(() ->
new SslContextBuilder()
.withCaCertificates(certificateFile)
.build());
diff --git a/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java b/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java
deleted file mode 100644
index 74baf9f1065..00000000000
--- a/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
-
-import ai.vespa.feed.client.Result.Type;
-
-/**
- * Workaround for package-private {@link Result} constructor.
- *
- * @author bjorncs
- */
-public class DryrunResult {
-
- private DryrunResult() {}
-
- public static Result create(Type type, DocumentId documentId, String resultMessage, String traceMessage) {
- return new Result(type, documentId, resultMessage, traceMessage);
- }
-}
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java b/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java
index 2dab634d8be..711ce403d50 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/CompletableFutures.java
@@ -1,8 +1,14 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.concurrent;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.yahoo.yolean.UncheckedInterruptedException;
+
import java.util.List;
+import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
/**
* Helper for {@link java.util.concurrent.CompletableFuture} / {@link java.util.concurrent.CompletionStage}.
@@ -64,4 +70,47 @@ public class CompletableFutures {
return combiner.combined;
}
+ /**
+ * Helper for migrating from {@link ListenableFuture} to {@link CompletableFuture} in Vespa public apis
+ * @deprecated to be removed in Vespa 8
+ */
+ @SuppressWarnings("unchecked")
+ @Deprecated(forRemoval = true, since = "7")
+ public static <V> ListenableFuture<V> toGuavaListenableFuture(CompletableFuture<V> future) {
+ if (future instanceof ListenableFuture) {
+ return ((ListenableFuture<V>) future);
+ }
+ SettableFuture<V> guavaFuture = SettableFuture.create();
+ future.whenComplete((result, error) -> {
+ if (result != null) guavaFuture.set(result);
+ else if (error instanceof CancellationException) guavaFuture.setException(error);
+ else guavaFuture.cancel(true);
+ });
+ return guavaFuture;
+ }
+
+ /**
+ * Helper for migrating from {@link ListenableFuture} to {@link CompletableFuture} in Vespa public apis
+ * @deprecated to be removed in Vespa 8
+ */
+ @Deprecated(forRemoval = true, since = "7")
+ public static <V> CompletableFuture<V> toCompletableFuture(ListenableFuture<V> guavaFuture) {
+ CompletableFuture<V> future = new CompletableFuture<>();
+ guavaFuture.addListener(
+ () -> {
+ if (guavaFuture.isCancelled()) future.cancel(true);
+ try {
+ V value = guavaFuture.get();
+ future.complete(value);
+ } catch (InterruptedException e) {
+ // Should not happens since listener is invoked after future is complete
+ throw new UncheckedInterruptedException(e);
+ } catch (ExecutionException e) {
+ future.completeExceptionally(e.getCause());
+ }
+ },
+ Runnable::run);
+ return future;
+ }
+
}
diff --git a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
index e7d923d0e87..974aafb392a 100644
--- a/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
+++ b/vespalib/src/tests/btree/btree_store/btree_store_test.cpp
@@ -5,9 +5,12 @@
#include <vespa/vespalib/btree/btreeroot.hpp>
#include <vespa/vespalib/btree/btreestore.hpp>
#include <vespa/vespalib/datastore/buffer_type.hpp>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/gtest/gtest.h>
using vespalib::GenerationHandler;
+using vespalib::datastore::CompactionSpec;
+using vespalib::datastore::CompactionStrategy;
using vespalib::datastore::EntryRef;
namespace vespalib::btree {
@@ -73,61 +76,115 @@ BTreeStoreTest::~BTreeStoreTest()
inc_generation();
}
+namespace {
+
+class ChangeWriter {
+ std::vector<EntryRef*> _old_refs;
+public:
+ ChangeWriter(uint32_t capacity);
+ ~ChangeWriter();
+ void write(const std::vector<EntryRef>& refs);
+ void emplace_back(EntryRef& ref) { _old_refs.emplace_back(&ref); }
+};
+
+ChangeWriter::ChangeWriter(uint32_t capacity)
+ : _old_refs()
+{
+ _old_refs.reserve(capacity);
+}
+
+ChangeWriter::~ChangeWriter() = default;
+
+void
+ChangeWriter::write(const std::vector<EntryRef> &refs)
+{
+ assert(refs.size() == _old_refs.size());
+ auto old_ref_itr = _old_refs.begin();
+ for (auto ref : refs) {
+ **old_ref_itr = ref;
+ ++old_ref_itr;
+ }
+ assert(old_ref_itr == _old_refs.end());
+ _old_refs.clear();
+}
+
+}
+
void
BTreeStoreTest::test_compact_sequence(uint32_t sequence_length)
{
auto &store = _store;
+ uint32_t entry_ref_offset_bits = TreeStore::RefType::offset_bits;
EntryRef ref1 = add_sequence(4, 4 + sequence_length);
EntryRef ref2 = add_sequence(5, 5 + sequence_length);
- EntryRef old_ref1 = ref1;
- EntryRef old_ref2 = ref2;
std::vector<EntryRef> refs;
+ refs.reserve(2);
+ refs.emplace_back(ref1);
+ refs.emplace_back(ref2);
+ std::vector<EntryRef> temp_refs;
for (int i = 0; i < 1000; ++i) {
- refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length));
+ temp_refs.emplace_back(add_sequence(i + 6, i + 6 + sequence_length));
}
- for (auto& ref : refs) {
+ for (auto& ref : temp_refs) {
store.clear(ref);
}
inc_generation();
+ ChangeWriter change_writer(refs.size());
+ std::vector<EntryRef> move_refs;
+ move_refs.reserve(refs.size());
auto usage_before = store.getMemoryUsage();
for (uint32_t pass = 0; pass < 15; ++pass) {
- auto to_hold = store.start_compact_worst_buffers();
- ref1 = store.move(ref1);
- ref2 = store.move(ref2);
+ CompactionSpec compaction_spec(true, false);
+ CompactionStrategy compaction_strategy;
+ auto to_hold = store.start_compact_worst_buffers(compaction_spec, compaction_strategy);
+ std::vector<bool> filter(TreeStore::RefType::numBuffers());
+ for (auto buffer_id : to_hold) {
+ filter[buffer_id] = true;
+ }
+ for (auto& ref : refs) {
+ if (ref.valid() && filter[ref.buffer_id(entry_ref_offset_bits)]) {
+ move_refs.emplace_back(ref);
+ change_writer.emplace_back(ref);
+ }
+ }
+ store.move(move_refs);
+ change_writer.write(move_refs);
+ move_refs.clear();
store.finishCompact(to_hold);
inc_generation();
}
- EXPECT_NE(old_ref1, ref1);
- EXPECT_NE(old_ref2, ref2);
- EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(ref1));
- EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(ref2));
+ EXPECT_NE(ref1, refs[0]);
+ EXPECT_NE(ref2, refs[1]);
+ EXPECT_EQ(make_exp_sequence(4, 4 + sequence_length), get_sequence(refs[0]));
+ EXPECT_EQ(make_exp_sequence(5, 5 + sequence_length), get_sequence(refs[1]));
auto usage_after = store.getMemoryUsage();
EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
- store.clear(ref1);
- store.clear(ref2);
+ store.clear(refs[0]);
+ store.clear(refs[1]);
}
TEST_F(BTreeStoreTest, require_that_nodes_for_multiple_btrees_are_compacted)
{
auto &store = this->_store;
- EntryRef ref1 = add_sequence(4, 40);
- EntryRef ref2 = add_sequence(100, 130);
+ std::vector<EntryRef> refs;
+ refs.emplace_back(add_sequence(4, 40));
+ refs.emplace_back(add_sequence(100, 130));
store.clear(add_sequence(1000, 20000));
inc_generation();
auto usage_before = store.getMemoryUsage();
for (uint32_t pass = 0; pass < 15; ++pass) {
- auto to_hold = store.start_compact_worst_btree_nodes();
- store.move_btree_nodes(ref1);
- store.move_btree_nodes(ref2);
+ CompactionStrategy compaction_strategy;
+ auto to_hold = store.start_compact_worst_btree_nodes(compaction_strategy);
+ store.move_btree_nodes(refs);
store.finish_compact_worst_btree_nodes(to_hold);
inc_generation();
}
- EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(ref1));
- EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(ref2));
+ EXPECT_EQ(make_exp_sequence(4, 40), get_sequence(refs[0]));
+ EXPECT_EQ(make_exp_sequence(100, 130), get_sequence(refs[1]));
auto usage_after = store.getMemoryUsage();
EXPECT_GT(usage_before.deadBytes(), usage_after.deadBytes());
- store.clear(ref1);
- store.clear(ref2);
+ store.clear(refs[0]);
+ store.clear(refs[1]);
}
TEST_F(BTreeStoreTest, require_that_short_arrays_are_compacted)
diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp
index 4af0b9672f2..bd4f4f8ee08 100644
--- a/vespalib/src/tests/btree/btree_test.cpp
+++ b/vespalib/src/tests/btree/btree_test.cpp
@@ -17,6 +17,7 @@
#include <vespa/vespalib/btree/btree.hpp>
#include <vespa/vespalib/btree/btreestore.hpp>
#include <vespa/vespalib/datastore/buffer_type.hpp>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/test/btree/btree_printer.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -24,6 +25,7 @@
LOG_SETUP("btree_test");
using vespalib::GenerationHandler;
+using vespalib::datastore::CompactionStrategy;
using vespalib::datastore::EntryRef;
namespace vespalib::btree {
@@ -1599,8 +1601,9 @@ TEST_F(BTreeTest, require_that_compaction_works)
auto memory_usage_before = t.getAllocator().getMemoryUsage();
t.foreach_key([&before_list](int key) { before_list.emplace_back(key); });
make_iterators(t, before_list, before_iterators);
+ CompactionStrategy compaction_strategy;
for (int i = 0; i < 15; ++i) {
- t.compact_worst();
+ t.compact_worst(compaction_strategy);
}
inc_generation(g, t);
auto memory_usage_after = t.getAllocator().getMemoryUsage();
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index dbd6d41f5e6..c58e357a9a1 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -3,6 +3,8 @@
#include <vespa/vespalib/test/datastore/buffer_stats.h>
#include <vespa/vespalib/test/datastore/memstats.h>
#include <vespa/vespalib/datastore/array_store.hpp>
+#include <vespa/vespalib/datastore/compaction_spec.h>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/test/insertion_operators.h>
@@ -124,7 +126,9 @@ struct Fixture
}
template <typename TestedRefType>
void compactWorst(bool compactMemory, bool compactAddressSpace) {
- ICompactionContext::UP ctx = store.compactWorst(compactMemory, compactAddressSpace);
+ CompactionSpec compaction_spec(compactMemory, compactAddressSpace);
+ CompactionStrategy compaction_strategy;
+ ICompactionContext::UP ctx = store.compactWorst(compaction_spec, compaction_strategy);
std::vector<TestedRefType> refs;
for (auto itr = refStore.begin(); itr != refStore.end(); ++itr) {
refs.emplace_back(itr->first);
diff --git a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
index 6e984f286c1..796e19a97d1 100644
--- a/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
+++ b/vespalib/src/tests/datastore/sharded_hash_map/sharded_hash_map_test.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/datastore/sharded_hash_map.h>
+#include <vespa/vespalib/datastore/entry_ref_filter.h>
#include <vespa/vespalib/datastore/i_compactable.h>
#include <vespa/vespalib/datastore/unique_store_allocator.h>
#include <vespa/vespalib/datastore/unique_store_comparator.h>
@@ -12,12 +13,14 @@
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/datastore/unique_store_allocator.hpp>
+#include <iostream>
#include <thread>
#include <vespa/log/log.h>
LOG_SETUP("vespalib_datastore_shared_hash_test");
using vespalib::datastore::EntryRef;
+using vespalib::datastore::EntryRefFilter;
using vespalib::datastore::ICompactable;
using RefT = vespalib::datastore::EntryRefT<22>;
using MyAllocator = vespalib::datastore::UniqueStoreAllocator<uint32_t, RefT>;
@@ -27,6 +30,26 @@ using MyHashMap = vespalib::datastore::ShardedHashMap;
using GenerationHandler = vespalib::GenerationHandler;
using vespalib::makeLambdaTask;
+constexpr uint32_t small_population = 50;
+/*
+ * large_population should trigger multiple callbacks from normalize_values
+ * and foreach_value
+ */
+constexpr uint32_t large_population = 1200;
+
+namespace vespalib::datastore {
+
+/*
+ * Print EntryRef as RefT which is used by test_normalize_values and
+ * test_foreach_value to differentiate between buffers
+ */
+void PrintTo(const EntryRef &ref, std::ostream* os) {
+ RefT iref(ref);
+ *os << "RefT(" << iref.offset() << "," << iref.bufferId() << ")";
+}
+
+}
+
namespace {
void consider_yield(uint32_t i)
@@ -58,6 +81,19 @@ public:
}
};
+uint32_t select_buffer(uint32_t i) {
+ if ((i % 2) == 0) {
+ return 0;
+ }
+ if ((i % 3) == 0) {
+ return 1;
+ }
+ if ((i % 5) == 0) {
+ return 2;
+ }
+ return 3;
+}
+
}
struct DataStoreShardedHashTest : public ::testing::Test
@@ -86,7 +122,11 @@ struct DataStoreShardedHashTest : public ::testing::Test
void read_work(uint32_t cnt);
void read_work();
void write_work(uint32_t cnt);
- void populate_sample_data();
+ void populate_sample_data(uint32_t cnt);
+ void populate_sample_values(uint32_t cnt);
+ void clear_sample_values(uint32_t cnt);
+ void test_normalize_values(bool use_filter, bool one_filter);
+ void test_foreach_value(bool one_filter);
};
@@ -213,13 +253,94 @@ DataStoreShardedHashTest::write_work(uint32_t cnt)
}
void
-DataStoreShardedHashTest::populate_sample_data()
+DataStoreShardedHashTest::populate_sample_data(uint32_t cnt)
{
- for (uint32_t i = 0; i < 50; ++i) {
+ for (uint32_t i = 0; i < cnt; ++i) {
insert(i);
}
}
+void
+DataStoreShardedHashTest::populate_sample_values(uint32_t cnt)
+{
+ for (uint32_t i = 0; i < cnt; ++i) {
+ MyCompare comp(_store, i);
+ auto result = _hash_map.find(comp, EntryRef());
+ ASSERT_NE(result, nullptr);
+ EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value());
+ result->second.store_relaxed(RefT(i + 200, select_buffer(i)));
+ }
+}
+
+void
+DataStoreShardedHashTest::clear_sample_values(uint32_t cnt)
+{
+ for (uint32_t i = 0; i < cnt; ++i) {
+ MyCompare comp(_store, i);
+ auto result = _hash_map.find(comp, EntryRef());
+ ASSERT_NE(result, nullptr);
+ EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value());
+ result->second.store_relaxed(EntryRef());
+ }
+}
+
+namespace {
+
+template <typename RefT>
+EntryRefFilter
+make_entry_ref_filter(bool one_filter)
+{
+ if (one_filter) {
+ EntryRefFilter filter(RefT::numBuffers(), RefT::offset_bits);
+ filter.add_buffer(3);
+ return filter;
+ }
+ return EntryRefFilter::create_all_filter(RefT::numBuffers(), RefT::offset_bits);
+}
+
+}
+
+void
+DataStoreShardedHashTest::test_normalize_values(bool use_filter, bool one_filter)
+{
+ populate_sample_data(large_population);
+ populate_sample_values(large_population);
+ if (use_filter) {
+ auto filter = make_entry_ref_filter<RefT>(one_filter);
+ EXPECT_TRUE(_hash_map.normalize_values([](std::vector<EntryRef> &refs) noexcept { for (auto &ref : refs) { RefT iref(ref); ref = RefT(iref.offset() + 300, iref.bufferId()); } }, filter));
+ } else {
+ EXPECT_TRUE(_hash_map.normalize_values([](EntryRef ref) noexcept { RefT iref(ref); return RefT(iref.offset() + 300, iref.bufferId()); }));
+ }
+ for (uint32_t i = 0; i < large_population; ++i) {
+ MyCompare comp(_store, i);
+ auto result = _hash_map.find(comp, EntryRef());
+ ASSERT_NE(result, nullptr);
+ EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value());
+ ASSERT_EQ(select_buffer(i), RefT(result->second.load_relaxed()).bufferId());
+ if (use_filter && one_filter && select_buffer(i) != 3) {
+ ASSERT_EQ(i + 200, RefT(result->second.load_relaxed()).offset());
+ } else {
+ ASSERT_EQ(i + 500, RefT(result->second.load_relaxed()).offset());
+ }
+ result->second.store_relaxed(EntryRef());
+ }
+}
+
+void
+DataStoreShardedHashTest::test_foreach_value(bool one_filter)
+{
+ populate_sample_data(large_population);
+ populate_sample_values(large_population);
+
+ auto filter = make_entry_ref_filter<RefT>(one_filter);
+ std::vector<EntryRef> exp_refs;
+ EXPECT_FALSE(_hash_map.normalize_values([&exp_refs](std::vector<EntryRef>& refs) { exp_refs.insert(exp_refs.end(), refs.begin(), refs.end()); }, filter));
+ std::vector<EntryRef> act_refs;
+ _hash_map.foreach_value([&act_refs](const std::vector<EntryRef> &refs) { act_refs.insert(act_refs.end(), refs.begin(), refs.end()); }, filter);
+ EXPECT_EQ(exp_refs, act_refs);
+ clear_sample_values(large_population);
+}
+
TEST_F(DataStoreShardedHashTest, single_threaded_reader_without_updates)
{
_report_work = true;
@@ -254,7 +375,7 @@ TEST_F(DataStoreShardedHashTest, memory_usage_is_reported)
EXPECT_EQ(0, initial_usage.deadBytes());
EXPECT_EQ(0, initial_usage.allocatedBytesOnHold());
auto guard = _generationHandler.takeGuard();
- for (uint32_t i = 0; i < 50; ++i) {
+ for (uint32_t i = 0; i < small_population; ++i) {
insert(i);
}
auto usage = _hash_map.get_memory_usage();
@@ -264,30 +385,31 @@ TEST_F(DataStoreShardedHashTest, memory_usage_is_reported)
TEST_F(DataStoreShardedHashTest, foreach_key_works)
{
- populate_sample_data();
+ populate_sample_data(small_population);
std::vector<uint32_t> keys;
_hash_map.foreach_key([this, &keys](EntryRef ref) { keys.emplace_back(_allocator.get_wrapped(ref).value()); });
std::sort(keys.begin(), keys.end());
- EXPECT_EQ(50, keys.size());
- for (uint32_t i = 0; i < 50; ++i) {
+ EXPECT_EQ(small_population, keys.size());
+ for (uint32_t i = 0; i < small_population; ++i) {
EXPECT_EQ(i, keys[i]);
}
}
TEST_F(DataStoreShardedHashTest, move_keys_works)
{
- populate_sample_data();
+ populate_sample_data(small_population);
std::vector<EntryRef> refs;
_hash_map.foreach_key([&refs](EntryRef ref) { refs.emplace_back(ref); });
std::vector<EntryRef> new_refs;
MyCompactable my_compactable(_allocator, new_refs);
- _hash_map.move_keys(my_compactable, std::vector<bool>(RefT::numBuffers(), true), RefT::offset_bits);
+ auto filter = make_entry_ref_filter<RefT>(false);
+ _hash_map.move_keys(my_compactable, filter);
std::vector<EntryRef> verify_new_refs;
_hash_map.foreach_key([&verify_new_refs](EntryRef ref) { verify_new_refs.emplace_back(ref); });
- EXPECT_EQ(50u, refs.size());
+ EXPECT_EQ(small_population, refs.size());
EXPECT_NE(refs, new_refs);
EXPECT_EQ(new_refs, verify_new_refs);
- for (uint32_t i = 0; i < 50; ++i) {
+ for (uint32_t i = 0; i < small_population; ++i) {
EXPECT_NE(refs[i], new_refs[i]);
auto value = _allocator.get_wrapped(refs[i]).value();
auto new_value = _allocator.get_wrapped(refs[i]).value();
@@ -297,29 +419,33 @@ TEST_F(DataStoreShardedHashTest, move_keys_works)
TEST_F(DataStoreShardedHashTest, normalize_values_works)
{
- populate_sample_data();
- for (uint32_t i = 0; i < 50; ++i) {
- MyCompare comp(_store, i);
- auto result = _hash_map.find(comp, EntryRef());
- ASSERT_NE(result, nullptr);
- EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value());
- result->second.store_relaxed(EntryRef(i + 200));
- }
- _hash_map.normalize_values([](EntryRef ref) noexcept { return EntryRef(ref.ref() + 300); });
- for (uint32_t i = 0; i < 50; ++i) {
- MyCompare comp(_store, i);
- auto result = _hash_map.find(comp, EntryRef());
- ASSERT_NE(result, nullptr);
- EXPECT_EQ(i, _allocator.get_wrapped(result->first.load_relaxed()).value());
- ASSERT_EQ(i + 500, result->second.load_relaxed().ref());
- result->second.store_relaxed(EntryRef());
- }
+ test_normalize_values(false, false);
+}
+
+TEST_F(DataStoreShardedHashTest, normalize_values_all_filter_works)
+{
+ test_normalize_values(true, false);
+}
+
+TEST_F(DataStoreShardedHashTest, normalize_values_one_filter_works)
+{
+ test_normalize_values(true, true);
+}
+
+TEST_F(DataStoreShardedHashTest, foreach_value_all_filter_works)
+{
+ test_foreach_value(false);
+}
+
+TEST_F(DataStoreShardedHashTest, foreach_value_one_filter_works)
+{
+ test_foreach_value(true);
}
TEST_F(DataStoreShardedHashTest, compact_worst_shard_works)
{
- populate_sample_data();
- for (uint32_t i = 10; i < 50; ++i) {
+ populate_sample_data(small_population);
+ for (uint32_t i = 10; i < small_population; ++i) {
remove(i);
}
commit();
diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
index ccb18f13871..917c91f2dff 100644
--- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
@@ -1,4 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/datastore/compaction_spec.h>
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/datastore/unique_store.hpp>
#include <vespa/vespalib/datastore/unique_store_remapper.h>
#include <vespa/vespalib/datastore/unique_store_string_allocator.hpp>
@@ -111,7 +113,9 @@ struct TestBase : public ::testing::Test {
store.trimHoldLists(generation);
}
void compactWorst() {
- auto remapper = store.compact_worst(true, true);
+ CompactionSpec compaction_spec(true, true);
+ CompactionStrategy compaction_strategy;
+ auto remapper = store.compact_worst(compaction_spec, compaction_strategy);
std::vector<EntryRef> refs;
for (const auto &elem : refStore) {
refs.push_back(elem.first);
diff --git a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
index 8d82c10d340..4a8b7eafe6a 100644
--- a/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store_dictionary/unique_store_dictionary_test.cpp
@@ -1,5 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/datastore/compaction_strategy.h>
#include <vespa/vespalib/datastore/unique_store.hpp>
#include <vespa/vespalib/datastore/unique_store_dictionary.hpp>
#include <vespa/vespalib/datastore/sharded_hash_map.h>
@@ -137,8 +138,9 @@ TYPED_TEST(UniqueStoreDictionaryTest, compaction_works)
this->inc_generation();
auto btree_memory_usage_before = this->dict.get_btree_memory_usage();
auto hash_memory_usage_before = this->dict.get_hash_memory_usage();
+ CompactionStrategy compaction_strategy;
for (uint32_t i = 0; i < 15; ++i) {
- this->dict.compact_worst(true, true);
+ this->dict.compact_worst(true, true, compaction_strategy);
}
this->inc_generation();
auto btree_memory_usage_after = this->dict.get_btree_memory_usage();
diff --git a/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp b/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp
index e129ef2a389..812d06868fd 100644
--- a/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp
+++ b/vespalib/src/tests/net/tls/policy_checking_certificate_verifier/policy_checking_certificate_verifier_test.cpp
@@ -7,57 +7,93 @@
using namespace vespalib;
using namespace vespalib::net::tls;
-bool glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) {
- auto glob = CredentialMatchPattern::create_from_glob(pattern);
+bool dns_glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) {
+ auto glob = CredentialMatchPattern::create_from_dns_glob(pattern);
return glob->matches(string_to_check);
}
+bool uri_glob_matches(vespalib::stringref pattern, vespalib::stringref string_to_check) {
+ auto glob = CredentialMatchPattern::create_from_uri_glob(pattern);
+ return glob->matches(string_to_check);
+}
+
+void verify_all_glob_types_match(vespalib::stringref pattern, vespalib::stringref string_to_check) {
+ EXPECT_TRUE(dns_glob_matches(pattern, string_to_check));
+ EXPECT_TRUE(uri_glob_matches(pattern, string_to_check));
+}
+
+void verify_all_glob_types_mismatch(vespalib::stringref pattern, vespalib::stringref string_to_check) {
+ EXPECT_FALSE(dns_glob_matches(pattern, string_to_check));
+ EXPECT_FALSE(uri_glob_matches(pattern, string_to_check));
+}
+
TEST("glob without wildcards matches entire string") {
- EXPECT_TRUE(glob_matches("foo", "foo"));
- EXPECT_FALSE(glob_matches("foo", "fooo"));
- EXPECT_FALSE(glob_matches("foo", "ffoo"));
+ verify_all_glob_types_match("foo", "foo");
+ verify_all_glob_types_mismatch("foo", "fooo");
+ verify_all_glob_types_mismatch("foo", "ffoo");
}
TEST("wildcard glob can match prefix") {
- EXPECT_TRUE(glob_matches("foo*", "foo"));
- EXPECT_TRUE(glob_matches("foo*", "foobar"));
- EXPECT_FALSE(glob_matches("foo*", "ffoo"));
+ verify_all_glob_types_match("foo*", "foo");
+ verify_all_glob_types_match("foo*", "foobar");
+ verify_all_glob_types_mismatch("foo*", "ffoo");
}
TEST("wildcard glob can match suffix") {
- EXPECT_TRUE(glob_matches("*foo", "foo"));
- EXPECT_TRUE(glob_matches("*foo", "ffoo"));
- EXPECT_FALSE(glob_matches("*foo", "fooo"));
+ verify_all_glob_types_match("*foo", "foo");
+ verify_all_glob_types_match("*foo", "ffoo");
+ verify_all_glob_types_mismatch("*foo", "fooo");
}
TEST("wildcard glob can match substring") {
- EXPECT_TRUE(glob_matches("f*o", "fo"));
- EXPECT_TRUE(glob_matches("f*o", "foo"));
- EXPECT_TRUE(glob_matches("f*o", "ffoo"));
- EXPECT_FALSE(glob_matches("f*o", "boo"));
+ verify_all_glob_types_match("f*o", "fo");
+ verify_all_glob_types_match("f*o", "foo");
+ verify_all_glob_types_match("f*o", "ffoo");
+ verify_all_glob_types_mismatch("f*o", "boo");
}
-TEST("wildcard glob does not cross multiple dot delimiter boundaries") {
- EXPECT_TRUE(glob_matches("*.bar.baz", "foo.bar.baz"));
- EXPECT_TRUE(glob_matches("*.bar.baz", ".bar.baz"));
- EXPECT_FALSE(glob_matches("*.bar.baz", "zoid.foo.bar.baz"));
- EXPECT_TRUE(glob_matches("foo.*.baz", "foo.bar.baz"));
- EXPECT_FALSE(glob_matches("foo.*.baz", "foo.bar.zoid.baz"));
+TEST("single char DNS glob matches single character") {
+ EXPECT_TRUE(dns_glob_matches("f?o", "foo"));
+ EXPECT_FALSE(dns_glob_matches("f?o", "fooo"));
+ EXPECT_FALSE(dns_glob_matches("f?o", "ffoo"));
}
-TEST("single char glob matches non dot characters") {
- EXPECT_TRUE(glob_matches("f?o", "foo"));
- EXPECT_FALSE(glob_matches("f?o", "fooo"));
- EXPECT_FALSE(glob_matches("f?o", "ffoo"));
- EXPECT_FALSE(glob_matches("f?o", "f.o"));
+// Due to URIs being able to contain '?' characters as a query separator, don't use it for wildcarding.
+TEST("URI glob matching treats question mark character as literal match") {
+ EXPECT_TRUE(uri_glob_matches("f?o", "f?o"));
+ EXPECT_FALSE(uri_glob_matches("f?o", "foo"));
+ EXPECT_FALSE(uri_glob_matches("f?o", "f?oo"));
+}
+
+TEST("wildcard DNS glob does not cross multiple dot delimiter boundaries") {
+ EXPECT_TRUE(dns_glob_matches("*.bar.baz", "foo.bar.baz"));
+ EXPECT_TRUE(dns_glob_matches("*.bar.baz", ".bar.baz"));
+ EXPECT_FALSE(dns_glob_matches("*.bar.baz", "zoid.foo.bar.baz"));
+ EXPECT_TRUE(dns_glob_matches("foo.*.baz", "foo.bar.baz"));
+ EXPECT_FALSE(dns_glob_matches("foo.*.baz", "foo.bar.zoid.baz"));
+}
+
+TEST("wildcard URI glob does not cross multiple fwd slash delimiter boundaries") {
+ EXPECT_TRUE(uri_glob_matches("*/bar/baz", "foo/bar/baz"));
+ EXPECT_TRUE(uri_glob_matches("*/bar/baz", "/bar/baz"));
+ EXPECT_FALSE(uri_glob_matches("*/bar/baz", "bar/baz"));
+ EXPECT_FALSE(uri_glob_matches("*/bar/baz", "/bar/baz/"));
+ EXPECT_FALSE(uri_glob_matches("*/bar/baz", "zoid/foo/bar/baz"));
+ EXPECT_TRUE(uri_glob_matches("foo/*/baz", "foo/bar/baz"));
+ EXPECT_FALSE(uri_glob_matches("foo/*/baz", "foo/bar/zoid/baz"));
+ EXPECT_TRUE(uri_glob_matches("foo/*/baz", "foo/bar.zoid/baz")); // No special handling of dots
+}
+
+TEST("single char DNS glob matches non dot characters only") {
+ EXPECT_FALSE(dns_glob_matches("f?o", "f.o"));
}
TEST("special basic regex characters are escaped") {
- EXPECT_TRUE(glob_matches("$[.\\^", "$[.\\^"));
+ verify_all_glob_types_match("$[.\\^", "$[.\\^");
}
TEST("special extended regex characters are ignored") {
- EXPECT_TRUE(glob_matches("{)(+|]}", "{)(+|]}"));
+ verify_all_glob_types_match("{)(+|]}", "{)(+|]}");
}
// TODO CN + SANs
@@ -116,7 +152,7 @@ TEST("DNS SAN requirement without glob pattern is matched as exact string") {
EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"hello.world.bar"}})));
}
-TEST("DNS SAN requirement can include glob wildcards") {
+TEST("DNS SAN requirement can include glob wildcards, delimited by dot character") {
auto authorized = authorized_peers({policy_with({required_san_dns("*.w?rld")})});
EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"hello.world"}})));
EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"greetings.w0rld"}})));
@@ -124,8 +160,8 @@ TEST("DNS SAN requirement can include glob wildcards") {
EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"world"}})));
}
-// FIXME make this RFC 2459-compliant with subdomain matching, case insensitity for host etc
-TEST("URI SAN requirement is matched as exact string in cheeky, pragmatic violation of RFC 2459") {
+// TODO consider making this RFC 2459-compliant with case insensitivity for scheme and host
+TEST("URI SAN requirement without glob pattern is matched as exact string") {
auto authorized = authorized_peers({policy_with({required_san_uri("foo://bar.baz/zoid")})});
EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"foo://bar.baz/zoid"}})));
EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"foo://bar.baz/zoi"}})));
@@ -136,6 +172,25 @@ TEST("URI SAN requirement is matched as exact string in cheeky, pragmatic violat
EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"foo://BAR.baz/zoid"}})));
}
+// TODO consider making this RFC 2459-compliant with case insensitivity for scheme and host
+TEST("URI SAN requirement can include glob wildcards, delimited by fwd slash character") {
+ auto authorized = authorized_peers({policy_with({required_san_uri("myscheme://my/*/uri")})});
+ EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/cool/uri"}})));
+ EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/really.cool/uri"}}))); // Not delimited by dots
+ EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"theirscheme://my/cool/uri"}})));
+ EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://their/cool/uri"}})));
+ EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/cool/uris"}})));
+ EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/swag/uri/"}})));
+ EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/uri"}})));
+}
+
+TEST("URI SAN requirement can include query part even though it's rather silly to do so") {
+ auto authorized = authorized_peers({policy_with({required_san_uri("myscheme://my/fancy/*?magic")})});
+ EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/uri?magic"}})));
+ EXPECT_TRUE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/?magic"}})));
+ EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"myscheme://my/fancy/urimagic"}})));
+}
+
TEST("multi-SAN policy requires all SANs to be present in certificate") {
auto authorized = authorized_peers({policy_with({required_san_dns("hello.world"),
required_san_dns("foo.bar"),
@@ -157,6 +212,13 @@ TEST("wildcard DNS SAN in certificate is not treated as a wildcard match by poli
EXPECT_FALSE(verify(authorized, creds_with_dns_sans({{"*.world"}})));
}
+TEST("wildcard URI SAN in certificate is not treated as a wildcard match by policy") {
+ auto authorized = authorized_peers({policy_with({required_san_uri("hello://world")})});
+ EXPECT_FALSE(verify(authorized, creds_with_uri_sans({{"hello://*"}})));
+}
+
+// TODO this is just by coincidence since we match '*' as any other character, not because we interpret
+// the wildcard in the SAN as anything special during matching. Consider if we need/want to handle explicitly.
TEST("wildcard DNS SAN in certificate is still matched by wildcard policy SAN") {
auto authorized = authorized_peers({policy_with({required_san_dns("*.world")})});
EXPECT_TRUE(verify(authorized, creds_with_dns_sans({{"*.world"}})));
diff --git a/vespalib/src/tests/thread/thread_test.cpp b/vespalib/src/tests/thread/thread_test.cpp
index 43951b4b734..ee4f97c34cc 100644
--- a/vespalib/src/tests/thread/thread_test.cpp
+++ b/vespalib/src/tests/thread/thread_test.cpp
@@ -6,6 +6,8 @@
using namespace vespalib;
+VESPA_THREAD_STACK_TAG(test_agent_thread);
+
struct Agent : public Runnable {
bool started;
int loopCnt;
@@ -22,7 +24,7 @@ struct Agent : public Runnable {
TEST("thread never started") {
Agent agent;
{
- Thread thread(agent);
+ Thread thread(agent, test_agent_thread);
}
EXPECT_TRUE(!agent.started);
EXPECT_EQUAL(0, agent.loopCnt);
@@ -31,7 +33,7 @@ TEST("thread never started") {
TEST("normal operation") {
Agent agent;
{
- Thread thread(agent);
+ Thread thread(agent, test_agent_thread);
thread.start();
std::this_thread::sleep_for(20ms);
thread.stop().join();
@@ -43,7 +45,7 @@ TEST("normal operation") {
TEST("stop before start") {
Agent agent;
{
- Thread thread(agent);
+ Thread thread(agent, test_agent_thread);
thread.stop();
thread.start();
thread.join();
diff --git a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
index 9ad0e95667b..cf84ab03a25 100644
--- a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
+++ b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
@@ -19,19 +19,14 @@ assertUsage(const MemoryUsage & exp, const MemoryUsage & act)
TEST("test generation holder")
{
- typedef std::unique_ptr<int32_t> IntPtr;
GenerationHolder gh;
- gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t),
- IntPtr(new int32_t(0)))));
+ gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 0));
gh.transferHoldLists(0);
- gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t),
- IntPtr(new int32_t(1)))));
+ gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 1));
gh.transferHoldLists(1);
- gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t),
- IntPtr(new int32_t(2)))));
+ gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 2));
gh.transferHoldLists(2);
- gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t),
- IntPtr(new int32_t(4)))));
+ gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 4));
gh.transferHoldLists(4);
EXPECT_EQUAL(4u * sizeof(int32_t), gh.getHeldBytes());
gh.trimHoldLists(0);
@@ -40,8 +35,7 @@ TEST("test generation holder")
EXPECT_EQUAL(3u * sizeof(int32_t), gh.getHeldBytes());
gh.trimHoldLists(2);
EXPECT_EQUAL(2u * sizeof(int32_t), gh.getHeldBytes());
- gh.hold(GenerationHeldBase::UP(new RcuVectorHeld<int32_t>(sizeof(int32_t),
- IntPtr(new int32_t(6)))));
+ gh.hold(std::make_unique<RcuVectorHeld<int32_t>>(sizeof(int32_t), 6));
gh.transferHoldLists(6);
EXPECT_EQUAL(3u * sizeof(int32_t), gh.getHeldBytes());
gh.trimHoldLists(6);
diff --git a/vespalib/src/vespa/vespalib/btree/btree.h b/vespalib/src/vespa/vespalib/btree/btree.h
index 2b03e70fbdf..f87d5751743 100644
--- a/vespalib/src/vespa/vespalib/btree/btree.h
+++ b/vespalib/src/vespa/vespalib/btree/btree.h
@@ -6,6 +6,8 @@
#include "noaggrcalc.h"
#include <vespa/vespalib/util/generationhandler.h>
+namespace vespalib::datastore { class CompactionStrategy; }
+
namespace vespalib::btree {
/**
@@ -149,7 +151,7 @@ public:
_tree.thaw(itr);
}
- void compact_worst();
+ void compact_worst(const datastore::CompactionStrategy& compaction_strategy);
template <typename FunctionType>
void
diff --git a/vespalib/src/vespa/vespalib/btree/btree.hpp b/vespalib/src/vespa/vespalib/btree/btree.hpp
index c4a588bc63e..473d1f4735e 100644
--- a/vespalib/src/vespa/vespalib/btree/btree.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btree.hpp
@@ -26,9 +26,9 @@ BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::~BTree()
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, class AggrCalcT>
void
-BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::compact_worst()
+BTree<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::compact_worst(const datastore::CompactionStrategy& compaction_strategy)
{
- auto to_hold = _alloc.start_compact_worst();
+ auto to_hold = _alloc.start_compact_worst(compaction_strategy);
_tree.move_nodes(_alloc);
_alloc.finishCompact(to_hold);
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.h b/vespalib/src/vespa/vespalib/btree/btreeiterator.h
index 325ce0e0e47..30123b1946e 100644
--- a/vespalib/src/vespa/vespalib/btree/btreeiterator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreeiterator.h
@@ -113,6 +113,9 @@ public:
return _node->getData(_idx);
}
+ // Only use during compaction when changing reference to moved value
+ DataType &getWData() { return getWNode()->getWData(_idx); }
+
bool
valid() const
{
@@ -881,6 +884,9 @@ public:
_leaf.getWNode()->writeData(_leaf.getIdx(), data);
}
+ // Only use during compaction when changing reference to moved value
+ DataType &getWData() { return _leaf.getWData(); }
+
/**
* Set a new key for the current iterator position.
* The new key must have the same semantic meaning as the old key.
diff --git a/vespalib/src/vespa/vespalib/btree/btreenode.h b/vespalib/src/vespa/vespalib/btree/btreenode.h
index d8752d77f0b..468f17fcd1a 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenode.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenode.h
@@ -99,6 +99,8 @@ public:
}
const DataT &getData(uint32_t idx) const { return _data[idx]; }
+ // Only use during compaction when changing reference to moved value
+ DataT &getWData(uint32_t idx) { return _data[idx]; }
void setData(uint32_t idx, const DataT &data) { _data[idx] = data; }
static bool hasData() { return true; }
};
@@ -120,6 +122,9 @@ public:
return BTreeNoLeafData::_instance;
}
+ // Only use during compaction when changing reference to moved value
+ BTreeNoLeafData &getWData(uint32_t) const { return BTreeNoLeafData::_instance; }
+
void setData(uint32_t idx, const BTreeNoLeafData &data) {
(void) idx;
(void) data;
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
index 93615ddef82..27e73b3a2b6 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
@@ -29,6 +29,7 @@ public:
using BTreeRootBaseType = BTreeRootBase<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>;
using generation_t = vespalib::GenerationHandler::generation_t;
using NodeStore = BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>;
+ using CompactionStrategy = datastore::CompactionStrategy;
using EntryRef = datastore::EntryRef;
using DataStoreBase = datastore::DataStoreBase;
@@ -165,7 +166,7 @@ public:
bool getCompacting(EntryRef ref) const { return _nodeStore.getCompacting(ref); }
std::vector<uint32_t> startCompact() { return _nodeStore.startCompact(); }
- std::vector<uint32_t> start_compact_worst() { return _nodeStore.start_compact_worst(); }
+ std::vector<uint32_t> start_compact_worst(const CompactionStrategy& compaction_strategy) { return _nodeStore.start_compact_worst(compaction_strategy); }
void finishCompact(const std::vector<uint32_t> &toHold) {
return _nodeStore.finishCompact(toHold);
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
index 70a9ba6c73c..444bf641899 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
@@ -56,6 +56,7 @@ public:
typedef typename LeafNodeType::RefPair LeafNodeTypeRefPair;
typedef vespalib::GenerationHandler::generation_t generation_t;
using EntryRef = datastore::EntryRef;
+ using CompactionStrategy = datastore::CompactionStrategy;
enum NodeTypes
{
@@ -159,7 +160,7 @@ public:
std::vector<uint32_t> startCompact();
- std::vector<uint32_t> start_compact_worst();
+ std::vector<uint32_t> start_compact_worst(const CompactionStrategy& compaction_strategy);
void finishCompact(const std::vector<uint32_t> &toHold);
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
index ff4fa59cd74..91953507eb0 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.hpp
@@ -3,6 +3,7 @@
#pragma once
#include "btreenodestore.h"
+#include <vespa/vespalib/datastore/compaction_spec.h>
#include <vespa/vespalib/datastore/datastore.hpp>
namespace vespalib::btree {
@@ -71,9 +72,9 @@ template <typename KeyT, typename DataT, typename AggrT,
size_t INTERNAL_SLOTS, size_t LEAF_SLOTS>
std::vector<uint32_t>
BTreeNodeStore<KeyT, DataT, AggrT, INTERNAL_SLOTS, LEAF_SLOTS>::
-start_compact_worst()
+start_compact_worst(const CompactionStrategy &compaction_strategy)
{
- return _store.startCompactWorstBuffers(true, false);
+ return _store.startCompactWorstBuffers(datastore::CompactionSpec(true, false), compaction_strategy);
}
template <typename KeyT, typename DataT, typename AggrT,
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h
index 82913987e44..a79259c6e57 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.h
@@ -49,6 +49,8 @@ public:
TraitsT::INTERNAL_SLOTS,
TraitsT::LEAF_SLOTS,
AggrCalcT> Builder;
+ using CompactionSpec = datastore::CompactionSpec;
+ using CompactionStrategy = datastore::CompactionStrategy;
using EntryRef = datastore::EntryRef;
template <typename EntryType>
using BufferType = datastore::BufferType<EntryType>;
@@ -298,6 +300,9 @@ public:
bool
isSmallArray(const EntryRef ref) const;
+ static bool isBTree(uint32_t typeId) { return typeId == BUFFERTYPE_BTREE; }
+ bool isBTree(RefType ref) const { return isBTree(getTypeId(ref)); }
+
/**
* Returns the cluster size for the type id.
* Cluster size == 0 means we have a tree for the given reference.
@@ -389,12 +394,12 @@ public:
void
foreach_frozen(EntryRef ref, FunctionType func) const;
- std::vector<uint32_t> start_compact_worst_btree_nodes();
+ std::vector<uint32_t> start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy);
void finish_compact_worst_btree_nodes(const std::vector<uint32_t>& to_hold);
- void move_btree_nodes(EntryRef ref);
+ void move_btree_nodes(const std::vector<EntryRef>& refs);
- std::vector<uint32_t> start_compact_worst_buffers();
- EntryRef move(EntryRef ref);
+ std::vector<uint32_t> start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
+ void move(std::vector<EntryRef>& refs);
private:
static constexpr size_t MIN_BUFFER_ARRAYS = 128u;
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
index 15c546a0368..c0985ff8f94 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
@@ -5,6 +5,7 @@
#include "btreestore.h"
#include "btreebuilder.h"
#include "btreebuilder.hpp"
+#include <vespa/vespalib/datastore/compaction_spec.h>
#include <vespa/vespalib/datastore/datastore.hpp>
#include <vespa/vespalib/util/optimized.h>
@@ -972,10 +973,10 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
std::vector<uint32_t>
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-start_compact_worst_btree_nodes()
+start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy)
{
_builder.clear();
- return _allocator.start_compact_worst();
+ return _allocator.start_compact_worst(compaction_strategy);
}
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
@@ -991,15 +992,15 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
void
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-move_btree_nodes(EntryRef ref)
+move_btree_nodes(const std::vector<EntryRef>& refs)
{
- if (ref.valid()) {
+ for (auto& ref : refs) {
RefType iRef(ref);
- uint32_t clusterSize = getClusterSize(iRef);
- if (clusterSize == 0) {
- BTreeType *tree = getWTreeEntry(iRef);
- tree->move_nodes(_allocator);
- }
+ assert(iRef.valid());
+ uint32_t typeId = getTypeId(iRef);
+ assert(isBTree(typeId));
+ BTreeType *tree = getWTreeEntry(iRef);
+ tree->move_nodes(_allocator);
}
}
@@ -1007,31 +1008,33 @@ template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
std::vector<uint32_t>
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-start_compact_worst_buffers()
+start_compact_worst_buffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
freeze();
- return _store.startCompactWorstBuffers(true, false);
+ return _store.startCompactWorstBuffers(compaction_spec, compaction_strategy);
}
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
typename TraitsT, typename AggrCalcT>
-typename BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::EntryRef
+void
BTreeStore<KeyT, DataT, AggrT, CompareT, TraitsT, AggrCalcT>::
-move(EntryRef ref)
+move(std::vector<EntryRef> &refs)
{
- if (!ref.valid() || !_store.getCompacting(ref)) {
- return ref;
- }
- RefType iRef(ref);
- uint32_t clusterSize = getClusterSize(iRef);
- if (clusterSize == 0) {
- BTreeType *tree = getWTreeEntry(iRef);
- auto ref_and_ptr = allocBTreeCopy(*tree);
- tree->prepare_hold();
- return ref_and_ptr.ref;
+ for (auto& ref : refs) {
+ RefType iRef(ref);
+ assert(iRef.valid());
+ assert(_store.getCompacting(iRef));
+ uint32_t clusterSize = getClusterSize(iRef);
+ if (clusterSize == 0) {
+ BTreeType *tree = getWTreeEntry(iRef);
+ auto ref_and_ptr = allocBTreeCopy(*tree);
+ tree->prepare_hold();
+ ref = ref_and_ptr.ref;
+ } else {
+ const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize);
+ ref = allocKeyDataCopy(shortArray, clusterSize).ref;
+ }
}
- const KeyDataType *shortArray = getKeyDataEntry(iRef, clusterSize);
- return allocKeyDataCopy(shortArray, clusterSize).ref;
}
}
diff --git a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
index 6c6f5258555..c36077e4dd0 100644
--- a/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/datastore/CMakeLists.txt
@@ -5,9 +5,11 @@ vespa_add_library(vespalib_vespalib_datastore OBJECT
array_store_config.cpp
buffer_type.cpp
bufferstate.cpp
+ compaction_strategy.cpp
datastore.cpp
datastorebase.cpp
entryref.cpp
+ entry_ref_filter.cpp
fixed_size_hash_map.cpp
sharded_hash_map.cpp
unique_store.cpp
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.h b/vespalib/src/vespa/vespalib/datastore/array_store.h
index 3ba0caae5b9..d9b62c310b5 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.h
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.h
@@ -96,7 +96,7 @@ public:
}
void remove(EntryRef ref);
- ICompactionContext::UP compactWorst(bool compactMemory, bool compactAddressSpace);
+ ICompactionContext::UP compactWorst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
vespalib::MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); }
/**
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
index 5600c64eb3d..bbbd52c354d 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
@@ -3,6 +3,8 @@
#pragma once
#include "array_store.h"
+#include "compaction_spec.h"
+#include "entry_ref_filter.h"
#include "datastore.hpp"
#include <atomic>
#include <algorithm>
@@ -127,47 +129,38 @@ private:
DataStoreBase &_dataStore;
ArrayStoreType &_store;
std::vector<uint32_t> _bufferIdsToCompact;
+ EntryRefFilter _filter;
- bool compactingBuffer(uint32_t bufferId) {
- return std::find(_bufferIdsToCompact.begin(), _bufferIdsToCompact.end(),
- bufferId) != _bufferIdsToCompact.end();
- }
public:
CompactionContext(DataStoreBase &dataStore,
ArrayStoreType &store,
std::vector<uint32_t> bufferIdsToCompact)
: _dataStore(dataStore),
_store(store),
- _bufferIdsToCompact(std::move(bufferIdsToCompact))
- {}
+ _bufferIdsToCompact(std::move(bufferIdsToCompact)),
+ _filter(RefT::numBuffers(), RefT::offset_bits)
+ {
+ _filter.add_buffers(_bufferIdsToCompact);
+ }
~CompactionContext() override {
_dataStore.finishCompact(_bufferIdsToCompact);
}
void compact(vespalib::ArrayRef<EntryRef> refs) override {
- if (!_bufferIdsToCompact.empty()) {
- for (auto &ref : refs) {
- if (ref.valid()) {
- RefT internalRef(ref);
- if (compactingBuffer(internalRef.bufferId())) {
- EntryRef newRef = _store.add(_store.get(ref));
- std::atomic_thread_fence(std::memory_order_release);
- ref = newRef;
- }
- }
+ for (auto &ref : refs) {
+ if (ref.valid() && _filter.has(ref)) {
+ EntryRef newRef = _store.add(_store.get(ref));
+ std::atomic_thread_fence(std::memory_order_release);
+ ref = newRef;
}
}
}
void compact(vespalib::ArrayRef<AtomicEntryRef> refs) override {
- if (!_bufferIdsToCompact.empty()) {
- for (auto &ref : refs) {
- if (ref.load_relaxed().valid()) {
- RefT internalRef(ref.load_relaxed());
- if (compactingBuffer(internalRef.bufferId())) {
- EntryRef newRef = _store.add(_store.get(ref.load_relaxed()));
- std::atomic_thread_fence(std::memory_order_release);
- ref.store_release(newRef);
- }
- }
+ for (auto &atomic_entry_ref : refs) {
+ auto ref = atomic_entry_ref.load_relaxed();
+ if (ref.valid() && _filter.has(ref)) {
+ EntryRef newRef = _store.add(_store.get(ref));
+ std::atomic_thread_fence(std::memory_order_release);
+ atomic_entry_ref.store_release(newRef);
}
}
}
@@ -177,9 +170,9 @@ public:
template <typename EntryT, typename RefT>
ICompactionContext::UP
-ArrayStore<EntryT, RefT>::compactWorst(bool compactMemory, bool compactAddressSpace)
+ArrayStore<EntryT, RefT>::compactWorst(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy)
{
- std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compactMemory, compactAddressSpace);
+ std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compaction_spec, compaction_strategy);
return std::make_unique<arraystore::CompactionContext<EntryT, RefT>>
(_store, *this, std::move(bufferIdsToCompact));
}
diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_spec.h b/vespalib/src/vespa/vespalib/datastore/compaction_spec.h
new file mode 100644
index 00000000000..b346df68452
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/compaction_spec.h
@@ -0,0 +1,29 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace vespalib::datastore {
+
+/*
+ * Class describing how to compact a compactable data structure.
+ *
+ * memory - to reduce amount of "dead" memory
+ * address_space - to avoid running out of free buffers in data store
+ * (i.e. move data from small buffers to larger buffers)
+ */
+class CompactionSpec
+{
+ bool _compact_memory;
+ bool _compact_address_space;
+public:
+ CompactionSpec(bool compact_memory_, bool compact_address_space_) noexcept
+ : _compact_memory(compact_memory_),
+ _compact_address_space(compact_address_space_)
+ {
+ }
+ bool compact() const noexcept { return _compact_memory || _compact_address_space; }
+ bool compact_memory() const noexcept { return _compact_memory; }
+ bool compact_address_space() const noexcept { return _compact_address_space; }
+};
+
+}
diff --git a/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp
new file mode 100644
index 00000000000..2dbd501f78e
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.cpp
@@ -0,0 +1,37 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "compaction_strategy.h"
+#include "compaction_spec.h"
+#include <vespa/vespalib/util/memoryusage.h>
+#include <vespa/vespalib/util/address_space.h>
+#include <iostream>
+
+namespace vespalib::datastore {
+
+bool
+CompactionStrategy::should_compact_memory(const MemoryUsage& memory_usage) const
+{
+ return should_compact_memory(memory_usage.usedBytes(), memory_usage.deadBytes());
+}
+
+bool
+CompactionStrategy::should_compact_address_space(const AddressSpace& address_space) const
+{
+ return should_compact_address_space(address_space.used(), address_space.dead());
+}
+
+CompactionSpec
+CompactionStrategy::should_compact(const MemoryUsage& memory_usage, const AddressSpace& address_space) const
+{
+ return CompactionSpec(should_compact_memory(memory_usage), should_compact_address_space(address_space));
+}
+
+std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy)
+{
+ os << "{maxDeadBytesRatio=" << compaction_strategy.getMaxDeadBytesRatio() <<
+ ", maxDeadAddressSpaceRatio=" << compaction_strategy.getMaxDeadAddressSpaceRatio() <<
+ "}";
+ return os;
+}
+
+}
diff --git a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h
index ced28436471..df7ca1657cb 100644
--- a/searchcommon/src/vespa/searchcommon/common/compaction_strategy.h
+++ b/vespalib/src/vespa/vespalib/datastore/compaction_strategy.h
@@ -4,16 +4,36 @@
#include <iosfwd>
-namespace search {
+namespace vespalib {
+
+class AddressSpace;
+class MemoryUsage;
+
+}
+
+namespace vespalib::datastore {
+
+class CompactionSpec;
/*
* Class describing compaction strategy for a compactable data structure.
*/
class CompactionStrategy
{
+public:
+ static constexpr size_t DEAD_BYTES_SLACK = 0x10000u;
+ static constexpr size_t DEAD_ADDRESS_SPACE_SLACK = 0x10000u;
private:
double _maxDeadBytesRatio; // Max ratio of dead bytes before compaction
double _maxDeadAddressSpaceRatio; // Max ratio of dead address space before compaction
+ bool should_compact_memory(size_t used_bytes, size_t dead_bytes) const {
+ return ((dead_bytes >= DEAD_BYTES_SLACK) &&
+ (dead_bytes > used_bytes * getMaxDeadBytesRatio()));
+ }
+ bool should_compact_address_space(size_t used_address_space, size_t dead_address_space) const {
+ return ((dead_address_space >= DEAD_ADDRESS_SPACE_SLACK) &&
+ (dead_address_space > used_address_space * getMaxDeadAddressSpaceRatio()));
+ }
public:
CompactionStrategy() noexcept
: _maxDeadBytesRatio(0.05),
@@ -33,21 +53,11 @@ public:
}
bool operator!=(const CompactionStrategy & rhs) const { return !(operator==(rhs)); }
- static constexpr size_t DEAD_BYTES_SLACK = 0x10000u;
-
- bool should_compact_memory(size_t used_bytes, size_t dead_bytes) const {
- return ((dead_bytes >= DEAD_BYTES_SLACK) &&
- (dead_bytes > used_bytes * getMaxDeadBytesRatio()));
- }
-
- static constexpr size_t DEAD_ADDRESS_SPACE_SLACK = 0x10000u;
-
- bool should_compact_address_space(size_t used_address_space, size_t dead_address_space) const {
- return ((dead_address_space >= DEAD_ADDRESS_SPACE_SLACK) &&
- (dead_address_space > used_address_space * getMaxDeadAddressSpaceRatio()));
- }
+ bool should_compact_memory(const MemoryUsage& memory_usage) const;
+ bool should_compact_address_space(const AddressSpace& address_space) const;
+ CompactionSpec should_compact(const MemoryUsage& memory_usage, const AddressSpace& address_space) const;
};
std::ostream& operator<<(std::ostream& os, const CompactionStrategy& compaction_strategy);
-} // namespace search
+}
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index b5cab50bc33..059171e1f02 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "datastore.h"
+#include "compaction_spec.h"
#include <vespa/vespalib/util/array.hpp>
#include <vespa/vespalib/util/stringfmt.h>
#include <limits>
@@ -526,8 +527,9 @@ DataStoreBase::markCompacting(uint32_t bufferId)
}
std::vector<uint32_t>
-DataStoreBase::startCompactWorstBuffers(bool compactMemory, bool compactAddressSpace)
+DataStoreBase::startCompactWorstBuffers(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
+ (void) compaction_strategy;
constexpr uint32_t noBufferId = std::numeric_limits<uint32_t>::max();
uint32_t worstMemoryBufferId = noBufferId;
uint32_t worstAddressSpaceBufferId = noBufferId;
@@ -540,11 +542,11 @@ DataStoreBase::startCompactWorstBuffers(bool compactMemory, bool compactAddressS
uint32_t arraySize = typeHandler->getArraySize();
uint32_t reservedElements = typeHandler->getReservedElements(bufferId);
size_t deadElems = state.getDeadElems() - reservedElements;
- if (compactMemory && deadElems > worstDeadElems) {
+ if (compaction_spec.compact_memory() && deadElems > worstDeadElems) {
worstMemoryBufferId = bufferId;
worstDeadElems = deadElems;
}
- if (compactAddressSpace) {
+ if (compaction_spec.compact_address_space()) {
size_t deadArrays = deadElems / arraySize;
if (deadArrays > worstDeadArrays) {
worstAddressSpaceBufferId = bufferId;
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 6903ae12c9c..e98d9531806 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -12,6 +12,9 @@
namespace vespalib::datastore {
+class CompactionSpec;
+class CompactionStrategy;
+
/**
* Abstract class used to store data of potential different types in underlying memory buffers.
*
@@ -368,7 +371,7 @@ public:
}
uint32_t startCompactWorstBuffer(uint32_t typeId);
- std::vector<uint32_t> startCompactWorstBuffers(bool compactMemory, bool compactAddressSpace);
+ std::vector<uint32_t> startCompactWorstBuffers(CompactionSpec compaction_spec, const CompactionStrategy &compaction_strategy);
uint64_t get_compaction_count() const { return _compaction_count.load(std::memory_order_relaxed); }
void inc_compaction_count() const { ++_compaction_count; }
bool has_held_buffers() const noexcept { return _hold_buffer_count != 0u; }
diff --git a/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp
new file mode 100644
index 00000000000..87c3c87636c
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.cpp
@@ -0,0 +1,28 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "entry_ref_filter.h"
+
+namespace vespalib::datastore {
+
+EntryRefFilter::EntryRefFilter(std::vector<bool> filter, uint32_t offset_bits)
+ : _filter(std::move(filter)),
+ _offset_bits(offset_bits)
+{
+}
+
+EntryRefFilter::EntryRefFilter(uint32_t num_buffers, uint32_t offset_bits)
+ : _filter(num_buffers),
+ _offset_bits(offset_bits)
+{
+}
+
+EntryRefFilter::~EntryRefFilter() = default;
+
+EntryRefFilter
+EntryRefFilter::create_all_filter(uint32_t num_buffers, uint32_t offset_bits)
+{
+ std::vector<bool> filter(num_buffers, true);
+ return EntryRefFilter(std::move(filter), offset_bits);
+}
+
+}
diff --git a/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h
new file mode 100644
index 00000000000..c06d843fbd0
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/datastore/entry_ref_filter.h
@@ -0,0 +1,35 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "entryref.h"
+#include <vector>
+
+namespace vespalib::datastore {
+
+/*
+ * Class to filter entry refs based on which buffer the entry is referencing.
+ *
+ * Buffers that are allowed have their corresponding bit set in _filter.
+ */
+class EntryRefFilter {
+ std::vector<bool> _filter;
+ uint32_t _offset_bits;
+ EntryRefFilter(std::vector<bool> filter, uint32_t offset_bits);
+public:
+ EntryRefFilter(uint32_t num_buffers, uint32_t offset_bits);
+ ~EntryRefFilter();
+ bool has(EntryRef ref) const {
+ uint32_t buffer_id = ref.buffer_id(_offset_bits);
+ return _filter[buffer_id];
+ }
+ void add_buffer(uint32_t buffer_id) { _filter[buffer_id] = true; }
+ void add_buffers(const std::vector<uint32_t>& ids) {
+ for (auto buffer_id : ids) {
+ _filter[buffer_id] = true;
+ }
+ }
+ static EntryRefFilter create_all_filter(uint32_t num_buffers, uint32_t offset_bits);
+};
+
+}
diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
index db9fee8ea70..6f001ce3c94 100644
--- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.cpp
@@ -2,6 +2,7 @@
#include "fixed_size_hash_map.h"
#include "entry_comparator.h"
+#include "entry_ref_filter.h"
#include "i_compactable.h"
#include <vespa/vespalib/util/array.hpp>
#include <vespa/vespalib/util/memoryusage.h>
@@ -182,7 +183,7 @@ FixedSizeHashMap::foreach_key(const std::function<void(EntryRef)>& callback) con
}
void
-FixedSizeHashMap::move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits)
+FixedSizeHashMap::move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers)
{
for (auto& chain_head : _chain_heads) {
uint32_t node_idx = chain_head.load_relaxed();
@@ -190,8 +191,7 @@ FixedSizeHashMap::move_keys(ICompactable& compactable, const std::vector<bool>&
auto& node = _nodes[node_idx];
EntryRef old_ref = node.get_kv().first.load_relaxed();
assert(old_ref.valid());
- uint32_t buffer_id = old_ref.buffer_id(entry_ref_offset_bits);
- if (compacting_buffers[buffer_id]) {
+ if (compacting_buffers.has(old_ref)) {
EntryRef new_ref = compactable.move(old_ref);
node.get_kv().first.store_release(new_ref);
}
@@ -220,4 +220,104 @@ FixedSizeHashMap::normalize_values(const std::function<EntryRef(EntryRef)>& norm
return changed;
}
+namespace {
+
+class ChangeWriter {
+ std::vector<AtomicEntryRef*> _atomic_refs;
+public:
+ ChangeWriter(uint32_t capacity);
+ ~ChangeWriter();
+ bool write(const std::vector<EntryRef> &refs);
+ void emplace_back(AtomicEntryRef &atomic_ref) { _atomic_refs.emplace_back(&atomic_ref); }
+};
+
+ChangeWriter::ChangeWriter(uint32_t capacity)
+ : _atomic_refs()
+{
+ _atomic_refs.reserve(capacity);
+}
+
+ChangeWriter::~ChangeWriter() = default;
+
+bool
+ChangeWriter::write(const std::vector<EntryRef> &refs)
+{
+ bool changed = false;
+ assert(refs.size() == _atomic_refs.size());
+ auto atomic_ref = _atomic_refs.begin();
+ for (auto ref : refs) {
+ EntryRef old_ref = (*atomic_ref)->load_relaxed();
+ if (ref != old_ref) {
+ (*atomic_ref)->store_release(ref);
+ changed = true;
+ }
+ ++atomic_ref;
+ }
+ assert(atomic_ref == _atomic_refs.end());
+ _atomic_refs.clear();
+ return changed;
+}
+
+}
+
+bool
+FixedSizeHashMap::normalize_values(const std::function<void(std::vector<EntryRef>&)>& normalize, const EntryRefFilter& filter)
+{
+ std::vector<EntryRef> refs;
+ refs.reserve(1024);
+ bool changed = false;
+ ChangeWriter change_writer(refs.capacity());
+ for (auto& chain_head : _chain_heads) {
+ uint32_t node_idx = chain_head.load_relaxed();
+ while (node_idx != no_node_idx) {
+ auto& node = _nodes[node_idx];
+ EntryRef ref = node.get_kv().second.load_relaxed();
+ if (ref.valid()) {
+ if (filter.has(ref)) {
+ refs.emplace_back(ref);
+ change_writer.emplace_back(node.get_kv().second);
+ if (refs.size() >= refs.capacity()) {
+ normalize(refs);
+ changed |= change_writer.write(refs);
+ refs.clear();
+ }
+ }
+ }
+ node_idx = node.get_next_node_idx().load(std::memory_order_relaxed);
+ }
+ }
+ if (!refs.empty()) {
+ normalize(refs);
+ changed |= change_writer.write(refs);
+ }
+ return changed;
+}
+
+void
+FixedSizeHashMap::foreach_value(const std::function<void(const std::vector<EntryRef>&)>& callback, const EntryRefFilter& filter)
+{
+ std::vector<EntryRef> refs;
+ refs.reserve(1024);
+ for (auto& chain_head : _chain_heads) {
+ uint32_t node_idx = chain_head.load_relaxed();
+ while (node_idx != no_node_idx) {
+ auto& node = _nodes[node_idx];
+ EntryRef ref = node.get_kv().second.load_relaxed();
+ if (ref.valid()) {
+ if (filter.has(ref)) {
+ refs.emplace_back(ref);
+ if (refs.size() >= refs.capacity()) {
+ callback(refs);
+ refs.clear();
+ }
+ }
+ }
+ node_idx = node.get_next_node_idx().load(std::memory_order_relaxed);
+ }
+ }
+ if (!refs.empty()) {
+ callback(refs);
+ }
+}
+
}
diff --git a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
index 035cd84dbee..c522bcc3c33 100644
--- a/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
+++ b/vespalib/src/vespa/vespalib/datastore/fixed_size_hash_map.h
@@ -18,6 +18,7 @@ class MemoryUsage;
}
namespace vespalib::datastore {
+class EntryRefFilter;
struct ICompactable;
class ShardedHashComparator {
@@ -158,8 +159,26 @@ public:
size_t size() const noexcept { return _count; }
MemoryUsage get_memory_usage() const;
void foreach_key(const std::function<void(EntryRef)>& callback) const;
- void move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits);
+ void move_keys(ICompactable& compactable, const EntryRefFilter &compacting_buffers);
+ /*
+ * Scan dictionary and call normalize function for each value. If
+ * returned value is different then write back the modified value to
+ * the dictionary. Used when clearing all posting lists.
+ */
bool normalize_values(const std::function<EntryRef(EntryRef)>& normalize);
+ /*
+ * Scan dictionary and call normalize function for batches of values
+ * that pass the filter. Write back modified values to the dictionary.
+ * Used by compaction of posting lists when moving short arrays,
+ * bitvectors or btree roots.
+ */
+ bool normalize_values(const std::function<void(std::vector<EntryRef>&)>& normalize, const EntryRefFilter& filter);
+ /*
+ * Scan dictionary and call callback function for batches of values
+ * that pass the filter. Used by compaction of posting lists when
+ * moving btree nodes.
+ */
+ void foreach_value(const std::function<void(const std::vector<EntryRef>&)>& callback, const EntryRefFilter& filter);
};
}
diff --git a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
index 886ec095dcd..4fd3bcad5e5 100644
--- a/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
+++ b/vespalib/src/vespa/vespalib/datastore/i_unique_store_dictionary.h
@@ -10,7 +10,9 @@
namespace vespalib::datastore {
+class CompactionStrategy;
class EntryComparator;
+class EntryRefFilter;
struct ICompactable;
class IUniqueStoreDictionaryReadSnapshot;
class UniqueStoreAddResult;
@@ -28,7 +30,7 @@ public:
virtual UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) = 0;
virtual EntryRef find(const EntryComparator& comp) = 0;
virtual void remove(const EntryComparator& comp, EntryRef ref) = 0;
- virtual void move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits) = 0;
+ virtual void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) = 0;
virtual uint32_t get_num_uniques() const = 0;
virtual vespalib::MemoryUsage get_memory_usage() const = 0;
virtual void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) = 0;
@@ -40,7 +42,7 @@ public:
virtual vespalib::MemoryUsage get_btree_memory_usage() const = 0;
virtual vespalib::MemoryUsage get_hash_memory_usage() const = 0;
virtual bool has_held_buffers() const = 0;
- virtual void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) = 0;
+ virtual void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy) = 0;
};
}
diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
index da4db92a309..019b98a53dd 100644
--- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.cpp
@@ -171,12 +171,12 @@ ShardedHashMap::foreach_key(std::function<void(EntryRef)> callback) const
}
void
-ShardedHashMap::move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits)
+ShardedHashMap::move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers)
{
for (size_t i = 0; i < num_shards; ++i) {
auto map = _maps[i].load(std::memory_order_relaxed);
if (map != nullptr) {
- map->move_keys(compactable, compacting_buffers, entry_ref_offset_bits);
+ map->move_keys(compactable, compacting_buffers);
}
}
}
@@ -195,6 +195,31 @@ ShardedHashMap::normalize_values(std::function<EntryRef(EntryRef)> normalize)
}
bool
+ShardedHashMap::normalize_values(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter)
+{
+ bool changed = false;
+ for (size_t i = 0; i < num_shards; ++i) {
+ auto map = _maps[i].load(std::memory_order_relaxed);
+ if (map != nullptr) {
+ changed |= map->normalize_values(normalize, filter);
+ }
+ }
+ return changed;
+}
+
+void
+ShardedHashMap::foreach_value(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter)
+{
+ for (size_t i = 0; i < num_shards; ++i) {
+ auto map = _maps[i].load(std::memory_order_relaxed);
+ if (map != nullptr) {
+ map->foreach_value(callback, filter);
+ }
+ }
+}
+
+
+bool
ShardedHashMap::has_held_buffers() const
{
return _gen_holder.getHeldBytes() != 0;
diff --git a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
index df07f7a1990..e0ba9488351 100644
--- a/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
+++ b/vespalib/src/vespa/vespalib/datastore/sharded_hash_map.h
@@ -11,6 +11,7 @@ namespace vespalib { class MemoryUsage; }
namespace vespalib::datastore {
class EntryComparator;
+class EntryRefFilter;
class FixedSizeHashMap;
struct ICompactable;
@@ -57,8 +58,10 @@ public:
const EntryComparator &get_default_comparator() const noexcept { return *_comp; }
MemoryUsage get_memory_usage() const;
void foreach_key(std::function<void(EntryRef)> callback) const;
- void move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits);
+ void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers);
bool normalize_values(std::function<EntryRef(EntryRef)> normalize);
+ bool normalize_values(std::function<void(std::vector<EntryRef>&)> normalize, const EntryRefFilter& filter);
+ void foreach_value(std::function<void(const std::vector<EntryRef>&)> callback, const EntryRefFilter& filter);
bool has_held_buffers() const;
void compact_worst_shard();
};
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.h b/vespalib/src/vespa/vespalib/datastore/unique_store.h
index 38643d84be0..aea98f406e8 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.h
@@ -55,11 +55,11 @@ public:
EntryRef find(EntryConstRefType value);
EntryConstRefType get(EntryRef ref) const { return _allocator.get(ref); }
void remove(EntryRef ref);
- std::unique_ptr<Remapper> compact_worst(bool compact_memory, bool compact_address_space);
+ std::unique_ptr<Remapper> compact_worst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy);
vespalib::MemoryUsage getMemoryUsage() const;
vespalib::MemoryUsage get_values_memory_usage() const { return _store.getMemoryUsage(); }
vespalib::MemoryUsage get_dictionary_memory_usage() const { return _dict->get_memory_usage(); }
- vespalib::AddressSpace get_address_space_usage() const;
+ vespalib::AddressSpace get_values_address_space_usage() const;
// TODO: Consider exposing only the needed functions from allocator
Allocator& get_allocator() { return _allocator; }
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
index d375dbae149..b73b714a6bc 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
@@ -102,11 +102,9 @@ private:
std::vector<uint32_t> _bufferIdsToCompact;
void allocMapping() {
- _compacting_buffer.resize(RefT::numBuffers());
_mapping.resize(RefT::numBuffers());
for (const auto bufferId : _bufferIdsToCompact) {
BufferState &state = _dataStore.getBufferState(bufferId);
- _compacting_buffer[bufferId] = true;
_mapping[bufferId].resize(state.get_used_arrays());
}
}
@@ -124,7 +122,7 @@ private:
}
void fillMapping() {
- _dict.move_keys(*this, _compacting_buffer, RefT::offset_bits);
+ _dict.move_keys(*this, _compacting_buffer);
}
public:
@@ -140,6 +138,7 @@ public:
_bufferIdsToCompact(std::move(bufferIdsToCompact))
{
if (!_bufferIdsToCompact.empty()) {
+ _compacting_buffer.add_buffers(_bufferIdsToCompact);
allocMapping();
fillMapping();
}
@@ -158,9 +157,9 @@ public:
template <typename EntryT, typename RefT, typename Compare, typename Allocator>
std::unique_ptr<typename UniqueStore<EntryT, RefT, Compare, Allocator>::Remapper>
-UniqueStore<EntryT, RefT, Compare, Allocator>::compact_worst(bool compact_memory, bool compact_address_space)
+UniqueStore<EntryT, RefT, Compare, Allocator>::compact_worst(CompactionSpec compaction_spec, const CompactionStrategy& compaction_strategy)
{
- std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compact_memory, compact_address_space);
+ std::vector<uint32_t> bufferIdsToCompact = _store.startCompactWorstBuffers(compaction_spec, compaction_strategy);
if (bufferIdsToCompact.empty()) {
return std::unique_ptr<Remapper>();
} else {
@@ -179,7 +178,7 @@ UniqueStore<EntryT, RefT, Compare, Allocator>::getMemoryUsage() const
template <typename EntryT, typename RefT, typename Compare, typename Allocator>
vespalib::AddressSpace
-UniqueStore<EntryT, RefT, Compare, Allocator>::get_address_space_usage() const
+UniqueStore<EntryT, RefT, Compare, Allocator>::get_values_address_space_usage() const
{
return _allocator.get_data_store().getAddressSpaceUsage();
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
index 3b0169b5a34..d64588e3242 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.h
@@ -79,7 +79,7 @@ public:
UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) override;
EntryRef find(const EntryComparator& comp) override;
void remove(const EntryComparator& comp, EntryRef ref) override;
- void move_keys(ICompactable& compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits) override;
+ void move_keys(ICompactable& compactable, const EntryRefFilter& compacting_buffers) override;
uint32_t get_num_uniques() const override;
vespalib::MemoryUsage get_memory_usage() const override;
void build(vespalib::ConstArrayRef<EntryRef>, vespalib::ConstArrayRef<uint32_t> ref_counts, std::function<void(EntryRef)> hold) override;
@@ -91,7 +91,7 @@ public:
vespalib::MemoryUsage get_btree_memory_usage() const override;
vespalib::MemoryUsage get_hash_memory_usage() const override;
bool has_held_buffers() const override;
- void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary) override;
+ void compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy) override;
};
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
index e88376be9fb..4375b38cf7c 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_dictionary.hpp
@@ -4,6 +4,7 @@
#include "datastore.hpp"
#include "entry_comparator_wrapper.h"
+#include "entry_ref_filter.h"
#include "i_compactable.h"
#include "unique_store_add_result.h"
#include "unique_store_dictionary.h"
@@ -139,15 +140,14 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::remove(const
template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT>
void
-UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICompactable &compactable, const std::vector<bool>& compacting_buffers, uint32_t entry_ref_offset_bits)
+UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICompactable &compactable, const EntryRefFilter& compacting_buffers)
{
if constexpr (has_btree_dictionary) {
auto itr = this->_btree_dict.begin();
while (itr.valid()) {
EntryRef oldRef(itr.getKey());
assert(oldRef.valid());
- uint32_t buffer_id = oldRef.buffer_id(entry_ref_offset_bits);
- if (compacting_buffers[buffer_id]) {
+ if (compacting_buffers.has(oldRef)) {
EntryRef newRef(compactable.move(oldRef));
this->_btree_dict.thaw(itr);
itr.writeKey(newRef);
@@ -160,7 +160,7 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::move_keys(ICo
++itr;
}
} else {
- this->_hash_dict.move_keys(compactable, compacting_buffers, entry_ref_offset_bits);
+ this->_hash_dict.move_keys(compactable, compacting_buffers);
}
}
@@ -339,11 +339,11 @@ UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::has_held_buff
template <typename BTreeDictionaryT, typename ParentT, typename HashDictionaryT>
void
-UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary)
+UniqueStoreDictionary<BTreeDictionaryT, ParentT, HashDictionaryT>::compact_worst(bool compact_btree_dictionary, bool compact_hash_dictionary, const CompactionStrategy& compaction_strategy)
{
if constexpr (has_btree_dictionary) {
if (compact_btree_dictionary) {
- this->_btree_dict.compact_worst();
+ this->_btree_dict.compact_worst(compaction_strategy);
}
} else {
(void) compact_btree_dictionary;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
index 4a8d72c8685..2501c4fafd9 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_remapper.h
@@ -3,6 +3,7 @@
#pragma once
#include "entryref.h"
+#include "entry_ref_filter.h"
#include <vector>
#include <vespa/vespalib/stllike/allocator.h>
@@ -18,43 +19,35 @@ public:
using RefType = RefT;
protected:
- std::vector<bool> _compacting_buffer;
+ EntryRefFilter _compacting_buffer;
std::vector<std::vector<EntryRef, allocator_large<EntryRef>>> _mapping;
public:
UniqueStoreRemapper()
- : _compacting_buffer(),
+ : _compacting_buffer(RefT::numBuffers(), RefT::offset_bits),
_mapping()
{
}
virtual ~UniqueStoreRemapper() = default;
EntryRef remap(EntryRef ref) const {
- if (ref.valid()) {
- RefType internal_ref(ref);
- if (!_compacting_buffer[internal_ref.bufferId()]) {
- // No remapping for references to buffers not being compacted
- return ref;
- } else {
- auto &inner_mapping = _mapping[internal_ref.bufferId()];
- assert(internal_ref.unscaled_offset() < inner_mapping.size());
- EntryRef mapped_ref = inner_mapping[internal_ref.unscaled_offset()];
- assert(mapped_ref.valid());
- return mapped_ref;
- }
- } else {
- return EntryRef();
- }
+ RefType internal_ref(ref);
+ auto &inner_mapping = _mapping[internal_ref.bufferId()];
+ assert(internal_ref.unscaled_offset() < inner_mapping.size());
+ EntryRef mapped_ref = inner_mapping[internal_ref.unscaled_offset()];
+ assert(mapped_ref.valid());
+ return mapped_ref;
}
void remap(vespalib::ArrayRef<EntryRef> refs) const {
for (auto &ref : refs) {
- auto mapped_ref = remap(ref);
- if (mapped_ref != ref) {
- ref = mapped_ref;
+ if (ref.valid() && _compacting_buffer.has(ref)) {
+ ref = remap(ref);
}
}
}
+ const EntryRefFilter& get_entry_ref_filter() const noexcept { return _compacting_buffer; }
+
virtual void done() = 0;
};
diff --git a/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp b/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp
index b4f7eb5cd96..7407ffd6a4e 100644
--- a/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp
+++ b/vespalib/src/vespa/vespalib/hwaccelrated/iaccelrated.cpp
@@ -17,28 +17,18 @@ namespace vespalib::hwaccelrated {
namespace {
-class Factory {
-public:
- virtual ~Factory() = default;
- virtual IAccelrated::UP create() const = 0;
-};
-
-class GenericFactory :public Factory{
-public:
- IAccelrated::UP create() const override { return std::make_unique<GenericAccelrator>(); }
-};
-
+IAccelrated::UP create_accelerator() {
#ifdef __x86_64__
-class Avx2Factory :public Factory{
-public:
- IAccelrated::UP create() const override { return std::make_unique<Avx2Accelrator>(); }
-};
-
-class Avx512Factory :public Factory{
-public:
- IAccelrated::UP create() const override { return std::make_unique<Avx512Accelrator>(); }
-};
+ __builtin_cpu_init();
+ if (__builtin_cpu_supports("avx512f")) {
+ return std::make_unique<Avx512Accelrator>();
+ }
+ if (__builtin_cpu_supports("avx2")) {
+ return std::make_unique<Avx2Accelrator>();
+ }
#endif
+ return std::make_unique<GenericAccelrator>();
+}
template<typename T>
std::vector<T> createAndFill(size_t sz) {
@@ -247,42 +237,14 @@ RuntimeVerificator::RuntimeVerificator()
verify(thisCpu);
}
-class Selector
-{
-public:
- Selector() __attribute__((noinline));
- IAccelrated::UP create() { return _factory->create(); }
-private:
- std::unique_ptr<Factory> _factory;
-};
-
-Selector::Selector() :
- _factory()
-{
-#ifdef __x86_64__
- __builtin_cpu_init ();
- if (__builtin_cpu_supports("avx512f")) {
- _factory = std::make_unique<Avx512Factory>();
- } else if (__builtin_cpu_supports("avx2")) {
- _factory = std::make_unique<Avx2Factory>();
- } else {
- _factory = std::make_unique<GenericFactory>();
- }
-#else
- _factory = std::make_unique<GenericFactory>();
-#endif
-}
-
}
-static Selector _G_selector;
-
RuntimeVerificator _G_verifyAccelrator;
const IAccelrated &
IAccelrated::getAccelerator()
{
- static IAccelrated::UP accelrator = _G_selector.create();
+ static IAccelrated::UP accelrator = create_accelerator();
return *accelrator;
}
diff --git a/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp b/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp
index 149ad01b947..a476e23e6cb 100644
--- a/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp
+++ b/vespalib/src/vespa/vespalib/net/tls/peer_policies.cpp
@@ -22,23 +22,29 @@ bool is_regex_special_char(char c) noexcept {
case '\\':
case '+':
case '.':
+ case '?':
+ case '*':
return true;
default:
return false;
}
}
-std::string dot_separated_glob_to_regex(vespalib::stringref glob) {
+// Important: `delimiter` MUST NOT be a character that needs escaping within a regex [charset]
+template <bool SupportSingleCharMatch>
+std::string char_delimited_glob_to_regex(vespalib::stringref glob, char delimiter) {
std::string ret = "^";
ret.reserve(glob.size() + 2);
+ // Note: we explicitly stop matching at a delimiter boundary.
+ // This is to make path fragment matching less vulnerable to dirty tricks.
+ const std::string wildcard_pattern = std::string("[^") + delimiter + "]*";
+ // Same applies for single chars; they should only match _within_ a delimited boundary.
+ const std::string single_char_pattern = std::string("[^") + delimiter + "]";
for (auto c : glob) {
if (c == '*') {
- // Note: we explicitly stop matching at a dot separator boundary.
- // This is to make host name matching less vulnerable to dirty tricks.
- ret += "[^.]*";
- } else if (c == '?') {
- // Same applies for single chars; they should only match _within_ a dot boundary.
- ret += "[^.]";
+ ret += wildcard_pattern;
+ } else if (c == '?' && SupportSingleCharMatch) {
+ ret += single_char_pattern;
} else {
if (is_regex_special_char(c)) {
ret += '\\';
@@ -52,14 +58,25 @@ std::string dot_separated_glob_to_regex(vespalib::stringref glob) {
class RegexHostMatchPattern : public CredentialMatchPattern {
Regex _pattern_as_regex;
-public:
- explicit RegexHostMatchPattern(vespalib::stringref glob_pattern)
- : _pattern_as_regex(Regex::from_pattern(dot_separated_glob_to_regex(glob_pattern)))
+ explicit RegexHostMatchPattern(std::string_view glob_pattern)
+ : _pattern_as_regex(Regex::from_pattern(glob_pattern))
{
}
+public:
+ RegexHostMatchPattern(RegexHostMatchPattern&&) noexcept = default;
~RegexHostMatchPattern() override = default;
- [[nodiscard]] bool matches(vespalib::stringref str) const override {
+ RegexHostMatchPattern& operator=(RegexHostMatchPattern&&) noexcept = default;
+
+ [[nodiscard]] static RegexHostMatchPattern from_dns_glob_pattern(vespalib::stringref glob_pattern) {
+ return RegexHostMatchPattern(char_delimited_glob_to_regex<true>(glob_pattern, '.'));
+ }
+
+ [[nodiscard]] static RegexHostMatchPattern from_uri_glob_pattern(vespalib::stringref glob_pattern) {
+ return RegexHostMatchPattern(char_delimited_glob_to_regex<false>(glob_pattern, '/'));
+ }
+
+ [[nodiscard]] bool matches(vespalib::stringref str) const noexcept override {
return _pattern_as_regex.full_match(std::string_view(str.data(), str.size()));
}
};
@@ -73,15 +90,19 @@ public:
}
~ExactMatchPattern() override = default;
- [[nodiscard]] bool matches(vespalib::stringref str) const override {
+ [[nodiscard]] bool matches(vespalib::stringref str) const noexcept override {
return (str == _must_match_exactly);
}
};
} // anon ns
-std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_glob(vespalib::stringref glob_pattern) {
- return std::make_shared<const RegexHostMatchPattern>(glob_pattern);
+std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_dns_glob(vespalib::stringref glob_pattern) {
+ return std::make_shared<const RegexHostMatchPattern>(RegexHostMatchPattern::from_dns_glob_pattern(glob_pattern));
+}
+
+std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_from_uri_glob(vespalib::stringref glob_pattern) {
+ return std::make_shared<const RegexHostMatchPattern>(RegexHostMatchPattern::from_uri_glob_pattern(glob_pattern));
}
std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_exact_match(vespalib::stringref str) {
@@ -91,9 +112,8 @@ std::shared_ptr<const CredentialMatchPattern> CredentialMatchPattern::create_exa
RequiredPeerCredential::RequiredPeerCredential(Field field, vespalib::string must_match_pattern)
: _field(field),
_original_pattern(std::move(must_match_pattern)),
- // FIXME it's not RFC 2459-compliant to use exact-matching for URIs, but that's all we currently need.
- _match_pattern(field == Field::SAN_URI ? CredentialMatchPattern::create_exact_match(_original_pattern)
- : CredentialMatchPattern::create_from_glob(_original_pattern))
+ _match_pattern(field == Field::SAN_URI ? CredentialMatchPattern::create_from_uri_glob(_original_pattern)
+ : CredentialMatchPattern::create_from_dns_glob(_original_pattern))
{
}
@@ -111,11 +131,21 @@ void print_joined(std::ostream& os, const Collection& coll, const char* sep) {
os << e;
}
}
+
+constexpr const char* to_string(RequiredPeerCredential::Field field) noexcept {
+ switch (field) {
+ case RequiredPeerCredential::Field::CN: return "CN";
+ case RequiredPeerCredential::Field::SAN_DNS: return "SAN_DNS";
+ case RequiredPeerCredential::Field::SAN_URI: return "SAN_URI";
+ default: abort();
+ }
+}
+
}
std::ostream& operator<<(std::ostream& os, const RequiredPeerCredential& cred) {
os << "RequiredPeerCredential("
- << (cred.field() == RequiredPeerCredential::Field::CN ? "CN" : "SAN_DNS")
+ << to_string(cred.field())
<< " matches '"
<< cred.original_pattern()
<< "')";
diff --git a/vespalib/src/vespa/vespalib/net/tls/peer_policies.h b/vespalib/src/vespa/vespalib/net/tls/peer_policies.h
index c5721858518..4166efc4312 100644
--- a/vespalib/src/vespa/vespalib/net/tls/peer_policies.h
+++ b/vespalib/src/vespa/vespalib/net/tls/peer_policies.h
@@ -10,9 +10,10 @@ namespace vespalib::net::tls {
struct CredentialMatchPattern {
virtual ~CredentialMatchPattern() = default;
- [[nodiscard]] virtual bool matches(vespalib::stringref str) const = 0;
+ [[nodiscard]] virtual bool matches(vespalib::stringref str) const noexcept = 0;
- static std::shared_ptr<const CredentialMatchPattern> create_from_glob(vespalib::stringref pattern);
+ static std::shared_ptr<const CredentialMatchPattern> create_from_dns_glob(vespalib::stringref glob_pattern);
+ static std::shared_ptr<const CredentialMatchPattern> create_from_uri_glob(vespalib::stringref glob_pattern);
static std::shared_ptr<const CredentialMatchPattern> create_exact_match(vespalib::stringref pattern);
};
@@ -37,7 +38,7 @@ public:
&& (_original_pattern == rhs._original_pattern));
}
- [[nodiscard]] bool matches(vespalib::stringref str) const {
+ [[nodiscard]] bool matches(vespalib::stringref str) const noexcept {
return (_match_pattern && _match_pattern->matches(str));
}
diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.h b/vespalib/src/vespa/vespalib/util/rcuvector.h
index 0396ee0d459..dd4fa660279 100644
--- a/vespalib/src/vespa/vespalib/util/rcuvector.h
+++ b/vespalib/src/vespa/vespalib/util/rcuvector.h
@@ -13,10 +13,10 @@ namespace vespalib {
template <typename T>
class RcuVectorHeld : public GenerationHeldBase
{
- std::unique_ptr<T> _data;
+ T _data;
public:
- RcuVectorHeld(size_t size, std::unique_ptr<T> data);
+ RcuVectorHeld(size_t size, T&& data);
~RcuVectorHeld();
};
@@ -121,7 +121,7 @@ public:
void reset();
void shrink(size_t newSize) __attribute__((noinline));
- void replaceVector(std::unique_ptr<ArrayType> replacement);
+ void replaceVector(ArrayType replacement);
};
template <typename T>
diff --git a/vespalib/src/vespa/vespalib/util/rcuvector.hpp b/vespalib/src/vespa/vespalib/util/rcuvector.hpp
index 9d7c8ea57d6..3c455149dfd 100644
--- a/vespalib/src/vespa/vespalib/util/rcuvector.hpp
+++ b/vespalib/src/vespa/vespalib/util/rcuvector.hpp
@@ -9,7 +9,7 @@
namespace vespalib {
template <typename T>
-RcuVectorHeld<T>::RcuVectorHeld(size_t size, std::unique_ptr<T> data)
+RcuVectorHeld<T>::RcuVectorHeld(size_t size, T&& data)
: GenerationHeldBase(size),
_data(std::move(data))
{ }
@@ -52,20 +52,21 @@ RcuVectorBase<T>::~RcuVectorBase() = default;
template <typename T>
void
RcuVectorBase<T>::expand(size_t newCapacity) {
- std::unique_ptr<ArrayType> tmpData(new ArrayType());
- tmpData->reserve(newCapacity);
+ ArrayType tmpData;
+ tmpData.reserve(newCapacity);
for (const T & v : _data) {
- tmpData->push_back_fast(v);
+ tmpData.push_back_fast(v);
}
replaceVector(std::move(tmpData));
}
template <typename T>
void
-RcuVectorBase<T>::replaceVector(std::unique_ptr<ArrayType> replacement) {
- replacement->swap(_data); // atomic switch of underlying data
- size_t holdSize = replacement->capacity() * sizeof(T);
- GenerationHeldBase::UP hold(new RcuVectorHeld<ArrayType>(holdSize, std::move(replacement)));
+RcuVectorBase<T>::replaceVector(ArrayType replacement) {
+ std::atomic_thread_fence(std::memory_order_release);
+ replacement.swap(_data); // atomic switch of underlying data
+ size_t holdSize = replacement.capacity() * sizeof(T);
+ auto hold = std::make_unique<RcuVectorHeld<ArrayType>>(holdSize, std::move(replacement));
_genHolder.hold(std::move(hold));
onReallocation();
}
@@ -90,17 +91,18 @@ RcuVectorBase<T>::shrink(size_t newSize)
return;
}
if (!_data.try_unreserve(wantedCapacity)) {
- std::unique_ptr<ArrayType> tmpData(new ArrayType());
- tmpData->reserve(wantedCapacity);
- tmpData->resize(newSize);
+ ArrayType tmpData;
+ tmpData.reserve(wantedCapacity);
+ tmpData.resize(newSize);
for (uint32_t i = 0; i < newSize; ++i) {
- (*tmpData)[i] = _data[i];
+ tmpData[i] = _data[i];
}
+ std::atomic_thread_fence(std::memory_order_release);
// Users of RCU vector must ensure that no readers use old size
// after swap. Attribute vectors uses _committedDocIdLimit for this.
- tmpData->swap(_data); // atomic switch of underlying data
- size_t holdSize = tmpData->capacity() * sizeof(T);
- GenerationHeldBase::UP hold(new RcuVectorHeld<ArrayType>(holdSize, std::move(tmpData)));
+ tmpData.swap(_data); // atomic switch of underlying data
+ size_t holdSize = tmpData.capacity() * sizeof(T);
+ auto hold = std::make_unique<RcuVectorHeld<ArrayType>>(holdSize, std::move(tmpData));
_genHolder.hold(std::move(hold));
onReallocation();
}
diff --git a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp
index 80bbb3a7ad2..ab83d4e05fd 100644
--- a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp
+++ b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.cpp
@@ -8,6 +8,8 @@ using namespace vespalib::fixed_thread_bundle;
namespace vespalib {
+VESPA_THREAD_STACK_TAG(simple_thread_bundle_executor);
+
namespace {
struct SignalHook : Runnable {
@@ -43,7 +45,7 @@ Runnable::UP wrap(Runnable *runnable) {
}
Runnable::UP chain(Runnable::UP first, Runnable::UP second) {
- return Runnable::UP(new HookPair(std::move(first), std::move(second)));
+ return std::make_unique<HookPair>(std::move(first), std::move(second));
}
} // namespace vespalib::<unnamed>
@@ -173,4 +175,19 @@ SimpleThreadBundle::run(const std::vector<Runnable*> &targets)
latch.await();
}
+SimpleThreadBundle::Worker::Worker(Signal &s, Runnable::UP h)
+ : thread(*this, simple_thread_bundle_executor),
+ signal(s),
+ hook(std::move(h))
+{
+ thread.start();
+}
+void
+SimpleThreadBundle::Worker::run() {
+ for (size_t gen = 0; signal.wait(gen) > 0; ) {
+ hook->run();
+}
+
+}
+
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h
index f0aaccc2525..d9a29ee7bef 100644
--- a/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h
+++ b/vespalib/src/vespa/vespalib/util/simple_thread_bundle.h
@@ -112,14 +112,8 @@ private:
Thread thread;
Signal &signal;
Runnable::UP hook;
- Worker(Signal &s, Runnable::UP h) : thread(*this), signal(s), hook(std::move(h)) {
- thread.start();
- }
- void run() override {
- for (size_t gen = 0; signal.wait(gen) > 0; ) {
- hook->run();
- }
- }
+ Worker(Signal &s, Runnable::UP h);
+ void run() override;
};
Work _work;
diff --git a/vespalib/src/vespa/vespalib/util/thread.cpp b/vespalib/src/vespa/vespalib/util/thread.cpp
index c02a7a3b063..c3230bf313d 100644
--- a/vespalib/src/vespa/vespalib/util/thread.cpp
+++ b/vespalib/src/vespa/vespalib/util/thread.cpp
@@ -9,9 +9,9 @@ namespace vespalib {
__thread Thread *Thread::_currentThread = nullptr;
-Thread::Proxy::Proxy(Thread &parent, Runnable &target)
- : thread(parent), runnable(target),
- start(), started(), cancel(false)
+Thread::Proxy::Proxy(Thread &parent, Runnable &target, init_fun_t init_fun_in)
+ : thread(parent), runnable(target), init_fun(std::move(init_fun_in)),
+ start(), started(), cancel(false)
{ }
void
@@ -22,7 +22,7 @@ Thread::Proxy::Run(FastOS_ThreadInterface *, void *)
start.await();
if (!cancel) {
started.countDown();
- runnable.run();
+ init_fun(runnable);
}
assert(_currentThread == &thread);
_currentThread = nullptr;
@@ -30,8 +30,8 @@ Thread::Proxy::Run(FastOS_ThreadInterface *, void *)
Thread::Proxy::~Proxy() = default;
-Thread::Thread(Runnable &runnable)
- : _proxy(*this, runnable),
+Thread::Thread(Runnable &runnable, init_fun_t init_fun_in)
+ : _proxy(*this, runnable, std::move(init_fun_in)),
_pool(STACK_SIZE, 1),
_lock(),
_cond(),
diff --git a/vespalib/src/vespa/vespalib/util/thread.h b/vespalib/src/vespa/vespalib/util/thread.h
index 8873f23ee98..e08f3ca1100 100644
--- a/vespalib/src/vespa/vespalib/util/thread.h
+++ b/vespalib/src/vespa/vespalib/util/thread.h
@@ -15,17 +15,19 @@ namespace vespalib {
class Thread : public Active
{
private:
+ using init_fun_t = Runnable::init_fun_t;
enum { STACK_SIZE = 256*1024 };
static __thread Thread *_currentThread;
struct Proxy : FastOS_Runnable {
Thread &thread;
Runnable &runnable;
+ init_fun_t init_fun;
vespalib::Gate start;
vespalib::Gate started;
bool cancel;
- Proxy(Thread &parent, Runnable &target);
+ Proxy(Thread &parent, Runnable &target, init_fun_t init_fun_in);
~Proxy() override;
void Run(FastOS_ThreadInterface *thisThread, void *arguments) override;
@@ -39,7 +41,7 @@ private:
bool _woken;
public:
- Thread(Runnable &runnable);
+ Thread(Runnable &runnable, init_fun_t init_fun_in);
~Thread() override;
void start() override;
Thread &stop() override;