summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--bundle-plugin-test/test-bundles/main/pom.xml1
-rw-r--r--client/go/internal/cli/cmd/feed_test.go12
-rw-r--r--client/go/internal/mock/http.go14
-rw-r--r--client/go/internal/vespa/document/dispatcher.go2
-rw-r--r--client/go/internal/vespa/document/http.go19
-rw-r--r--client/go/internal/vespa/document/http_test.go33
-rw-r--r--client/go/internal/vespa/document/stats.go4
-rw-r--r--client/js/app/yarn.lock253
-rw-r--r--client/src/main/java/ai/vespa/client/dsl/Field.java25
-rw-r--r--client/src/main/java/ai/vespa/client/dsl/Q.java2
-rw-r--r--client/src/test/java/ai/vespa/client/dsl/QTest.java12
-rw-r--r--cloud-tenant-base-dependencies-enforcer/pom.xml4
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java3
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java8
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java422
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequest.java9
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java198
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequestTest.java20
-rw-r--r--config-model/src/main/java/com/yahoo/schema/processing/multifieldresolver/RankTypeResolver.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java26
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java19
-rw-r--r--config-model/src/main/javacc/SchemaParser.jj10
-rw-r--r--config-model/src/test/cfg/admin/metricconfig/schemas/music.sd2
-rw-r--r--config-model/src/test/cfg/application/app1/schemas/music.sd2
-rw-r--r--config-model/src/test/cfg/application/app1/schemas/product.sd1
-rw-r--r--config-model/src/test/cfg/application/app_complicated_deployment_spec/schemas/music.sd2
-rw-r--r--config-model/src/test/cfg/application/app_genericservices/schemas/music.sd2
-rw-r--r--config-model/src/test/cfg/application/sdfilenametest/schemas/notmusic.sd1
-rw-r--r--config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/book.sd6
-rw-r--r--config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/music.sd2
-rw-r--r--config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/video.sd7
-rw-r--r--config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/book.sd6
-rw-r--r--config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/music.sd2
-rw-r--r--config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/video.sd7
-rw-r--r--config-model/src/test/cfg/routing/content_two_clusters/schemas/mobile.sd2
-rw-r--r--config-model/src/test/cfg/routing/content_two_clusters/schemas/music.sd2
-rw-r--r--config-model/src/test/cfg/routing/contentsimpleconfig/schemas/music.sd2
-rwxr-xr-xconfig-model/src/test/cfg/routing/replacehop/schemas/music.sd2
-rwxr-xr-xconfig-model/src/test/cfg/routing/replaceroute/schemas/music.sd2
-rw-r--r--config-model/src/test/cfg/search/data/travel/schemas/TTPOI.sd2
-rw-r--r--config-model/src/test/cfg/search/data/v2/inherited_rankprofiles/schemas/music.sd2
-rw-r--r--config-model/src/test/cfg/storage/app_index_higher_than_num_nodes/schemas/music.sd2
-rw-r--r--config-model/src/test/cfg/storage/clustercontroller_advanced/schemas/music.sd2
-rw-r--r--config-model/src/test/derived/music3/music3.sd2
-rw-r--r--config-model/src/test/derived/newrank/newrank.sd1
-rw-r--r--config-model/src/test/examples/attributesexactmatch.sd1
-rw-r--r--config-model/src/test/examples/casing.sd1
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java38
-rw-r--r--config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java2
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java24
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java13
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/NodeAllocationException.java5
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java10
-rw-r--r--config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java10
-rw-r--r--config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java5
-rw-r--r--configserver/src/test/apps/app-jdisc-only-restart/schemas/music.sd2
-rw-r--r--configserver/src/test/apps/app-jdisc-only/schemas/music.sd2
-rw-r--r--configserver/src/test/apps/app-major-version-7/schemas/music.sd2
-rw-r--r--configserver/src/test/apps/app/schemas/music.sd2
-rw-r--r--configserver/src/test/apps/content/schemas/music.sd2
-rw-r--r--configserver/src/test/apps/content2/schemas/music.sd2
-rw-r--r--configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd2
-rw-r--r--configserver/src/test/apps/hosted-no-write-access-control/schemas/music.sd1
-rw-r--r--configserver/src/test/apps/legacy-flag/schemas/music.sd2
-rw-r--r--configserver/src/test/apps/zkapp/schemas/music.sd2
-rw-r--r--configserver/src/test/apps/zkapp/schemas/product.sd1
-rw-r--r--configserver/src/test/apps/zkfeed/schemas/product.sd1
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java13
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java14
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java5
-rw-r--r--container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java55
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java10
-rw-r--r--documentgen-test/etc/complex/music.sd3
-rw-r--r--documentgen-test/etc/complex/music2.sd3
-rw-r--r--documentgen-test/etc/complex/video.sd5
-rw-r--r--eval/src/tests/eval/value_cache/dense.json6
-rw-r--r--eval/src/tests/eval/value_cache/sparse-short1.json2
-rw-r--r--eval/src/tests/eval/value_cache/sparse-short2.json2
-rw-r--r--eval/src/tests/eval/value_cache/sparse.json1
-rw-r--r--eval/src/tests/eval/value_cache/sparse.json.lz4bin153 -> 170 bytes
-rw-r--r--eval/src/tests/eval/value_cache/tensor_loader_test.cpp5
-rw-r--r--eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp125
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java6
-rw-r--r--hosted-tenant-base/pom.xml2
-rw-r--r--jdisc_core/pom.xml2
-rw-r--r--jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java129
-rw-r--r--maven-plugins/allowed-maven-dependencies.txt4
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java53
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java17
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java23
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupIndices.java163
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java21
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java93
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java31
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java9
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java25
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java23
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java57
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java55
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/InMemoryProvisionLogger.java35
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java101
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java3
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java12
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java18
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InPlaceResizeProvisionTest.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidateTest.java24
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java70
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java10
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java16
-rw-r--r--parent/pom.xml18
-rw-r--r--renovate.json21
-rw-r--r--screwdriver.yaml1
-rw-r--r--searchcore/src/tests/proton/flushengine/flushengine_test.cpp7
-rw-r--r--searchlib/abi-spec.json63
-rw-r--r--searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp72
-rw-r--r--searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp3
-rw-r--r--searchlib/src/tests/query/streaming_query_large_test.cpp6
-rw-r--r--searchlib/src/tests/queryeval/filter_search/filter_search_test.cpp12
-rw-r--r--searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp4
-rw-r--r--searchlib/src/vespa/searchcommon/common/schema.cpp13
-rw-r--r--searchlib/src/vespa/searchlib/aggregation/group.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/aggregation/grouping.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp36
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attributecontext.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attributemanager.cpp18
-rw-r--r--searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/postingchange.cpp21
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/common/bitvectorcache.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/diskindex.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp5
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp17
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.h2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/chunk.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.cpp21
-rw-r--r--searchlib/src/vespa/searchlib/engine/propertiesmap.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/expression/resultvector.h10
-rw-r--r--searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/fef/objectstore.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/fef/properties.cpp50
-rw-r--r--searchlib/src/vespa/searchlib/fef/tablemanager.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp21
-rw-r--r--searchlib/src/vespa/searchlib/fef/test/rankresult.cpp27
-rw-r--r--searchlib/src/vespa/searchlib/grouping/collect.h2
-rw-r--r--searchlib/src/vespa/searchlib/grouping/groupengine.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglistfile.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglistfile.h4
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglisthandle.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglisthandle.h2
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglistparams.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp13
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h6
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h7
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_search.cpp16
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp10
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h6
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp50
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.h2
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp19
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.h2
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp38
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h2
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h2
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp98
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp14
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.h2
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp46
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h2
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp7
-rw-r--r--socket_test/pom.xml2
-rw-r--r--streamingvisitors/src/vespa/searchvisitor/rankmanager.h4
-rw-r--r--streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp6
-rw-r--r--streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp4
-rw-r--r--streamingvisitors/src/vespa/vsm/common/document.cpp6
-rw-r--r--streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp20
-rw-r--r--streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp4
-rw-r--r--streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp7
-rw-r--r--streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp4
-rw-r--r--streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp14
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/BucketDistribution.java39
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/DocumentSummary.java21
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/MetaEntry.java52
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/SearchResult.java44
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/distribution/ConfiguredNode.java31
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/distribution/Distribution.java28
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/distribution/Group.java22
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/distribution/GroupVisitor.java2
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java2
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/state/Diff.java19
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/BucketDistributionTestCase.java4
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/SearchResultTestCase.java18
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/distribution/CrossPlatformTestFactory.java21
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestCase.java23
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestFactory.java26
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/distribution/GroupTestCase.java2
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java107
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java53
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/state/NodeTest.java34
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java6
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java5
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java8
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java7
-rw-r--r--vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java8
-rw-r--r--vespa-dependencies-enforcer/allowed-maven-dependencies.txt2
-rw-r--r--vespa-documentgen-plugin/etc/complex/book.sd3
-rw-r--r--vespa-documentgen-plugin/etc/complex/common.sd1
-rw-r--r--vespa-documentgen-plugin/etc/complex/music2.sd3
-rw-r--r--vespa-documentgen-plugin/etc/localapp/book.sd3
-rw-r--r--vespa-documentgen-plugin/etc/localapp/common.sd1
-rw-r--r--vespa-documentgen-plugin/etc/localapp/music.sd3
-rw-r--r--vespa-documentgen-plugin/etc/localapp/video.sd5
-rw-r--r--vespa-enforcer-extensions/pom.xml28
-rw-r--r--vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependencies.java1
-rw-r--r--vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependenciesAllProjects.java1
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java3
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java181
-rw-r--r--vespaclient-core/src/main/java/com/yahoo/clientmetrics/MessageTypeMetricSet.java10
-rw-r--r--vespaclient-core/src/main/java/com/yahoo/clientmetrics/RouteMetricSet.java12
-rw-r--r--vespaclient-java/src/test/java/com/yahoo/vespafeeder/BenchmarkProgressPrinterTest.java12
-rw-r--r--vespajlib/src/main/java/com/yahoo/compress/Compressor.java4
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/SystemTimer.java23
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/Timer.java14
-rw-r--r--vespajlib/src/main/java/com/yahoo/yolean/Exceptions.java1
-rw-r--r--vespalib/src/tests/util/rcuvector/rcuvector_test.cpp17
257 files changed, 2398 insertions, 2143 deletions
diff --git a/bundle-plugin-test/test-bundles/main/pom.xml b/bundle-plugin-test/test-bundles/main/pom.xml
index 21399291442..c4dd3407607 100644
--- a/bundle-plugin-test/test-bundles/main/pom.xml
+++ b/bundle-plugin-test/test-bundles/main/pom.xml
@@ -24,7 +24,6 @@
<!-- Added to verify that module-info.class can be handled by bundle-plugin without throwing an exception. -->
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
- <version>2.3.0</version>
</dependency>
</dependencies>
<build>
diff --git a/client/go/internal/cli/cmd/feed_test.go b/client/go/internal/cli/cmd/feed_test.go
index d1904ad815e..fc2c5ec7520 100644
--- a/client/go/internal/cli/cmd/feed_test.go
+++ b/client/go/internal/cli/cmd/feed_test.go
@@ -80,13 +80,17 @@ func TestFeed(t *testing.T) {
require.Nil(t, cli.Run("feed", "-"))
assert.Equal(t, want, stdout.String())
- httpClient.NextResponseString(500, `{"message":"it's broken yo"}`)
+ for i := 0; i < 10; i++ {
+ httpClient.NextResponseString(500, `{"message":"it's broken yo"}`)
+ }
require.Nil(t, cli.Run("feed", jsonFile1))
- assert.Equal(t, "feed: got status 500 ({\"message\":\"it's broken yo\"}) for put id:ns:type::doc1: retrying\n", stderr.String())
+ assert.Equal(t, "feed: got status 500 ({\"message\":\"it's broken yo\"}) for put id:ns:type::doc1: giving up after 10 attempts\n", stderr.String())
stderr.Reset()
- httpClient.NextResponseError(fmt.Errorf("something else is broken"))
+ for i := 0; i < 10; i++ {
+ httpClient.NextResponseError(fmt.Errorf("something else is broken"))
+ }
require.Nil(t, cli.Run("feed", jsonFile1))
- assert.Equal(t, "feed: got error \"something else is broken\" (no body) for put id:ns:type::doc1: retrying\n", stderr.String())
+ assert.Equal(t, "feed: got error \"something else is broken\" (no body) for put id:ns:type::doc1: giving up after 10 attempts\n", stderr.String())
}
func TestFeedInvalid(t *testing.T) {
diff --git a/client/go/internal/mock/http.go b/client/go/internal/mock/http.go
index 8bab716ea60..3d4ead596b0 100644
--- a/client/go/internal/mock/http.go
+++ b/client/go/internal/mock/http.go
@@ -14,8 +14,8 @@ type HTTPClient struct {
// The responses to return for future requests. Once a response is consumed, it's removed from this slice.
nextResponses []HTTPResponse
- // The error to return for the next request. If non-nil, this error is returned before any responses in nextResponses.
- nextError error
+ // The errors to return for future requests. If non-nil, these errors are returned before any responses in nextResponses.
+ nextErrors []error
// LastRequest is the last HTTP request made through this.
LastRequest *http.Request
@@ -52,13 +52,14 @@ func (c *HTTPClient) NextResponse(response HTTPResponse) {
}
func (c *HTTPClient) NextResponseError(err error) {
- c.nextError = err
+ c.nextErrors = append(c.nextErrors, err)
}
func (c *HTTPClient) Do(request *http.Request, timeout time.Duration) (*http.Response, error) {
- if c.nextError != nil {
- err := c.nextError
- c.nextError = nil
+ c.LastRequest = request
+ if len(c.nextErrors) > 0 {
+ err := c.nextErrors[0]
+ c.nextErrors = c.nextErrors[1:]
return nil, err
}
response := HTTPResponse{Status: 200}
@@ -66,7 +67,6 @@ func (c *HTTPClient) Do(request *http.Request, timeout time.Duration) (*http.Res
response = c.nextResponses[0]
c.nextResponses = c.nextResponses[1:]
}
- c.LastRequest = request
if c.ReadBody && request.Body != nil {
body, err := io.ReadAll(request.Body)
if err != nil {
diff --git a/client/go/internal/vespa/document/dispatcher.go b/client/go/internal/vespa/document/dispatcher.go
index fa4424809cf..fb7a532e332 100644
--- a/client/go/internal/vespa/document/dispatcher.go
+++ b/client/go/internal/vespa/document/dispatcher.go
@@ -67,7 +67,7 @@ func (d *Dispatcher) logResult(doc Document, result Result, retry bool) {
if result.Trace != "" {
d.msgs <- fmt.Sprintf("feed: trace for %s %s:\n%s", doc.Operation, doc.Id, result.Trace)
}
- if !d.verbose && result.Success() {
+ if !d.verbose && (retry || result.Success()) {
return
}
var msg strings.Builder
diff --git a/client/go/internal/vespa/document/http.go b/client/go/internal/vespa/document/http.go
index d6e7745e6b1..a2e399549c4 100644
--- a/client/go/internal/vespa/document/http.go
+++ b/client/go/internal/vespa/document/http.go
@@ -265,18 +265,18 @@ func (c *Client) Send(document Document) Result {
req, buf, err := c.prepare(document)
defer c.buffers.Put(buf)
if err != nil {
- return resultWithErr(result, err)
+ return resultWithErr(result, err, 0)
}
bodySize := len(document.Body)
if buf.Len() > 0 {
bodySize = buf.Len()
}
resp, err := c.leastBusyClient().Do(req, c.clientTimeout())
+ elapsed := c.now().Sub(start)
if err != nil {
- return resultWithErr(result, err)
+ return resultWithErr(result, err, elapsed)
}
defer resp.Body.Close()
- elapsed := c.now().Sub(start)
return c.resultWithResponse(resp, bodySize, result, elapsed, buf, false)
}
@@ -290,20 +290,21 @@ func (c *Client) Get(id Id) Result {
result := Result{Id: id}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
- return resultWithErr(result, err)
+ return resultWithErr(result, err, 0)
}
resp, err := c.leastBusyClient().Do(req, c.clientTimeout())
+ elapsed := c.now().Sub(start)
if err != nil {
- return resultWithErr(result, err)
+ return resultWithErr(result, err, elapsed)
}
defer resp.Body.Close()
- elapsed := c.now().Sub(start)
return c.resultWithResponse(resp, 0, result, elapsed, buf, true)
}
-func resultWithErr(result Result, err error) Result {
+func resultWithErr(result Result, err error, elapsed time.Duration) Result {
result.Status = StatusTransportFailure
result.Err = err
+ result.Latency = elapsed
return result
}
@@ -322,14 +323,14 @@ func (c *Client) resultWithResponse(resp *http.Response, sentBytes int, result R
buf.Reset()
written, err := io.Copy(buf, resp.Body)
if err != nil {
- result = resultWithErr(result, err)
+ result = resultWithErr(result, err, elapsed)
} else {
if result.Success() && c.options.TraceLevel > 0 {
var jsonResponse struct {
Trace json.RawValue `json:"trace"`
}
if err := json.Unmarshal(buf.Bytes(), &jsonResponse); err != nil {
- result = resultWithErr(result, fmt.Errorf("failed to decode json response: %w", err))
+ result = resultWithErr(result, fmt.Errorf("failed to decode json response: %w", err), elapsed)
} else {
result.Trace = string(jsonResponse.Trace)
}
diff --git a/client/go/internal/vespa/document/http_test.go b/client/go/internal/vespa/document/http_test.go
index c797ba5607f..6faa14705f0 100644
--- a/client/go/internal/vespa/document/http_test.go
+++ b/client/go/internal/vespa/document/http_test.go
@@ -74,6 +74,9 @@ func TestClientSend(t *testing.T) {
{Document{Condition: "foo", Id: mustParseId("id:ns:type::doc4"), Operation: OperationUpdate, Body: []byte(`{"fields":{"baz": "789"}}`)},
"PUT",
"https://example.com:1337/document/v1/ns/type/docid/doc4?timeout=5000ms&condition=foo"},
+ {Document{Id: mustParseId("id:ns:type::doc5"), Operation: OperationPut, Body: []byte(`{"fields":{"baz": "789"}}`)},
+ "POST",
+ "https://example.com:1337/document/v1/ns/type/docid/doc5?timeout=5000ms"},
}
httpClient := mock.HTTPClient{ReadBody: true}
client, _ := NewClient(ClientOptions{
@@ -89,53 +92,61 @@ func TestClientSend(t *testing.T) {
Id: doc.Id,
Latency: time.Second,
}
- if i < 3 {
+ switch i {
+ case 0, 1, 2:
msg := `{"message":"All good!"}`
httpClient.NextResponseString(200, msg)
wantRes.Status = StatusSuccess
wantRes.HTTPStatus = 200
wantRes.BytesRecv = 23
- } else {
+ case 3:
errMsg := `something went wront`
httpClient.NextResponseString(502, errMsg)
wantRes.Status = StatusVespaFailure
wantRes.HTTPStatus = 502
wantRes.Body = []byte(errMsg)
wantRes.BytesRecv = 20
+ case 4:
+ transportErr := fmt.Errorf("transport error")
+ httpClient.NextResponseError(transportErr)
+ wantRes.Err = transportErr
+ wantRes.Status = StatusTransportFailure
}
res := client.Send(doc)
- wantRes.BytesSent = int64(len(httpClient.LastBody))
+ if res.Err == nil {
+ wantRes.BytesSent = int64(len(httpClient.LastBody))
+ }
if !reflect.DeepEqual(res, wantRes) {
- t.Fatalf("got result %+v, want %+v", res, wantRes)
+ t.Fatalf("#%d: got result %+v, want %+v", i, res, wantRes)
}
stats.Add(res)
r := httpClient.LastRequest
if r.Method != tt.method {
- t.Errorf("got r.Method = %q, want %q", r.Method, tt.method)
+ t.Errorf("#%d: got r.Method = %q, want %q", i, r.Method, tt.method)
}
var headers http.Header = map[string][]string{
"Content-Type": {"application/json; charset=utf-8"},
}
if !reflect.DeepEqual(r.Header, headers) {
- t.Errorf("got r.Header = %v, want %v", r.Header, headers)
+ t.Errorf("#%d: got r.Header = %v, want %v", i, r.Header, headers)
}
if r.URL.String() != tt.url {
- t.Errorf("got r.URL = %q, want %q", r.URL, tt.url)
+ t.Errorf("#%d: got r.URL = %q, want %q", i, r.URL, tt.url)
}
if !bytes.Equal(httpClient.LastBody, doc.Body) {
- t.Errorf("got r.Body = %q, want %q", string(httpClient.LastBody), doc.Body)
+ t.Errorf("#%d: got r.Body = %q, want %q", i, string(httpClient.LastBody), doc.Body)
}
}
want := Stats{
- Requests: 4,
+ Requests: 5,
Responses: 4,
ResponsesByCode: map[int]int64{
200: 3,
502: 1,
},
- Errors: 0,
+ Errors: 1,
Inflight: 0,
- TotalLatency: 4 * time.Second,
+ TotalLatency: 5 * time.Second,
MinLatency: time.Second,
MaxLatency: time.Second,
BytesSent: 75,
diff --git a/client/go/internal/vespa/document/stats.go b/client/go/internal/vespa/document/stats.go
index 3e647d0f893..e53d787cd01 100644
--- a/client/go/internal/vespa/document/stats.go
+++ b/client/go/internal/vespa/document/stats.go
@@ -86,9 +86,9 @@ func (s *Stats) Add(result Result) {
if s.ResponsesByCode == nil {
s.ResponsesByCode = make(map[int]int64)
}
- responsesByCode := s.ResponsesByCode[result.HTTPStatus]
- s.ResponsesByCode[result.HTTPStatus] = responsesByCode + 1
if result.Err == nil {
+ responsesByCode := s.ResponsesByCode[result.HTTPStatus]
+ s.ResponsesByCode[result.HTTPStatus] = responsesByCode + 1
s.Responses++
} else {
s.Errors++
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index 3e180a63226..ccade9c00cd 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -17,12 +17,24 @@
dependencies:
"@babel/highlight" "^7.18.6"
+"@babel/code-frame@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.22.5.tgz#234d98e1551960604f1246e6475891a570ad5658"
+ integrity sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==
+ dependencies:
+ "@babel/highlight" "^7.22.5"
+
"@babel/compat-data@^7.22.0":
version "7.22.3"
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.3.tgz#cd502a6a0b6e37d7ad72ce7e71a7160a3ae36f7e"
integrity sha512-aNtko9OPOwVESUFp3MZfD8Uzxl7JzSeJpd7npIoxCasU37PFbAQRpKglkaKwlHOyeJdrREpo8TW8ldrkYWwvIQ==
-"@babel/core@^7.1.0", "@babel/core@^7.11.6", "@babel/core@^7.12.17", "@babel/core@^7.12.3", "@babel/core@^7.21.4":
+"@babel/compat-data@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.6.tgz#15606a20341de59ba02cd2fcc5086fcbe73bf544"
+ integrity sha512-29tfsWTq2Ftu7MXmimyC0C5FDZv5DYxOZkh3XD3+QW4V/BYuv/LyEsjj3c0hqedEaDt6DBfDvexMKU8YevdqFg==
+
+"@babel/core@^7.1.0", "@babel/core@^7.11.6", "@babel/core@^7.12.17", "@babel/core@^7.12.3":
version "7.22.1"
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.1.tgz#5de51c5206f4c6f5533562838337a603c1033cfd"
integrity sha512-Hkqu7J4ynysSXxmAahpN1jjRwVJ+NdpraFLIWflgjpVob3KNyK3/tIUc7Q7szed8WMp0JNa7Qtd1E9Oo22F9gA==
@@ -43,6 +55,27 @@
json5 "^2.2.2"
semver "^6.3.0"
+"@babel/core@^7.22.5":
+ version "7.22.8"
+ resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.8.tgz#386470abe884302db9c82e8e5e87be9e46c86785"
+ integrity sha512-75+KxFB4CZqYRXjx4NlR4J7yGvKumBuZTmV4NV6v09dVXXkuYVYLT68N6HCzLvfJ+fWCxQsntNzKwwIXL4bHnw==
+ dependencies:
+ "@ampproject/remapping" "^2.2.0"
+ "@babel/code-frame" "^7.22.5"
+ "@babel/generator" "^7.22.7"
+ "@babel/helper-compilation-targets" "^7.22.6"
+ "@babel/helper-module-transforms" "^7.22.5"
+ "@babel/helpers" "^7.22.6"
+ "@babel/parser" "^7.22.7"
+ "@babel/template" "^7.22.5"
+ "@babel/traverse" "^7.22.8"
+ "@babel/types" "^7.22.5"
+ "@nicolo-ribaudo/semver-v6" "^6.3.3"
+ convert-source-map "^1.7.0"
+ debug "^4.1.0"
+ gensync "^1.0.0-beta.2"
+ json5 "^2.2.2"
+
"@babel/generator@^7.22.0", "@babel/generator@^7.22.3", "@babel/generator@^7.7.2":
version "7.22.3"
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.22.3.tgz#0ff675d2edb93d7596c5f6728b52615cfc0df01e"
@@ -53,6 +86,16 @@
"@jridgewell/trace-mapping" "^0.3.17"
jsesc "^2.5.1"
+"@babel/generator@^7.22.7":
+ version "7.22.7"
+ resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.22.7.tgz#a6b8152d5a621893f2c9dacf9a4e286d520633d5"
+ integrity sha512-p+jPjMG+SI8yvIaxGgeW24u7q9+5+TGpZh8/CuB7RhBKd7RCy8FayNEFNNKrNK/eUcY/4ExQqLmyrvBXKsIcwQ==
+ dependencies:
+ "@babel/types" "^7.22.5"
+ "@jridgewell/gen-mapping" "^0.3.2"
+ "@jridgewell/trace-mapping" "^0.3.17"
+ jsesc "^2.5.1"
+
"@babel/helper-compilation-targets@^7.22.1":
version "7.22.1"
resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.1.tgz#bfcd6b7321ffebe33290d68550e2c9d7eb7c7a58"
@@ -64,11 +107,27 @@
lru-cache "^5.1.1"
semver "^6.3.0"
+"@babel/helper-compilation-targets@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.6.tgz#e30d61abe9480aa5a83232eb31c111be922d2e52"
+ integrity sha512-534sYEqWD9VfUm3IPn2SLcH4Q3P86XL+QvqdC7ZsFrzyyPF3T4XGiVghF6PTYNdWg6pXuoqXxNQAhbYeEInTzA==
+ dependencies:
+ "@babel/compat-data" "^7.22.6"
+ "@babel/helper-validator-option" "^7.22.5"
+ "@nicolo-ribaudo/semver-v6" "^6.3.3"
+ browserslist "^4.21.9"
+ lru-cache "^5.1.1"
+
"@babel/helper-environment-visitor@^7.22.1":
version "7.22.1"
resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.1.tgz#ac3a56dbada59ed969d712cf527bd8271fe3eba8"
integrity sha512-Z2tgopurB/kTbidvzeBrc2To3PUP/9i5MUe+fU6QJCQDyPwSH2oRapkLw3KGECDYSjhQZCNxEvNvZlLw8JjGwA==
+"@babel/helper-environment-visitor@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz#f06dd41b7c1f44e1f8da6c4055b41ab3a09a7e98"
+ integrity sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==
+
"@babel/helper-function-name@^7.21.0":
version "7.21.0"
resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz#d552829b10ea9f120969304023cd0645fa00b1b4"
@@ -77,6 +136,14 @@
"@babel/template" "^7.20.7"
"@babel/types" "^7.21.0"
+"@babel/helper-function-name@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz#ede300828905bb15e582c037162f99d5183af1be"
+ integrity sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==
+ dependencies:
+ "@babel/template" "^7.22.5"
+ "@babel/types" "^7.22.5"
+
"@babel/helper-hoist-variables@^7.18.6":
version "7.18.6"
resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz#d4d2c8fb4baeaa5c68b99cc8245c56554f926678"
@@ -84,6 +151,13 @@
dependencies:
"@babel/types" "^7.18.6"
+"@babel/helper-hoist-variables@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb"
+ integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
"@babel/helper-module-imports@^7.16.7", "@babel/helper-module-imports@^7.21.4":
version "7.21.4"
resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.21.4.tgz#ac88b2f76093637489e718a90cec6cf8a9b029af"
@@ -91,6 +165,13 @@
dependencies:
"@babel/types" "^7.21.4"
+"@babel/helper-module-imports@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz#1a8f4c9f4027d23f520bd76b364d44434a72660c"
+ integrity sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
"@babel/helper-module-transforms@^7.21.5", "@babel/helper-module-transforms@^7.22.1":
version "7.22.1"
resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.22.1.tgz#e0cad47fedcf3cae83c11021696376e2d5a50c63"
@@ -105,11 +186,30 @@
"@babel/traverse" "^7.22.1"
"@babel/types" "^7.22.0"
-"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.19.0", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.21.5", "@babel/helper-plugin-utils@^7.8.0":
+"@babel/helper-module-transforms@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz#0f65daa0716961b6e96b164034e737f60a80d2ef"
+ integrity sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==
+ dependencies:
+ "@babel/helper-environment-visitor" "^7.22.5"
+ "@babel/helper-module-imports" "^7.22.5"
+ "@babel/helper-simple-access" "^7.22.5"
+ "@babel/helper-split-export-declaration" "^7.22.5"
+ "@babel/helper-validator-identifier" "^7.22.5"
+ "@babel/template" "^7.22.5"
+ "@babel/traverse" "^7.22.5"
+ "@babel/types" "^7.22.5"
+
+"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.21.5", "@babel/helper-plugin-utils@^7.8.0":
version "7.21.5"
resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.21.5.tgz#345f2377d05a720a4e5ecfa39cbf4474a4daed56"
integrity sha512-0WDaIlXKOX/3KfBK/dwP1oQGiPh6rjMkT7HIRv7i5RR2VUMwrx5ZL0dwBkKx7+SW1zwNdgjHd34IMk5ZjTeHVg==
+"@babel/helper-plugin-utils@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz#dd7ee3735e8a313b9f7b05a773d892e88e6d7295"
+ integrity sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==
+
"@babel/helper-simple-access@^7.21.5":
version "7.21.5"
resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.21.5.tgz#d697a7971a5c39eac32c7e63c0921c06c8a249ee"
@@ -117,6 +217,13 @@
dependencies:
"@babel/types" "^7.21.5"
+"@babel/helper-simple-access@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz#4938357dc7d782b80ed6dbb03a0fba3d22b1d5de"
+ integrity sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
"@babel/helper-split-export-declaration@^7.18.6":
version "7.18.6"
resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz#7367949bc75b20c6d5a5d4a97bba2824ae8ef075"
@@ -124,21 +231,43 @@
dependencies:
"@babel/types" "^7.18.6"
+"@babel/helper-split-export-declaration@^7.22.5", "@babel/helper-split-export-declaration@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz#322c61b7310c0997fe4c323955667f18fcefb91c"
+ integrity sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
"@babel/helper-string-parser@^7.21.5":
version "7.21.5"
resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.21.5.tgz#2b3eea65443c6bdc31c22d037c65f6d323b6b2bd"
integrity sha512-5pTUx3hAJaZIdW99sJ6ZUUgWq/Y+Hja7TowEnLNMm1VivRgZQL3vpBY3qUACVsvw+yQU6+YgfBVmcbLaZtrA1w==
+"@babel/helper-string-parser@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz#533f36457a25814cf1df6488523ad547d784a99f"
+ integrity sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==
+
"@babel/helper-validator-identifier@^7.18.6", "@babel/helper-validator-identifier@^7.19.1":
version "7.19.1"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz#7eea834cf32901ffdc1a7ee555e2f9c27e249ca2"
integrity sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==
+"@babel/helper-validator-identifier@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz#9544ef6a33999343c8740fa51350f30eeaaaf193"
+ integrity sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==
+
"@babel/helper-validator-option@^7.21.0":
version "7.21.0"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz#8224c7e13ace4bafdc4004da2cf064ef42673180"
integrity sha512-rmL/B8/f0mKS2baE9ZpyTcTavvEuWhTTW8amjzXNvYG4AwBsqTLikfXsEofsJEfKHf+HQVQbFOHy6o+4cnC/fQ==
+"@babel/helper-validator-option@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz#de52000a15a177413c8234fa3a8af4ee8102d0ac"
+ integrity sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==
+
"@babel/helpers@^7.22.0":
version "7.22.3"
resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.22.3.tgz#53b74351da9684ea2f694bf0877998da26dd830e"
@@ -148,6 +277,15 @@
"@babel/traverse" "^7.22.1"
"@babel/types" "^7.22.3"
+"@babel/helpers@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.22.6.tgz#8e61d3395a4f0c5a8060f309fb008200969b5ecd"
+ integrity sha512-YjDs6y/fVOYFV8hAf1rxd1QvR9wJe1pDBZ2AREKq/SDayfPzgk0PBnVuTCE5X1acEpMMNOVUqoe+OwiZGJ+OaA==
+ dependencies:
+ "@babel/template" "^7.22.5"
+ "@babel/traverse" "^7.22.6"
+ "@babel/types" "^7.22.5"
+
"@babel/highlight@^7.18.6":
version "7.18.6"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf"
@@ -157,11 +295,25 @@
chalk "^2.0.0"
js-tokens "^4.0.0"
+"@babel/highlight@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.5.tgz#aa6c05c5407a67ebce408162b7ede789b4d22031"
+ integrity sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==
+ dependencies:
+ "@babel/helper-validator-identifier" "^7.22.5"
+ chalk "^2.0.0"
+ js-tokens "^4.0.0"
+
"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.21.9", "@babel/parser@^7.22.0", "@babel/parser@^7.22.4":
version "7.22.4"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.4.tgz#a770e98fd785c231af9d93f6459d36770993fb32"
integrity sha512-VLLsx06XkEYqBtE5YGPwfSGwfrjnyPP5oiGty3S8pQLFDFLaS8VwWSIxkTXpcvr5zeYLE6+MBNl2npl/YnfofA==
+"@babel/parser@^7.22.5", "@babel/parser@^7.22.7":
+ version "7.22.7"
+ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.7.tgz#df8cf085ce92ddbdbf668a7f186ce848c9036cae"
+ integrity sha512-7NF8pOkHP5o2vpmGgNGcfAeCvOYhGLyA3Z4eBQkT1RJlWu47n63bCs93QfJ2hIAFCil7L5P2IWhs1oToVgrL0Q==
+
"@babel/plugin-syntax-async-generators@^7.8.4":
version "7.8.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d"
@@ -269,19 +421,19 @@
"@babel/helper-plugin-utils" "^7.21.5"
"@babel/helper-simple-access" "^7.21.5"
-"@babel/plugin-transform-react-jsx-self@^7.21.0":
- version "7.21.0"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.21.0.tgz#ec98d4a9baafc5a1eb398da4cf94afbb40254a54"
- integrity sha512-f/Eq+79JEu+KUANFks9UZCcvydOOGMgF7jBrcwjHa5jTZD8JivnhCJYvmlhR/WTXBWonDExPoW0eO/CR4QJirA==
+"@babel/plugin-transform-react-jsx-self@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.22.5.tgz#ca2fdc11bc20d4d46de01137318b13d04e481d8e"
+ integrity sha512-nTh2ogNUtxbiSbxaT4Ds6aXnXEipHweN9YRgOX/oNXdf0cCrGn/+2LozFa3lnPV5D90MkjhgckCPBrsoSc1a7g==
dependencies:
- "@babel/helper-plugin-utils" "^7.20.2"
+ "@babel/helper-plugin-utils" "^7.22.5"
-"@babel/plugin-transform-react-jsx-source@^7.19.6":
- version "7.19.6"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.19.6.tgz#88578ae8331e5887e8ce28e4c9dc83fb29da0b86"
- integrity sha512-RpAi004QyMNisst/pvSanoRdJ4q+jMCWyk9zdw/CyLB9j8RXEahodR6l2GyttDRyEVWZtbN+TpLiHJ3t34LbsQ==
+"@babel/plugin-transform-react-jsx-source@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.22.5.tgz#49af1615bfdf6ed9d3e9e43e425e0b2b65d15b6c"
+ integrity sha512-yIiRO6yobeEIaI0RTbIr8iAK9FcBHLtZq0S89ZPjDLQXBA4xvghaKqI0etp/tF3htTM0sazJKKLz9oEiGRtu7w==
dependencies:
- "@babel/helper-plugin-utils" "^7.19.0"
+ "@babel/helper-plugin-utils" "^7.22.5"
"@babel/runtime@^7.10.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.18.3", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.7":
version "7.22.3"
@@ -299,6 +451,15 @@
"@babel/parser" "^7.21.9"
"@babel/types" "^7.21.5"
+"@babel/template@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.22.5.tgz#0c8c4d944509875849bd0344ff0050756eefc6ec"
+ integrity sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==
+ dependencies:
+ "@babel/code-frame" "^7.22.5"
+ "@babel/parser" "^7.22.5"
+ "@babel/types" "^7.22.5"
+
"@babel/traverse@^7.22.1", "@babel/traverse@^7.7.2":
version "7.22.4"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.22.4.tgz#c3cf96c5c290bd13b55e29d025274057727664c0"
@@ -315,6 +476,22 @@
debug "^4.1.0"
globals "^11.1.0"
+"@babel/traverse@^7.22.5", "@babel/traverse@^7.22.6", "@babel/traverse@^7.22.8":
+ version "7.22.8"
+ resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.22.8.tgz#4d4451d31bc34efeae01eac222b514a77aa4000e"
+ integrity sha512-y6LPR+wpM2I3qJrsheCTwhIinzkETbplIgPBbwvqPKc+uljeA5gP+3nP8irdYt1mjQaDnlIcG+dw8OjAco4GXw==
+ dependencies:
+ "@babel/code-frame" "^7.22.5"
+ "@babel/generator" "^7.22.7"
+ "@babel/helper-environment-visitor" "^7.22.5"
+ "@babel/helper-function-name" "^7.22.5"
+ "@babel/helper-hoist-variables" "^7.22.5"
+ "@babel/helper-split-export-declaration" "^7.22.6"
+ "@babel/parser" "^7.22.7"
+ "@babel/types" "^7.22.5"
+ debug "^4.1.0"
+ globals "^11.1.0"
+
"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.20.7", "@babel/types@^7.21.0", "@babel/types@^7.21.4", "@babel/types@^7.21.5", "@babel/types@^7.22.0", "@babel/types@^7.22.3", "@babel/types@^7.22.4", "@babel/types@^7.3.3":
version "7.22.4"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.4.tgz#56a2653ae7e7591365dabf20b76295410684c071"
@@ -324,6 +501,15 @@
"@babel/helper-validator-identifier" "^7.19.1"
to-fast-properties "^2.0.0"
+"@babel/types@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.5.tgz#cd93eeaab025880a3a47ec881f4b096a5b786fbe"
+ integrity sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==
+ dependencies:
+ "@babel/helper-string-parser" "^7.22.5"
+ "@babel/helper-validator-identifier" "^7.22.5"
+ to-fast-properties "^2.0.0"
+
"@bcoe/v8-coverage@^0.2.3":
version "0.2.3"
resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39"
@@ -961,6 +1147,11 @@
resolved "https://registry.yarnpkg.com/@mantine/utils/-/utils-5.10.5.tgz#ad620d714e545c6efb7f69d94ce46e3fd2fe01fb"
integrity sha512-FGMq4dGs5HhDAtI0z46uzxzKKPmZ3h5uKUyKg1ZHoFR1mBtcUMbB6FylFmHqKFRWlJ5IXqX9dwmiVrLYUOfTmA==
+"@nicolo-ribaudo/semver-v6@^6.3.3":
+ version "6.3.3"
+ resolved "https://registry.yarnpkg.com/@nicolo-ribaudo/semver-v6/-/semver-v6-6.3.3.tgz#ea6d23ade78a325f7a52750aab1526b02b628c29"
+ integrity sha512-3Yc1fUTs69MG/uZbJlLSI3JISMn2UV2rg+1D/vROUqZyh3l6iYHCs7GMp+M40ZD7yOdDbYjJcU1oTJhrc+dGKg==
+
"@nodelib/fs.scandir@2.1.5":
version "2.1.5"
resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5"
@@ -1205,13 +1396,13 @@
"@types/yargs-parser" "*"
"@vitejs/plugin-react@^4":
- version "4.0.0"
- resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.0.0.tgz#46d1c37c507447d10467be1c111595174555ef28"
- integrity sha512-HX0XzMjL3hhOYm+0s95pb0Z7F8O81G7joUHgfDd/9J/ZZf5k4xX6QAMFkKsHFxaHlf6X7GD7+XuaZ66ULiJuhQ==
+ version "4.0.2"
+ resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.0.2.tgz#cd25adc113c4c6f504b2e32e28230d399bfba334"
+ integrity sha512-zbnVp3Esfg33zDaoLrjxG+p/dPiOtpvJA+1oOEQwSxMMTRL9zi1eghIcd2WtLjkcKnPsa3S15LzS/OzDn2BOCA==
dependencies:
- "@babel/core" "^7.21.4"
- "@babel/plugin-transform-react-jsx-self" "^7.21.0"
- "@babel/plugin-transform-react-jsx-source" "^7.19.6"
+ "@babel/core" "^7.22.5"
+ "@babel/plugin-transform-react-jsx-self" "^7.22.5"
+ "@babel/plugin-transform-react-jsx-source" "^7.22.5"
react-refresh "^0.14.0"
acorn-jsx@^5.3.2:
@@ -1560,6 +1751,16 @@ browserslist@^4.21.3:
node-releases "^2.0.12"
update-browserslist-db "^1.0.11"
+browserslist@^4.21.9:
+ version "4.21.9"
+ resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.9.tgz#e11bdd3c313d7e2a9e87e8b4b0c7872b13897635"
+ integrity sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==
+ dependencies:
+ caniuse-lite "^1.0.30001503"
+ electron-to-chromium "^1.4.431"
+ node-releases "^2.0.12"
+ update-browserslist-db "^1.0.11"
+
bser@2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05"
@@ -1615,6 +1816,11 @@ caniuse-lite@^1.0.30001489:
resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001495.tgz#64a0ccef1911a9dcff647115b4430f8eff1ef2d9"
integrity sha512-F6x5IEuigtUfU5ZMQK2jsy5JqUUlEFRVZq8bO2a+ysq5K7jD6PPc9YXZj78xDNS3uNchesp1Jw47YXEqr+Viyg==
+caniuse-lite@^1.0.30001503:
+ version "1.0.30001513"
+ resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001513.tgz#382fe5fbfb0f7abbaf8c55ca3ac71a0307a752e9"
+ integrity sha512-pnjGJo7SOOjAGytZZ203Em95MRM8Cr6jhCXNF/FAXTpCTRTECnqQWLpiTRqrFtdYcth8hf4WECUpkezuYsMVww==
+
capture-exit@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/capture-exit/-/capture-exit-2.0.0.tgz#fb953bfaebeb781f62898239dabb426d08a509a4"
@@ -1907,6 +2113,11 @@ electron-to-chromium@^1.4.411:
resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.423.tgz#99567f3a0563fe0d1d0931e9ce851bca239f6658"
integrity sha512-y4A7YfQcDGPAeSWM1IuoWzXpg9RY1nwHzHSwRtCSQFp9FgAVDgdWlFf0RbdWfLWQ2WUI+bddUgk5RgTjqRE6FQ==
+electron-to-chromium@^1.4.431:
+ version "1.4.453"
+ resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.453.tgz#0a81fdc1943db202e8724d9f61369a71f0dd51e8"
+ integrity sha512-BU8UtQz6CB3T7RIGhId4BjmjJVXQDujb0+amGL8jpcluFJr6lwspBOvkUbnttfpZCm4zFMHmjrX1QrdPWBBMjQ==
+
emittery@^0.13.1:
version "0.13.1"
resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad"
@@ -3774,9 +3985,9 @@ node-int64@^0.4.0:
integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==
node-releases@^2.0.12:
- version "2.0.12"
- resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.12.tgz#35627cc224a23bfb06fb3380f2b3afaaa7eb1039"
- integrity sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==
+ version "2.0.13"
+ resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.13.tgz#d5ed1627c23e3461e819b02e57b75e4899b1c81d"
+ integrity sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==
normalize-path@^2.1.1:
version "2.1.1"
diff --git a/client/src/main/java/ai/vespa/client/dsl/Field.java b/client/src/main/java/ai/vespa/client/dsl/Field.java
index 6d199ead2b8..59459899189 100644
--- a/client/src/main/java/ai/vespa/client/dsl/Field.java
+++ b/client/src/main/java/ai/vespa/client/dsl/Field.java
@@ -571,6 +571,29 @@ public class Field extends QueryChain {
return common("nearestNeighbor", annotation, (Object) rankFeature);
}
+ /**
+ * Fuzzy query.
+ * https://docs.vespa.ai/en/reference/query-language-reference.html#fuzzy
+ *
+ * @param text the text to match fuzzily
+ * @return the query
+ */
+ public Query fuzzy(String text) {
+ return common("fuzzy", annotation, text);
+ }
+
+ /**
+ * Fuzzy query.
+ * https://docs.vespa.ai/en/reference/query-language-reference.html#fuzzy
+ *
+ * @param annotation the annotation
+ * @param text the text to match fuzzily
+ * @return the query
+ */
+ public Query fuzzy(Annotation annotation, String text) {
+ return common("fuzzy", annotation, text);
+ }
+
private Query common(String relation, Annotation annotation, Object value) {
return common(relation, annotation, value, values.toArray());
}
@@ -629,6 +652,8 @@ public class Field extends QueryChain {
return hasAnnotation
? Text.format("([%s]nearestNeighbor(%s, %s))", annotation, fieldName, valuesStr)
: Text.format("nearestNeighbor(%s, %s)", fieldName, valuesStr);
+ case "fuzzy":
+ return Text.format("%s contains (%sfuzzy(%s))", fieldName, annotation, values.get(0));
default:
Object value = values.get(0);
valuesStr = value instanceof Long ? value + "L" : value.toString();
diff --git a/client/src/main/java/ai/vespa/client/dsl/Q.java b/client/src/main/java/ai/vespa/client/dsl/Q.java
index 2bb998cd3e5..e4cfd4aa1ef 100644
--- a/client/src/main/java/ai/vespa/client/dsl/Q.java
+++ b/client/src/main/java/ai/vespa/client/dsl/Q.java
@@ -22,7 +22,7 @@ public final class Q {
throw new RuntimeException(e);
}
}
- private static Sources SELECT_ALL_FROM_SOURCES_ALL = new Sources(new Select("*"), "*");
+ private static final Sources SELECT_ALL_FROM_SOURCES_ALL = new Sources(new Select("*"), "*");
public static Select select(String fieldName) { return new Select(fieldName);
}
diff --git a/client/src/test/java/ai/vespa/client/dsl/QTest.java b/client/src/test/java/ai/vespa/client/dsl/QTest.java
index aae8b2c8923..c242349873c 100644
--- a/client/src/test/java/ai/vespa/client/dsl/QTest.java
+++ b/client/src/test/java/ai/vespa/client/dsl/QTest.java
@@ -485,6 +485,18 @@ class QTest {
}
@Test
+ void fuzzy() {
+ String q = Q.p("f1").fuzzy("text to match").build();
+ assertEquals("yql=select * from sources * where f1 contains (fuzzy(\"text to match\"))", q);
+ }
+
+ @Test
+ void fuzzy_with_annotation() {
+ String q = Q.p("f1").fuzzy(A.a("maxEditDistance", 3).append(A.a("prefixLength", 10)), "text to match").build();
+ assertEquals("yql=select * from sources * where f1 contains ({\"prefixLength\":10,\"maxEditDistance\":3}fuzzy(\"text to match\"))", q);
+ }
+
+ @Test
void use_contains_instead_of_contains_equiv_when_input_size_is_1() {
String q = Q.p("f1").containsEquiv(Collections.singletonList("p1"))
.build();
diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml
index 85805da76a5..78103f4beb5 100644
--- a/cloud-tenant-base-dependencies-enforcer/pom.xml
+++ b/cloud-tenant-base-dependencies-enforcer/pom.xml
@@ -24,7 +24,7 @@
<bouncycastle.version>1.74</bouncycastle.version>
<commons-codec.version>1.15</commons-codec.version>
- <felix.version>7.0.1</felix.version>
+ <felix.version>7.0.5</felix.version>
<httpclient5.version>5.2.1</httpclient5.version>
<httpcore5.version>5.2.1</httpcore5.version>
<httpclient.version>4.5.14</httpclient.version>
@@ -58,7 +58,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
- <version>3.0.0</version>
+ <version>3.3.0</version>
<dependencies>
<dependency>
<groupId>com.yahoo.vespa</groupId>
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java
index 2e8e2707166..19c4e4b1e89 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ContentCluster.java
@@ -17,6 +17,7 @@ import java.util.Objects;
import java.util.TreeMap;
import static com.yahoo.vdslib.state.NodeState.ORCHESTRATOR_RESERVED_DESCRIPTION;
+import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Result;
public class ContentCluster {
@@ -129,7 +130,7 @@ public class ContentCluster {
* @param newState state wanted to be set
* @param inMoratorium whether the CC is in moratorium
*/
- public NodeStateChangeChecker.Result calculateEffectOfNewState(
+ public Result calculateEffectOfNewState(
Node node, ClusterState clusterState, SetUnitStateRequest.Condition condition,
NodeState oldState, NodeState newState, boolean inMoratorium) {
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
index 069139b8c9e..04ad5899cd2 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
@@ -216,10 +216,6 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
return RPCCommunicator.SET_DISTRIBUTION_STATES_RPC_VERSION;
}
- public String getSlobrokAddress() {
- return "storage/cluster." + cluster.getName() + "/" + node.getType() + "/" + node.getIndex();
- }
-
public void markRpcAddressOutdated(Timer timer) {
lastSeenInSlobrok = timer.getCurrentTimeInMillis();
}
@@ -237,6 +233,10 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
return node.getType().equals(NodeType.STORAGE);
}
+ public String type() {
+ return isDistributor() ? "distributor" : "storage node";
+ }
+
public int getNodeIndex() {
return node.getIndex();
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
index 54240330de3..c9f5cfeb9c8 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
@@ -11,7 +11,6 @@ import com.yahoo.vdslib.state.Node;
import com.yahoo.vdslib.state.NodeState;
import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;
-import com.yahoo.vespa.clustercontroller.core.hostinfo.Metrics;
import com.yahoo.vespa.clustercontroller.core.hostinfo.StorageNode;
import com.yahoo.vespa.clustercontroller.utils.staterestapi.requests.SetUnitStateRequest;
import java.util.ArrayList;
@@ -30,9 +29,9 @@ import static com.yahoo.vdslib.state.State.DOWN;
import static com.yahoo.vdslib.state.State.MAINTENANCE;
import static com.yahoo.vdslib.state.State.RETIRED;
import static com.yahoo.vdslib.state.State.UP;
-import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Result.allowSettingOfWantedState;
-import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Result.createAlreadySet;
-import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Result.createDisallowed;
+import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Result.allow;
+import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Result.alreadySet;
+import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Result.disallow;
import static com.yahoo.vespa.clustercontroller.utils.staterestapi.requests.SetUnitStateRequest.Condition.FORCE;
import static com.yahoo.vespa.clustercontroller.utils.staterestapi.requests.SetUnitStateRequest.Condition.SAFE;
import static java.util.logging.Level.FINE;
@@ -64,214 +63,150 @@ public class NodeStateChangeChecker {
throw new IllegalArgumentException("Cannot have both 1 group and maxNumberOfGroupsAllowedToBeDown > 1");
}
- public static class Result {
-
- public enum Action {
- MUST_SET_WANTED_STATE,
- ALREADY_SET,
- DISALLOWED
- }
-
- private final Action action;
- private final String reason;
-
- private Result(Action action, String reason) {
- this.action = action;
- this.reason = reason;
- }
-
- public static Result createDisallowed(String reason) {
- return new Result(Action.DISALLOWED, reason);
- }
-
- public static Result allowSettingOfWantedState() {
- return new Result(Action.MUST_SET_WANTED_STATE, "Preconditions fulfilled and new state different");
- }
-
- public static Result createAlreadySet() {
- return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
- }
-
- public boolean settingWantedStateIsAllowed() { return action == Action.MUST_SET_WANTED_STATE; }
-
- public boolean settingWantedStateIsNotAllowed() { return ! settingWantedStateIsAllowed(); }
-
- public boolean wantedStateAlreadySet() {
- return action == Action.ALREADY_SET;
- }
-
- public String getReason() {
- return reason;
- }
-
- public String toString() {
- return "action " + action + ": " + reason;
- }
- }
-
public Result evaluateTransition(Node node, ClusterState clusterState, SetUnitStateRequest.Condition condition,
NodeState oldWantedState, NodeState newWantedState) {
- if (condition == FORCE) {
- return allowSettingOfWantedState();
- }
+ if (condition == FORCE)
+ return allow();
- if (inMoratorium) {
- return createDisallowed("Master cluster controller is bootstrapping and in moratorium");
- }
+ if (inMoratorium)
+ return disallow("Master cluster controller is bootstrapping and in moratorium");
- if (condition != SAFE) {
- return createDisallowed("Condition not implemented: " + condition.name());
- }
+ if (condition != SAFE)
+ return disallow("Condition not implemented: " + condition.name());
- if (node.getType() != STORAGE) {
- return createDisallowed("Safe-set of node state is only supported for storage nodes! " +
- "Requested node type: " + node.getType().toString());
- }
+ if (node.getType() != STORAGE)
+ return disallow("Safe-set of node state is only supported for storage nodes! " +
+ "Requested node type: " + node.getType().toString());
StorageNodeInfo nodeInfo = clusterInfo.getStorageNodeInfo(node.getIndex());
- if (nodeInfo == null) {
- return createDisallowed("Unknown node " + node);
- }
+ if (nodeInfo == null)
+ return disallow("Unknown node " + node);
- // If the new state and description equals the existing, we're done. This is done for 2 cases:
- // - We can short-circuit setting of a new wanted state, which e.g. hits ZooKeeper.
- // - We ensure that clients that have previously set the wanted state, continue
- // to see the same conclusion, even though they possibly would have been denied
- // MUST_SET_WANTED_STATE if re-evaluated. This is important for implementing idempotent clients.
- if (newWantedState.getState().equals(oldWantedState.getState()) &&
- Objects.equals(newWantedState.getDescription(), oldWantedState.getDescription())) {
- return createAlreadySet();
- }
+ if (noChanges(oldWantedState, newWantedState))
+ return alreadySet();
return switch (newWantedState.getState()) {
case UP -> canSetStateUp(nodeInfo, oldWantedState);
case MAINTENANCE -> canSetStateMaintenanceTemporarily(nodeInfo, clusterState, newWantedState.getDescription());
case DOWN -> canSetStateDownPermanently(nodeInfo, clusterState, newWantedState.getDescription());
- default -> createDisallowed("Destination node state unsupported in safe mode: " + newWantedState);
+ default -> disallow("Destination node state unsupported in safe mode: " + newWantedState);
};
}
+ private static boolean noChanges(NodeState oldWantedState, NodeState newWantedState) {
+ // If the new state and description equals the existing, we're done. This is done for 2 cases:
+ // - We can short-circuit setting of a new wanted state, which e.g. hits ZooKeeper.
+ // - We ensure that clients that have previously set the wanted state, continue
+ // to see the same conclusion, even though they possibly would have been
+ // DISALLOWED if re-evaluated. This is important for implementing idempotent clients.
+ return newWantedState.getState().equals(oldWantedState.getState())
+ && Objects.equals(newWantedState.getDescription(), oldWantedState.getDescription());
+ }
+
private Result canSetStateDownPermanently(NodeInfo nodeInfo, ClusterState clusterState, String newDescription) {
- NodeState oldWantedState = nodeInfo.getUserWantedState();
- if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) {
- // Refuse to override whatever an operator or unknown entity is doing.
- //
- // Note: The new state&description is NOT equal to the old state&description:
- // that would have been short-circuited prior to this.
- return createDisallowed("A conflicting wanted state is already set: " +
- oldWantedState.getState() + ": " + oldWantedState.getDescription());
- }
+ var result = checkIfStateSetWithDifferentDescription(nodeInfo, newDescription);
+ if (result.notAllowed())
+ return result;
State reportedState = nodeInfo.getReportedState().getState();
- if (reportedState != UP) {
- return createDisallowed("Reported state (" + reportedState
- + ") is not UP, so no bucket data is available");
- }
+ if (reportedState != UP)
+ return disallow("Reported state (" + reportedState + ") is not UP, so no bucket data is available");
State currentState = clusterState.getNodeState(nodeInfo.getNode()).getState();
- if (currentState != RETIRED) {
- return createDisallowed("Only retired nodes are allowed to be set to DOWN in safe mode - is "
- + currentState);
- }
+ if (currentState != RETIRED)
+ return disallow("Only retired nodes are allowed to be set to DOWN in safe mode - is " + currentState);
HostInfo hostInfo = nodeInfo.getHostInfo();
Integer hostInfoNodeVersion = hostInfo.getClusterStateVersionOrNull();
int clusterControllerVersion = clusterState.getVersion();
- if (hostInfoNodeVersion == null || hostInfoNodeVersion != clusterControllerVersion) {
- return createDisallowed("Cluster controller at version " + clusterControllerVersion
- + " got info for storage node " + nodeInfo.getNodeIndex() + " at a different version "
- + hostInfoNodeVersion);
- }
+ int nodeIndex = nodeInfo.getNodeIndex();
+ if (hostInfoNodeVersion == null || hostInfoNodeVersion != clusterControllerVersion)
+ return disallow("Cluster controller at version " + clusterControllerVersion +
+ " got info for storage node " + nodeIndex + " at a different version " +
+ hostInfoNodeVersion);
- Optional<Metrics.Value> bucketsMetric;
- bucketsMetric = hostInfo.getMetrics().getValueAt(BUCKETS_METRIC_NAME, BUCKETS_METRIC_DIMENSIONS);
- if (bucketsMetric.isEmpty() || bucketsMetric.get().getLast() == null) {
- return createDisallowed("Missing last value of the " + BUCKETS_METRIC_NAME +
- " metric for storage node " + nodeInfo.getNodeIndex());
- }
+ var bucketsMetric = hostInfo.getMetrics().getValueAt(BUCKETS_METRIC_NAME, BUCKETS_METRIC_DIMENSIONS);
+ if (bucketsMetric.isEmpty() || bucketsMetric.get().getLast() == null)
+ return disallow("Missing last value of the " + BUCKETS_METRIC_NAME + " metric for storage node " + nodeIndex);
long lastBuckets = bucketsMetric.get().getLast();
- if (lastBuckets > 0) {
- return createDisallowed("The storage node manages " + lastBuckets + " buckets");
- }
+ if (lastBuckets > 0)
+ return disallow("The storage node manages " + lastBuckets + " buckets");
- return allowSettingOfWantedState();
+ return allow();
}
private Result canSetStateUp(NodeInfo nodeInfo, NodeState oldWantedState) {
- if (oldWantedState.getState() == UP) {
- // The description is not significant when wanting to set the state to UP
- return createAlreadySet();
- }
+ if (oldWantedState.getState() == UP)
+ return alreadySet(); // The description is not significant when wanting to set the state to UP
- if (nodeInfo.getReportedState().getState() != UP) {
- return createDisallowed("Refuse to set wanted state to UP, " +
- "since the reported state is not UP (" +
- nodeInfo.getReportedState().getState() + ")");
- }
+ State reportedState = nodeInfo.getReportedState().getState();
+ if (reportedState != UP)
+ return disallow("Refuse to set wanted state to UP, since the reported state is not UP (" + reportedState + ")");
- return allowSettingOfWantedState();
+ return allow();
}
private Result canSetStateMaintenanceTemporarily(StorageNodeInfo nodeInfo, ClusterState clusterState,
String newDescription) {
- NodeState oldWantedState = nodeInfo.getUserWantedState();
- if (oldWantedState.getState() != UP && !oldWantedState.getDescription().equals(newDescription)) {
- // Refuse to override whatever an operator or unknown entity is doing. If the description is
- // identical, we assume it is the same operator.
- //
- // Note: The new state&description is NOT equal to the old state&description:
- // that would have been short-circuited prior to this.
- return createDisallowed("A conflicting wanted state is already set: " +
- oldWantedState.getState() + ": " + oldWantedState.getDescription());
- }
+ var result = checkIfStateSetWithDifferentDescription(nodeInfo, newDescription);
+ if (result.notAllowed())
+ return result;
if (maxNumberOfGroupsAllowedToBeDown == -1) {
- var otherGroupCheck = anotherNodeInAnotherGroupHasWantedState(nodeInfo);
- if (otherGroupCheck.settingWantedStateIsNotAllowed()) {
- return otherGroupCheck;
- }
- if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription)) {
- return allowSettingOfWantedState();
- }
+ result = checkIfAnotherNodeInAnotherGroupHasWantedState(nodeInfo);
+ if (result.notAllowed())
+ return result;
+ if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription))
+ return allow();
} else {
- var result = otherNodesHaveWantedState(nodeInfo, newDescription, clusterState);
- if (result.isPresent())
- return result.get();
+ var optionalResult = checkIfOtherNodesHaveWantedState(nodeInfo, newDescription, clusterState);
+ if (optionalResult.isPresent())
+ return optionalResult.get();
}
- if (clusterState.getNodeState(nodeInfo.getNode()).getState() == DOWN) {
+ if (nodeIsDown(clusterState, nodeInfo)) {
log.log(FINE, "node is DOWN, allow");
- return allowSettingOfWantedState();
+ return allow();
}
- Result allNodesAreUpCheck = checkAllNodesAreUp(clusterState);
- if (allNodesAreUpCheck.settingWantedStateIsNotAllowed()) {
- log.log(FINE, "allNodesAreUpCheck: " + allNodesAreUpCheck);
- return allNodesAreUpCheck;
+ result = checkIfNodesAreUpOrRetired(clusterState);
+ if (result.notAllowed()) {
+ log.log(FINE, "nodesAreUpOrRetired: " + result);
+ return result;
}
- Result checkDistributorsResult = checkDistributors(nodeInfo.getNode(), clusterState.getVersion());
- if (checkDistributorsResult.settingWantedStateIsNotAllowed()) {
- log.log(FINE, "checkDistributors: "+ checkDistributorsResult);
- return checkDistributorsResult;
+ result = checkClusterStateAndRedundancy(nodeInfo.getNode(), clusterState.getVersion());
+ if (result.notAllowed()) {
+ log.log(FINE, "checkDistributors: "+ result);
+ return result;
}
- return allowSettingOfWantedState();
+ return allow();
+ }
+
+ /** Refuse to override whatever an operator or unknown entity is doing. */
+ private static Result checkIfStateSetWithDifferentDescription(NodeInfo nodeInfo, String newDescription) {
+ State oldWantedState = nodeInfo.getUserWantedState().getState();
+ String oldDescription = nodeInfo.getUserWantedState().getDescription();
+ if (oldWantedState != UP && ! oldDescription.equals(newDescription))
+ return disallow("A conflicting wanted state is already set: " + oldWantedState + ": " + oldDescription);
+
+ return allow();
}
/**
* Returns a disallow-result if there is another node (in another group, if hierarchical)
* that has a wanted state != UP. We disallow more than 1 suspended node/group at a time.
*/
- private Result anotherNodeInAnotherGroupHasWantedState(StorageNodeInfo nodeInfo) {
+ private Result checkIfAnotherNodeInAnotherGroupHasWantedState(StorageNodeInfo nodeInfo) {
if (groupVisiting.isHierarchical()) {
SettableOptional<Result> anotherNodeHasWantedState = new SettableOptional<>();
groupVisiting.visit(group -> {
if (!groupContainsNode(group, nodeInfo.getNode())) {
Result result = otherNodeInGroupHasWantedState(group);
- if (result.settingWantedStateIsNotAllowed()) {
+ if (result.notAllowed()) {
anotherNodeHasWantedState.set(result);
// Have found a node that is suspended, halt the visiting
return false;
@@ -281,7 +216,7 @@ public class NodeStateChangeChecker {
return true;
});
- return anotherNodeHasWantedState.asOptional().orElseGet(Result::allowSettingOfWantedState);
+ return anotherNodeHasWantedState.asOptional().orElseGet(Result::allow);
} else {
// Returns a disallow-result if there is another node with a wanted state
return otherNodeHasWantedState(nodeInfo);
@@ -296,7 +231,7 @@ public class NodeStateChangeChecker {
* if less than maxNumberOfGroupsAllowedToBeDown: return Optional.of(allowed)
* else: if node is in group with nodes already down: return Optional.of(allowed), else Optional.of(disallowed)
*/
- private Optional<Result> otherNodesHaveWantedState(StorageNodeInfo nodeInfo, String newDescription, ClusterState clusterState) {
+ private Optional<Result> checkIfOtherNodesHaveWantedState(StorageNodeInfo nodeInfo, String newDescription, ClusterState clusterState) {
Node node = nodeInfo.getNode();
if (groupVisiting.isHierarchical()) {
@@ -309,12 +244,12 @@ public class NodeStateChangeChecker {
Set<Integer> groupsWithSameStateAndDescription = groupsWithSameStateAndDescription(MAINTENANCE, newDescription);
if (aGroupContainsNode(groupsWithSameStateAndDescription, node)) {
log.log(FINE, "Node is in group with same state and description, allow");
- return Optional.of(allowSettingOfWantedState());
+ return Optional.of(allow());
}
// There are groups with nodes not up, but with another description, probably operator set
if (groupsWithSameStateAndDescription.size() == 0) {
- return Optional.of(createDisallowed("Wanted state already set for another node in groups: " +
- sortSetIntoList(groupsWithNodesWantedStateNotUp)));
+ return Optional.of(disallow("Wanted state already set for another node in groups: " +
+ sortSetIntoList(groupsWithNodesWantedStateNotUp)));
}
Set<Integer> retiredAndNotUpGroups = groupsWithNotRetiredAndNotUp(clusterState);
@@ -326,21 +261,25 @@ public class NodeStateChangeChecker {
}
if (numberOfGroupsToConsider < maxNumberOfGroupsAllowedToBeDown) {
log.log(FINE, "Allow, retiredAndNotUpGroups=" + retiredAndNotUpGroups);
- return Optional.of(allowSettingOfWantedState());
+ return Optional.of(allow());
}
- return Optional.of(createDisallowed(String.format("At most %d groups can have wanted state: %s",
- maxNumberOfGroupsAllowedToBeDown,
- sortSetIntoList(retiredAndNotUpGroups))));
+ return Optional.of(disallow(String.format("At most %d groups can have wanted state: %s",
+ maxNumberOfGroupsAllowedToBeDown,
+ sortSetIntoList(retiredAndNotUpGroups))));
} else {
// Return a disallow-result if there is another node with a wanted state
var otherNodeHasWantedState = otherNodeHasWantedState(nodeInfo);
- if (otherNodeHasWantedState.settingWantedStateIsNotAllowed())
+ if (otherNodeHasWantedState.notAllowed())
return Optional.of(otherNodeHasWantedState);
}
return Optional.empty();
}
+ private static boolean nodeIsDown(ClusterState clusterState, NodeInfo nodeInfo) {
+ return clusterState.getNodeState(nodeInfo.getNode()).getState() == DOWN;
+ }
+
private ArrayList<Integer> sortSetIntoList(Set<Integer> set) {
var sortedList = new ArrayList<>(set);
Collections.sort(sortedList);
@@ -354,55 +293,46 @@ public class NodeStateChangeChecker {
StorageNodeInfo storageNodeInfo = clusterInfo.getStorageNodeInfo(index);
if (storageNodeInfo == null) continue; // needed for tests only
State storageNodeWantedState = storageNodeInfo.getUserWantedState().getState();
- if (storageNodeWantedState != UP) {
- return createDisallowed(
- "At most one group can have wanted state: Other storage node " + index +
- " in group " + group.getIndex() + " has wanted state " + storageNodeWantedState);
- }
+ if (storageNodeWantedState != UP)
+ return disallow("At most one group can have wanted state: Other storage node " + index +
+ " in group " + group.getIndex() + " has wanted state " + storageNodeWantedState);
State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState();
- if (distributorWantedState != UP) {
- return createDisallowed(
- "At most one group can have wanted state: Other distributor " + index +
- " in group " + group.getIndex() + " has wanted state " + distributorWantedState);
- }
+ if (distributorWantedState != UP)
+ return disallow("At most one group can have wanted state: Other distributor " + index +
+ " in group " + group.getIndex() + " has wanted state " + distributorWantedState);
}
- return allowSettingOfWantedState();
+ return allow();
}
private Result otherNodeHasWantedState(StorageNodeInfo nodeInfo) {
for (var configuredNode : clusterInfo.getConfiguredNodes().values()) {
int index = configuredNode.index();
- if (index == nodeInfo.getNodeIndex()) {
- continue;
- }
+ if (index == nodeInfo.getNodeIndex()) continue;
State storageNodeWantedState = clusterInfo.getStorageNodeInfo(index).getUserWantedState().getState();
if (storageNodeWantedState != UP) {
- return createDisallowed(
- "At most one node can have a wanted state when #groups = 1: Other storage node " +
+ return disallow("At most one node can have a wanted state when #groups = 1: Other storage node " +
index + " has wanted state " + storageNodeWantedState);
}
State distributorWantedState = clusterInfo.getDistributorNodeInfo(index).getUserWantedState().getState();
if (distributorWantedState != UP) {
- return createDisallowed(
- "At most one node can have a wanted state when #groups = 1: Other distributor " +
+ return disallow("At most one node can have a wanted state when #groups = 1: Other distributor " +
index + " has wanted state " + distributorWantedState);
}
}
- return allowSettingOfWantedState();
+ return allow();
}
private boolean anotherNodeInGroupAlreadyAllowed(StorageNodeInfo nodeInfo, String newDescription) {
MutableBoolean alreadyAllowed = new MutableBoolean(false);
groupVisiting.visit(group -> {
- if (!groupContainsNode(group, nodeInfo.getNode())) {
+ if (!groupContainsNode(group, nodeInfo.getNode()))
return true;
- }
alreadyAllowed.set(anotherNodeInGroupAlreadyAllowed(group, nodeInfo.getNode(), newDescription));
@@ -425,9 +355,8 @@ public class NodeStateChangeChecker {
private static boolean groupContainsNode(Group group, Node node) {
for (ConfiguredNode configuredNode : group.getNodes()) {
- if (configuredNode.index() == node.getIndex()) {
+ if (configuredNode.index() == node.getIndex())
return true;
- }
}
return false;
@@ -449,61 +378,42 @@ public class NodeStateChangeChecker {
.collect(Collectors.toList());
}
- private Result checkAllNodesAreUp(ClusterState clusterState) {
- // This method verifies both storage nodes and distributors are up (or retired).
- // The complicated part is making a summary error message.
-
- for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) {
- State wantedState = storageNodeInfo.getUserWantedState().getState();
- if (wantedState != UP && wantedState != RETIRED) {
- return createDisallowed("Another storage node wants state " +
- wantedState.toString().toUpperCase() + ": " + storageNodeInfo.getNodeIndex());
- }
-
- State state = clusterState.getNodeState(storageNodeInfo.getNode()).getState();
- if (state != UP && state != RETIRED) {
- return createDisallowed("Another storage node has state " + state.toString().toUpperCase() +
- ": " + storageNodeInfo.getNodeIndex());
- }
- }
-
- for (NodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) {
- State wantedState = distributorNodeInfo.getUserWantedState().getState();
- if (wantedState != UP && wantedState != RETIRED) {
- return createDisallowed("Another distributor wants state " + wantedState.toString().toUpperCase() +
- ": " + distributorNodeInfo.getNodeIndex());
- }
+ /** Verifies that storage nodes and distributors are up (or retired). */
+ private Result checkIfNodesAreUpOrRetired(ClusterState clusterState) {
+ for (NodeInfo nodeInfo : clusterInfo.getAllNodeInfos()) {
+ State wantedState = nodeInfo.getUserWantedState().getState();
+ if (wantedState != UP && wantedState != RETIRED)
+ return disallow("Another " + nodeInfo.type() + " wants state " +
+ wantedState.toString().toUpperCase() + ": " + nodeInfo.getNodeIndex());
- State state = clusterState.getNodeState(distributorNodeInfo.getNode()).getState();
- if (state != UP && state != RETIRED) {
- return createDisallowed("Another distributor has state " + state.toString().toUpperCase() +
- ": " + distributorNodeInfo.getNodeIndex());
- }
+ State state = clusterState.getNodeState(nodeInfo.getNode()).getState();
+ if (state != UP && state != RETIRED)
+ return disallow("Another " + nodeInfo.type() + " has state " +
+ state.toString().toUpperCase() + ": " + nodeInfo.getNodeIndex());
}
- return allowSettingOfWantedState();
+ return allow();
}
- private Result checkStorageNodesForDistributor(DistributorNodeInfo distributorNodeInfo, Node node) {
+ private Result checkRedundancy(DistributorNodeInfo distributorNodeInfo, Node node) {
List<StorageNode> storageNodes = distributorNodeInfo.getHostInfo().getDistributor().getStorageNodes();
for (StorageNode storageNode : storageNodes) {
if (storageNode.getIndex() == node.getIndex()) {
Integer minReplication = storageNode.getMinCurrentReplicationFactorOrNull();
// Why test on != null? Missing min-replication is OK (indicate empty/few buckets on system).
if (minReplication != null && minReplication < requiredRedundancy) {
- return createDisallowed("Distributor "
- + distributorNodeInfo.getNodeIndex()
+ return disallow("Distributor " + distributorNodeInfo.getNodeIndex()
+ " says storage node " + node.getIndex()
+ " has buckets with redundancy as low as "
+ storageNode.getMinCurrentReplicationFactorOrNull()
+ ", but we require at least " + requiredRedundancy);
} else {
- return allowSettingOfWantedState();
+ return allow();
}
}
}
- return allowSettingOfWantedState();
+ return allow();
}
/**
@@ -511,29 +421,29 @@ public class NodeStateChangeChecker {
* @param node the node to be checked
* @param clusterStateVersion the cluster state we expect distributors to have
*/
- private Result checkDistributors(Node node, int clusterStateVersion) {
- if (clusterInfo.getDistributorNodeInfos().isEmpty()) {
- return createDisallowed("Not aware of any distributors, probably not safe to upgrade?");
- }
+ private Result checkClusterStateAndRedundancy(Node node, int clusterStateVersion) {
+ if (clusterInfo.getDistributorNodeInfos().isEmpty())
+ return disallow("Not aware of any distributors, probably not safe to upgrade?");
+
for (DistributorNodeInfo distributorNodeInfo : clusterInfo.getDistributorNodeInfos()) {
Integer distributorClusterStateVersion = distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull();
- if (distributorClusterStateVersion == null) {
- return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex()
- + " has not reported any cluster state version yet.");
- } else if (distributorClusterStateVersion != clusterStateVersion) {
- return createDisallowed("Distributor node " + distributorNodeInfo.getNodeIndex()
- + " does not report same version ("
- + distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull()
- + ") as fleetcontroller (" + clusterStateVersion + ")");
+ if (distributorClusterStateVersion == null)
+ return disallow("Distributor node " + distributorNodeInfo.getNodeIndex() +
+ " has not reported any cluster state version yet.");
+ if (distributorClusterStateVersion != clusterStateVersion) {
+ return disallow("Distributor node " + distributorNodeInfo.getNodeIndex() +
+ " does not report same version (" +
+ distributorNodeInfo.getHostInfo().getClusterStateVersionOrNull() +
+ ") as fleetcontroller (" + clusterStateVersion + ")");
}
- Result storageNodesResult = checkStorageNodesForDistributor(distributorNodeInfo, node);
- if (storageNodesResult.settingWantedStateIsNotAllowed()) {
+ Result storageNodesResult = checkRedundancy(distributorNodeInfo, node);
+ if (storageNodesResult.notAllowed()) {
return storageNodesResult;
}
}
- return allowSettingOfWantedState();
+ return allow();
}
private Set<Integer> groupsWithUserWantedStateNotUp() {
@@ -575,4 +485,50 @@ public class NodeStateChangeChecker {
.collect(Collectors.toSet());
}
+ public static class Result {
+
+ public enum Action {
+ ALLOWED,
+ ALREADY_SET,
+ DISALLOWED
+ }
+
+ private final Action action;
+ private final String reason;
+
+ private Result(Action action, String reason) {
+ this.action = action;
+ this.reason = reason;
+ }
+
+ public static Result disallow(String reason) {
+ return new Result(Action.DISALLOWED, reason);
+ }
+
+ public static Result allow() {
+ return new Result(Action.ALLOWED, "Preconditions fulfilled and new state different");
+ }
+
+ public static Result alreadySet() {
+ return new Result(Action.ALREADY_SET, "Basic preconditions fulfilled and new state is already effective");
+ }
+
+ public boolean allowed() { return action == Action.ALLOWED; }
+
+ public boolean notAllowed() { return ! allowed(); }
+
+ public boolean isAlreadySet() {
+ return action == Action.ALREADY_SET;
+ }
+
+ public String reason() {
+ return reason;
+ }
+
+ public String toString() {
+ return "action " + action + ": " + reason;
+ }
+
+ }
+
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequest.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequest.java
index 01a75034ddf..bfbe0f795fc 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequest.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequest.java
@@ -9,7 +9,6 @@ import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.ContentCluster;
import com.yahoo.vespa.clustercontroller.core.NodeInfo;
-import com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker;
import com.yahoo.vespa.clustercontroller.core.RemoteClusterControllerTask;
import com.yahoo.vespa.clustercontroller.core.listeners.NodeListener;
import com.yahoo.vespa.clustercontroller.core.restapiv2.Id;
@@ -145,7 +144,7 @@ public class SetNodeStateRequest extends Request<SetResponse> {
probe);
// If the state was successfully set, just return an "ok" message back.
- String reason = success ? "ok" : result.getReason();
+ String reason = success ? "ok" : result.reason();
return new SetResponse(reason, success);
}
@@ -154,19 +153,19 @@ public class SetNodeStateRequest extends Request<SetResponse> {
* wanted state, or the requested state has been accepted as the new wanted state.
*/
private static boolean setWantedStateAccordingToResult(
- NodeStateChangeChecker.Result result,
+ Result result,
NodeState newWantedState,
SetUnitStateRequest.Condition condition,
NodeInfo nodeInfo,
ContentCluster cluster,
NodeListener stateListener,
boolean probe) {
- if (result.settingWantedStateIsAllowed()) {
+ if (result.allowed()) {
setNewWantedState(nodeInfo, newWantedState, stateListener, probe);
}
// True if the wanted state was or has just been set to newWantedState
- boolean success = result.settingWantedStateIsAllowed() || result.wantedStateAlreadySet();
+ boolean success = result.allowed() || result.isAlreadySet();
if (success && condition == SetUnitStateRequest.Condition.SAFE && nodeInfo.isStorage()) {
// In safe-mode, setting the storage node must be accompanied by changing the state
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java
index 43687d51937..7b20fcf694a 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeCheckerTest.java
@@ -115,8 +115,8 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), FORCE,
UP_NODE_STATE, newState);
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -127,9 +127,9 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("Master cluster controller is bootstrapping and in moratorium", result.getReason());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertEquals("Master cluster controller is bootstrapping and in moratorium", result.reason());
}
@ParameterizedTest
@@ -140,9 +140,9 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 10), defaultAllUpClusterState(), SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("Unknown node storage.10", result.getReason());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertEquals("Unknown node storage.10", result.reason());
}
@ParameterizedTest
@@ -159,10 +159,10 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 1), clusterStateWith0InMaintenance,
SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
assertEquals("At most one node can have a wanted state when #groups = 1: Other storage node 0 has wanted state Maintenance",
- result.getReason());
+ result.reason());
}
@Test
@@ -195,9 +195,9 @@ public class NodeStateChangeCheckerTest {
cluster.clusterInfo().getStorageNodeInfo(nodeIndex).setReportedState(new NodeState(STORAGE, DOWN), 0);
Node node = new Node(STORAGE, nodeIndex);
Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed(), result.toString());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("At most 2 groups can have wanted state: [0, 1, 2]", result.getReason());
+ assertFalse(result.allowed(), result.toString());
+ assertFalse(result.isAlreadySet());
+ assertEquals("At most 2 groups can have wanted state: [0, 1, 2]", result.reason());
}
// Nodes in group 0 and 1 in maintenance, try to set storage node in group 2 to maintenance, should fail
@@ -206,9 +206,9 @@ public class NodeStateChangeCheckerTest {
int nodeIndex = 2;
Node node = new Node(STORAGE, nodeIndex);
Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed(), result.toString());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("At most 2 groups can have wanted state: [0, 1]", result.getReason());
+ assertFalse(result.allowed(), result.toString());
+ assertFalse(result.isAlreadySet());
+ assertEquals("At most 2 groups can have wanted state: [0, 1]", result.reason());
}
}
@@ -251,9 +251,9 @@ public class NodeStateChangeCheckerTest {
int nodeIndex = 4;
Node node = new Node(STORAGE, nodeIndex);
Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed(), result.toString());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("At most 2 groups can have wanted state: [0, 1]", result.getReason());
+ assertFalse(result.allowed(), result.toString());
+ assertFalse(result.isAlreadySet());
+ assertEquals("At most 2 groups can have wanted state: [0, 1]", result.reason());
}
// 2 nodes in group 0 and 1 in group 1 in maintenance, try to set storage node 3 in group 1 to maintenance
@@ -270,9 +270,9 @@ public class NodeStateChangeCheckerTest {
int nodeIndex = 4;
Node node = new Node(STORAGE, nodeIndex);
Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed(), result.toString());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("At most 2 groups can have wanted state: [0, 1]", result.getReason());
+ assertFalse(result.allowed(), result.toString());
+ assertFalse(result.isAlreadySet());
+ assertEquals("At most 2 groups can have wanted state: [0, 1]", result.reason());
}
// 2 nodes in group 0 up again but buckets not in sync and 2 nodes in group 1 in maintenance,
@@ -301,8 +301,8 @@ public class NodeStateChangeCheckerTest {
int nodeIndex = 2;
Node node = new Node(STORAGE, nodeIndex);
Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertTrue(result.settingWantedStateIsAllowed(), result.toString());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed(), result.toString());
+ assertFalse(result.isAlreadySet());
}
}
@@ -321,10 +321,10 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 1), clusterStateWith0InMaintenance,
SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
assertEquals("At most one node can have a wanted state when #groups = 1: Other distributor 0 has wanted state Down",
- result.getReason());
+ result.reason());
}
@ParameterizedTest
@@ -344,12 +344,12 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 2), clusterStateWith0InMaintenance,
SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
if (maxNumberOfGroupsAllowedToBeDown >= 1)
- assertEquals("Wanted state already set for another node in groups: [0]", result.getReason());
+ assertEquals("Wanted state already set for another node in groups: [0]", result.reason());
else
- assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.getReason());
+ assertEquals("At most one group can have wanted state: Other distributor 0 in group 0 has wanted state Down", result.reason());
}
{
@@ -359,11 +359,11 @@ public class NodeStateChangeCheckerTest {
new Node(STORAGE, 1), clusterStateWith0InMaintenance,
SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
if (maxNumberOfGroupsAllowedToBeDown >= 1) {
- assertFalse(result.settingWantedStateIsAllowed(), result.getReason());
- assertEquals("Wanted state already set for another node in groups: [0]", result.getReason());
+ assertFalse(result.allowed(), result.reason());
+ assertEquals("Wanted state already set for another node in groups: [0]", result.reason());
} else {
- assertFalse(result.settingWantedStateIsAllowed(), result.getReason());
- assertEquals("Another distributor wants state DOWN: 0", result.getReason());
+ assertFalse(result.allowed(), result.reason());
+ assertEquals("Another distributor wants state DOWN: 0", result.reason());
}
}
}
@@ -385,13 +385,13 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 2), clusterStateWith0InMaintenance,
SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
if (maxNumberOfGroupsAllowedToBeDown >= 1)
- assertEquals("At most 1 groups can have wanted state: [0]", result.getReason());
+ assertEquals("At most 1 groups can have wanted state: [0]", result.reason());
else
assertEquals("At most one group can have wanted state: Other storage node 0 in group 0 has wanted state Maintenance",
- result.getReason());
+ result.reason());
}
{
@@ -400,8 +400,8 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 1), clusterStateWith0InMaintenance,
SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertTrue(result.settingWantedStateIsAllowed(), result.getReason());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed(), result.reason());
+ assertFalse(result.isAlreadySet());
}
}
@@ -412,9 +412,9 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeDistributor, defaultAllUpClusterState(), SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertTrue(result.getReason().contains("Safe-set of node state is only supported for storage nodes"));
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertTrue(result.reason().contains("Safe-set of node state is only supported for storage nodes"));
}
@ParameterizedTest
@@ -433,17 +433,17 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, clusterStateWith3Down, SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("Another storage node has state DOWN: 3", result.getReason());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertEquals("Another storage node has state DOWN: 3", result.reason());
}
@ParameterizedTest
@ValueSource(ints = {-1, 1})
void testCanUpgradeStorageSafeYes(int maxNumberOfGroupsAllowedToBeDown) {
Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, 1, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState());
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -456,8 +456,8 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
}
// A node may be reported as Up but have a generated state of Down if it's part of
@@ -477,8 +477,8 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SAFE,
MAINTENANCE_NODE_STATE, UP_NODE_STATE);
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -491,8 +491,8 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SAFE,
new NodeState(STORAGE, DOWN), UP_NODE_STATE);
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -505,10 +505,10 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
assertEquals("Distributor 0 says storage node 1 has buckets with redundancy as low as 3, but we require at least 4",
- result.getReason());
+ result.reason());
}
@ParameterizedTest
@@ -521,8 +521,8 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 3), defaultAllUpClusterState(), SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -546,8 +546,8 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
new Node(STORAGE, 1), defaultAllUpClusterState(), SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -559,9 +559,9 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, defaultAllUpClusterState(), SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.getReason());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertEquals("Distributor node 0 has not reported any cluster state version yet.", result.reason());
}
private Result transitionToSameState(State state, String oldDescription, String newDescription, int maxNumberOfGroupsAllowedToBeDown) {
@@ -583,23 +583,23 @@ public class NodeStateChangeCheckerTest {
@ValueSource(ints = {-1, 1})
void testSettingUpWhenUpCausesAlreadySet(int maxNumberOfGroupsAllowedToBeDown) {
Result result = transitionToSameState(UP, "foo", "bar", maxNumberOfGroupsAllowedToBeDown);
- assertTrue(result.wantedStateAlreadySet());
+ assertTrue(result.isAlreadySet());
}
@ParameterizedTest
@ValueSource(ints = {-1, 1})
void testSettingAlreadySetState(int maxNumberOfGroupsAllowedToBeDown) {
Result result = transitionToSameState("foo", "foo", maxNumberOfGroupsAllowedToBeDown);
- assertFalse(result.settingWantedStateIsAllowed());
- assertTrue(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertTrue(result.isAlreadySet());
}
@ParameterizedTest
@ValueSource(ints = {-1, 1})
void testDifferentDescriptionImpliesDenied(int maxNumberOfGroupsAllowedToBeDown) {
Result result = transitionToSameState("foo", "bar", maxNumberOfGroupsAllowedToBeDown);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
}
private Result transitionToMaintenanceWithOneStorageNodeDown(ContentCluster cluster, ClusterState clusterState) {
@@ -632,16 +632,16 @@ public class NodeStateChangeCheckerTest {
@ValueSource(ints = {-1, 1})
void testCanUpgradeWhenAllUp(int maxNumberOfGroupsAllowedToBeDown) {
Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState());
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@ValueSource(ints = {-1, 1})
void testCanUpgradeWhenAllUpOrRetired(int maxNumberOfGroupsAllowedToBeDown) {
Result result = transitionToMaintenanceWithNoStorageNodesDown(createCluster(4, maxNumberOfGroupsAllowedToBeDown), defaultAllUpClusterState());
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -656,8 +656,8 @@ public class NodeStateChangeCheckerTest {
clusterState.setNodeState(new Node(STORAGE, storageNodeIndex), downNodeState);
Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState);
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -674,9 +674,9 @@ public class NodeStateChangeCheckerTest {
clusterState.setNodeState(new Node(STORAGE, otherIndex), downNodeState);
Result result = transitionToMaintenanceWithOneStorageNodeDown(cluster, clusterState);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertTrue(result.getReason().contains("Another storage node has state DOWN: 2"));
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertTrue(result.reason().contains("Another storage node has state DOWN: 2"));
}
@ParameterizedTest
@@ -698,8 +698,8 @@ public class NodeStateChangeCheckerTest {
Result result = nodeStateChangeChecker.evaluateTransition(
nodeStorage, stateWithNodeDown, SAFE,
UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
}
@ParameterizedTest
@@ -711,9 +711,9 @@ public class NodeStateChangeCheckerTest {
currentClusterStateVersion,
0,
maxNumberOfGroupsAllowedToBeDown);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.getReason());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertEquals("Only retired nodes are allowed to be set to DOWN in safe mode - is Up", result.reason());
}
@ParameterizedTest
@@ -725,9 +725,9 @@ public class NodeStateChangeCheckerTest {
currentClusterStateVersion,
1,
maxNumberOfGroupsAllowedToBeDown);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("The storage node manages 1 buckets", result.getReason());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertEquals("The storage node manages 1 buckets", result.reason());
}
@ParameterizedTest
@@ -739,9 +739,9 @@ public class NodeStateChangeCheckerTest {
currentClusterStateVersion,
0,
maxNumberOfGroupsAllowedToBeDown);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.getReason());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
+ assertEquals("Reported state (Initializing) is not UP, so no bucket data is available", result.reason());
}
@ParameterizedTest
@@ -753,10 +753,10 @@ public class NodeStateChangeCheckerTest {
currentClusterStateVersion - 1,
0,
maxNumberOfGroupsAllowedToBeDown);
- assertFalse(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertFalse(result.allowed());
+ assertFalse(result.isAlreadySet());
assertEquals("Cluster controller at version 2 got info for storage node 1 at a different version 1",
- result.getReason());
+ result.reason());
}
@ParameterizedTest
@@ -768,8 +768,8 @@ public class NodeStateChangeCheckerTest {
currentClusterStateVersion,
0,
maxNumberOfGroupsAllowedToBeDown);
- assertTrue(result.settingWantedStateIsAllowed());
- assertFalse(result.wantedStateAlreadySet());
+ assertTrue(result.allowed());
+ assertFalse(result.isAlreadySet());
}
private Result evaluateDownTransition(ClusterState clusterState,
@@ -948,9 +948,9 @@ public class NodeStateChangeCheckerTest {
private void checkSettingToMaintenanceIsAllowed(int nodeIndex, NodeStateChangeChecker nodeStateChangeChecker, ClusterState clusterState) {
Node node = new Node(STORAGE, nodeIndex);
Result result = nodeStateChangeChecker.evaluateTransition(node, clusterState, SAFE, UP_NODE_STATE, MAINTENANCE_NODE_STATE);
- assertTrue(result.settingWantedStateIsAllowed(), result.toString());
- assertFalse(result.wantedStateAlreadySet());
- assertEquals("Preconditions fulfilled and new state different", result.getReason());
+ assertTrue(result.allowed(), result.toString());
+ assertFalse(result.isAlreadySet());
+ assertEquals("Preconditions fulfilled and new state different", result.reason());
}
private void setStorageNodeWantedStateToMaintenance(ContentCluster cluster, int nodeIndex) {
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequestTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequestTest.java
index 6d93eadfe2a..f2f38954f55 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequestTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/restapiv2/requests/SetNodeStateRequestTest.java
@@ -12,7 +12,6 @@ import com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker;
import com.yahoo.vespa.clustercontroller.core.listeners.NodeListener;
import com.yahoo.vespa.clustercontroller.utils.staterestapi.errors.StateRestApiException;
import com.yahoo.vespa.clustercontroller.utils.staterestapi.requests.SetUnitStateRequest;
-import com.yahoo.vespa.clustercontroller.utils.staterestapi.response.SetResponse;
import com.yahoo.vespa.clustercontroller.utils.staterestapi.response.UnitState;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -20,6 +19,7 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
+import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Result;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.eq;
@@ -54,7 +54,7 @@ public class SetNodeStateRequestTest {
testSetStateRequest(
"maintenance",
State.UP, State.UP,
- NodeStateChangeChecker.Result.allowSettingOfWantedState(),
+ Result.allow(),
Optional.of(State.MAINTENANCE), Optional.of(State.DOWN));
}
@@ -64,7 +64,7 @@ public class SetNodeStateRequestTest {
testSetStateRequest(
"maintenance",
State.UP, State.UP,
- NodeStateChangeChecker.Result.allowSettingOfWantedState(),
+ Result.allow(),
Optional.empty(), Optional.empty());
}
@@ -73,7 +73,7 @@ public class SetNodeStateRequestTest {
testSetStateRequest(
"down",
State.UP, State.UP,
- NodeStateChangeChecker.Result.allowSettingOfWantedState(),
+ Result.allow(),
Optional.of(State.DOWN), Optional.of(State.DOWN));
}
@@ -82,7 +82,7 @@ public class SetNodeStateRequestTest {
testSetStateRequest(
"up",
State.MAINTENANCE, State.DOWN,
- NodeStateChangeChecker.Result.allowSettingOfWantedState(),
+ Result.allow(),
Optional.of(State.UP), Optional.of(State.UP));
}
@@ -91,7 +91,7 @@ public class SetNodeStateRequestTest {
testSetStateRequest(
"up",
State.DOWN, State.DOWN,
- NodeStateChangeChecker.Result.allowSettingOfWantedState(),
+ Result.allow(),
Optional.of(State.UP), Optional.of(State.UP));
}
@@ -100,7 +100,7 @@ public class SetNodeStateRequestTest {
testSetStateRequest(
"maintenance",
State.MAINTENANCE, State.UP,
- NodeStateChangeChecker.Result.createAlreadySet(),
+ Result.alreadySet(),
Optional.empty(), Optional.of(State.DOWN));
}
@@ -109,7 +109,7 @@ public class SetNodeStateRequestTest {
testSetStateRequest(
"maintenance",
State.MAINTENANCE, State.DOWN,
- NodeStateChangeChecker.Result.createAlreadySet(),
+ Result.alreadySet(),
Optional.empty(), Optional.empty());
}
@@ -168,8 +168,8 @@ public class SetNodeStateRequestTest {
}
}
- private SetResponse setWantedState() throws StateRestApiException {
- return SetNodeStateRequest.setWantedState(
+ private void setWantedState() throws StateRestApiException {
+ SetNodeStateRequest.setWantedState(
cluster,
condition,
newStates,
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/multifieldresolver/RankTypeResolver.java b/config-model/src/main/java/com/yahoo/schema/processing/multifieldresolver/RankTypeResolver.java
index 6424fd8ba06..e86ac6dabfc 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/multifieldresolver/RankTypeResolver.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/multifieldresolver/RankTypeResolver.java
@@ -11,7 +11,7 @@ import java.util.logging.Level;
/**
* Checks if fields have defined different rank types for the same
- * index (typically in an index-to statement), and if they have
+ * index (typically in a fieldset statement), and if they have
* output a warning and use the first ranktype.
*
* @author hmusum
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
index 00a1078b294..098d917c4e0 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
@@ -161,6 +161,11 @@ public class HostSystem extends TreeConfigProducer<Host> {
deployLogger.log(level, message);
}
+ @Override
+ public void logApplicationPackage(Level level, String message) {
+ deployLogger.logApplicationPackage(level, message);
+ }
+
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
index df3cd4103d9..92c9eccf2d3 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java
@@ -112,9 +112,8 @@ public class ConstantTensorJsonValidator {
consumeTopObject();
return;
} else if (isScalar()) {
- if (top == JsonToken.VALUE_NUMBER_FLOAT || top == JsonToken.VALUE_NUMBER_INT) {
- return;
- }
+ throw new InvalidConstantTensorException(
+ parser, String.format("Invalid type %s: Only tensors with dimensions can be stored as file constants", tensorType.toString()));
}
throw new InvalidConstantTensorException(
parser, String.format("Unexpected first token '%s' for constant with type %s",
@@ -315,14 +314,6 @@ public class ConstantTensorJsonValidator {
}
}
- private void assertFieldNameIs(String wantedFieldName) throws IOException {
- String actualFieldName = parser.getCurrentName();
-
- if (!actualFieldName.equals(wantedFieldName)) {
- throw new InvalidConstantTensorException(parser, String.format("Expected field name '%s', got '%s'", wantedFieldName, actualFieldName));
- }
- }
-
static class InvalidConstantTensorException extends IllegalArgumentException {
InvalidConstantTensorException(JsonParser parser, String message) {
@@ -338,19 +329,6 @@ public class ConstantTensorJsonValidator {
}
}
- @FunctionalInterface
- private interface SubroutineThrowingIOException {
- void invoke() throws IOException;
- }
-
- private void wrapIOException(SubroutineThrowingIOException lambda) {
- try {
- lambda.invoke();
- } catch (IOException e) {
- throw new InvalidConstantTensorException(parser, e);
- }
- }
-
private void consumeValuesNesting(int level) throws IOException {
assertCurrentTokenIs(JsonToken.START_ARRAY);
if (level >= denseDims.size()) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
index 5bb73643de5..ea4988f3029 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
@@ -290,28 +290,9 @@ public class NodesSpecification {
.loadBalancerSettings(zoneEndpoint)
.stateful(stateful)
.build();
- logInsufficientDiskResources(clusterId, clusterType, logger);
return hostSystem.allocateHosts(cluster, Capacity.from(min, max, groupSize, required, canFail, cloudAccount, info), logger);
}
- /** Log a message if requested disk may not fit core/heap dumps */
- private void logInsufficientDiskResources(ClusterSpec.Id clusterId, ClusterSpec.Type clusterType, DeployLogger deployLogger) {
- NodeResources resources = min.nodeResources();
- if (resources.diskGbIsUnspecified() || resources.memoryGbIsUnspecified()) return;
- double minDiskGb = resources.memoryGb() * switch (clusterType) {
- case combined, content -> 3;
- case container -> 2;
- default -> 0; // No constraint on other types
- };
- if (resources.diskGb() < minDiskGb) {
- // TODO(mpolden): Consider enforcing this on Vespa 9
- deployLogger.logApplicationPackage(Level.WARNING, "Requested disk (" + resources.diskGb() +
- "Gb) in " + clusterId + " is not large enough to fit " +
- "core/heap dumps. Minimum recommended disk resources " +
- "is " + minDiskGb + "Gb");
- }
- }
-
private static Pair<NodeResources, NodeResources> nodeResources(ModelElement nodesElement) {
ModelElement resources = nodesElement.child("resources");
if (resources != null) {
diff --git a/config-model/src/main/javacc/SchemaParser.jj b/config-model/src/main/javacc/SchemaParser.jj
index 9a38fdc673e..b2cb258c0ab 100644
--- a/config-model/src/main/javacc/SchemaParser.jj
+++ b/config-model/src/main/javacc/SchemaParser.jj
@@ -992,6 +992,11 @@ void attribute(ParsedField field) :
{
<ATTRIBUTE> [name = identifier()]
{
+ // TODO: Remove support for attribute with different name than field name in Vespa 9
+ if ( ! name.equals(field.name()))
+ deployLogger.logApplicationPackage(Level.WARNING, "Creating an attribute for field '" + field.name() +
+ "' with a different name '" + name + "' than the field name" +
+ " is deprecated, and support will be removed in Vespa 9. Define a field with the wanted name outside the document instead.");
ParsedAttribute attr = field.attributeFor(name);
}
( (<COLON> attributeSetting(attr))
@@ -1506,6 +1511,11 @@ void indexInsideField(ParsedField field) :
{
<INDEX> [indexName = identifier()]
{
+ // TODO: Remove support for index with different name than field name in Vespa 9
+ if ( ! indexName.equals(field.name()))
+ deployLogger.logApplicationPackage(Level.WARNING, "Creating an index for field '" + field.name() +
+ "' with a different name '" + indexName + "' than the field name" +
+ " is deprecated, and support will be removed in Vespa 9. Define a field with the wanted name outside the document instead.");
op = new ParsedIndex(indexName);
}
( (<COLON> indexBody(op) (<COMMA> indexBody(op))*) |
diff --git a/config-model/src/test/cfg/admin/metricconfig/schemas/music.sd b/config-model/src/test/cfg/admin/metricconfig/schemas/music.sd
index f90d805ce6a..71d588662a0 100644
--- a/config-model/src/test/cfg/admin/metricconfig/schemas/music.sd
+++ b/config-model/src/test/cfg/admin/metricconfig/schemas/music.sd
@@ -3,11 +3,9 @@ search music {
document music {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/application/app1/schemas/music.sd b/config-model/src/test/cfg/application/app1/schemas/music.sd
index 92e87848a8a..4e220f96727 100644
--- a/config-model/src/test/cfg/application/app1/schemas/music.sd
+++ b/config-model/src/test/cfg/application/app1/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
rank-type: about # Type of ranking settings to apply
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
rank-type:about
}
diff --git a/config-model/src/test/cfg/application/app1/schemas/product.sd b/config-model/src/test/cfg/application/app1/schemas/product.sd
index 132ae15053f..70c9343d63a 100644
--- a/config-model/src/test/cfg/application/app1/schemas/product.sd
+++ b/config-model/src/test/cfg/application/app1/schemas/product.sd
@@ -3,7 +3,6 @@ document product {
field title type string {
indexing: index | summary
- # index-to: title, default
}
field price type int {
diff --git a/config-model/src/test/cfg/application/app_complicated_deployment_spec/schemas/music.sd b/config-model/src/test/cfg/application/app_complicated_deployment_spec/schemas/music.sd
index 92e87848a8a..4e220f96727 100644
--- a/config-model/src/test/cfg/application/app_complicated_deployment_spec/schemas/music.sd
+++ b/config-model/src/test/cfg/application/app_complicated_deployment_spec/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
rank-type: about # Type of ranking settings to apply
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
rank-type:about
}
diff --git a/config-model/src/test/cfg/application/app_genericservices/schemas/music.sd b/config-model/src/test/cfg/application/app_genericservices/schemas/music.sd
index 92e87848a8a..4e220f96727 100644
--- a/config-model/src/test/cfg/application/app_genericservices/schemas/music.sd
+++ b/config-model/src/test/cfg/application/app_genericservices/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
rank-type: about # Type of ranking settings to apply
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
rank-type:about
}
diff --git a/config-model/src/test/cfg/application/sdfilenametest/schemas/notmusic.sd b/config-model/src/test/cfg/application/sdfilenametest/schemas/notmusic.sd
index 19528975587..a4cf5cef1a1 100644
--- a/config-model/src/test/cfg/application/sdfilenametest/schemas/notmusic.sd
+++ b/config-model/src/test/cfg/application/sdfilenametest/schemas/notmusic.sd
@@ -5,7 +5,6 @@ search music {
field title type string {
indexing: summary | index
- # index-to: title, default
}
}
diff --git a/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/book.sd b/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/book.sd
index 73b540627d7..ba298f4fcba 100644
--- a/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/book.sd
+++ b/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/book.sd
@@ -2,34 +2,28 @@ search book {
document book inherits base {
field title type string {
bolding: on
- index-to: default, title
indexing: index|summary
rank-type: about
}
field dispauthor type string {
bolding: on
- index-to: default, dispauthor
indexing: index|summary
rank-type: about
}
field author type string {
bolding: on
- index-to: default, author
indexing: index|summary
rank-type: about
}
field keys type string {
- index-to: default, keys
indexing: index
rank-type: about
}
field isbn type string {
- index-to: default, isbn
indexing: index|summary
rank-type: about
}
field series type string {
- index-to: default, series
indexing: index
rank-type: about
}
diff --git a/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/music.sd b/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/music.sd
index 498bc79489f..21da176564b 100644
--- a/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/music.sd
+++ b/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/music.sd
@@ -2,11 +2,9 @@ search music {
document music inherits base {
field f1 type string {
indexing: summary | index
- index-to: f1, all
}
field f2 type string {
indexing: summary | index
- index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/video.sd b/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/video.sd
index b010b6d9769..5462be17374 100644
--- a/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/video.sd
+++ b/config-model/src/test/cfg/application/validation/testjars/nomanifest/searchdefinitions/video.sd
@@ -2,41 +2,34 @@ search video {
document video inherits base {
field title type string {
bolding: on
- index-to: default, title
indexing: index|summary
rank-type: about
}
field keys type string {
- index-to: default, keys
indexing: index
rank-type: about
}
field director type string {
bolding: on
- index-to: default, director
indexing: index|summary
rank-type: about
}
field disp_actor type string {
bolding: on
- index-to: default, disp_actor
indexing: index|summary
rank-type: about
}
field actor type string {
bolding: on
- index-to: default, actor
indexing: index|summary
rank-type: about
}
field fmt type string {
- index-to: default, fmt
indexing: index|summary
rank-type: about
}
field isbn type string {
bolding: on
- index-to: default, isbn
indexing: index|summary
rank-type: about
}
diff --git a/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/book.sd b/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/book.sd
index 73b540627d7..ba298f4fcba 100644
--- a/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/book.sd
+++ b/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/book.sd
@@ -2,34 +2,28 @@ search book {
document book inherits base {
field title type string {
bolding: on
- index-to: default, title
indexing: index|summary
rank-type: about
}
field dispauthor type string {
bolding: on
- index-to: default, dispauthor
indexing: index|summary
rank-type: about
}
field author type string {
bolding: on
- index-to: default, author
indexing: index|summary
rank-type: about
}
field keys type string {
- index-to: default, keys
indexing: index
rank-type: about
}
field isbn type string {
- index-to: default, isbn
indexing: index|summary
rank-type: about
}
field series type string {
- index-to: default, series
indexing: index
rank-type: about
}
diff --git a/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/music.sd b/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/music.sd
index 498bc79489f..21da176564b 100644
--- a/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/music.sd
+++ b/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/music.sd
@@ -2,11 +2,9 @@ search music {
document music inherits base {
field f1 type string {
indexing: summary | index
- index-to: f1, all
}
field f2 type string {
indexing: summary | index
- index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/video.sd b/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/video.sd
index b010b6d9769..5462be17374 100644
--- a/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/video.sd
+++ b/config-model/src/test/cfg/application/validation/testjars/ok/searchdefinitions/video.sd
@@ -2,41 +2,34 @@ search video {
document video inherits base {
field title type string {
bolding: on
- index-to: default, title
indexing: index|summary
rank-type: about
}
field keys type string {
- index-to: default, keys
indexing: index
rank-type: about
}
field director type string {
bolding: on
- index-to: default, director
indexing: index|summary
rank-type: about
}
field disp_actor type string {
bolding: on
- index-to: default, disp_actor
indexing: index|summary
rank-type: about
}
field actor type string {
bolding: on
- index-to: default, actor
indexing: index|summary
rank-type: about
}
field fmt type string {
- index-to: default, fmt
indexing: index|summary
rank-type: about
}
field isbn type string {
bolding: on
- index-to: default, isbn
indexing: index|summary
rank-type: about
}
diff --git a/config-model/src/test/cfg/routing/content_two_clusters/schemas/mobile.sd b/config-model/src/test/cfg/routing/content_two_clusters/schemas/mobile.sd
index 60ea98235b0..3cc3dcf5526 100644
--- a/config-model/src/test/cfg/routing/content_two_clusters/schemas/mobile.sd
+++ b/config-model/src/test/cfg/routing/content_two_clusters/schemas/mobile.sd
@@ -3,11 +3,9 @@ search mobile {
document mobile {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/routing/content_two_clusters/schemas/music.sd b/config-model/src/test/cfg/routing/content_two_clusters/schemas/music.sd
index 290f8983b4d..982607955a7 100644
--- a/config-model/src/test/cfg/routing/content_two_clusters/schemas/music.sd
+++ b/config-model/src/test/cfg/routing/content_two_clusters/schemas/music.sd
@@ -3,11 +3,9 @@ search music {
document music {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/routing/contentsimpleconfig/schemas/music.sd b/config-model/src/test/cfg/routing/contentsimpleconfig/schemas/music.sd
index 290f8983b4d..982607955a7 100644
--- a/config-model/src/test/cfg/routing/contentsimpleconfig/schemas/music.sd
+++ b/config-model/src/test/cfg/routing/contentsimpleconfig/schemas/music.sd
@@ -3,11 +3,9 @@ search music {
document music {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/routing/replacehop/schemas/music.sd b/config-model/src/test/cfg/routing/replacehop/schemas/music.sd
index 274c6ca63d6..c4dcecbd6ac 100755
--- a/config-model/src/test/cfg/routing/replacehop/schemas/music.sd
+++ b/config-model/src/test/cfg/routing/replacehop/schemas/music.sd
@@ -3,11 +3,9 @@ search music {
document music {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/routing/replaceroute/schemas/music.sd b/config-model/src/test/cfg/routing/replaceroute/schemas/music.sd
index 274c6ca63d6..c4dcecbd6ac 100755
--- a/config-model/src/test/cfg/routing/replaceroute/schemas/music.sd
+++ b/config-model/src/test/cfg/routing/replaceroute/schemas/music.sd
@@ -3,11 +3,9 @@ search music {
document music {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/search/data/travel/schemas/TTPOI.sd b/config-model/src/test/cfg/search/data/travel/schemas/TTPOI.sd
index f3fe2cdf445..7895d98b2e0 100644
--- a/config-model/src/test/cfg/search/data/travel/schemas/TTPOI.sd
+++ b/config-model/src/test/cfg/search/data/travel/schemas/TTPOI.sd
@@ -4,13 +4,11 @@ document TTPOI {
# categories associated with the POI
field Categories type array<string> {
indexing: summary | index
- # index-to: Categories
}
# sub catagories associated with the POI
field SubCategories type array<string> {
indexing: summary | index
- # index-to: SubCategories
}
}
diff --git a/config-model/src/test/cfg/search/data/v2/inherited_rankprofiles/schemas/music.sd b/config-model/src/test/cfg/search/data/v2/inherited_rankprofiles/schemas/music.sd
index 290f8983b4d..982607955a7 100644
--- a/config-model/src/test/cfg/search/data/v2/inherited_rankprofiles/schemas/music.sd
+++ b/config-model/src/test/cfg/search/data/v2/inherited_rankprofiles/schemas/music.sd
@@ -3,11 +3,9 @@ search music {
document music {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/storage/app_index_higher_than_num_nodes/schemas/music.sd b/config-model/src/test/cfg/storage/app_index_higher_than_num_nodes/schemas/music.sd
index 290f8983b4d..982607955a7 100644
--- a/config-model/src/test/cfg/storage/app_index_higher_than_num_nodes/schemas/music.sd
+++ b/config-model/src/test/cfg/storage/app_index_higher_than_num_nodes/schemas/music.sd
@@ -3,11 +3,9 @@ search music {
document music {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/cfg/storage/clustercontroller_advanced/schemas/music.sd b/config-model/src/test/cfg/storage/clustercontroller_advanced/schemas/music.sd
index 290f8983b4d..982607955a7 100644
--- a/config-model/src/test/cfg/storage/clustercontroller_advanced/schemas/music.sd
+++ b/config-model/src/test/cfg/storage/clustercontroller_advanced/schemas/music.sd
@@ -3,11 +3,9 @@ search music {
document music {
field f1 type string {
indexing: summary | index
- # index-to: f1, all
}
field f2 type string {
indexing: summary | index
- # index-to: f2, all
}
}
}
diff --git a/config-model/src/test/derived/music3/music3.sd b/config-model/src/test/derived/music3/music3.sd
index 7123c45bac2..47867683c62 100644
--- a/config-model/src/test/derived/music3/music3.sd
+++ b/config-model/src/test/derived/music3/music3.sd
@@ -5,13 +5,11 @@ schema music3 {
field title type string {
indexing: summary | index
- # index-to: title, default
rank-type: about
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
rank-type:about
}
diff --git a/config-model/src/test/derived/newrank/newrank.sd b/config-model/src/test/derived/newrank/newrank.sd
index 345d01bffb5..a01f292eb27 100644
--- a/config-model/src/test/derived/newrank/newrank.sd
+++ b/config-model/src/test/derived/newrank/newrank.sd
@@ -99,7 +99,6 @@ schema newrank{
field artist type string {
indexing: summary | index
- # index-to: artist, default
}
field artistspid type string {
diff --git a/config-model/src/test/examples/attributesexactmatch.sd b/config-model/src/test/examples/attributesexactmatch.sd
index 2db687cb20d..5529906adce 100644
--- a/config-model/src/test/examples/attributesexactmatch.sd
+++ b/config-model/src/test/examples/attributesexactmatch.sd
@@ -29,7 +29,6 @@ search music {
}
field genre type string {
- # index-to: foo
}
field trumpetist type string {
diff --git a/config-model/src/test/examples/casing.sd b/config-model/src/test/examples/casing.sd
index b0ce0a07748..7564934949b 100644
--- a/config-model/src/test/examples/casing.sd
+++ b/config-model/src/test/examples/casing.sd
@@ -31,7 +31,6 @@ search music {
field Genre type string {
indexing: index
- # index-to: Foo
alias Foo: sjanger
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index b1f47c54d54..c128b9af6e0 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -2577,44 +2577,6 @@ public class ModelProvisioningTest {
assertEquals((long) ((128 - memoryOverheadGb) * GB * 0.08), cfg.flush().memory().each().maxmemory()); // from default node flavor tuning
}
- @Test
- public void warn_on_insufficient_disk_resources() {
- String services = """
- <?xml version='1.0' encoding='utf-8' ?>
- <services>
- <container version='1.0' id='c1'>
- <nodes count='1'>
- <resources vcpu='1' memory='24Gb' disk='40Gb'/>
- </nodes>
- </container>
- <container version='1.0' id='c2'>
- <nodes count='1'>
- <resources vcpu='1' memory='24Gb' disk='50Gb'/>
- </nodes>
- </container>
- <content version='1.0' id='c3'>
- <redundancy>1</redundancy>
- <documents>
- <document type='type1' mode='index'/>
- </documents>
- <nodes count='1'>
- <resources vcpu='1' memory='24Gb' disk='50Gb'/>
- </nodes>
- </content>
- </services>
- """;
- VespaModelTester tester = new VespaModelTester();
- tester.addHosts(new NodeResources(1, 24, 50, 1, DiskSpeed.fast), 10);
- TestLogger testLogger = new TestLogger();
- VespaModel model = tester.createModel(services, true, new DeployState.Builder().deployLogger(testLogger));
- assertEquals(1, model.getContainerClusters().get("c1").getContainers().size());
- assertEquals(1, model.getContainerClusters().get("c2").getContainers().size());
- assertEquals(1, model.getContentClusters().get("c3").getSearch().getSearchNodes().size());
- assertEquals(List.of(new TestLogger.LogMessage(Level.WARNING, "Requested disk (40.0Gb) in cluster 'c1' is not large enough to fit core/heap dumps. Minimum recommended disk resources is 48.0Gb"),
- new TestLogger.LogMessage(Level.WARNING, "Requested disk (50.0Gb) in cluster 'c3' is not large enough to fit core/heap dumps. Minimum recommended disk resources is 72.0Gb")),
- testLogger.msgs());
- }
-
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
model.getConfig(builder, configId);
diff --git a/config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java b/config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java
index b1d502dec36..7990d76d023 100644
--- a/config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java
@@ -37,7 +37,7 @@ public class IndexSettingsTestCase extends AbstractSchemaTestCase {
}
@Test
- void requireThatInterlavedFeaturesAreSetOnExtraField() throws ParseException {
+ void requireThatInterleavedFeaturesAreSetOnExtraField() throws ParseException {
ApplicationBuilder builder = ApplicationBuilder.createFromString(joinLines(
"search test {",
" document test {",
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java
index 9e8388b6442..7de3d41817a 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterMembership.java
@@ -22,16 +22,28 @@ public class ClusterMembership {
private ClusterMembership(String stringValue, Version vespaVersion, Optional<DockerImage> dockerImageRepo,
ZoneEndpoint zoneEndpoint) {
String[] components = stringValue.split("/");
- if (components.length < 4)
+ if (components.length < 3)
throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " +
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/stateful][/combinedId]'");
+ Integer groupIndex = components[2].isEmpty() ? null : Integer.parseInt(components[2]);
+ Integer nodeIndex;
+ int missingElements = 0;
+ try {
+ nodeIndex = Integer.parseInt(components[3]);
+ } catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
+ // Legacy form missing the group component
+ nodeIndex = groupIndex;
+ groupIndex = null;
+ missingElements = 1;
+ }
+
boolean exclusive = false;
boolean stateful = false;
var combinedId = Optional.<String>empty();
boolean retired = false;
- if (components.length > 4) {
- for (int i = 4; i < components.length; i++) {
+ if (components.length > (4 - missingElements)) {
+ for (int i = (4 - missingElements); i < components.length; i++) {
String component = components[i];
switch (component) {
case "exclusive" -> exclusive = true;
@@ -44,7 +56,7 @@ public class ClusterMembership {
this.cluster = ClusterSpec.specification(ClusterSpec.Type.valueOf(components[0]),
ClusterSpec.Id.from(components[1]))
- .group(ClusterSpec.Group.from(Integer.parseInt(components[2])))
+ .group(groupIndex == null ? null : ClusterSpec.Group.from(groupIndex))
.vespaVersion(vespaVersion)
.exclusive(exclusive)
.combinedId(combinedId.map(ClusterSpec.Id::from))
@@ -52,7 +64,7 @@ public class ClusterMembership {
.loadBalancerSettings(zoneEndpoint)
.stateful(stateful)
.build();
- this.index = Integer.parseInt(components[3]);
+ this.index = nodeIndex;
this.retired = retired;
this.stringValue = toStringValue();
}
@@ -67,7 +79,7 @@ public class ClusterMembership {
protected String toStringValue() {
return cluster.type().name() +
"/" + cluster.id().value() +
- (cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") +
+ (cluster.group().isPresent() ? "/" + cluster.group().get().index() : "/") +
"/" + index +
( cluster.isExclusive() ? "/exclusive" : "") +
( retired ? "/retired" : "") +
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
index ccc24e60edf..4a3045c9cdd 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ClusterSpec.java
@@ -102,19 +102,18 @@ public final class ClusterSpec {
/** Creates a ClusterSpec when requesting a cluster */
public static Builder request(Type type, Id id) {
- return new Builder(type, id, false);
+ return new Builder(type, id);
}
/** Creates a ClusterSpec for an existing cluster, group id and Vespa version needs to be set */
public static Builder specification(Type type, Id id) {
- return new Builder(type, id, true);
+ return new Builder(type, id);
}
public static class Builder {
private final Type type;
private final Id id;
- private final boolean specification;
private Optional<Group> groupId = Optional.empty();
private Optional<DockerImage> dockerImageRepo = Optional.empty();
@@ -124,19 +123,13 @@ public final class ClusterSpec {
private ZoneEndpoint zoneEndpoint = ZoneEndpoint.defaultEndpoint;
private boolean stateful;
- private Builder(Type type, Id id, boolean specification) {
+ private Builder(Type type, Id id) {
this.type = type;
this.id = id;
- this.specification = specification;
this.stateful = type.isContent(); // Default to true for content clusters
}
public ClusterSpec build() {
- if (specification) {
- if (groupId.isEmpty()) throw new IllegalArgumentException("groupId is required to be set when creating a ClusterSpec with specification()");
- if (vespaVersion == null) throw new IllegalArgumentException("vespaVersion is required to be set when creating a ClusterSpec with specification()");
- } else
- if (groupId.isPresent()) throw new IllegalArgumentException("groupId is not allowed to be set when creating a ClusterSpec with request()");
return new ClusterSpec(type, id, groupId, vespaVersion, exclusive, combinedId, dockerImageRepo, zoneEndpoint, stateful);
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeAllocationException.java b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeAllocationException.java
index 507d95c1d7b..64d028db7b0 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeAllocationException.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeAllocationException.java
@@ -16,6 +16,11 @@ public class NodeAllocationException extends RuntimeException {
this.retryable = retryable;
}
+ public NodeAllocationException(String message, Throwable cause, boolean retryable) {
+ super(message, cause);
+ this.retryable = retryable;
+ }
+
public boolean retryable() {
return retryable;
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java
index 5a22056de1b..9d72f274419 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java
@@ -10,6 +10,16 @@ import java.util.logging.Level;
*/
public interface ProvisionLogger {
+ /** Log a message unrelated to the application package, e.g. internal error/status. */
void log(Level level, String message);
+ /**
+ * Log a message related to the application package. These messages should be actionable by the user, f.ex. to
+ * signal usage of invalid/deprecated syntax.
+ * This default implementation just forwards to {@link #log(Level, String)}
+ */
+ default void logApplicationPackage(Level level, String message) {
+ log(level, message);
+ }
+
}
diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
index b1195b6a54b..292aec60e39 100644
--- a/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
+++ b/config-provisioning/src/test/java/com/yahoo/config/provision/ClusterMembershipTest.java
@@ -100,7 +100,9 @@ public class ClusterMembershipTest {
assertEquals("id1", instance.cluster().id().value());
assertFalse(instance.cluster().group().isPresent());
assertEquals(3, instance.index());
- assertEquals("container/id1/3", instance.stringValue());
+ assertEquals("container/id1//3", instance.stringValue());
+ // Legacy form:
+ assertEquals(instance, ClusterMembership.from("container/id1/3", instance.cluster().vespaVersion(), Optional.empty()));
}
private void assertContentService(ClusterMembership instance) {
@@ -109,7 +111,7 @@ public class ClusterMembershipTest {
assertFalse(instance.cluster().group().isPresent());
assertEquals(37, instance.index());
assertFalse(instance.retired());
- assertEquals("content/id1/37/stateful", instance.stringValue());
+ assertEquals("content/id1//37/stateful", instance.stringValue());
}
private void assertContentServiceWithGroup(ClusterMembership instance) {
@@ -127,7 +129,9 @@ public class ClusterMembershipTest {
assertEquals("id1", instance.cluster().id().value());
assertEquals(37, instance.index());
assertTrue(instance.retired());
- assertEquals("content/id1/37/retired/stateful", instance.stringValue());
+ assertEquals("content/id1//37/retired/stateful", instance.stringValue());
+ // Legacy form:
+ assertEquals(instance, ClusterMembership.from("content/id1/37/retired/stateful", instance.cluster().vespaVersion(), Optional.empty()));
}
private void assertContentServiceWithGroupAndRetire(ClusterMembership instance) {
diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java
index 41bab257248..f8db7aadc29 100644
--- a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java
+++ b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSetSubscription.java
@@ -3,7 +3,6 @@ package com.yahoo.config.subscription.impl;
import com.yahoo.config.ConfigInstance;
import com.yahoo.config.subscription.ConfigSet;
-import com.yahoo.config.subscription.ConfigSource;
import com.yahoo.vespa.config.ConfigKey;
import java.lang.reflect.Constructor;
@@ -48,11 +47,11 @@ public class ConfigSetSubscription<T extends ConfigInstance> extends ConfigSubsc
if (hasConfigChanged()) return true;
if (timeout <= 0) return false;
- long end = System.nanoTime() + timeout * 1_000_000;
+ long startNanos = System.nanoTime();
do {
sleep();
if (hasConfigChanged()) return true;
- } while (System.nanoTime() < end);
+ } while (System.nanoTime() - startNanos < timeout * 1_000_000);
return false;
}
diff --git a/configserver/src/test/apps/app-jdisc-only-restart/schemas/music.sd b/configserver/src/test/apps/app-jdisc-only-restart/schemas/music.sd
index a45c62ccdc7..71cfc346117 100644
--- a/configserver/src/test/apps/app-jdisc-only-restart/schemas/music.sd
+++ b/configserver/src/test/apps/app-jdisc-only-restart/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
weight: 75 # Ranking importancy of this field, used by the built in nativeRank feature
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
weight: 25
}
diff --git a/configserver/src/test/apps/app-jdisc-only/schemas/music.sd b/configserver/src/test/apps/app-jdisc-only/schemas/music.sd
index a45c62ccdc7..71cfc346117 100644
--- a/configserver/src/test/apps/app-jdisc-only/schemas/music.sd
+++ b/configserver/src/test/apps/app-jdisc-only/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
weight: 75 # Ranking importancy of this field, used by the built in nativeRank feature
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
weight: 25
}
diff --git a/configserver/src/test/apps/app-major-version-7/schemas/music.sd b/configserver/src/test/apps/app-major-version-7/schemas/music.sd
index f4b11d1e8e4..85db4873ba1 100644
--- a/configserver/src/test/apps/app-major-version-7/schemas/music.sd
+++ b/configserver/src/test/apps/app-major-version-7/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
weight: 75 # Ranking importancy of this field, used by the built in nativeRank feature
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
weight: 25
}
diff --git a/configserver/src/test/apps/app/schemas/music.sd b/configserver/src/test/apps/app/schemas/music.sd
index f4b11d1e8e4..85db4873ba1 100644
--- a/configserver/src/test/apps/app/schemas/music.sd
+++ b/configserver/src/test/apps/app/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
weight: 75 # Ranking importancy of this field, used by the built in nativeRank feature
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
weight: 25
}
diff --git a/configserver/src/test/apps/content/schemas/music.sd b/configserver/src/test/apps/content/schemas/music.sd
index f4b11d1e8e4..85db4873ba1 100644
--- a/configserver/src/test/apps/content/schemas/music.sd
+++ b/configserver/src/test/apps/content/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
weight: 75 # Ranking importancy of this field, used by the built in nativeRank feature
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
weight: 25
}
diff --git a/configserver/src/test/apps/content2/schemas/music.sd b/configserver/src/test/apps/content2/schemas/music.sd
index f4b11d1e8e4..85db4873ba1 100644
--- a/configserver/src/test/apps/content2/schemas/music.sd
+++ b/configserver/src/test/apps/content2/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
weight: 75 # Ranking importancy of this field, used by the built in nativeRank feature
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
weight: 25
}
diff --git a/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd b/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd
index a45c62ccdc7..71cfc346117 100644
--- a/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd
+++ b/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
weight: 75 # Ranking importancy of this field, used by the built in nativeRank feature
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
weight: 25
}
diff --git a/configserver/src/test/apps/hosted-no-write-access-control/schemas/music.sd b/configserver/src/test/apps/hosted-no-write-access-control/schemas/music.sd
index cb4b860e019..49475dc7f77 100644
--- a/configserver/src/test/apps/hosted-no-write-access-control/schemas/music.sd
+++ b/configserver/src/test/apps/hosted-no-write-access-control/schemas/music.sd
@@ -3,7 +3,6 @@ search music {
document music {
field title type string {
indexing: index | summary
- # index-to: default
}
}
}
diff --git a/configserver/src/test/apps/legacy-flag/schemas/music.sd b/configserver/src/test/apps/legacy-flag/schemas/music.sd
index f4b11d1e8e4..85db4873ba1 100644
--- a/configserver/src/test/apps/legacy-flag/schemas/music.sd
+++ b/configserver/src/test/apps/legacy-flag/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
weight: 75 # Ranking importancy of this field, used by the built in nativeRank feature
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
weight: 25
}
diff --git a/configserver/src/test/apps/zkapp/schemas/music.sd b/configserver/src/test/apps/zkapp/schemas/music.sd
index 7616e2370b4..cc7844e76d5 100644
--- a/configserver/src/test/apps/zkapp/schemas/music.sd
+++ b/configserver/src/test/apps/zkapp/schemas/music.sd
@@ -7,13 +7,11 @@ search music {
field title type string {
indexing: summary | index # How this field should be indexed
- # index-to: title, default # Create two indexes
rank-type: about # Type of ranking settings to apply
}
field artist type string {
indexing: summary | attribute | index
- # index-to: artist, default
rank-type:about
}
diff --git a/configserver/src/test/apps/zkapp/schemas/product.sd b/configserver/src/test/apps/zkapp/schemas/product.sd
index 132ae15053f..70c9343d63a 100644
--- a/configserver/src/test/apps/zkapp/schemas/product.sd
+++ b/configserver/src/test/apps/zkapp/schemas/product.sd
@@ -3,7 +3,6 @@ document product {
field title type string {
indexing: index | summary
- # index-to: title, default
}
field price type int {
diff --git a/configserver/src/test/apps/zkfeed/schemas/product.sd b/configserver/src/test/apps/zkfeed/schemas/product.sd
index 132ae15053f..70c9343d63a 100644
--- a/configserver/src/test/apps/zkfeed/schemas/product.sd
+++ b/configserver/src/test/apps/zkfeed/schemas/product.sd
@@ -3,7 +3,6 @@ document product {
field title type string {
indexing: index | summary
- # index-to: title, default
}
field price type int {
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java b/container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java
index fbc179a10fa..5ac7705471c 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java
@@ -54,7 +54,7 @@ class AdaptiveTimeoutHandler implements TimeoutHandler {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
- if (now + nextAdaptive >= deadline) {
+ if (nextAdaptive >= deadline - now) {
return deadline - now;
}
deadline = now + nextAdaptive;
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
index 4e4b77422c1..db7e80a95e5 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
@@ -35,11 +35,11 @@ import java.util.Set;
/**
* A dispatcher communicates with search nodes to perform queries and fill hits.
- *
+ * <p>
* This class allocates {@link SearchInvoker} and {@link FillInvoker} objects based
* on query properties and general system status. The caller can then use the provided
* invocation object to execute the search or fill.
- *
+ * <p>
* This class is multithread safe.
*
* @author bratseth
@@ -111,6 +111,7 @@ public class Dispatcher extends AbstractComponent {
searchCluster.addMonitoring(clusterMonitor);
return items;
}
+
private void initialWarmup(double warmupTime) {
Thread warmup = new Thread(() -> warmup(warmupTime));
warmup.start();
@@ -130,10 +131,10 @@ public class Dispatcher extends AbstractComponent {
private static LoadBalancer.Policy toLoadBalancerPolicy(DispatchConfig.DistributionPolicy.Enum policy) {
return switch (policy) {
- case ROUNDROBIN: yield LoadBalancer.Policy.ROUNDROBIN;
- case BEST_OF_RANDOM_2: yield LoadBalancer.Policy.BEST_OF_RANDOM_2;
- case ADAPTIVE,LATENCY_AMORTIZED_OVER_REQUESTS: yield LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_REQUESTS;
- case LATENCY_AMORTIZED_OVER_TIME: yield LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_TIME;
+ case ROUNDROBIN -> LoadBalancer.Policy.ROUNDROBIN;
+ case BEST_OF_RANDOM_2 -> LoadBalancer.Policy.BEST_OF_RANDOM_2;
+ case ADAPTIVE,LATENCY_AMORTIZED_OVER_REQUESTS -> LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_REQUESTS;
+ case LATENCY_AMORTIZED_OVER_TIME -> LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_TIME;
};
}
private static List<Node> toNodes(DispatchNodesConfig nodesConfig) {
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
index d6fb6de6354..b488662591a 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/InvokerFactory.java
@@ -61,16 +61,10 @@ public abstract class InvokerFactory {
List<SearchInvoker> invokers = new ArrayList<>(nodes.size());
Set<Integer> failed = null;
for (Node node : nodes) {
- boolean nodeAdded = false;
- if (node.isWorking() != Boolean.FALSE) {
- Optional<SearchInvoker> invoker = createNodeSearchInvoker(searcher, query, maxHits, node);
- if (invoker.isPresent()) {
- invokers.add(invoker.get());
- nodeAdded = true;
- }
- }
-
- if ( ! nodeAdded) {
+ if ( node.isWorking() == Boolean.FALSE
+ || createNodeSearchInvoker(searcher, query, maxHits, node)
+ .map(invoker -> { invokers.add(invoker); return invoker; })
+ .isEmpty()) {
if (failed == null) {
failed = new HashSet<>();
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
index 1be45b01367..9c65cb3d4c0 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
@@ -99,7 +99,7 @@ public class SearchCluster implements NodeManager<Node> {
private Collection<Group> groups() { return groups.groups(); }
public int groupsWithSufficientCoverage() {
- return (int)groups().stream().filter(Group::hasSufficientCoverage).count();
+ return (int) groups().stream().filter(Group::hasSufficientCoverage).count();
}
/**
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
index 3e6e092ea70..514f0de4fec 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
@@ -3,6 +3,7 @@ package com.yahoo.search.dispatch.searchcluster;
import com.google.common.math.Quantiles;
import java.util.Collection;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
@@ -38,7 +39,7 @@ public class SearchGroupsImpl implements SearchGroups {
public long medianDocumentsPerGroup() {
if (isEmpty()) return 0;
- var activeDocuments = groups().stream().map(Group::activeDocuments).toList();
- return (long) Quantiles.median().compute(activeDocuments);
+ double[] activeDocuments = groups().stream().mapToDouble(Group::activeDocuments).toArray();
+ return (long) Quantiles.median().computeInPlace(activeDocuments);
}
}
diff --git a/container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java b/container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java
index 63655da0784..450239f7b12 100644
--- a/container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java
@@ -326,7 +326,6 @@ public class QueryTestCase {
@Test
void testBooleanParameterNoQueryProfile() {
- QueryProfile profile = new QueryProfile("myProfile");
Query query = new Query("/?query=something&ranking.softtimeout.enable=false");
assertFalse(query.properties().getBoolean("ranking.softtimeout.enable"));
assertFalse(query.getRanking().getSoftTimeout().getEnable());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
index 0d8e7745f65..186e6838a71 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
@@ -85,10 +85,7 @@ public class ApplicationPackageValidator {
private void validateDeprecatedElements(ApplicationPackage applicationPackage) {
int wantedMajor = applicationPackage.compileVersion().map(Version::getMajor)
.or(() -> applicationPackage.deploymentSpec().majorVersion())
- .or(() -> controller.readVersionStatus().controllerVersion()
- .map(VespaVersion::versionNumber)
- .map(Version::getMajor))
- .orElseThrow(() -> new IllegalArgumentException("Could not determine wanted major version"));
+ .orElseGet(() -> controller.readSystemVersion().getMajor());
for (var deprecatedElement : applicationPackage.deploymentSpec().deprecatedElements()) {
if (deprecatedElement.majorVersion() >= wantedMajor) continue;
throw new IllegalArgumentException(deprecatedElement.humanReadableString());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
index 9e2933f60fd..8f9b9b70639 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
@@ -1,7 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
-import com.yahoo.config.provision.SystemName;
import com.yahoo.container.jdisc.secretstore.SecretNotFoundException;
import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.jdisc.Metric;
@@ -50,7 +49,7 @@ public class CertificatePoolMaintainer extends ControllerMaintainer {
private final BooleanFlag useAlternateCertProvider;
public CertificatePoolMaintainer(Controller controller, Metric metric, Duration interval) {
- super(controller, interval, null, Set.of(SystemName.Public, SystemName.PublicCd));
+ super(controller, interval);
this.controller = controller;
this.secretStore = controller.secretStore();
this.certPoolSize = Flags.CERT_POOL_SIZE.bindTo(controller.flagSource());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
index 30c832a7747..66fb1fe615b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
@@ -332,26 +332,24 @@ public class RoutingPolicies {
}
}
if (!aliasTargets.isEmpty()) {
- nameServiceForwarderIn(targetZone).createAlias(
+ nameServiceForwarder(applicationEndpoint).createAlias(
RecordName.from(applicationEndpoint.dnsName()), aliasTargets, Priority.normal, owner);
}
if (!directTargets.isEmpty()) {
- nameServiceForwarderIn(targetZone).createDirect(
+ nameServiceForwarder(applicationEndpoint).createDirect(
RecordName.from(applicationEndpoint.dnsName()), directTargets, Priority.normal, owner);
}
});
// Remove DNS records for inactive targets
inactiveTargetsByEndpoint.forEach((applicationEndpoint, targets) -> {
- // Where multiple zones are permitted, they all have the same routing policy, and nameServiceForwarder.
- ZoneId targetZone = applicationEndpoint.targets().iterator().next().deployment().zoneId();
targets.forEach(target -> {
if (!target.deployment().equals(deployment)) return; // Do not update target not matching this deployment
- nameServiceForwarderIn(targetZone).removeRecords(target.type(),
- RecordName.from(applicationEndpoint.dnsName()),
- target.data(),
- Priority.normal,
- owner);
+ nameServiceForwarder(applicationEndpoint).removeRecords(target.type(),
+ RecordName.from(applicationEndpoint.dnsName()),
+ target.data(),
+ Priority.normal,
+ owner);
});
});
}
@@ -394,13 +392,14 @@ public class RoutingPolicies {
/** Update zone DNS record for given policy */
private void updateZoneDnsOf(RoutingPolicy policy, LoadBalancer loadBalancer, DeploymentId deploymentId) {
+ RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(deploymentId.zoneId());
boolean addTokenEndpoint = controller.routing().tokenEndpointEnabled(deploymentId.applicationId());
- for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, addTokenEndpoint)) {
+ for (var endpoint : policy.zoneEndpointsIn(controller.system(), routingMethod, addTokenEndpoint)) {
var name = RecordName.from(endpoint.dnsName());
var record = policy.canonicalName().isPresent() ?
new Record(Record.Type.CNAME, name, RecordData.fqdn(policy.canonicalName().get().value())) :
new Record(Record.Type.A, name, RecordData.from(policy.ipAddress().orElseThrow()));
- nameServiceForwarderIn(policy.id().zone()).createRecord(record, Priority.normal, ownerOf(deploymentId));
+ nameServiceForwarder(endpoint).createRecord(record, Priority.normal, ownerOf(deploymentId));
setPrivateDns(endpoint, loadBalancer, deploymentId);
}
}
@@ -413,13 +412,14 @@ public class RoutingPolicies {
case mtls -> false;
};
if (skipBasedOnAuthMethod) return;
+ if (endpoint.routingMethod() != RoutingMethod.exclusive) return; // Not supported for this routing method
controller.serviceRegistry().vpcEndpointService()
.setPrivateDns(DomainName.of(endpoint.dnsName()),
new ClusterId(deploymentId, endpoint.cluster()),
loadBalancer.cloudAccount())
.ifPresent(challenge -> {
try (Mutex lock = db.lockNameServiceQueue()) {
- nameServiceForwarderIn(deploymentId.zoneId()).createTxt(challenge.name(), List.of(challenge.data()), Priority.high, ownerOf(deploymentId));
+ controller.nameServiceForwarder().createTxt(challenge.name(), List.of(challenge.data()), Priority.high, ownerOf(deploymentId));
db.writeDnsChallenge(challenge);
}
});
@@ -458,8 +458,7 @@ public class RoutingPolicies {
}
private void removeDnsChallenge(DnsChallenge challenge) {
- nameServiceForwarderIn(challenge.clusterId().deploymentId().zoneId())
- .removeRecords(Type.TXT, challenge.name(), Priority.normal, ownerOf(challenge.clusterId().deploymentId()));
+ controller.nameServiceForwarder().removeRecords(Type.TXT, challenge.name(), Priority.normal, ownerOf(challenge.clusterId().deploymentId()));
db.deleteDnsChallenge(challenge.clusterId());
}
@@ -469,17 +468,18 @@ public class RoutingPolicies {
* @return the updated policies
*/
private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
+ RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(allocation.deployment.zoneId());
boolean addTokenEndpoint = controller.routing().tokenEndpointEnabled(allocation.deployment.applicationId());
Map<RoutingPolicyId, RoutingPolicy> newPolicies = new LinkedHashMap<>(instancePolicies.asMap());
Set<RoutingPolicyId> activeIds = allocation.asPolicyIds();
RoutingPolicyList removable = instancePolicies.deployment(allocation.deployment)
.not().matching(policy -> activeIds.contains(policy.id()));
for (var policy : removable) {
- for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, addTokenEndpoint)) {
- nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME,
- RecordName.from(endpoint.dnsName()),
- Priority.normal,
- ownerOf(allocation));
+ for (var endpoint : policy.zoneEndpointsIn(controller.system(), routingMethod, addTokenEndpoint)) {
+ nameServiceForwarder(endpoint).removeRecords(Record.Type.CNAME,
+ RecordName.from(endpoint.dnsName()),
+ Priority.normal,
+ ownerOf(allocation));
}
newPolicies.remove(policy.id());
}
@@ -497,12 +497,11 @@ public class RoutingPolicies {
EndpointList endpoints = controller.routing().readDeclaredEndpointsOf(id.instance())
.not().requiresRotation()
.named(id.endpointId(), Endpoint.Scope.global);
- NameServiceForwarder forwarder = nameServiceForwarderIn(allocation.deployment.zoneId());
// This removes all ALIAS records having this DNS name. There is no attempt to delete only the entry for the
// affected zone. Instead, the correct set of records is (re)created by updateGlobalDnsOf
- endpoints.forEach(endpoint -> forwarder.removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()),
- Priority.normal,
- ownerOf(allocation)));
+ endpoints.forEach(endpoint -> nameServiceForwarder(endpoint).removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()),
+ Priority.normal,
+ ownerOf(allocation)));
}
}
@@ -520,8 +519,8 @@ public class RoutingPolicies {
List<RoutingPolicy> policies = routingTable.get(id);
for (var policy : policies) {
if (!policy.appliesTo(allocation.deployment)) continue;
- NameServiceForwarder forwarder = nameServiceForwarderIn(policy.id().zone());
for (Endpoint endpoint : endpoints) {
+ NameServiceForwarder forwarder = nameServiceForwarder(endpoint);
if (policy.canonicalName().isPresent()) {
forwarder.removeRecords(Record.Type.ALIAS,
RecordName.from(endpoint.dnsName()),
@@ -690,11 +689,11 @@ public class RoutingPolicies {
.collect(Collectors.toUnmodifiableSet());
}
- /** Returns the name updater to use for given zone */
- private NameServiceForwarder nameServiceForwarderIn(ZoneId zone) {
- return switch (controller.zoneRegistry().routingMethod(zone)) {
+ /** Returns the name updater to use for given endpoint */
+ private NameServiceForwarder nameServiceForwarder(Endpoint endpoint) {
+ return switch (endpoint.routingMethod()) {
case exclusive -> controller.nameServiceForwarder();
- case sharedLayer4 -> new NameServiceDiscarder(controller.curator());
+ case sharedLayer4 -> endpoint.generated().isPresent() ? controller.nameServiceForwarder() : new NameServiceDiscarder(controller.curator());
};
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index f6ed8fd7323..0a6d2a3b106 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -370,14 +370,22 @@ public class RoutingPoliciesTest {
}
@Test
- void zone_routing_policies_without_dns_update() {
+ void zone_routing_policies_with_shared_routing() {
var tester = new RoutingPoliciesTester(new DeploymentTester(), false);
var context = tester.newDeploymentContext("tenant1", "app1", "default");
tester.provisionLoadBalancers(1, context.instanceId(), true, zone1, zone2);
context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
assertEquals(0, tester.controllerTester().controller().curator().readNameServiceQueue().requests().size());
+ // Ordinary endpoints are not created in DNS
assertEquals(List.of(), tester.recordNames());
assertEquals(2, tester.policiesOf(context.instanceId()).size());
+ // Generated endpoints are created in DNS
+ tester.controllerTester().flagSource().withBooleanFlag(Flags.RANDOMIZED_ENDPOINT_NAMES.id(), true);
+ addCertificateToPool("cafed00d", UnassignedCertificate.State.ready, tester);
+ context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
+ assertEquals(List.of("b22ab332.cafed00d.z.vespa.oath.cloud",
+ "d71005bf.cafed00d.z.vespa.oath.cloud"),
+ tester.recordNames());
}
@Test
diff --git a/documentgen-test/etc/complex/music.sd b/documentgen-test/etc/complex/music.sd
index e91adeed039..11abbaa154e 100644
--- a/documentgen-test/etc/complex/music.sd
+++ b/documentgen-test/etc/complex/music.sd
@@ -3,19 +3,16 @@ search music {
document music inherits common {
field artist type string {
bolding: on
- # index-to: default, artist
indexing: index|summary
}
field disp_song type string {
indexing: summary
}
field song type string {
- # index-to: default, song
indexing: index
}
field isbn type string {
bolding: on
- # index-to: default, isbn
indexing: index|summary
}
field year type int {
diff --git a/documentgen-test/etc/complex/music2.sd b/documentgen-test/etc/complex/music2.sd
index 34419eeea6a..64e91caf25b 100644
--- a/documentgen-test/etc/complex/music2.sd
+++ b/documentgen-test/etc/complex/music2.sd
@@ -3,19 +3,16 @@ search music2 {
document music2 inherits common {
field artist type string {
bolding: on
- # index-to: default, artist
indexing: index|summary
}
field disp_song type string {
indexing: summary
}
field song type string {
- # index-to: default, song
indexing: index
}
field isbn type string {
bolding: on
- # index-to: default, isbn
indexing: index|summary
}
field year type int {
diff --git a/documentgen-test/etc/complex/video.sd b/documentgen-test/etc/complex/video.sd
index 5ca283eaa96..95cb4228ad8 100644
--- a/documentgen-test/etc/complex/video.sd
+++ b/documentgen-test/etc/complex/video.sd
@@ -3,26 +3,21 @@ search video {
document video inherits common {
field director type string {
bolding: on
- # index-to: default, director
indexing: index|summary
}
field disp_actor type string {
bolding: on
- # index-to: default, disp_actor
indexing: index|summary
}
field actor type string {
bolding: on
- # index-to: default, actor
indexing: index|summary
}
field fmt type string {
- # index-to: default, fmt
indexing: index|summary
}
field isbn type string {
bolding: on
- # index-to: default, isbn
indexing: index|summary
}
field year type int {
diff --git a/eval/src/tests/eval/value_cache/dense.json b/eval/src/tests/eval/value_cache/dense.json
index 2263053f01f..f310ee9dc32 100644
--- a/eval/src/tests/eval/value_cache/dense.json
+++ b/eval/src/tests/eval/value_cache/dense.json
@@ -1,8 +1,8 @@
{
"dimensions": ["x","y"],
"cells": [
- { "address": { "x": "0", "y": "0" }, "value": 1.0 },
- { "address": { "x": "0", "y": "1" }, "value": 2.0 },
- { "address": { "x": "1", "y": "0" }, "value": 3.0 },
+ { "address": { "x": 0, "y": 0 }, "value": 1.0 },
+ { "address": { "y": 1, "x": 0 }, "value": 2.0 },
+ { "address": { "x": "1", "y": 0 }, "value": 3.0 },
{ "address": { "x": "1", "y": "1" }, "value": 4.0 }]
}
diff --git a/eval/src/tests/eval/value_cache/sparse-short1.json b/eval/src/tests/eval/value_cache/sparse-short1.json
index 741a2160898..5b6aa6d6104 100644
--- a/eval/src/tests/eval/value_cache/sparse-short1.json
+++ b/eval/src/tests/eval/value_cache/sparse-short1.json
@@ -1,5 +1,5 @@
{
- "foo": 1.0,
+ "foo": 1,
"cells": 2.0,
"values": 0.5,
"blocks": 1.5
diff --git a/eval/src/tests/eval/value_cache/sparse-short2.json b/eval/src/tests/eval/value_cache/sparse-short2.json
index 7eb377968e4..552fec39bcc 100644
--- a/eval/src/tests/eval/value_cache/sparse-short2.json
+++ b/eval/src/tests/eval/value_cache/sparse-short2.json
@@ -1,6 +1,6 @@
{
"cells": {
- "foo": 1.0,
+ "foo": 1,
"cells": 2.0,
"values": 0.5,
"blocks": 1.5
diff --git a/eval/src/tests/eval/value_cache/sparse.json b/eval/src/tests/eval/value_cache/sparse.json
index a80e7906286..f52ad888c61 100644
--- a/eval/src/tests/eval/value_cache/sparse.json
+++ b/eval/src/tests/eval/value_cache/sparse.json
@@ -2,5 +2,6 @@
"dimensions": ["x","y"],
"cells": [
{ "address": { "x": "foo", "y": "bar" }, "value": 1.0 },
+ { "address": { "x": 17, "y": 42 }, "value": 1742.0 },
{ "address": { "x": "bar", "y": "foo" }, "value": 2.0 }]
}
diff --git a/eval/src/tests/eval/value_cache/sparse.json.lz4 b/eval/src/tests/eval/value_cache/sparse.json.lz4
index 0de6fae56e1..4064222d403 100644
--- a/eval/src/tests/eval/value_cache/sparse.json.lz4
+++ b/eval/src/tests/eval/value_cache/sparse.json.lz4
Binary files differ
diff --git a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
index c10da861c83..ba2412d6f70 100644
--- a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
+++ b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp
@@ -28,6 +28,7 @@ TensorSpec make_simple_dense_tensor() {
TensorSpec make_sparse_tensor() {
return TensorSpec("tensor(x{},y{})")
+ .add({{"x", "17"}, {"y", "42"}}, 1742.0)
.add({{"x", "foo"}, {"y", "bar"}}, 1.0)
.add({{"x", "bar"}, {"y", "foo"}}, 2.0);
}
@@ -74,6 +75,10 @@ TEST_F("require that dense tensors can be loaded", ConstantTensorLoader(factory)
TEST_DO(verify_tensor(make_dense_tensor(), f1.create(TEST_PATH("dense.json"), "tensor(x[2],y[2])")));
}
+TEST_F("require that sparse tensors can be loaded", ConstantTensorLoader(factory)) {
+ TEST_DO(verify_tensor(make_sparse_tensor(), f1.create(TEST_PATH("sparse.json"), "tensor(x{},y{})")));
+}
+
TEST_F("require that mixed tensors can be loaded", ConstantTensorLoader(factory)) {
TEST_DO(verify_tensor(make_mixed_tensor(), f1.create(TEST_PATH("mixed.json"), "tensor(x{},y[2])")));
}
diff --git a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
index 059bf3c535d..74965b79bbc 100644
--- a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
+++ b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp
@@ -20,6 +20,42 @@ using ObjectTraverser = slime::ObjectTraverser;
namespace {
+struct Target {
+ const ValueType tensor_type;
+ TensorSpec spec;
+ void check_add(TensorSpec::Address address, double value) {
+ for (const auto &dim : tensor_type.dimensions()) {
+ const auto & it = address.find(dim.name);
+ if (it == address.end()) {
+ LOG(error, "Missing dimension '%s' in address for constant tensor", dim.name.c_str());
+ throw std::exception();
+ }
+ if (it->second.is_mapped() != dim.is_mapped()) {
+ LOG(error, "Mismatch mapped/indexed for '%s' in address", dim.name.c_str());
+ throw std::exception();
+ }
+ if (dim.is_indexed()) {
+ if (it->second.index >= dim.size) {
+ LOG(error, "Index %zu out of range for dimension %s[%u]",
+ it->second.index, dim.name.c_str(), dim.size);
+ throw std::exception();
+ }
+ }
+ }
+ if (address.size() != tensor_type.dimensions().size()) {
+ for (const auto & [name, label] : address) {
+ if (tensor_type.dimension_index(name) == ValueType::Dimension::npos) {
+ LOG(error, "Extra dimension '%s' in address for constant tensor", name.c_str());
+ }
+ }
+ LOG(error, "Wrong number %zu of dimensions in address for constant tensor, wanted %zu",
+ address.size(), tensor_type.dimensions().size());
+ throw std::exception();
+ }
+ spec.add(address, value);
+ }
+};
+
struct AddressExtractor : ObjectTraverser {
const std::set<vespalib::string> &indexed;
TensorSpec::Address &address;
@@ -28,14 +64,38 @@ struct AddressExtractor : ObjectTraverser {
: indexed(indexed_in), address(address_out) {}
void field(const Memory &symbol, const Inspector &inspector) override {
vespalib::string dimension = symbol.make_string();
- vespalib::string label = inspector.asString().make_string();
- if (dimension.empty() || label.empty()) {
+ if (dimension.empty()) {
+ LOG(warning, "missing 'dimension' in address");
+ throw std::exception();
+ }
+ if (inspector.type().getId() == vespalib::slime::LONG::ID) {
+ size_t index = inspector.asLong();
+ if (indexed.contains(dimension)) {
+ address.emplace(dimension, TensorSpec::Label(index));
+ } else {
+ auto label = std::to_string(index);
+ address.emplace(dimension, TensorSpec::Label(label));
+ }
return;
}
+ vespalib::string label = inspector.asString().make_string();
+ if (label.empty()) {
+ auto got = inspector.toString();
+ int sz = got.size();
+ if (sz > 0) --sz;
+ LOG(error, "missing 'label' in address, got '%.*s'", sz, got.c_str());
+ throw std::exception();
+ }
if (indexed.find(dimension) == indexed.end()) {
address.emplace(dimension, TensorSpec::Label(label));
} else {
- size_t index = strtoull(label.c_str(), nullptr, 10);
+ const char *str_beg = label.c_str();
+ char *str_end = const_cast<char *>(str_beg);
+ size_t index = strtoull(str_beg, &str_end, 10);
+ if (str_end == str_beg || *str_end != '\0') {
+ LOG(error, "bad index: '%s' cannot be parsed as an unsigned integer", str_beg);
+ throw std::exception();
+ }
address.emplace(dimension, TensorSpec::Label(index));
}
}
@@ -43,46 +103,41 @@ struct AddressExtractor : ObjectTraverser {
struct SingleMappedExtractor : ObjectTraverser {
const vespalib::string &dimension;
- TensorSpec &spec;
- SingleMappedExtractor(const vespalib::string &dimension_in, TensorSpec &spec_in)
+ Target &target;
+ SingleMappedExtractor(const vespalib::string &dimension_in, Target &target_in)
: dimension(dimension_in),
- spec(spec_in)
+ target(target_in)
{}
void field(const Memory &symbol, const Inspector &inspector) override {
vespalib::string label = symbol.make_string();
double value = inspector.asDouble();
TensorSpec::Address address;
address.emplace(dimension, label);
- spec.add(address, value);
+ target.check_add(address, value);
}
};
-void decodeSingleMappedForm(const Inspector &root, const ValueType &value_type, TensorSpec &spec) {
- auto extractor = SingleMappedExtractor(value_type.dimensions()[0].name, spec);
+void decodeSingleMappedForm(const Inspector &root, const ValueType &value_type, Target &target) {
+ auto extractor = SingleMappedExtractor(value_type.dimensions()[0].name, target);
root.traverse(extractor);
}
-void decodeSingleDenseForm(const Inspector &values, const ValueType &value_type, TensorSpec &spec) {
+void decodeSingleDenseForm(const Inspector &values, const ValueType &value_type, Target &target) {
const auto &dimension = value_type.dimensions()[0].name;
for (size_t i = 0; i < values.entries(); ++i) {
TensorSpec::Address address;
address.emplace(dimension, TensorSpec::Label(i));
- spec.add(address, values[i].asDouble());
+ target.check_add(address, values[i].asDouble());
}
}
struct DenseValuesDecoder {
const std::vector<ValueType::Dimension> _idims;
- TensorSpec &_target;
- DenseValuesDecoder(std::vector<ValueType::Dimension> idims, TensorSpec &target)
- : _idims(std::move(idims)),
- _target(target)
- {
- }
+ Target &_target;
void decode(const Inspector &input, const TensorSpec::Address &address, size_t dim_idx) {
if (dim_idx == _idims.size()) {
- _target.add(address, input.asDouble());
+ _target.check_add(address, input.asDouble());
} else {
const auto &dimension = _idims[dim_idx];
if (input.entries() != dimension.size) {
@@ -97,9 +152,9 @@ struct DenseValuesDecoder {
}
};
-void decodeDenseValues(const Inspector &values, const ValueType &value_type, TensorSpec &spec) {
+void decodeDenseValues(const Inspector &values, const ValueType &value_type, Target &target) {
TensorSpec::Address address;
- DenseValuesDecoder decoder(value_type.indexed_dimensions(), spec);
+ DenseValuesDecoder decoder{value_type.indexed_dimensions(), target};
decoder.decode(values, address, 0);
}
@@ -113,12 +168,12 @@ struct TraverserCallback : ObjectTraverser {
}
};
-void decodeSingleMappedBlocks(const Inspector &blocks, const ValueType &value_type, TensorSpec &spec) {
+void decodeSingleMappedBlocks(const Inspector &blocks, const ValueType &value_type, Target &target) {
if (value_type.count_mapped_dimensions() != 1) {
return; // TODO handle mismatch
}
vespalib::string dim_name = value_type.mapped_dimensions()[0].name;
- DenseValuesDecoder decoder(value_type.indexed_dimensions(), spec);
+ DenseValuesDecoder decoder{value_type.indexed_dimensions(), target};
auto lambda = [&](vespalib::string label, const Inspector &input) {
TensorSpec::Address address;
address.emplace(dim_name, std::move(label));
@@ -128,13 +183,13 @@ void decodeSingleMappedBlocks(const Inspector &blocks, const ValueType &value_ty
blocks.traverse(cb);
}
-void decodeAddressedBlocks(const Inspector &blocks, const ValueType &value_type, TensorSpec &spec) {
+void decodeAddressedBlocks(const Inspector &blocks, const ValueType &value_type, Target &target) {
const auto & idims = value_type.indexed_dimensions();
std::set<vespalib::string> indexed;
for (const auto &dimension: idims) {
indexed.insert(dimension.name);
}
- DenseValuesDecoder decoder(value_type.indexed_dimensions(), spec);
+ DenseValuesDecoder decoder{value_type.indexed_dimensions(), target};
for (size_t i = 0; i < blocks.entries(); ++i) {
TensorSpec::Address address;
AddressExtractor extractor(indexed, address);
@@ -143,7 +198,7 @@ void decodeAddressedBlocks(const Inspector &blocks, const ValueType &value_type,
}
}
-void decodeLiteralForm(const Inspector &cells, const ValueType &value_type, TensorSpec &spec) {
+void decodeLiteralForm(const Inspector &cells, const ValueType &value_type, Target &target) {
std::set<vespalib::string> indexed;
for (const auto &dimension: value_type.dimensions()) {
if (dimension.is_indexed()) {
@@ -154,7 +209,7 @@ void decodeLiteralForm(const Inspector &cells, const ValueType &value_type, Tens
TensorSpec::Address address;
AddressExtractor extractor(indexed, address);
cells[i]["address"].traverse(extractor);
- spec.add(address, cells[i]["value"].asDouble());
+ target.check_add(address, cells[i]["value"].asDouble());
}
}
@@ -207,7 +262,7 @@ ConstantTensorLoader::create(const vespalib::string &path, const vespalib::strin
}
Slime slime;
decode_json(path, slime);
- TensorSpec spec(type);
+ Target target{value_type, TensorSpec(type)};
bool isSingleDenseType = value_type.is_dense() && (value_type.count_indexed_dimensions() == 1);
bool isSingleMappedType = value_type.is_sparse() && (value_type.count_mapped_dimensions() == 1);
const Inspector &root = slime.get();
@@ -216,31 +271,31 @@ ConstantTensorLoader::create(const vespalib::string &path, const vespalib::strin
const Inspector &values = root["values"];
const Inspector &blocks = root["blocks"];
if (cells.type().getId() == vespalib::slime::ARRAY::ID) {
- decodeLiteralForm(cells, value_type, spec);
+ decodeLiteralForm(cells, value_type, target);
}
else if (cells.type().getId() == vespalib::slime::OBJECT::ID) {
if (isSingleMappedType) {
- decodeSingleMappedForm(cells, value_type, spec);
+ decodeSingleMappedForm(cells, value_type, target);
}
}
else if (values.type().getId() == vespalib::slime::ARRAY::ID) {
- decodeDenseValues(values, value_type, spec);
+ decodeDenseValues(values, value_type, target);
}
else if (blocks.type().getId() == vespalib::slime::OBJECT::ID) {
- decodeSingleMappedBlocks(blocks, value_type, spec);
+ decodeSingleMappedBlocks(blocks, value_type, target);
}
else if (blocks.type().getId() == vespalib::slime::ARRAY::ID) {
- decodeAddressedBlocks(blocks, value_type, spec);
+ decodeAddressedBlocks(blocks, value_type, target);
}
else if (isSingleMappedType) {
- decodeSingleMappedForm(root, value_type, spec);
+ decodeSingleMappedForm(root, value_type, target);
}
}
else if (root.type().getId() == vespalib::slime::ARRAY::ID && isSingleDenseType) {
- decodeSingleDenseForm(root, value_type, spec);
+ decodeSingleDenseForm(root, value_type, target);
}
try {
- return std::make_unique<SimpleConstantValue>(value_from_spec(spec, _factory));
+ return std::make_unique<SimpleConstantValue>(value_from_spec(target.spec, _factory));
} catch (std::exception &) {
return std::make_unique<BadConstantValue>();
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index ae281dc708f..c3788a20ddc 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -382,12 +382,6 @@ public class Flags {
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
- public static final UnboundBooleanFlag NEW_IDDOC_LAYOUT = defineFeatureFlag(
- "new_iddoc_layout", true, List.of("tokle", "bjorncs", "olaa"), "2023-04-24", "2023-12-30",
- "Whether to use new identity document layout",
- "Takes effect on node reboot",
- HOSTNAME, APPLICATION_ID, VESPA_VERSION);
-
public static final UnboundBooleanFlag RANDOMIZED_ENDPOINT_NAMES = defineFeatureFlag(
"randomized-endpoint-names", false, List.of("andreer"), "2023-04-26", "2023-07-30",
"Whether to use randomized endpoint names",
diff --git a/hosted-tenant-base/pom.xml b/hosted-tenant-base/pom.xml
index 7ca96a4d8b7..f2d4c6ab012 100644
--- a/hosted-tenant-base/pom.xml
+++ b/hosted-tenant-base/pom.xml
@@ -195,7 +195,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
- <version>3.0.0</version>
+ <version>3.3.0</version>
<executions>
<execution>
<id>enforce-java</id>
diff --git a/jdisc_core/pom.xml b/jdisc_core/pom.xml
index cdc1eb76b6d..fa9bf05fad3 100644
--- a/jdisc_core/pom.xml
+++ b/jdisc_core/pom.xml
@@ -39,7 +39,6 @@
<!-- Newer version than the one in rt.jar, including the ElementTraversal class needed by Xerces (Aug 2015, still valid Sep 2017) -->
<groupId>xml-apis</groupId>
<artifactId>xml-apis</artifactId>
- <version>1.4.01</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
@@ -262,7 +261,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
- <version>3.0.0-M6</version>
<executions>
<execution>
<goals>
diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java
index a2aade05059..4c33bbb563f 100644
--- a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java
+++ b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java
@@ -1,5 +1,6 @@
package com.yahoo.jdisc.core;
+import com.google.common.collect.Sets;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -7,11 +8,13 @@ import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Paths;
-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
-import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -49,6 +52,60 @@ public class ExportPackagesIT {
"javax.activation.jar"
).map(f -> JAR_PATH + f).toList();
+ private static final Pattern PACKAGE_PATTERN = Pattern.compile("([^;,]+);\\s*version=\"([^\"]*)\"(?:,\\s*([^;,]+);\\s*uses:=\"([^\"]*)\")?");
+
+ record PackageInfo(String packageName, String version, List<String> clauses) implements Comparable<PackageInfo> {
+
+ PackageInfo withoutVersion() {
+ return new PackageInfo(packageName, "", clauses);
+ }
+
+ @Override
+ public String toString() {
+ return packageName + ":" + version;
+ }
+
+ @Override
+ public int compareTo(PackageInfo o) {
+ int pkg = packageName.compareTo(o.packageName);
+ return (pkg != 0) ? pkg : version.compareTo(o.version);
+ }
+ }
+
+ record PackageSet(List<PackageInfo> packages) {
+ PackageSet removeJavaVersion() {
+ return new PackageSet(packages.stream()
+ .map(p -> p.version.contains(".JavaSE_") ? p.withoutVersion() : p)
+ .toList());
+ }
+
+ PackageSet removeNewPackageOnJava20() {
+ return new PackageSet(packages.stream()
+ .filter(p -> ! p.packageName.contains("java.lang.foreign"))
+ .filter(p -> ! p.packageName.contains("com.sun.jna"))
+ .toList());
+ }
+
+ boolean isEquivalentTo(PackageSet other) {
+ var thisPackages = new HashSet<>(removeJavaVersion().removeNewPackageOnJava20().packages);
+ var otherPackages = new HashSet<>(other.removeJavaVersion().removeNewPackageOnJava20().packages);
+ return thisPackages.equals(otherPackages);
+ }
+
+ PackageSet minus(PackageSet other) {
+ var thisPackages = new HashSet<>(removeJavaVersion().removeNewPackageOnJava20().packages);
+ var otherPackages = new HashSet<>(other.removeJavaVersion().removeNewPackageOnJava20().packages);
+ Set<PackageInfo> diff = Sets.difference(thisPackages, otherPackages);
+ return new PackageSet(diff.stream().sorted().toList());
+ }
+
+ @Override
+ public String toString() {
+ return packages.stream().map(PackageInfo::toString)
+ .collect(Collectors.joining(",\n ", " [", "]"));
+ }
+ }
+
@TempDir
public static File tempFolder;
@@ -62,60 +119,60 @@ public class ExportPackagesIT {
String expectedValue = expectedProperties.getProperty(ExportPackages.EXPORT_PACKAGES);
assertNotNull(expectedValue, "Missing exportPackages property in file.");
- Set<String> actualPackages = removeNewPackageOnJava20(removeJavaVersion(getPackages(actualValue)));
- Set<String> expectedPackages = removeNewPackageOnJava20(removeJavaVersion(getPackages(expectedValue)));
- if (!actualPackages.equals(expectedPackages)) {
+ var expectedPackages = parsePackages(expectedValue).removeJavaVersion();
+ var actualPackages = parsePackages(actualValue).removeJavaVersion()
+ .removeNewPackageOnJava20();
+
+ if (!actualPackages.isEquivalentTo(expectedPackages)) {
StringBuilder message = getDiff(actualPackages, expectedPackages);
message.append("\n\nIf this test fails due to an intentional change in exported packages, run the following command:\n")
.append("$ cp jdisc_core/target/classes/exportPackages.properties jdisc_core/src/test/resources/")
.append("\n\nNote that removing exported packages usually requires a new major version of Vespa.\n");
fail(message.toString());
}
+ // TODO: check that actualValue equals expectedValue. Problem is that exportPackages.properties is not deterministic.
}
- private static Set<String> removeJavaVersion(Set<String> packages) {
- return packages.stream().map(p -> p.replaceAll(".JavaSE_\\d+", "")).collect(Collectors.toSet());
- }
-
- private static Set<String> removeNewPackageOnJava20(Set<String> packages) {
- return packages.stream()
- .filter(p -> ! p.contains("java.lang.foreign"))
- .filter(p -> ! p.contains("com.sun.jna"))
- .collect(Collectors.toSet());
- }
-
- private static StringBuilder getDiff(Set<String> actual, Set<String> expected) {
+ private static StringBuilder getDiff(PackageSet actual, PackageSet expected) {
StringBuilder sb = new StringBuilder();
- Set<String> onlyInActual = onlyInSet1(actual, expected);
- if (! onlyInActual.isEmpty()) {
+
+ var onlyInActual = actual.minus(expected);
+ if (! onlyInActual.packages().isEmpty()) {
sb.append("\nexportPackages.properties contained ")
- .append(onlyInActual.size())
+ .append(onlyInActual.packages.size())
.append(" unexpected packages:\n")
- .append(onlyInActual.stream().collect(Collectors.joining(",\n ", " [", "]")));
+ .append(onlyInActual);
}
- Set<String> onlyInExpected = onlyInSet1(expected, actual);
- if (! onlyInExpected.isEmpty()) {
+ var onlyInExpected = expected.minus(actual);
+ if (! onlyInExpected.packages.isEmpty()) {
sb.append("\nexportPackages.properties did not contain ")
- .append(onlyInExpected.size())
+ .append(onlyInExpected.packages.size())
.append(" expected packages:\n")
- .append(onlyInExpected.stream().collect(Collectors.joining(",\n ", " [", "]")));
+ .append(onlyInExpected);
}
return sb;
}
- // Returns a sorted set for readability.
- private static Set<String> onlyInSet1(Set<String> set1, Set<String> set2) {
- return set1.stream()
- .filter(s -> ! set2.contains(s))
- .collect(Collectors.toCollection(TreeSet::new));
- }
+ public static PackageSet parsePackages(String input) {
+ List<PackageInfo> packages = new ArrayList<>();
- private static Set<String> getPackages(String propertyValue) {
- return Arrays.stream(propertyValue.split(","))
- .map(String::trim)
- .filter(s -> ! s.isEmpty())
- .collect(Collectors.toSet());
+ Matcher matcher = PACKAGE_PATTERN.matcher(input);
+ while (matcher.find()) {
+ String packageName = matcher.group(1);
+ String version = matcher.group(2);
+ String dependencyPackage = matcher.group(3);
+ String dependencyClause = matcher.group(4);
+
+ List<String> clauses = new ArrayList<>();
+ if (dependencyPackage != null && dependencyClause != null) {
+ clauses.add(dependencyPackage + ";" + dependencyClause);
+ }
+
+ PackageInfo packageInfo = new PackageInfo(packageName, version, clauses);
+ packages.add(packageInfo);
+ }
+ return new PackageSet(packages);
}
private static Properties getPropertiesFromFile(File file) throws IOException {
diff --git a/maven-plugins/allowed-maven-dependencies.txt b/maven-plugins/allowed-maven-dependencies.txt
index d504357b07c..99d4d227305 100644
--- a/maven-plugins/allowed-maven-dependencies.txt
+++ b/maven-plugins/allowed-maven-dependencies.txt
@@ -28,7 +28,7 @@ org.apache.maven:maven-repository-metadata:3.8.7
org.apache.maven:maven-resolver-provider:3.8.7
org.apache.maven:maven-settings:3.8.7
org.apache.maven:maven-settings-builder:3.8.7
-org.apache.maven.enforcer:enforcer-api:3.0.0
+org.apache.maven.enforcer:enforcer-api:3.3.0
org.apache.maven.plugin-tools:maven-plugin-annotations:3.6.4
org.apache.maven.plugins:maven-shade-plugin:3.4.1
org.apache.maven.resolver:maven-resolver-api:1.6.3
@@ -37,7 +37,6 @@ org.apache.maven.resolver:maven-resolver-spi:1.6.3
org.apache.maven.resolver:maven-resolver-util:1.6.3
org.apache.maven.shared:maven-artifact-transfer:0.13.1
org.apache.maven.shared:maven-common-artifact-filters:3.1.0
-org.apache.maven.shared:maven-dependency-tree:3.1.1
org.apache.maven.shared:maven-dependency-tree:3.2.0
org.apache.maven.shared:maven-shared-utils:3.3.4
org.codehaus.plexus:plexus-archiver:4.4.0
@@ -50,7 +49,6 @@ org.codehaus.plexus:plexus-sec-dispatcher:2.0
org.codehaus.plexus:plexus-utils:3.3.1
org.eclipse.aether:aether-api:1.0.0.v20140518
org.eclipse.aether:aether-util:1.0.0.v20140518
-org.eclipse.aether:aether-util:1.1.0
org.eclipse.sisu:org.eclipse.sisu.inject:0.3.5
org.eclipse.sisu:org.eclipse.sisu.plexus:0.3.5
org.iq80.snappy:snappy:0.4
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
index 1d3fcb5fbf8..b6ec0ebbd94 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
@@ -43,7 +43,6 @@ import java.io.UncheckedIOException;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
import java.security.KeyPair;
import java.security.PrivateKey;
import java.security.cert.X509Certificate;
@@ -76,7 +75,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
private static final String CONTAINER_SIA_DIRECTORY = "/var/lib/sia";
private static final String LEGACY_SIA_DIRECTORY = "/opt/vespa/var/vespa/sia";
- private final URI ztsEndpoint;
private final Path ztsTrustStorePath;
private final Timer timer;
private final String certificateDnsSuffix;
@@ -87,14 +85,12 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
// Used as an optimization to ensure ZTS is not DDoS'ed on continuously failing refresh attempts
private final Map<ContainerName, Instant> lastRefreshAttempt = new ConcurrentHashMap<>();
- public AthenzCredentialsMaintainer(URI ztsEndpoint,
- Path ztsTrustStorePath,
+ public AthenzCredentialsMaintainer(Path ztsTrustStorePath,
ConfigServerInfo configServerInfo,
String certificateDnsSuffix,
ServiceIdentityProvider hostIdentityProvider,
FlagSource flagSource,
Timer timer) {
- this.ztsEndpoint = ztsEndpoint;
this.ztsTrustStorePath = ztsTrustStorePath;
this.certificateDnsSuffix = certificateDnsSuffix;
this.hostIdentityProvider = hostIdentityProvider;
@@ -231,14 +227,7 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
var keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA);
var athenzRole = AthenzRole.fromResourceNameString(role);
- var containerIdentitySslContext = new SslContextBuilder()
- .withKeyStore(privateKeyFile, certificateFile)
- .withTrustStore(ztsTrustStorePath)
- .build();
- try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(identityDocument))
- .withSslContext(containerIdentitySslContext)
- .withHostnameVerifier(ztsHostNameVerifier)
- .build()) {
+ try (ZtsClient ztsClient = ztsClient(identityDocument.ztsUrl(), privateKeyFile, certificateFile, ztsHostNameVerifier)) {
var csrGenerator = new CsrGenerator(certificateDnsSuffix, identityDocument.providerService().getFullName());
var csr = csrGenerator.generateRoleCsr(
identity, athenzRole, identityDocument.providerUniqueId(), identityDocument.clusterType(), keyPair);
@@ -318,7 +307,7 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
// Allow all zts hosts while removing SIS
HostnameVerifier ztsHostNameVerifier = (hostname, sslSession) -> true;
- try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(doc)).withIdentityProvider(hostIdentityProvider).withHostnameVerifier(ztsHostNameVerifier).build()) {
+ try (ZtsClient ztsClient = ztsClient(doc.ztsUrl(), hostIdentityProvider.privateKeyPath(), hostIdentityProvider.certificatePath(), ztsHostNameVerifier)) {
InstanceIdentity instanceIdentity =
ztsClient.registerInstance(
doc.providerService(),
@@ -331,15 +320,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
}
}
- /**
- * Return zts url from identity document, fallback to ztsEndpoint
- */
- private URI ztsEndpoint(IdentityDocument doc) {
- return Optional.ofNullable(doc.ztsUrl())
- .filter(s -> !s.isBlank())
- .map(URI::create)
- .orElse(ztsEndpoint);
- }
private void refreshIdentity(NodeAgentContext context, ContainerPath privateKeyFile, ContainerPath certificateFile,
ContainerPath identityDocumentFile, IdentityDocument doc, IdentityType identityType, AthenzIdentity identity) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA);
@@ -347,14 +327,10 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
Pkcs10Csr csr = csrGenerator.generateInstanceCsr(
identity, doc.providerUniqueId(), doc.ipAddresses(), doc.clusterType(), keyPair);
- SSLContext containerIdentitySslContext = new SslContextBuilder().withKeyStore(privateKeyFile, certificateFile)
- .withTrustStore(ztsTrustStorePath)
- .build();
-
try {
// Allow all zts hosts while removing SIS
HostnameVerifier ztsHostNameVerifier = (hostname, sslSession) -> true;
- try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(doc)).withSslContext(containerIdentitySslContext).withHostnameVerifier(ztsHostNameVerifier).build()) {
+ try (ZtsClient ztsClient = ztsClient(doc.ztsUrl(), privateKeyFile, certificateFile, ztsHostNameVerifier)) {
InstanceIdentity instanceIdentity =
ztsClient.refreshInstance(
doc.providerService(),
@@ -439,17 +415,26 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
var certsDirectory = legacySiaDirectory.resolve("certs");
Files.createDirectories(keysDirectory);
Files.createDirectories(certsDirectory);
- writeFile(certsDirectory.resolve(certificateFile.getFileName()), new String(Files.readAllBytes(certificateFile)));
- writeFile(keysDirectory.resolve(privateKeyFile.getFileName()), new String(Files.readAllBytes(privateKeyFile)));
+ writeFile(certsDirectory.resolve(certificateFile.getFileName()), Files.readString(certificateFile));
+ writeFile(keysDirectory.resolve(privateKeyFile.getFileName()), Files.readString(privateKeyFile));
}
- /*
- Get the document version to ask for
- */
+ /** Get the document version to ask for */
private int documentVersion(NodeAgentContext context) {
return SignedIdentityDocument.DEFAULT_DOCUMENT_VERSION;
}
+ private ZtsClient ztsClient(URI ztsEndpoint, Path privateKeyFile, Path certificateFile, HostnameVerifier hostnameVerifier) {
+ SSLContext sslContext = new SslContextBuilder()
+ .withKeyStore(privateKeyFile, certificateFile)
+ .withTrustStore(ztsTrustStorePath)
+ .build();
+ return new DefaultZtsClient.Builder(ztsEndpoint)
+ .withSslContext(sslContext)
+ .withHostnameVerifier(hostnameVerifier)
+ .build();
+ }
+
private List<String> getRoleList(NodeAgentContext context) {
try {
return identityDocumentClient.getNodeRoles(context.hostname().value());
@@ -463,7 +448,7 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
NODE("vespa-node-identity-document.json"),
TENANT("vespa-tenant-identity-document.json");
- private String identityDocument;
+ private final String identityDocument;
IdentityType(String identityDocument) {
this.identityDocument = identityDocument;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java
index 9bc18533ddf..e760e36f90b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java
@@ -24,7 +24,7 @@ public final class LockedNodeList extends NodeList {
this.lock = Objects.requireNonNull(lock, "lock must be non-null");
}
- /** Returns a new LockedNodeList with the for the same lock. */
+ /** Returns a new LockedNodeList with the same lock. */
public LockedNodeList childList(List<Node> nodes) {
return new LockedNodeList(nodes, lock);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index 9da66413b9c..f3d69fdf103 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -205,7 +205,7 @@ public class NodeRepository extends AbstractComponent {
*/
public boolean exclusiveAllocation(ClusterSpec clusterSpec) {
return clusterSpec.isExclusive() ||
- ( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
+ ( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
( !zone().cloud().allowHostSharing() && !sharedHosts.value().isEnabled(clusterSpec.type().name()));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
index a2ef76e84d0..40d1d50e0e8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
@@ -195,6 +195,7 @@ public class AllocatableClusterResources {
else { // Return the cheapest flavor satisfying the requested resources, if any
NodeResources cappedWantedResources = applicationLimits.cap(wantedResources.nodeResources());
Optional<AllocatableClusterResources> best = Optional.empty();
+ Optional<AllocatableClusterResources> bestDisregardingDiskLimit = Optional.empty();
for (Flavor flavor : nodeRepository.flavors().getFlavors()) {
// Flavor decide resources: Real resources are the worst case real resources we'll get if we ask for these advertised resources
NodeResources advertisedResources = nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor);
@@ -202,7 +203,9 @@ public class AllocatableClusterResources {
// Adjust where we don't need exact match to the flavor
if (flavor.resources().storageType() == NodeResources.StorageType.remote) {
- double diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive).diskGb();
+ double diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive, true).diskGb();
+ if (diskGb > applicationLimits.max().nodeResources().diskGb() || diskGb < applicationLimits.min().nodeResources().diskGb()) // TODO: Remove when disk limit is enforced
+ diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive, false).diskGb();
advertisedResources = advertisedResources.withDiskGb(diskGb);
realResources = realResources.withDiskGb(diskGb);
}
@@ -213,14 +216,24 @@ public class AllocatableClusterResources {
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)) continue;
+
var candidate = new AllocatableClusterResources(wantedResources.with(realResources),
advertisedResources,
wantedResources,
clusterSpec);
+
+ if ( ! systemLimits.isWithinAdvertisedDiskLimits(advertisedResources, clusterSpec)) { // TODO: Remove when disk limit is enforced
+ if (bestDisregardingDiskLimit.isEmpty() || candidate.preferableTo(bestDisregardingDiskLimit.get())) {
+ bestDisregardingDiskLimit = Optional.of(candidate);
+ }
+ continue;
+ }
if (best.isEmpty() || candidate.preferableTo(best.get())) {
best = Optional.of(candidate);
}
}
+ if (best.isEmpty())
+ best = bestDisregardingDiskLimit;
return best;
}
}
@@ -234,7 +247,7 @@ public class AllocatableClusterResources {
boolean bestCase) {
var systemLimits = new NodeResourceLimits(nodeRepository);
var advertisedResources = nodeRepository.resourcesCalculator().realToRequest(wantedResources.nodeResources(), exclusive, bestCase);
- advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive); // Ask for something legal
+ advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive, true); // Ask for something legal
advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail
var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive, bestCase); // What we'll really get
if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index b56e8d1b247..2287b768dee 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits;
import java.util.Optional;
@@ -63,9 +64,8 @@ public class AllocationOptimizer {
availableRealHostResources,
nodeRepository);
if (allocatableResources.isEmpty()) continue;
- if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get())) {
+ if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get()))
bestAllocation = allocatableResources;
- }
}
}
return bestAllocation;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index 2a0b4f02b20..331759127e4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -268,11 +268,9 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
// build() requires a version, even though it is not (should not be) used
.vespaVersion(Vtag.currentVersion)
.build();
- NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), nodeResources, false, true,
+ NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), 1, nodeResources, false, true,
nodeRepository().zone().cloud().account(), Duration.ZERO);
- int wantedGroups = 1;
-
- NodePrioritizer prioritizer = new NodePrioritizer(allNodes, applicationId, clusterSpec, nodeSpec, wantedGroups,
+ NodePrioritizer prioritizer = new NodePrioritizer(allNodes, applicationId, clusterSpec, nodeSpec,
true, nodeRepository().nameResolver(), nodeRepository().nodes(), nodeRepository().resourcesCalculator(),
nodeRepository().spareCount(), nodeSpec.cloudAccount().isExclave(nodeRepository().zone()));
List<NodeCandidate> nodeCandidates = prioritizer.collect(List.of());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index eec195ccfcb..0bb045dc6a1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -24,7 +24,6 @@ import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.maintenance.NodeFailer;
import com.yahoo.vespa.hosted.provision.node.filter.NodeFilter;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
-import com.yahoo.vespa.hosted.provision.provisioning.HostIpConfig;
import com.yahoo.vespa.orchestrator.HostNameNotFoundException;
import com.yahoo.vespa.orchestrator.Orchestrator;
@@ -36,7 +35,6 @@ import java.util.Collection;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.Optional;
@@ -48,6 +46,7 @@ import java.util.function.Predicate;
import java.util.logging.Level;
import java.util.logging.Logger;
+import static com.yahoo.collections.Iterables.reversed;
import static com.yahoo.vespa.hosted.provision.restapi.NodePatcher.DROP_DOCUMENTS_REPORT;
import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.groupingBy;
@@ -968,7 +967,7 @@ public class Nodes {
// If the first node is now earlier in lock order than some other locks we have, we need to close those and re-acquire them.
Node next = unlocked.pollFirst();
Set<NodeMutex> outOfOrder = locked.tailSet(new NodeMutex(next, () -> { }), false);
- NodeMutexes.close(outOfOrder.iterator());
+ NodeMutexes.close(outOfOrder);
for (NodeMutex node : outOfOrder) unlocked.add(node.node());
outOfOrder.clear();
@@ -1002,15 +1001,25 @@ public class Nodes {
}
finally {
// If we didn't manage to lock all nodes, we must close the ones we did lock before we throw.
- NodeMutexes.close(locked.iterator());
+ NodeMutexes.close(locked);
}
}
/** A node with their locks, acquired in a universal order. */
public record NodeMutexes(List<NodeMutex> nodes) implements AutoCloseable {
- @Override public void close() { close(nodes.iterator()); }
- private static void close(Iterator<NodeMutex> nodes) {
- if (nodes.hasNext()) try (NodeMutex node = nodes.next()) { close(nodes); }
+ @Override public void close() { close(nodes); }
+ private static void close(Collection<NodeMutex> nodes) {
+ RuntimeException thrown = null;
+ for (NodeMutex node : reversed(List.copyOf(nodes))) {
+ try {
+ node.close();
+ }
+ catch (RuntimeException e) {
+ if (thrown == null) thrown = e;
+ else thrown.addSuppressed(e);
+ }
+ }
+ if (thrown != null) throw thrown;
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index c25f33bc8c2..9adff9f9d7a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -14,7 +14,6 @@ import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
-import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
@@ -71,8 +70,8 @@ class Activator {
NodeList allNodes = nodeRepository.nodes().list();
NodeList applicationNodes = allNodes.owner(application);
- NodeList reserved = updatePortsFrom(hosts, applicationNodes.state(Node.State.reserved)
- .matching(node -> hostnames.contains(node.hostname())));
+ NodeList reserved = applicationNodes.state(Node.State.reserved).matching(node -> hostnames.contains(node.hostname()));
+ reserved = updatePortsFrom(hosts, reserved);
nodeRepository.nodes().reserve(reserved.asList()); // Re-reserve nodes to avoid reservation expiry
NodeList oldActive = applicationNodes.state(Node.State.active); // All nodes active now
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index bfd06d744f6..8a39f309935 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -9,11 +9,11 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+
import java.util.Map;
import java.util.TreeMap;
@@ -115,10 +115,6 @@ public class CapacityPolicies {
return versioned(clusterSpec, Map.of(new Version(0), smallestSharedResources())).with(architecture);
}
- if (zone.environment() == Environment.dev && zone.system() == SystemName.cd) {
- return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 4, 50, 0.3)));
- }
-
if (clusterSpec.type() == ClusterSpec.Type.content) {
return zone.cloud().dynamicProvisioning()
? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2, 16, 300, 0.3)))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupIndices.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupIndices.java
new file mode 100644
index 00000000000..44f371be293
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupIndices.java
@@ -0,0 +1,163 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.node.Agent;
+
+import java.time.Clock;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Knows how to assign a group index to a number of nodes (some of which have an index already),
+ * such that the nodes are placed in the desired groups with minimal group movement.
+ *
+ * @author bratseth
+ */
+class GroupIndices {
+
+ private final NodeSpec requested;
+ private final NodeList allNodes;
+ private final Clock clock;
+
+ GroupIndices(NodeSpec requested, NodeList allNodes, Clock clock) {
+ if (requested.groups() > 1 && requested.count().isEmpty())
+ throw new IllegalArgumentException("Unlimited nodes cannot be grouped");
+ this.requested = requested;
+ this.allNodes = allNodes;
+ this.clock = clock;
+ }
+
+ Collection<NodeCandidate> assignTo(Collection<NodeCandidate> nodes) {
+ int[] countInGroup = countInEachGroup(nodes);
+ nodes = byUnretiringPriority(nodes).stream().map(node -> unretireNodeInExpandedGroup(node, countInGroup)).toList();
+ nodes = nodes.stream().map(node -> assignGroupToNewNode(node, countInGroup)).toList();
+ nodes = byUnretiringPriority(nodes).stream().map(node -> moveNodeInSurplusGroup(node, countInGroup)).toList();
+ nodes = byRetiringPriority(nodes).stream().map(node -> retireSurplusNodeInGroup(node, countInGroup)).toList();
+ nodes = nodes.stream().filter(node -> ! shouldRemove(node)).toList();
+ return nodes;
+ }
+
+ /** Prefer to retire nodes we want the least */
+ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
+ return candidates.stream().sorted(Comparator.reverseOrder()).toList();
+ }
+
+ /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
+ private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
+ return candidates.stream()
+ .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
+ .thenComparing(n -> n.allocation().get().membership().index()))
+ .toList();
+ }
+
+ private int[] countInEachGroup(Collection<NodeCandidate> nodes) {
+ int[] countInGroup = new int[requested.groups()];
+ for (var node : nodes) {
+ if (node.allocation().get().membership().retired()) continue;
+ var currentGroup = node.allocation().get().membership().cluster().group();
+ if (currentGroup.isEmpty()) continue;
+ if (currentGroup.get().index() >= requested.groups()) continue;
+ countInGroup[currentGroup.get().index()]++;
+ }
+ return countInGroup;
+ }
+
+    /** Assigns a group to new or to-be-reactivated nodes. */
+ private NodeCandidate assignGroupToNewNode(NodeCandidate node, int[] countInGroup) {
+ if (node.state() == Node.State.active && node.allocation().get().membership().retired()) return node;
+ if (node.state() == Node.State.active && node.allocation().get().membership().cluster().group().isPresent()) return node;
+ return inFirstGroupWithDeficiency(node, countInGroup);
+ }
+
+ private NodeCandidate moveNodeInSurplusGroup(NodeCandidate node, int[] countInGroup) {
+ var currentGroup = node.allocation().get().membership().cluster().group();
+ if (currentGroup.isEmpty()) return node; // Shouldn't happen
+ if (currentGroup.get().index() < requested.groups()) return node;
+ return inFirstGroupWithDeficiency(node, countInGroup);
+ }
+
+ private NodeCandidate retireSurplusNodeInGroup(NodeCandidate node, int[] countInGroup) {
+ if (node.allocation().get().membership().retired()) return node;
+ var currentGroup = node.allocation().get().membership().cluster().group();
+ if (currentGroup.isEmpty()) return node;
+ if (currentGroup.get().index() >= requested.groups()) return node;
+ if (requested.count().isEmpty()) return node; // Can't retire
+ if (countInGroup[currentGroup.get().index()] <= requested.count().get() / requested.groups()) return node;
+ countInGroup[currentGroup.get().index()]--;
+ return node.withNode(node.toNode().retire(Agent.application, clock.instant()));
+ }
+
+ /** Unretire nodes that are already in the correct group when the group is deficient. */
+ private NodeCandidate unretireNodeInExpandedGroup(NodeCandidate node, int[] countInGroup) {
+ if ( ! node.allocation().get().membership().retired()) return node;
+ var currentGroup = node.allocation().get().membership().cluster().group();
+ if (currentGroup.isEmpty()) return node;
+ if (currentGroup.get().index() >= requested.groups()) return node;
+ if (node.preferToRetire() || node.wantToRetire()) return node;
+ if (requested.count().isPresent() && countInGroup[currentGroup.get().index()] >= requested.count().get() / requested.groups()) return node;
+ node = unretire(node);
+ if (node.allocation().get().membership().retired()) return node;
+ countInGroup[currentGroup.get().index()]++;
+ return node;
+ }
+
+ private NodeCandidate inFirstGroupWithDeficiency(NodeCandidate node, int[] countInGroup) {
+ for (int group = 0; group < requested.groups(); group++) {
+ if (requested.count().isEmpty() || countInGroup[group] < requested.count().get() / requested.groups()) {
+ return inGroup(group, node, countInGroup);
+ }
+ }
+ return node;
+ }
+
+ private boolean shouldRemove(NodeCandidate node) {
+ var currentGroup = node.allocation().get().membership().cluster().group();
+        if (currentGroup.isEmpty()) return true; // node is new and was never assigned a group index: not needed
+ return currentGroup.get().index() >= requested.groups();
+ }
+
+ private NodeCandidate inGroup(int group, NodeCandidate node, int[] countInGroup) {
+ node = unretire(node);
+ if (node.allocation().get().membership().retired()) return node;
+ var membership = node.allocation().get().membership();
+ var currentGroup = membership.cluster().group();
+ countInGroup[group]++;
+ if ( ! currentGroup.isEmpty() && currentGroup.get().index() < requested.groups())
+ countInGroup[membership.cluster().group().get().index()]--;
+ return node.withNode(node.toNode().with(node.allocation().get().with(membership.with(membership.cluster().with(Optional.of(ClusterSpec.Group.from(group)))))));
+ }
+
+ /** Attempt to unretire the given node if it is retired. */
+ private NodeCandidate unretire(NodeCandidate node) {
+ if (node.retiredNow()) return node;
+ if ( ! node.allocation().get().membership().retired()) return node;
+ if ( ! hasCompatibleResources(node) ) return node;
+ var parent = node.parentHostname().flatMap(hostname -> allNodes.node(hostname));
+ if (parent.isPresent() && (parent.get().status().wantToRetire() || parent.get().status().preferToRetire())) return node;
+ node = node.withNode();
+ if ( ! requested.isCompatible(node.resources()))
+ node = node.withNode(resize(node.toNode()));
+ return node.withNode(node.toNode().unretire());
+ }
+
+ private Node resize(Node node) {
+ NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
+ return node.with(new Flavor(requested.resources().get()
+ .with(hostResources.diskSpeed())
+ .with(hostResources.storageType())
+ .with(hostResources.architecture())),
+ Agent.application, clock.instant());
+ }
+
+ private boolean hasCompatibleResources(NodeCandidate candidate) {
+ return requested.isCompatible(candidate.resources()) || candidate.isResizable;
+ }
+
+}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 0c4838abe4d..e6b47d38779 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -61,14 +61,14 @@ public class GroupPreparer {
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
public PrepareResult prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
- List<Node> surplusActiveNodes, NodeIndices indices, int wantedGroups,
+ List<Node> surplusActiveNodes, NodeIndices indices,
LockedNodeList allNodes) {
log.log(Level.FINE, () -> "Preparing " + cluster.type().name() + " " + cluster.id() + " with requested resources " +
requestedNodes.resources().orElse(NodeResources.unspecified()));
// Try preparing in memory without global unallocated lock. Most of the time there should be no changes,
// and we can return nodes previously allocated.
NodeAllocation probeAllocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes,
- indices::probeNext, wantedGroups, allNodes);
+ indices::probeNext, allNodes);
if (probeAllocation.fulfilledAndNoChanges()) {
List<Node> acceptedNodes = probeAllocation.finalNodes();
surplusActiveNodes.removeAll(acceptedNodes);
@@ -77,7 +77,7 @@ public class GroupPreparer {
} else {
// There were some changes, so re-do the allocation with locks
indices.resetProbe();
- List<Node> prepared = prepareWithLocks(application, cluster, requestedNodes, surplusActiveNodes, indices, wantedGroups);
+ List<Node> prepared = prepareWithLocks(application, cluster, requestedNodes, surplusActiveNodes, indices);
return new PrepareResult(prepared, createUnlockedNodeList());
}
}
@@ -87,12 +87,12 @@ public class GroupPreparer {
/// Note that this will write to the node repo.
private List<Node> prepareWithLocks(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
- List<Node> surplusActiveNodes, NodeIndices indices, int wantedGroups) {
+ List<Node> surplusActiveNodes, NodeIndices indices) {
try (Mutex lock = nodeRepository.applications().lock(application);
Mutex allocationLock = nodeRepository.nodes().lockUnallocated()) {
LockedNodeList allNodes = nodeRepository.nodes().list(allocationLock);
NodeAllocation allocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes,
- indices::next, wantedGroups, allNodes);
+ indices::next, allNodes);
NodeType hostType = allocation.nodeType().hostType();
if (canProvisionDynamically(hostType) && allocation.hostDeficit().isPresent()) {
HostSharing sharing = hostSharing(cluster, hostType);
@@ -134,27 +134,25 @@ public class GroupPreparer {
// Non-dynamically provisioned zone with a deficit because we just now retired some nodes.
// Try again, but without retiring
indices.resetProbe();
- List<Node> accepted = prepareWithLocks(application, cluster, cns.withoutRetiring(), surplusActiveNodes, indices, wantedGroups);
+ List<Node> accepted = prepareWithLocks(application, cluster, cns.withoutRetiring(), surplusActiveNodes, indices);
log.warning("Prepared " + application + " " + cluster.id() + " without retirement due to lack of capacity");
return accepted;
}
if (! allocation.fulfilled() && requestedNodes.canFail())
- throw new NodeAllocationException((cluster.group().isPresent() ? "Node allocation failure on " + cluster.group().get()
- : "") + allocation.allocationFailureDetails(),
- true);
+ throw new NodeAllocationException(allocation.allocationFailureDetails(), true);
// Carry out and return allocation
+ List<Node> acceptedNodes = allocation.finalNodes();
nodeRepository.nodes().reserve(allocation.reservableNodes());
nodeRepository.nodes().addReservedNodes(new LockedNodeList(allocation.newNodes(), allocationLock));
- List<Node> acceptedNodes = allocation.finalNodes();
surplusActiveNodes.removeAll(acceptedNodes);
return acceptedNodes;
}
}
private NodeAllocation prepareAllocation(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
- List<Node> surplusActiveNodes, Supplier<Integer> nextIndex, int wantedGroups,
+ List<Node> surplusActiveNodes, Supplier<Integer> nextIndex,
LockedNodeList allNodes) {
NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requestedNodes, nextIndex, nodeRepository);
@@ -162,7 +160,6 @@ public class GroupPreparer {
application,
cluster,
requestedNodes,
- wantedGroups,
nodeRepository.zone().cloud().dynamicProvisioning(),
nodeRepository.nameResolver(),
nodeRepository.nodes(),
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index a2e0e59e329..40e5909d4d9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -19,7 +19,6 @@ import com.yahoo.vespa.hosted.provision.node.Allocation;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.LinkedHashMap;
@@ -60,7 +59,7 @@ class NodeAllocation {
/** The number of already allocated nodes of compatible size */
private int acceptedAndCompatible = 0;
- /** The number of already allocated nodes which can be made compatible*/
+ /** The number of already allocated nodes which can be made compatible */
private int acceptedAndCompatibleOrResizable = 0;
/** The number of nodes rejected because of clashing parentHostname */
@@ -120,7 +119,6 @@ class NodeAllocation {
ClusterMembership membership = allocation.membership();
if ( ! allocation.owner().equals(application)) continue; // wrong application
if ( ! membership.cluster().satisfies(cluster)) continue; // wrong cluster id/type
- if ((! candidate.isSurplus || saturated()) && ! membership.cluster().group().equals(cluster.group())) continue; // wrong group, and we can't or have no reason to change it
if ( candidate.state() == Node.State.active && allocation.removable()) continue; // don't accept; causes removal
if ( candidate.state() == Node.State.active && candidate.wantToFail()) continue; // don't accept; causes failing
if ( indexes.contains(membership.index())) continue; // duplicate index (just to be sure)
@@ -175,6 +173,7 @@ class NodeAllocation {
if (candidate.preferToRetire() && candidate.replaceableBy(candidates)) return Retirement.softRequest;
if (violatesExclusivity(candidate)) return Retirement.violatesExclusivity;
if (requiredHostFlavor.isPresent() && ! candidate.parent.map(node -> node.flavor().name()).equals(requiredHostFlavor)) return Retirement.violatesHostFlavor;
+ if (candidate.violatesSpares) return Retirement.violatesSpares;
return Retirement.none;
}
@@ -243,12 +242,10 @@ class NodeAllocation {
*/
private boolean acceptIncompatible(NodeCandidate candidate) {
if (candidate.state() != Node.State.active) return false;
- if (! candidate.allocation().get().membership().cluster().group().equals(cluster.group())) return false;
if (candidate.allocation().get().membership().retired()) return true; // don't second-guess if already retired
if ( ! requestedNodes.considerRetiring()) // the node is active and we are not allowed to remove gracefully, so keep
return true;
-
return cluster.isStateful() ||
(cluster.type() == ClusterSpec.Type.container && !hasCompatibleResources(candidate));
}
@@ -259,7 +256,6 @@ class NodeAllocation {
private Node acceptNode(NodeCandidate candidate, Retirement retirement, boolean resizeable) {
Node node = candidate.toNode();
-
if (node.allocation().isPresent()) // Record the currently requested resources
node = node.with(node.allocation().get().withRequestedResources(requestedNodes.resources().orElse(node.resources())));
@@ -268,10 +264,11 @@ class NodeAllocation {
// We want to allocate new nodes rather than unretiring with resize, so count without those
// for the purpose of deciding when to stop accepting nodes (saturation)
if (node.allocation().isEmpty()
- || ! ( requestedNodes.needsResize(node) &&
- (node.allocation().get().membership().retired() || ! requestedNodes.considerRetiring()))) {
+ || (canBeUsedInGroupWithDeficiency(node) &&
+ ! ( requestedNodes.needsResize(node) && (node.allocation().get().membership().retired() || ! requestedNodes.considerRetiring())))) {
acceptedAndCompatible++;
}
+
if (hasCompatibleResources(candidate))
acceptedAndCompatibleOrResizable++;
@@ -289,15 +286,28 @@ class NodeAllocation {
node = node.retire(nodeRepository.clock().instant());
}
if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
- // group may be different
- node = setCluster(cluster, node);
+            // Apply the updated cluster settings, but preserve the node's existing group assignment
+ node = setCluster(cluster.with(node.allocation().get().membership().cluster().group()), node);
}
- candidate = candidate.withNode(node);
+ candidate = candidate.withNode(node, retirement != Retirement.none && retirement != Retirement.alreadyRetired );
indexes.add(node.allocation().get().membership().index());
nodes.put(node.hostname(), candidate);
return node;
}
+ private boolean canBeUsedInGroupWithDeficiency(Node node) {
+ if (requestedNodes.count().isEmpty()) return true;
+ if (node.allocation().isEmpty()) return true;
+ var group = node.allocation().get().membership().cluster().group();
+ if (group.isEmpty()) return true;
+ long nodesInGroup = nodes.values().stream().filter(n -> groupOf(n).equals(group)).count();
+ return nodesInGroup < requestedNodes.count().get() / requestedNodes.groups();
+ }
+
+ private Optional<ClusterSpec.Group> groupOf(NodeCandidate candidate) {
+ return candidate.allocation().flatMap(a -> a.membership().cluster().group());
+ }
+
private Node resize(Node node) {
NodeResources hostResources = allNodes.parentOf(node).get().flavor().resources();
return node.with(new Flavor(requestedNodes.resources().get()
@@ -391,52 +401,21 @@ class NodeAllocation {
return requestedNodes.type();
}
- /**
- * Make the number of <i>non-retired</i> nodes in the list equal to the requested number
- * of nodes, and retire the rest of the list. Only retire currently active nodes.
- * Prefer to retire nodes of the wrong flavor.
- * Make as few changes to the retired set as possible.
- *
- * @return the final list of nodes
- */
List<Node> finalNodes() {
- int wantToRetireCount = (int) matching(NodeCandidate::wantToRetire).count();
- int currentRetiredCount = (int) matching(node -> node.allocation().get().membership().retired()).count();
- int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), wantToRetireCount, currentRetiredCount);
-
- if (deltaRetiredCount > 0) { // retire until deltaRetiredCount is 0
- for (NodeCandidate candidate : byRetiringPriority(nodes.values())) {
- if ( ! candidate.allocation().get().membership().retired() && candidate.state() == Node.State.active) {
- candidate = candidate.withNode();
- candidate = candidate.withNode(candidate.toNode().retire(Agent.application, nodeRepository.clock().instant()));
- nodes.put(candidate.toNode().hostname(), candidate);
- if (--deltaRetiredCount == 0) break;
- }
- }
- }
- else if (deltaRetiredCount < 0) { // unretire until deltaRetiredCount is 0
- for (NodeCandidate candidate : byUnretiringPriority(nodes.values())) {
- if (candidate.allocation().get().membership().retired() && hasCompatibleResources(candidate) ) {
- candidate = candidate.withNode();
- if (candidate.isResizable)
- candidate = candidate.withNode(resize(candidate.toNode()));
- candidate = candidate.withNode(candidate.toNode().unretire());
- nodes.put(candidate.toNode().hostname(), candidate);
- if (++deltaRetiredCount == 0) break;
- }
- }
- }
-
+ // Set whether the node is exclusive
for (NodeCandidate candidate : nodes.values()) {
- // Set whether the node is exclusive
candidate = candidate.withNode();
Allocation allocation = candidate.allocation().get();
candidate = candidate.withNode(candidate.toNode().with(allocation.with(allocation.membership()
- .with(allocation.membership().cluster().exclusive(cluster.isExclusive())))));
+ .with(allocation.membership().cluster().exclusive(cluster.isExclusive())))));
nodes.put(candidate.toNode().hostname(), candidate);
}
- return nodes.values().stream().map(NodeCandidate::toNode).toList();
+ GroupIndices groupIndices = new GroupIndices(requestedNodes, allNodes, nodeRepository.clock());
+ Collection<NodeCandidate> finalNodes = groupIndices.assignTo(nodes.values());
+ nodes.clear();
+ finalNodes.forEach(candidate -> nodes.put(candidate.toNode().hostname(), candidate));
+ return finalNodes.stream().map(NodeCandidate::toNode).toList();
}
List<Node> reservableNodes() {
@@ -461,19 +440,6 @@ class NodeAllocation {
return allNodes.nodeType(nodeType()).size();
}
- /** Prefer to retire nodes we want the least */
- private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) {
- return candidates.stream().sorted(Comparator.reverseOrder()).toList();
- }
-
- /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */
- private List<NodeCandidate> byUnretiringPriority(Collection<NodeCandidate> candidates) {
- return candidates.stream()
- .sorted(Comparator.comparing(NodeCandidate::wantToRetire)
- .thenComparing(n -> n.allocation().get().membership().index()))
- .toList();
- }
-
String allocationFailureDetails() {
List<String> reasons = new ArrayList<>();
if (rejectedDueToExclusivity > 0)
@@ -486,7 +452,7 @@ class NodeAllocation {
reasons.add("insufficient real resources on hosts");
if (reasons.isEmpty()) return "";
- return ": Not enough suitable nodes available due to " + String.join(", ", reasons);
+ return "Not enough suitable nodes available due to " + String.join(", ", reasons);
}
private static Integer parseIndex(String hostname) {
@@ -510,6 +476,7 @@ class NodeAllocation {
violatesExclusivity("node violates host exclusivity"),
violatesHostFlavor("node violates host flavor"),
violatesHostFlavorGeneration("node violates host flavor generation"),
+ violatesSpares("node is assigned to a host we want to use as a spare"),
none("");
private final String description;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
index 8462e23fbfd..adc04c491e2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
@@ -81,6 +81,9 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
public abstract boolean preferToRetire();
+ /** Returns true if we have decided to retire this node as part of this deployment */
+ public boolean retiredNow() { return false; }
+
public abstract boolean wantToFail();
public abstract Flavor flavor();
@@ -217,7 +220,12 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
/** Returns a copy of this with node set to given value */
NodeCandidate withNode(Node node) {
- return new ConcreteNodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplus, isNew, isResizable);
+ return withNode(node, retiredNow());
+ }
+
+ /** Returns a copy of this with node set to given value */
+ NodeCandidate withNode(Node node, boolean retiredNow) {
+ return new ConcreteNodeCandidate(node, retiredNow, freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplus, isNew, isResizable);
}
/** Returns the switch priority, based on switch exclusivity, of this compared to other */
@@ -260,7 +268,7 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
boolean isSurplus,
boolean isNew,
boolean isResizeable) {
- return new ConcreteNodeCandidate(node, freeParentCapacity, Optional.of(parent), violatesSpares, true, isSurplus, isNew, isResizeable);
+ return new ConcreteNodeCandidate(node, false, freeParentCapacity, Optional.of(parent), violatesSpares, true, isSurplus, isNew, isResizeable);
}
public static NodeCandidate createNewChild(NodeResources resources,
@@ -274,26 +282,33 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
}
public static NodeCandidate createNewExclusiveChild(Node node, Node parent) {
- return new ConcreteNodeCandidate(node, node.resources(), Optional.of(parent), false, true, false, true, false);
+ return new ConcreteNodeCandidate(node, false, node.resources(), Optional.of(parent), false, true, false, true, false);
}
public static NodeCandidate createStandalone(Node node, boolean isSurplus, boolean isNew) {
- return new ConcreteNodeCandidate(node, node.resources(), Optional.empty(), false, true, isSurplus, isNew, false);
+ return new ConcreteNodeCandidate(node, false, node.resources(), Optional.empty(), false, true, isSurplus, isNew, false);
}
/** A candidate backed by a node */
static class ConcreteNodeCandidate extends NodeCandidate {
private final Node node;
+ private final boolean retiredNow;
- ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent,
+ ConcreteNodeCandidate(Node node,
+ boolean retiredNow,
+ NodeResources freeParentCapacity, Optional<Node> parent,
boolean violatesSpares, boolean exclusiveSwitch,
boolean isSurplus, boolean isNew, boolean isResizeable) {
super(freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplus, isNew, isResizeable);
+ this.retiredNow = retiredNow;
this.node = Objects.requireNonNull(node, "Node cannot be null");
}
@Override
+ public boolean retiredNow() { return retiredNow; }
+
+ @Override
public NodeResources resources() { return node.resources(); }
@Override
@@ -322,7 +337,7 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
@Override
public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) {
- return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at),
+ return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), retiredNow,
freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplus, isNew, isResizable);
}
@@ -332,7 +347,7 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
@Override
public NodeCandidate withExclusiveSwitch(boolean exclusiveSwitch) {
- return new ConcreteNodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch,
+ return new ConcreteNodeCandidate(node, retiredNow, freeParentCapacity, parent, violatesSpares, exclusiveSwitch,
isSurplus, isNew, isResizable);
}
@@ -439,7 +454,7 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
NodeType.tenant)
.cloudAccount(parent.get().cloudAccount())
.build();
- return new ConcreteNodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplus, isNew, isResizable);
+ return new ConcreteNodeCandidate(node, false, freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplus, isNew, isResizable);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 4f21c8dcd50..9f00e5fdbba 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -46,7 +46,7 @@ public class NodePrioritizer {
private final boolean enclave;
public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
- int wantedGroups, boolean dynamicProvisioning, NameResolver nameResolver, Nodes nodes,
+ boolean dynamicProvisioning, NameResolver nameResolver, Nodes nodes,
HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean enclave) {
this.allNodes = allNodes;
this.calculator = hostResourcesCalculator;
@@ -70,12 +70,9 @@ public class NodePrioritizer {
.stream())
.distinct()
.count();
- this.topologyChange = currentGroups != wantedGroups;
+ this.topologyChange = currentGroups != requestedNodes.groups();
- this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream()
- .map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group()))
- .filter(clusterSpec.group()::equals)
- .count();
+ this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream().count();
// In dynamically provisioned zones, we can always take spare hosts since we can provision new on-demand,
// NodeCandidate::compareTo will ensure that they will not be used until there is no room elsewhere.
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index ffd2805bcff..c29c51ccbd5 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -81,11 +81,10 @@ public class NodeRepositoryProvisioner implements Provisioner {
* The nodes are ordered by increasing index number.
*/
@Override
- public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested,
- ProvisionLogger logger) {
+ public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
log.log(Level.FINE, "Received deploy prepare request for " + requested +
" for application " + application + ", cluster " + cluster);
- validate(application, cluster, requested);
+ validate(application, cluster, requested, logger);
int groups;
NodeResources resources;
@@ -97,23 +96,21 @@ public class NodeRepositoryProvisioner implements Provisioner {
validate(actual, target, cluster, application);
logIfDownscaled(requested.minResources().nodes(), actual.minResources().nodes(), cluster, logger);
- groups = target.groups();
resources = getNodeResources(cluster, target.nodeResources(), application);
- nodeSpec = NodeSpec.from(target.nodes(), resources, cluster.isExclusive(), actual.canFail(),
+ nodeSpec = NodeSpec.from(target.nodes(), target.groups(), resources, cluster.isExclusive(), actual.canFail(),
requested.cloudAccount().orElse(nodeRepository.zone().cloud().account()),
requested.clusterInfo().hostTTL());
}
else {
- groups = 1; // type request with multiple groups is not supported
cluster = cluster.withExclusivity(true);
resources = getNodeResources(cluster, requested.minResources().nodeResources(), application);
nodeSpec = NodeSpec.from(requested.type(), nodeRepository.zone().cloud().account());
}
- return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups),
+ return asSortedHosts(preparer.prepare(application, cluster, nodeSpec),
requireCompatibleResources(resources, cluster));
}
- private void validate(ApplicationId application, ClusterSpec cluster, Capacity requested) {
+ private void validate(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
nodeResourceLimits.ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), application, cluster);
@@ -121,6 +118,18 @@ public class NodeRepositoryProvisioner implements Provisioner {
if ( ! requested.minResources().nodeResources().gpuResources().equals(requested.maxResources().nodeResources().gpuResources()))
throw new IllegalArgumentException(requested + " is invalid: Gpu capacity cannot have ranges");
+
+ logInsufficientDiskResources(cluster, requested, logger);
+ }
+
+ private void logInsufficientDiskResources(ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
+ var resources = requested.minResources().nodeResources();
+ if ( ! nodeResourceLimits.isWithinAdvertisedDiskLimits(resources, cluster)) {
+ logger.logApplicationPackage(Level.WARNING, "Requested disk (" + resources.diskGb() +
+ "Gb) in " + cluster.id() + " is not large enough to fit " +
+ "core/heap dumps. Minimum recommended disk resources " +
+ "is 2x memory for containers and 3x memory for content");
+ }
}
private NodeResources getNodeResources(ClusterSpec cluster, NodeResources nodeResources, ApplicationId applicationId) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index 9ded1a2735c..8c5a7b6c61e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -2,14 +2,17 @@
package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.ProvisionLogger;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.util.Locale;
+import java.util.logging.Level;
/**
* Defines the resource limits for nodes in various zones
@@ -35,6 +38,12 @@ public class NodeResourceLimits {
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, cluster.isExclusive()));
}
+ // TODO: Remove this when we are ready to fail, not just warn on this.
+ public boolean isWithinAdvertisedDiskLimits(NodeResources requested, ClusterSpec cluster) {
+ if (requested.diskGbIsUnspecified() || requested.memoryGbIsUnspecified()) return true;
+ return requested.diskGb() >= minAdvertisedDiskGb(requested, cluster);
+ }
+
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ApplicationId applicationId, ClusterSpec cluster) {
if (candidateNode.type() != NodeType.tenant) return true; // Resource limits only apply to tenant nodes
@@ -52,9 +61,12 @@ public class NodeResourceLimits {
return true;
}
- public NodeResources enlargeToLegal(NodeResources requested, ApplicationId applicationId, ClusterSpec cluster, boolean exclusive) {
+ public NodeResources enlargeToLegal(NodeResources requested, ApplicationId applicationId, ClusterSpec cluster, boolean exclusive, boolean followRecommendations) {
if (requested.isUnspecified()) return requested;
+ if (followRecommendations) // TODO: Do unconditionally when we enforce this limit
+ requested = requested.withDiskGb(Math.max(minAdvertisedDiskGb(requested, cluster), requested.diskGb()));
+
return requested.withVcpu(Math.max(minAdvertisedVcpu(applicationId, cluster), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(cluster), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested, exclusive), requested.diskGb()));
@@ -78,6 +90,15 @@ public class NodeResourceLimits {
return minRealDiskGb() + reservedDiskSpaceGb(requested.storageType(), exclusive);
}
+ // TODO: Move this check into the above when we are ready to fail, not just warn on this.
+ private double minAdvertisedDiskGb(NodeResources requested, ClusterSpec cluster) {
+ return requested.memoryGb() * switch (cluster.type()) {
+ case combined, content -> 3;
+ case container -> 2;
+ default -> 0; // No constraint on other types
+ };
+ }
+
// Note: Assumes node type 'host'
private long reservedDiskSpaceGb(NodeResources.StorageType storageType, boolean exclusive) {
if (storageType == NodeResources.StorageType.local && ! zone().cloud().allowHostSharing())
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
index f32928a9ec4..f4b2c4ceee0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
@@ -35,21 +35,20 @@ public interface NodeSpec {
return fulfilledDeficitCount(count) == 0;
}
+ /** Returns the total number of nodes this is requesting, or empty if not specified */
+ Optional<Integer> count();
+
+ int groups();
+
/** Returns whether this should throw an exception if the requested nodes are not fully available */
boolean canFail();
/** Returns whether we should retire nodes at all when fulfilling this spec */
boolean considerRetiring();
- /** Returns the ideal number of nodes that should be retired to fulfill this spec */
- int idealRetiredCount(int acceptedCount, int wantToRetireCount, int currentRetiredCount);
-
/** Returns number of additional nodes needed for this spec to be fulfilled given the current node count */
int fulfilledDeficitCount(int count);
- /** Returns a specification of a fraction of all the nodes of this. It is assumed the argument is a valid divisor. */
- NodeSpec fraction(int divisor);
-
/** Returns the resources requested by this or empty if none are explicitly requested */
Optional<NodeResources> resources();
@@ -77,9 +76,9 @@ public interface NodeSpec {
return false;
}
- static NodeSpec from(int nodeCount, NodeResources resources, boolean exclusive, boolean canFail,
+ static NodeSpec from(int nodeCount, int groupCount, NodeResources resources, boolean exclusive, boolean canFail,
CloudAccount cloudAccount, Duration hostTTL) {
- return new CountNodeSpec(nodeCount, resources, exclusive, canFail, canFail, cloudAccount, hostTTL);
+ return new CountNodeSpec(nodeCount, groupCount, resources, exclusive, canFail, canFail, cloudAccount, hostTTL);
}
static NodeSpec from(NodeType type, CloudAccount cloudAccount) {
@@ -90,6 +89,7 @@ public interface NodeSpec {
class CountNodeSpec implements NodeSpec {
private final int count;
+ private final int groups;
private final NodeResources requestedNodeResources;
private final boolean exclusive;
private final boolean canFail;
@@ -97,9 +97,10 @@ public interface NodeSpec {
private final CloudAccount cloudAccount;
private final Duration hostTTL;
- private CountNodeSpec(int count, NodeResources resources, boolean exclusive, boolean canFail,
+ private CountNodeSpec(int count, int groups, NodeResources resources, boolean exclusive, boolean canFail,
boolean considerRetiring, CloudAccount cloudAccount, Duration hostTTL) {
this.count = count;
+ this.groups = groups;
this.requestedNodeResources = Objects.requireNonNull(resources, "Resources must be specified");
this.exclusive = exclusive;
this.canFail = canFail;
@@ -112,6 +113,12 @@ public interface NodeSpec {
}
@Override
+ public Optional<Integer> count() { return Optional.of(count); }
+
+ @Override
+ public int groups() { return groups; }
+
+ @Override
public Optional<NodeResources> resources() {
return Optional.of(requestedNodeResources);
}
@@ -136,22 +143,12 @@ public interface NodeSpec {
}
@Override
- public int idealRetiredCount(int acceptedCount, int wantToRetireCount, int currentRetiredCount) {
- return acceptedCount - this.count - currentRetiredCount;
- }
-
- @Override
public int fulfilledDeficitCount(int count) {
return Math.max(this.count - count, 0);
}
- @Override
- public NodeSpec fraction(int divisor) {
- return new CountNodeSpec(count/divisor, requestedNodeResources, exclusive, canFail, considerRetiring, cloudAccount, hostTTL);
- }
-
public NodeSpec withoutRetiring() {
- return new CountNodeSpec(count, requestedNodeResources, exclusive, canFail, false, cloudAccount, hostTTL);
+ return new CountNodeSpec(count, groups, requestedNodeResources, exclusive, canFail, false, cloudAccount, hostTTL);
}
@Override
@@ -163,7 +160,6 @@ public interface NodeSpec {
public boolean canResize(NodeResources currentNodeResources, NodeResources currentSpareHostResources,
ClusterSpec.Type type, boolean hasTopologyChange, int currentClusterSize) {
if (exclusive) return false; // exclusive resources must match the host
-
// Never allow in-place resize when also changing topology or decreasing cluster size
if (hasTopologyChange || count < currentClusterSize) return false;
@@ -192,7 +188,10 @@ public interface NodeSpec {
public Duration hostTTL() { return hostTTL; }
@Override
- public String toString() { return "request for " + count + " nodes with " + requestedNodeResources; }
+ public String toString() {
+ return "request for " + count + " nodes" +
+ ( groups > 1 ? " (in " + groups + " groups)" : "") +
+ " with " + requestedNodeResources; }
}
@@ -211,6 +210,12 @@ public interface NodeSpec {
}
@Override
+ public Optional<Integer> count() { return Optional.empty(); }
+
+ @Override
+ public int groups() { return 1; }
+
+ @Override
public NodeType type() { return type; }
@Override
@@ -226,20 +231,12 @@ public interface NodeSpec {
public boolean considerRetiring() { return true; }
@Override
- public int idealRetiredCount(int acceptedCount, int wantToRetireCount, int currentRetiredCount) {
- return wantToRetireCount - currentRetiredCount;
- }
-
- @Override
public int fulfilledDeficitCount(int count) {
// If no wanted count is specified for this node type, then any count fulfills the deficit
return Math.max(0, WANTED_NODE_COUNT.getOrDefault(type, 0) - count);
}
@Override
- public NodeSpec fraction(int divisor) { return this; }
-
- @Override
public Optional<NodeResources> resources() {
return Optional.empty();
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index b6c7324c75c..25efcabfe8e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -10,6 +10,7 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import java.time.Clock;
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
@@ -32,16 +33,15 @@ class Preparer {
}
/** Prepare all required resources for the given application and cluster */
- public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
+ public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
try {
- var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
+ var nodes = prepareNodes(application, cluster, requestedNodes);
prepareLoadBalancer(application, cluster, requestedNodes);
return nodes;
}
catch (NodeAllocationException e) {
throw new NodeAllocationException("Could not satisfy " + requestedNodes +
- ( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
- " in " + application + " " + cluster + ": " + e.getMessage(),
+ " in " + application + " " + cluster, e,
e.retryable());
}
}
@@ -54,34 +54,29 @@ class Preparer {
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
- int wantedGroups) {
+ private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
LockedNodeList allNodes = groupPreparer.createUnlockedNodeList();
- NodeList appNodes = allNodes.owner(application);
- List<Node> surplusNodes = findNodesInRemovableGroups(appNodes, cluster, wantedGroups);
+ NodeList clusterNodes = allNodes.owner(application);
+ List<Node> surplusNodes = findNodesInRemovableGroups(clusterNodes, requestedNodes.groups());
- List<Integer> usedIndices = appNodes.cluster(cluster.id()).mapToList(node -> node.allocation().get().membership().index());
+ List<Integer> usedIndices = clusterNodes.mapToList(node -> node.allocation().get().membership().index());
NodeIndices indices = new NodeIndices(usedIndices);
List<Node> acceptedNodes = new ArrayList<>();
- for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
- ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
- GroupPreparer.PrepareResult result = groupPreparer.prepare(application, clusterGroup,
- requestedNodes.fraction(wantedGroups),
- surplusNodes, indices, wantedGroups,
- allNodes);
- allNodes = result.allNodes(); // Might have changed
- List<Node> accepted = result.prepared();
- if (requestedNodes.rejectNonActiveParent()) {
- NodeList activeHosts = allNodes.state(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
- accepted = accepted.stream()
- .filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
- .toList();
- }
-
- replace(acceptedNodes, accepted);
+ GroupPreparer.PrepareResult result = groupPreparer.prepare(application, cluster,
+ requestedNodes,
+ surplusNodes, indices,
+ allNodes);
+ List<Node> accepted = result.prepared();
+ if (requestedNodes.rejectNonActiveParent()) {
+ NodeList activeHosts = result.allNodes().state(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
+ accepted = accepted.stream()
+ .filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
+ .toList();
}
- moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
+
+ replace(acceptedNodes, accepted);
+ moveToActiveGroup(surplusNodes, requestedNodes.groups(), cluster.group());
acceptedNodes.removeAll(surplusNodes);
return acceptedNodes;
}
@@ -95,18 +90,16 @@ class Preparer {
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
*/
- private List<Node> findNodesInRemovableGroups(NodeList appNodes, ClusterSpec requestedCluster, int wantedGroups) {
+ private List<Node> findNodesInRemovableGroups(NodeList clusterNodes, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>();
- for (Node node : appNodes.state(Node.State.active)) {
+ for (Node node : clusterNodes.state(Node.State.active)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
- if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
- if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
if (nodeCluster.group().get().index() >= wantedGroups)
surplusNodes.add(node);
}
return surplusNodes;
}
-
+
/** Move nodes from unwanted groups to wanted groups to avoid lingering groups consisting of retired nodes */
private void moveToActiveGroup(List<Node> surplusNodes, int wantedGroups, Optional<ClusterSpec.Group> targetGroup) {
for (ListIterator<Node> i = surplusNodes.listIterator(); i.hasNext(); ) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/InMemoryProvisionLogger.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/InMemoryProvisionLogger.java
new file mode 100644
index 00000000000..65abcbef698
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/InMemoryProvisionLogger.java
@@ -0,0 +1,35 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.testutils;
+
+import com.yahoo.config.provision.ProvisionLogger;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Level;
+
+/**
+ * A logger which remembers all messages logged in addition to writing them to standard out.
+ *
+ * @author bratseth
+ */
+public class InMemoryProvisionLogger implements ProvisionLogger {
+
+ private final List<String> systemLog = new ArrayList<>();
+ private final List<String> applicationLog = new ArrayList<>();
+
+ @Override
+ public void log(Level level, String message) {
+ System.out.println("ProvisionLogger system " + level + ": " + message);
+ systemLog.add(level + ": " + message);
+ }
+
+ @Override
+ public void logApplicationPackage(Level level, String message) {
+ System.out.println("ProvisionLogger application " + level + ": " + message);
+ applicationLog.add(level + ": " + message);
+ }
+
+ public List<String> systemLog() { return systemLog; }
+ public List<String> applicationLog() { return applicationLog; }
+
+}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
index dcde521bfda..3ed01e00ee6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
@@ -261,7 +261,7 @@ public class MockDeployer implements Deployer {
public ClusterSpec cluster() { return cluster; }
private List<HostSpec> prepare(NodeRepositoryProvisioner provisioner) {
- return provisioner.prepare(id, cluster, capacity, null);
+ return provisioner.prepare(id, cluster, capacity, new InMemoryProvisionLogger());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 64c5dff0718..bd31c7578b9 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -57,11 +57,15 @@ public class AutoscalingTest {
@Test
public void test_autoscaling_single_content_group() {
- var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
+ var now = new ClusterResources(5, 1, new NodeResources(2, 16, 750, 1));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(true)
+ .initialResources(Optional.of(now))
+ .build();
fixture.loader().applyCpuLoad(0.7f, 10);
var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high",
- 8, 1, 4.0, 9.3, 36.2,
+ 9, 1, 3.6, 8.5, 360.9,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
@@ -83,7 +87,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
- 7, 1, 1.1, 8.7, 25.4,
+ 8, 1, 1.0, 8.3, 338.4,
fixture.autoscale());
}
@@ -210,7 +214,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 9, 1, 4, 16.0, 25.5,
+ 9, 1, 4, 16.0, 150,
fixture.autoscale());
}
@@ -227,9 +231,9 @@ public class AutoscalingTest {
@Test
public void test_autoscaling_without_traffic_exclusive() {
- var min = new ClusterResources(1, 1, new NodeResources(0.5, 4, 10, 0.3));
- var now = new ClusterResources(4, 1, new NodeResources(8, 16, 10, 0.3));
- var max = new ClusterResources(4, 1, new NodeResources(16, 32, 50, 0.3));
+ var min = new ClusterResources(1, 1, new NodeResources(0.5, 4, 100, 0.3));
+ var now = new ClusterResources(4, 1, new NodeResources(8, 16, 100, 0.3));
+ var max = new ClusterResources(4, 1, new NodeResources(16, 32, 500, 0.3));
var fixture = DynamicProvisioningTester.fixture(min, now, max)
.clusterType(ClusterSpec.Type.container)
.awsProdSetup(false)
@@ -238,7 +242,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(duration.negated());
fixture.loader().zeroTraffic(20, 1);
fixture.tester().assertResources("Scaled down",
- 2, 1, 2, 16, 10,
+ 2, 1, 2, 16, 100,
fixture.autoscale());
}
@@ -256,7 +260,7 @@ public class AutoscalingTest {
fixture.completeLastScaling();
fixture.loader().applyCpuLoad(0.1f, 120);
fixture.tester().assertResources("Scaling down since cpu usage has gone down",
- 3, 1, 2, 16, 27.2,
+ 3, 1, 2, 16, 75.0,
fixture.autoscale());
}
@@ -283,7 +287,7 @@ public class AutoscalingTest {
new NodeResources(100, 1000, 1000, 1, DiskSpeed.any));
var capacity = Capacity.from(min, max);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up",
- 13, 1, 1.5, 29.1, 26.7,
+ 13, 1, 1.5, 29.1, 87.3,
fixture.autoscale(capacity));
assertEquals("Disk speed from new capacity is used",
DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
@@ -312,7 +316,6 @@ public class AutoscalingTest {
fixture.deactivateRetired(capacity);
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.8, 120);
- System.out.println("Autoscaling ----------");
assertEquals(DiskSpeed.any, fixture.autoscale(capacity).resources().get().nodeResources().diskSpeed());
}
@@ -384,15 +387,15 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.4, 240);
fixture.tester().assertResources("Scaling cpu up",
- 6, 6, 5.0, 7.4, 10.0,
+ 6, 6, 5.0, 7.4, 22.3,
fixture.autoscale());
}
@Test
public void autoscaling_respects_group_size_limit() {
- var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
- var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 10, 1));
- var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1));
+ var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 10, 1));
+ var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 100, 1));
+ var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 10000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
@@ -401,7 +404,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.4, 240);
fixture.tester().assertResources("Scaling cpu up",
- 8, 4, 4.6, 4.2, 10.0,
+ 12, 6, 2.8, 4.2, 27.5,
fixture.autoscale());
}
@@ -457,7 +460,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
- 13, 1, 4, 8, 13.6,
+ 13, 1, 4, 8, 100.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@@ -480,9 +483,61 @@ public class AutoscalingTest {
}
@Test
+ public void too_small_disk_compared_to_memory() {
+ var resources = new ClusterResources(2, 1, new NodeResources(1, 10, 19, 1));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(true)
+ .initialResources(Optional.of(resources))
+ .build();
+ assertEquals(2, fixture.tester().provisionLogger().applicationLog().size()); // tester deploys twice
+ assertEquals("WARNING: Requested disk (19.0Gb) in cluster 'cluster1' is not large enough to fit core/heap dumps. Minimum recommended disk resources is 2x memory for containers and 3x memory for content",
+ fixture.tester().provisionLogger().applicationLog().get(0));
+ }
+
+ @Test
+ public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory() {
+ var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1));
+ var now = new ClusterResources(10, 1, new NodeResources(5, 50, 150, 1));
+ var max = new ClusterResources(10, 1, new NodeResources(10, 100, 200, 1));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(true)
+ .initialResources(Optional.of(now))
+ .capacity(Capacity.from(min, max))
+ .build();
+ fixture.tester().clock().advance(Duration.ofDays(2));
+ fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
+ fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
+ 11, 1, 13.0, 60.0, 179.9,
+ fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
+ fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
+ 10, 1, 10.0, 66.2, 198.6,
+ fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
+ }
+
+ @Test
+ public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory_exclusive() {
+ var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1, DiskSpeed.any, StorageType.remote));
+ var now = new ClusterResources(10, 1, new NodeResources(16, 64, 192, 1, DiskSpeed.any, StorageType.remote));
+ var max = new ClusterResources(10, 1, new NodeResources(30, 200, 500, 1, DiskSpeed.any, StorageType.remote));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(false)
+ .initialResources(Optional.of(now))
+ .capacity(Capacity.from(min, max))
+ .build();
+ fixture.tester().clock().advance(Duration.ofDays(2));
+ fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
+ fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
+ 13, 1, 36.0, 72.0, 900.0,
+ fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
+ fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
+ 10, 1, 16.0, 64, 247.5,
+ fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
+ }
+
+ @Test
public void test_autoscaling_group_size_unconstrained() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
- var now = new ClusterResources(5, 5, new NodeResources(3, 100, 100, 1));
+ var now = new ClusterResources(5, 5, new NodeResources(3, 100, 300, 1));
var max = new ClusterResources(20, 20, new NodeResources(10, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
@@ -492,7 +547,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
- 10, 5, 7.7, 41.5, 38.5,
+ 10, 5, 7.7, 41.5, 124.6,
fixture.autoscale());
}
@@ -509,7 +564,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
- 7, 7, 9.4, 78.6, 77.0,
+ 7, 7, 9.4, 78.6, 235.8,
fixture.autoscale());
}
@@ -528,7 +583,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
fixture.tester().assertResources("Scaling up cpu, others down, changing to 1 group is cheaper",
- 7, 1, 3.2, 43.3, 40.1,
+ 7, 1, 3.2, 43.3, 129.8,
fixture.autoscale());
}
@@ -548,7 +603,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
- 5, 1, 1.0, 62.6, 60.1,
+ 5, 1, 1.0, 62.6, 187.7,
fixture.autoscale());
}
@@ -565,7 +620,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyMemLoad(1.0, 1000);
fixture.tester().assertResources("Increase group size to reduce memory load",
- 8, 2, 13.9, 96.3, 60.1,
+ 8, 2, 13.9, 96.3, 288.8,
fixture.autoscale());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
index 4d2816cb14f..00ae9ac5a9d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
@@ -30,6 +30,7 @@ import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
import com.yahoo.vespa.hosted.provision.testutils.ServiceMonitorStub;
import com.yahoo.vespa.service.duper.InfraApplication;
import com.yahoo.vespa.service.duper.TenantHostApplication;
+import com.yahoo.vespa.hosted.provision.testutils.InMemoryProvisionLogger;
import java.time.Clock;
import java.time.Duration;
@@ -270,7 +271,7 @@ public class NodeFailTester {
}
public void activate(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
- List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, capacity, null);
+ List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, capacity, new InMemoryProvisionLogger());
try (var lock = provisioner.lock(applicationId)) {
NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
provisioner.activate(hosts, new ActivationContext(0), new ApplicationTransaction(lock, transaction));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 1b677224295..8aaf0eb20e7 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -75,7 +75,7 @@ public class ScalingSuggestionsMaintainerTest {
assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
- assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 11.8 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 14.2 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app2, cluster2, tester).resources().get().toString());
// Utilization goes way down
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java
index 478b201d71b..7cf1b0d5177 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java
@@ -94,7 +94,6 @@ public class DynamicAllocationTest {
hostsWithChildren.add(node.parentHostname().get());
}
assertEquals(4 - spareCount, hostsWithChildren.size());
-
}
/**
@@ -342,8 +341,8 @@ public class DynamicAllocationTest {
tester.activate(application, hosts);
NodeList activeNodes = tester.nodeRepository().nodes().list().owner(application);
- assertEquals(Set.of("127.0.127.2", "::2"), activeNodes.asList().get(0).ipConfig().primary());
- assertEquals(Set.of("127.0.127.13", "::d"), activeNodes.asList().get(1).ipConfig().primary());
+ assertEquals(Set.of("127.0.127.2", "::2"), activeNodes.asList().get(1).ipConfig().primary());
+ assertEquals(Set.of("127.0.127.13", "::d"), activeNodes.asList().get(0).ipConfig().primary());
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
index e1e83ad2fb3..5539bb0cb6e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
@@ -9,6 +9,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeResources.Architecture;
@@ -316,13 +317,6 @@ public class DynamicProvisioningTest {
tester.assertNodes("Allocation specifies memory in the advertised amount",
2, 1, 2, 20, 40,
app1, cluster1);
-
- // Redeploy the same
- tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 20, 40),
- resources(4, 1, 2, 20, 40)));
- tester.assertNodes("Allocation specifies memory in the advertised amount",
- 2, 1, 2, 20, 40,
- app1, cluster1);
}
@Test
@@ -340,7 +334,7 @@ public class DynamicProvisioningTest {
.flagSource(flagSource)
.build();
- ApplicationId app = ProvisioningTester.applicationId();
+ ApplicationId app = ProvisioningTester.applicationId("a1");
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("8").build();
Capacity capacity = Capacity.from(new ClusterResources(4, 2, new NodeResources(2, 4, 50, 0.1, DiskSpeed.any, StorageType.any, Architecture.any)));
@@ -505,7 +499,7 @@ public class DynamicProvisioningTest {
}
@Test
- public void gpu_host() {
+ public void gpu_host() {
List<Flavor> flavors = List.of(new Flavor("gpu", new NodeResources(4, 16, 125, 10, fast, local,
Architecture.x86_64, new NodeResources.GpuResources(1, 16))));
ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning(true, false)
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
index b043a1cfb0f..4799d3b5577 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
@@ -24,12 +24,9 @@ import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.Fixture;
import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import com.yahoo.vespa.hosted.provision.node.IP;
-import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
-import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
-import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.InMemoryProvisionLogger;
import java.time.Duration;
-import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.Set;
@@ -38,13 +35,13 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
- * A provisioniong tester which
+ * A provisioning tester which
* - Supports dynamic provisioning (only).
* - Optionally replicates the actual AWS setup and logic used on Vespa Cloud.
* - Supports autoscaling testing.
*
- * TODO: All provisioning testing should migrate to use this, and then the provisionging tester should be collapsed
- * into this.
+ * TODO: All provisioning testing should migrate to use this, and then the provisioning tester should be collapsed
+ * into this. ... or we should just use autoscalingtester for everything.
*
* @author bratseth
*/
@@ -82,12 +79,7 @@ public class DynamicProvisioningTester {
capacityPolicies = new CapacityPolicies(provisioningTester.nodeRepository());
}
- private static List<Flavor> toFlavors(List<NodeResources> resources) {
- List<Flavor> flavors = new ArrayList<>();
- for (int i = 0; i < resources.size(); i++)
- flavors.add(new Flavor("flavor" + i, resources.get(i)));
- return flavors;
- }
+ public InMemoryProvisionLogger provisionLogger() { return provisioningTester.provisionLogger(); }
public static Fixture.Builder fixture() { return new Fixture.Builder(); }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InPlaceResizeProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InPlaceResizeProvisionTest.java
index 0bb6dc61d1b..54f0507831d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InPlaceResizeProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InPlaceResizeProvisionTest.java
@@ -57,7 +57,7 @@ public class InPlaceResizeProvisionTest {
private final ProvisioningTester tester = new ProvisioningTester.Builder()
.flagSource(flagSource)
.zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- private final ApplicationId app = ProvisioningTester.applicationId();
+ private final ApplicationId app = ProvisioningTester.applicationId("a1");
@Test
public void single_group_same_cluster_size_resource_increase() {
@@ -167,8 +167,6 @@ public class InPlaceResizeProvisionTest {
assertEquals(0, listCluster(content1).retired().size());
}
-
- /** In this scenario there should be no resizing */
@Test
public void increase_size_decrease_resources() {
addParentHosts(14, largeResources.with(fast));
@@ -198,15 +196,15 @@ public class InPlaceResizeProvisionTest {
assertSizeAndResources(listCluster(content1).retired(), 4, resources);
assertSizeAndResources(listCluster(content1).not().retired(), 8, halvedResources);
- // ... same with setting a node to want to retire
- Node nodeToWantoToRetire = listCluster(content1).not().retired().asList().get(0);
- try (NodeMutex lock = tester.nodeRepository().nodes().lockAndGetRequired(nodeToWantoToRetire)) {
+ // Here we'll unretire and resize one of the previously retired nodes as there is no rule against it
+ Node nodeToWantToRetire = listCluster(content1).not().retired().asList().get(0);
+ try (NodeMutex lock = tester.nodeRepository().nodes().lockAndGetRequired(nodeToWantToRetire)) {
tester.nodeRepository().nodes().write(lock.node().withWantToRetire(true, Agent.system,
tester.clock().instant()), lock);
}
new PrepareHelper(tester, app).prepare(content1, 8, 1, halvedResources).activate();
- assertTrue(listCluster(content1).retired().stream().anyMatch(n -> n.equals(nodeToWantoToRetire)));
- assertEquals(5, listCluster(content1).retired().size());
+ assertTrue(listCluster(content1).retired().stream().anyMatch(n -> n.equals(nodeToWantToRetire)));
+ assertEquals(4, listCluster(content1).retired().size());
assertSizeAndResources(listCluster(content1).not().retired(), 8, halvedResources);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidateTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidateTest.java
index 32db213c445..c82b29c7d65 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidateTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidateTest.java
@@ -24,17 +24,17 @@ public class NodeCandidateTest {
@Test
public void testOrdering() {
List<NodeCandidate> expected = List.of(
- new NodeCandidate.ConcreteNodeCandidate(node("01", Node.State.ready), new NodeResources(2, 2, 2, 2), Optional.empty(), false, true, true, false, false),
- new NodeCandidate.ConcreteNodeCandidate(node("02", Node.State.active), new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, false, false),
- new NodeCandidate.ConcreteNodeCandidate(node("04", Node.State.reserved), new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, false, false),
- new NodeCandidate.ConcreteNodeCandidate(node("03", Node.State.inactive), new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, false, false),
- new NodeCandidate.ConcreteNodeCandidate(node("05", Node.State.ready), new NodeResources(2, 2, 2, 2), Optional.of(node("host1", Node.State.active)), true, true, false, true, false),
- new NodeCandidate.ConcreteNodeCandidate(node("06", Node.State.ready), new NodeResources(2, 2, 2, 2), Optional.of(node("host1", Node.State.ready)), true, true, false, true, false),
- new NodeCandidate.ConcreteNodeCandidate(node("07", Node.State.ready), new NodeResources(2, 2, 2, 2), Optional.of(node("host1", Node.State.provisioned)), true, true, false, true, false),
- new NodeCandidate.ConcreteNodeCandidate(node("08", Node.State.ready), new NodeResources(2, 2, 2, 2), Optional.of(node("host1", Node.State.failed)), true, true, false, true, false),
- new NodeCandidate.ConcreteNodeCandidate(node("09", Node.State.ready), new NodeResources(1, 1, 1, 1), Optional.empty(), true, true, false, true, false),
- new NodeCandidate.ConcreteNodeCandidate(node("10", Node.State.ready), new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, true, false),
- new NodeCandidate.ConcreteNodeCandidate(node("11", Node.State.ready), new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, true, false)
+ new NodeCandidate.ConcreteNodeCandidate(node("01", Node.State.ready), false, new NodeResources(2, 2, 2, 2), Optional.empty(), false, true, true, false, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("02", Node.State.active), false, new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, false, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("04", Node.State.reserved), false, new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, false, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("03", Node.State.inactive), false, new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, false, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("05", Node.State.ready), false, new NodeResources(2, 2, 2, 2), Optional.of(node("host1", Node.State.active)), true, true, false, true, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("06", Node.State.ready), false, new NodeResources(2, 2, 2, 2), Optional.of(node("host1", Node.State.ready)), true, true, false, true, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("07", Node.State.ready), false, new NodeResources(2, 2, 2, 2), Optional.of(node("host1", Node.State.provisioned)), true, true, false, true, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("08", Node.State.ready), false, new NodeResources(2, 2, 2, 2), Optional.of(node("host1", Node.State.failed)), true, true, false, true, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("09", Node.State.ready), false, new NodeResources(1, 1, 1, 1), Optional.empty(), true, true, false, true, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("10", Node.State.ready), false, new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, true, false),
+ new NodeCandidate.ConcreteNodeCandidate(node("11", Node.State.ready), false, new NodeResources(2, 2, 2, 2), Optional.empty(), true, true, false, true, false)
);
assertOrder(expected);
}
@@ -148,7 +148,7 @@ public class NodeCandidateTest {
Node parent = Node.create(hostname + "parent", hostname, new Flavor(totalHostResources), Node.State.ready, NodeType.host)
.ipConfig(IP.Config.of(Set.of("::1"), Set.of("::2")))
.build();
- return new NodeCandidate.ConcreteNodeCandidate(node, totalHostResources.subtract(allocatedHostResources), Optional.of(parent),
+ return new NodeCandidate.ConcreteNodeCandidate(node, false, totalHostResources.subtract(allocatedHostResources), Optional.of(parent),
false, exclusiveSwitch, false, true, false);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 477101e10e2..cb4644f179f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -241,7 +241,7 @@ public class ProvisioningTest {
public void application_deployment_variable_application_size() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- ApplicationId application1 = ProvisioningTester.applicationId();
+ ApplicationId application1 = ProvisioningTester.applicationId("a1");
tester.makeReadyHosts(30, defaultResources);
tester.activateTenantHosts();
@@ -498,7 +498,7 @@ public class ProvisioningTest {
@Test
public void test_changing_limits() {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
+ Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 1000, 4));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
.flavors(List.of(hostFlavor))
.build();
@@ -508,52 +508,52 @@ public class ProvisioningTest {
ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
// Initial deployment
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20),
- resources(8, 4, 4, 20, 40)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200),
+ resources(8, 4, 4, 20, 400)));
tester.assertNodes("Initial allocation at min",
- 4, 2, 2, 10, 20,
+ 4, 2, 2, 10, 200,
app1, cluster1);
// Move window above current allocation
- tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 4, 21, 40),
- resources(10, 5, 5, 25, 50)));
+ tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 4, 21, 400),
+ resources(10, 5, 5, 25, 500)));
tester.assertNodes("New allocation at new min",
- 8, 4, 4, 21, 40,
+ 8, 4, 4, 21, 400,
app1, cluster1);
// Move window below current allocation
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20),
- resources(6, 3, 3, 15, 25)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200),
+ resources(6, 3, 3, 15, 250)));
tester.assertNodes("Allocation preserving resources within new limits",
- 6, 2, 3, 14.57, 25,
+ 6, 2, 3, 14.57, 250,
app1, cluster1);
// Widening window does not change allocation
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 15),
- resources(8, 4, 4, 21, 30)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 150),
+ resources(8, 4, 4, 21, 300)));
tester.assertNodes("Same allocation",
- 6, 2, 3, 14.57, 25,
+ 6, 2, 3, 14.57, 250,
app1, cluster1);
// Changing limits in opposite directions cause a mixture of min and max
- tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 10, 30, 10),
- resources(4, 2, 14, 40, 13)));
+ tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 10, 30, 100),
+ resources(4, 2, 14, 40, 130)));
tester.assertNodes("A mix of min and max",
- 4, 1, 10, 30, 13,
+ 4, 1, 10, 30, 130,
app1, cluster1);
// Changing group size
- tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 8, 25, 10),
- resources(9, 3, 12, 35, 15)));
+ tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 8, 25, 100),
+ resources(9, 3, 12, 35, 150)));
tester.assertNodes("Groups changed",
- 9, 3, 8, 30, 13,
+ 9, 3, 8, 30, 130,
app1, cluster1);
// Stop specifying node resources
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(6, 3, NodeResources.unspecified()),
new ClusterResources(9, 3, NodeResources.unspecified())));
tester.assertNodes("No change",
- 9, 3, 8, 30, 13,
+ 9, 3, 8, 30, 130,
app1, cluster1);
}
@@ -821,7 +821,7 @@ public class ProvisioningTest {
public void highest_node_indexes_are_retired_first() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- ApplicationId application1 = ProvisioningTester.applicationId();
+ ApplicationId application1 = ProvisioningTester.applicationId("a1");
tester.makeReadyHosts(14, defaultResources).activateTenantHosts();
@@ -833,17 +833,19 @@ public class ProvisioningTest {
SystemState state2 = prepare(application1, 2, 2, 2, 2, defaultResources, tester);
tester.activate(application1, state2.allHosts);
- // content0
- assertFalse(state2.hostByMembership("content0", 0, 0).membership().get().retired());
- assertFalse(state2.hostByMembership("content0", 0, 1).membership().get().retired());
- assertTrue( state2.hostByMembership("content0", 0, 2).membership().get().retired());
- assertTrue( state2.hostByMembership("content0", 0, 3).membership().get().retired());
-
- // content1
- assertFalse(state2.hostByMembership("content1", 0, 0).membership().get().retired());
- assertFalse(state2.hostByMembership("content1", 0, 1).membership().get().retired());
- assertTrue( state2.hostByMembership("content1", 0, 2).membership().get().retired());
- assertTrue( state2.hostByMembership("content1", 0, 3).membership().get().retired());
+ List<Integer> unretiredInContent0Indices = state2.content0.stream().filter(h -> ! h.membership().get().retired()).map(h -> h.membership().get().index()).toList();
+ for (var host : state2.content0) {
+ if ( ! host.membership().get().retired()) continue;
+ for (int unretiredIndex : unretiredInContent0Indices)
+ assertTrue(host.membership().get().index() > unretiredIndex);
+ }
+
+ List<Integer> unretiredInContent1Indices = state2.content1.stream().filter(h -> ! h.membership().get().retired()).map(h -> h.membership().get().index()).toList();
+ for (var host : state2.content1) {
+ if ( ! host.membership().get().retired()) continue;
+ for (int unretiredIndex : unretiredInContent1Indices)
+ assertTrue(host.membership().get().index() > unretiredIndex);
+ }
}
@Test
@@ -857,7 +859,7 @@ public class ProvisioningTest {
tester.deploy(application, spec, Capacity.from(new ClusterResources(6, 1, defaultResources)));
- // Pick out a random application node and make it's parent larger, this will make it the spare host
+ // Pick out a random application node and make its parent larger, this will make it the spare host
NodeList nodes = tester.nodeRepository().nodes().list();
Node randomNode = nodes.owner(application).shuffle(new Random()).first().get();
tester.nodeRepository().nodes().write(nodes.parentOf(randomNode).get()
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 7e85131eaf4..a3a90d58c2c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -47,6 +47,7 @@ import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.node.filter.NodeHostFilter;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
+import com.yahoo.vespa.hosted.provision.testutils.InMemoryProvisionLogger;
import com.yahoo.vespa.hosted.provision.testutils.MockNameResolver;
import com.yahoo.vespa.hosted.provision.testutils.MockProvisionServiceProvider;
import com.yahoo.vespa.hosted.provision.testutils.OrchestratorMock;
@@ -93,7 +94,7 @@ public class ProvisioningTester {
private final HostProvisioner hostProvisioner;
private final NodeRepositoryProvisioner provisioner;
private final CapacityPolicies capacityPolicies;
- private final ProvisionLogger provisionLogger;
+ private final InMemoryProvisionLogger provisionLogger;
private final LoadBalancerServiceMock loadBalancerService;
private int nextHost = 0;
@@ -132,7 +133,7 @@ public class ProvisioningTester {
1000);
this.provisioner = new NodeRepositoryProvisioner(nodeRepository, zone, provisionServiceProvider);
this.capacityPolicies = new CapacityPolicies(nodeRepository);
- this.provisionLogger = new NullProvisionLogger();
+ this.provisionLogger = new InMemoryProvisionLogger();
this.loadBalancerService = loadBalancerService;
}
@@ -162,6 +163,7 @@ public class ProvisioningTester {
public CapacityPolicies capacityPolicies() { return capacityPolicies; }
public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); }
public InMemoryFlagSource flagSource() { return (InMemoryFlagSource) nodeRepository.flagSource(); }
+ public InMemoryProvisionLogger provisionLogger() { return provisionLogger; }
public Node node(String hostname) { return nodeRepository.nodes().node(hostname).get(); }
public int decideSize(Capacity capacity, ApplicationId application) {
@@ -773,10 +775,6 @@ public class ProvisioningTester {
}
- private static class NullProvisionLogger implements ProvisionLogger {
- @Override public void log(Level level, String message) { }
- }
-
static class MockResourcesCalculator implements HostResourcesCalculator {
private final int memoryTaxGb;
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index 62f42b0d035..6ec189d98c3 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -23,6 +23,7 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.yolean.Exceptions;
import org.junit.Test;
import java.util.HashSet;
@@ -442,9 +443,8 @@ public class VirtualNodeProvisioningTest {
"Could not satisfy request for 3 nodes with " +
"[vcpu: 2.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
"in tenant2.app2 container cluster 'my-container' 6.39: " +
- "Node allocation failure on group 0: " +
"Not enough suitable nodes available due to host exclusivity constraints",
- e.getMessage());
+ Exceptions.toMessageString(e));
}
// Adding 3 nodes of another application for the same tenant works
@@ -469,8 +469,8 @@ public class VirtualNodeProvisioningTest {
assertEquals("Could not satisfy request for 2 nodes with " +
"[vcpu: 1.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote, architecture: any] " +
"in tenant.app1 content cluster 'my-content'" +
- " 6.42: Node allocation failure on group 0",
- e.getMessage());
+ " 6.42",
+ Exceptions.toMessageString(e));
}
}
@@ -513,18 +513,18 @@ public class VirtualNodeProvisioningTest {
2, 1, 20, 16, 50, 1.0,
app1, cluster1);
- var newMinResources = new NodeResources( 5, 6, 11, 1);
- var newMaxResources = new NodeResources(20, 10, 30, 1);
+ var newMinResources = new NodeResources( 5, 6, 18, 1);
+ var newMaxResources = new NodeResources(20, 10, 90, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("New allocation preserves total (redundancy adjusted) resources",
- 7, 1, 5, 6.0, 11, 1.0,
+ 7, 1, 5, 6.0, 18, 1.0,
app1, cluster1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("Redeploying does not cause changes",
- 7, 1, 5, 6.0, 11, 1.0,
+ 7, 1, 5, 6.0, 18, 1.0,
app1, cluster1);
}
diff --git a/parent/pom.xml b/parent/pom.xml
index 62ab23614be..f240f80c3c6 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -252,7 +252,7 @@
<plugin>
<groupId>com.helger.maven</groupId>
<artifactId>ph-javacc-maven-plugin</artifactId>
- <version>4.1.2</version>
+ <version>4.1.5</version>
<executions>
<execution>
<phase>generate-sources</phase>
@@ -826,6 +826,11 @@
<version>2.2.1</version>
</dependency>
<dependency>
+ <groupId>org.apache.maven.enforcer</groupId>
+ <artifactId>enforcer-api</artifactId>
+ <version>${maven-enforcer-plugin.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.maven.plugin-tools</groupId>
<artifactId>maven-plugin-annotations</artifactId>
<version>${maven-plugin-tools.version}</version>
@@ -836,6 +841,11 @@
<version>${maven-jar-plugin.version}</version>
</dependency>
<dependency>
+ <groupId>org.apache.maven.shared</groupId>
+ <artifactId>maven-dependency-tree</artifactId>
+ <version>3.2.0</version>
+ </dependency>
+ <dependency>
<groupId>org.apache.maven.surefire</groupId>
<artifactId>surefire-junit4</artifactId>
<version>${surefire.version}</version>
@@ -1146,7 +1156,7 @@
<commons-io.version>2.11.0</commons-io.version>
<commons.math3.version>3.6.1</commons.math3.version>
<eclipse-collections.version>11.0.0</eclipse-collections.version>
- <felix.version>7.0.1</felix.version>
+ <felix.version>7.0.5</felix.version>
<felix.log.version>1.0.1</felix.log.version>
<findbugs.version>3.0.2</findbugs.version> <!-- Should be kept in sync with guava -->
<hdrhistogram.version>2.1.12</hdrhistogram.version>
@@ -1157,12 +1167,12 @@
<junit.version>5.8.1</junit.version>
<maven-archiver.version>3.6.0</maven-archiver.version>
<maven-assembly-plugin.version>3.3.0</maven-assembly-plugin.version>
- <maven-bundle-plugin.version>5.1.2</maven-bundle-plugin.version>
+ <maven-bundle-plugin.version>5.1.9</maven-bundle-plugin.version>
<maven-compiler-plugin.version>3.10.1</maven-compiler-plugin.version>
<maven-core.version>3.8.7</maven-core.version>
<maven-dependency-plugin.version>3.6.0</maven-dependency-plugin.version> <!-- NOTE: When upgrading, also update explicit versions in tenant base poms! -->
<maven-deploy-plugin.version>2.8.2</maven-deploy-plugin.version>
- <maven-enforcer-plugin.version>3.0.0</maven-enforcer-plugin.version>
+ <maven-enforcer-plugin.version>3.3.0</maven-enforcer-plugin.version>
<maven-failsafe-plugin.version>3.0.0-M6</maven-failsafe-plugin.version>
<maven-install-plugin.version>3.1.1</maven-install-plugin.version>
<maven-jar-plugin.version>3.2.0</maven-jar-plugin.version>
diff --git a/renovate.json b/renovate.json
new file mode 100644
index 00000000000..6b37e2049cb
--- /dev/null
+++ b/renovate.json
@@ -0,0 +1,21 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:base"
+ ],
+ "dependencyDashboardApproval": true,
+ "transitiveRemediation": true,
+ "prHourlyLimit": 10,
+ "prConcurrentLimit": 10,
+ "ignorePaths": [],
+ "ignoreDeps": [
+ "com.github.spotbugs:spotbugs-annotations",
+ "com.yahoo.vespa.bundle-plugin:test-bundles",
+ "com.yahoo.vespa.jdisc_core:test_bundles",
+ "com.yahoo.vespa:cloud-tenant-base",
+ "com.yahoo.vespa:parent",
+ "com.yahoo.vespa:zookeeper-server-parent",
+ "github.com/go-json-experiment/json",
+ "javax.servlet:javax.servlet-api"
+ ]
+}
diff --git a/screwdriver.yaml b/screwdriver.yaml
index 10ae44aaefb..79a1569633f 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -10,6 +10,7 @@ shared:
environment:
USER_SHELL_BIN: bash
annotations:
+ screwdriver.cd/restrictPR: fork
restore-cache: &restore-cache
restore-cache: |
(cd /tmp && if [[ -f $MAIN_CACHE_FILE ]]; then tar xf $MAIN_CACHE_FILE; fi)
diff --git a/searchcore/src/tests/proton/flushengine/flushengine_test.cpp b/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
index 054d41be89a..6cdf0c478ad 100644
--- a/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
+++ b/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
@@ -526,7 +526,12 @@ TEST_F("require that oldest serial is found", Fixture(1, IINTERVAL))
EXPECT_TRUE(handler->_done.await(LONG_TIMEOUT));
EXPECT_EQUAL(25ul, handler->_oldestSerial);
FlushDoneHistory handlerFlushDoneHistory(handler->getFlushDoneHistory());
- EXPECT_EQUAL(FlushDoneHistory({ 10, 20, 25 }), handlerFlushDoneHistory);
+ if (handlerFlushDoneHistory.size() == 2u) {
+ // Lost sample of oldest serial might happen when system load is high
+ EXPECT_EQUAL(FlushDoneHistory({ 10, 25 }), handlerFlushDoneHistory);
+ } else {
+ EXPECT_EQUAL(FlushDoneHistory({ 10, 20, 25 }), handlerFlushDoneHistory);
+ }
}
TEST_F("require that GC targets are not considered when oldest serial is found", Fixture(1, IINTERVAL))
diff --git a/searchlib/abi-spec.json b/searchlib/abi-spec.json
index 7d6f2f8790c..a8e028ff6ad 100644
--- a/searchlib/abi-spec.json
+++ b/searchlib/abi-spec.json
@@ -822,46 +822,43 @@
"abstract"
],
"methods" : [
- "public void setTabSize(int)",
- "public int getTabSize()",
- "protected void expandBuff(boolean)",
+ "public void <init>(int, int, int)",
+ "public final void reInit(int, int, int)",
"protected abstract int streamRead(char[], int, int)",
"protected abstract void streamClose()",
+ "protected int getBufSizeAfterExpansion()",
+ "protected void expandBuff(boolean)",
+ "protected final void internalAdjustBuffSize()",
"protected void fillBuff()",
- "public char beginToken()",
- "protected void updateLineColumn(char)",
+ "protected final void internalSetBufLineColumn(int, int)",
+ "protected final void internalUpdateLineColumn(char)",
"public char readChar()",
+ "public char beginToken()",
"public int getBeginColumn()",
"public int getBeginLine()",
"public int getEndColumn()",
"public int getEndLine()",
"public void backup(int)",
- "public void <init>(int, int, int)",
- "public void reInit(int, int, int)",
"public java.lang.String getImage()",
"public char[] getSuffix(int)",
"public void done()",
- "public void adjustBeginLineColumn(int, int)",
- "public void setTrackLineColumn(boolean)",
- "public boolean isTrackLineColumn()"
+ "public final int getTabSize()",
+ "public final void setTabSize(int)",
+ "public final void adjustBeginLineColumn(int, int)",
+ "protected final int getLine()",
+ "protected final int getColumn()",
+ "public final boolean isTrackLineColumn()",
+ "public final void setTrackLineColumn(boolean)"
],
"fields" : [
"public static final int DEFAULT_BUF_SIZE",
- "protected int bufpos",
+ "protected char[] buffer",
"protected int bufsize",
+ "protected int bufpos",
"protected int available",
"protected int tokenBegin",
- "protected int[] bufline",
- "protected int[] bufcolumn",
- "protected int column",
- "protected int line",
- "protected boolean prevCharIsCR",
- "protected boolean prevCharIsLF",
- "protected char[] buffer",
- "protected int maxNextCharInd",
"protected int inBuf",
- "protected char[] nextCharBuf",
- "protected int nextCharInd"
+ "protected int maxNextCharInd"
]
},
"com.yahoo.searchlib.rankingexpression.parser.CharStream" : {
@@ -883,10 +880,10 @@
"public abstract java.lang.String getImage()",
"public abstract char[] getSuffix(int)",
"public abstract void done()",
- "public abstract void setTabSize(int)",
"public abstract int getTabSize()",
- "public abstract void setTrackLineColumn(boolean)",
- "public abstract boolean isTrackLineColumn()"
+ "public abstract void setTabSize(int)",
+ "public abstract boolean isTrackLineColumn()",
+ "public abstract void setTrackLineColumn(boolean)"
],
"fields" : [ ]
},
@@ -989,9 +986,7 @@
"public final com.yahoo.tensor.functions.Slice$DimensionValue dimensionValue(java.util.Optional)",
"public final java.lang.String label()",
"public final java.lang.String string()",
- "public void <init>(java.io.InputStream)",
"public void <init>(java.io.InputStream, java.lang.String)",
- "public void ReInit(java.io.InputStream)",
"public void ReInit(java.io.InputStream, java.lang.String)",
"public void <init>(java.io.Reader)",
"public void ReInit(java.io.Reader)",
@@ -1156,26 +1151,22 @@
"public"
],
"methods" : [
- "protected int streamRead(char[], int, int)",
- "protected void streamClose()",
- "protected void fillBuff()",
- "public char readChar()",
"public void <init>(java.io.Reader, int, int, int)",
"public void <init>(java.io.Reader, int, int)",
"public void <init>(java.io.Reader)",
- "public void reInit(java.io.Reader)",
- "public void reInit(java.io.Reader, int, int)",
"public void reInit(java.io.Reader, int, int, int)",
+ "public void reInit(java.io.Reader, int, int)",
+ "public void reInit(java.io.Reader)",
"public void <init>(java.io.InputStream, java.lang.String, int, int, int)",
"public void <init>(java.io.InputStream, java.lang.String, int, int)",
"public void <init>(java.io.InputStream, java.lang.String)",
"public void reInit(java.io.InputStream, java.lang.String)",
"public void reInit(java.io.InputStream, java.lang.String, int, int)",
- "public void reInit(java.io.InputStream, java.lang.String, int, int, int)"
+ "public void reInit(java.io.InputStream, java.lang.String, int, int, int)",
+ "protected int streamRead(char[], int, int)",
+ "protected void streamClose()"
],
- "fields" : [
- "protected java.io.Reader inputStream"
- ]
+ "fields" : [ ]
},
"com.yahoo.searchlib.rankingexpression.parser.Token" : {
"superClass" : "java.lang.Object",
diff --git a/searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp b/searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp
index 6278b216b52..75c49c3a003 100644
--- a/searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp
+++ b/searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp
@@ -151,12 +151,10 @@ FieldOptions::~FieldOptions() = default;
void
FieldOptions::validateFields(const Schema &schema)
{
- for (std::vector<vespalib::string>::const_iterator
- i = _fields.begin(), ie = _fields.end();
- i != ie; ++i) {
- uint32_t fieldId = schema.getIndexFieldId(*i);
+ for (const auto& field : _fields) {
+ uint32_t fieldId = schema.getIndexFieldId(field);
if (fieldId == Schema::UNKNOWN_FIELD_ID) {
- LOG(error, "No such field: %s", i->c_str());
+ LOG(error, "No such field: %s", field.c_str());
std::_Exit(1);
}
_ids.push_back(fieldId);
@@ -399,10 +397,8 @@ ShowPostingListSubApp::readWordList(const Schema &schema)
_wmv.resize(numFields);
if (!_fieldOptions.empty()) {
- for (std::vector<uint32_t>::const_iterator
- i = _fieldOptions._ids.begin(), ie = _fieldOptions._ids.end();
- i != ie; ++i) {
- SchemaUtil::IndexIterator index(schema, *i);
+ for (auto id : _fieldOptions._ids) {
+ SchemaUtil::IndexIterator index(schema, id);
if (!readWordList(index))
return false;
}
@@ -462,10 +458,8 @@ ShowPostingListSubApp::showTransposedPostingList()
return;
std::vector<PosEntry> entries;
if (!_fieldOptions.empty()) {
- for (std::vector<uint32_t>::const_iterator
- i = _fieldOptions._ids.begin(), ie = _fieldOptions._ids.end();
- i != ie; ++i) {
- SchemaUtil::IndexIterator index(schema, *i);
+ for (auto id : _fieldOptions._ids) {
+ SchemaUtil::IndexIterator index(schema, id);
readPostings(index, entries);
}
} else {
@@ -481,35 +475,34 @@ ShowPostingListSubApp::showTransposedPostingList()
uint32_t prevElemId = static_cast<uint32_t>(-1);
uint32_t prevElementLen = 0;
int32_t prevElementWeight = 0;
- for (std::vector<PosEntry>::const_iterator
- i = entries.begin(), ie = entries.end(); i != ie; ++i) {
- if (i->_docId != prevDocId) {
- std::cout << "docId = " << i->_docId << '\n';
- prevDocId = i->_docId;
+ for (const auto& entry : entries) {
+ if (entry._docId != prevDocId) {
+ std::cout << "docId = " << entry._docId << '\n';
+ prevDocId = entry._docId;
prevFieldId = static_cast<uint32_t>(-1);
}
- if (i->_fieldId != prevFieldId) {
- std::cout << " field = " << i->_fieldId <<
- " \"" << schema.getIndexField(i->_fieldId).getName() <<
+ if (entry._fieldId != prevFieldId) {
+ std::cout << " field = " << entry._fieldId <<
+ " \"" << schema.getIndexField(entry._fieldId).getName() <<
"\"\n";
- prevFieldId = i->_fieldId;
+ prevFieldId = entry._fieldId;
prevElemId = static_cast<uint32_t>(-1);
}
- if (i->_elementId != prevElemId ||
- i->_elementLen != prevElementLen ||
- i->_elementWeight != prevElementWeight) {
- std::cout << " element = " << i->_elementId <<
- ", elementLen = " << i->_elementLen <<
- ", elementWeight = " << i->_elementWeight <<
+ if (entry._elementId != prevElemId ||
+ entry._elementLen != prevElementLen ||
+ entry._elementWeight != prevElementWeight) {
+ std::cout << " element = " << entry._elementId <<
+ ", elementLen = " << entry._elementLen <<
+ ", elementWeight = " << entry._elementWeight <<
'\n';
- prevElemId = i->_elementId;
- prevElementLen = i->_elementLen;
- prevElementWeight = i->_elementWeight;
+ prevElemId = entry._elementId;
+ prevElementLen = entry._elementLen;
+ prevElementWeight = entry._elementWeight;
}
- assert(i->_wordNum != 0);
- assert(i->_wordNum < _wordsv[i->_fieldId].size());
- std::cout << " pos = " << i->_wordPos <<
- ", word = \"" << _wordsv[i->_fieldId][i->_wordNum] << "\"";
+ assert(entry._wordNum != 0);
+ assert(entry._wordNum < _wordsv[entry._fieldId].size());
+ std::cout << " pos = " << entry._wordPos <<
+ ", word = \"" << _wordsv[entry._fieldId][entry._wordNum] << "\"";
std::cout << '\n';
}
}
@@ -588,13 +581,10 @@ ShowPostingListSubApp::showPostingList()
handle->second);
std::vector<TermFieldMatchData> tfmdv(numFields);
TermFieldMatchDataArray tfmda;
- for (std::vector<TermFieldMatchData>::iterator
- tfit = tfmdv.begin(), tfite = tfmdv.end();
- tfit != tfite; ++tfit) {
- tfmda.add(&*tfit);
+ for (auto& tfmd : tfmdv) {
+ tfmda.add(&tfmd);
}
- std::unique_ptr<SearchIterator> sb(handle->second.createIterator(
- handle->first, tfmda));
+ auto sb = handle->second.createIterator(handle->first, tfmda);
sb->initFullRange();
uint32_t docId = 0;
bool first = true;
diff --git a/searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp b/searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp
index d153481ef36..f87096aa1e3 100644
--- a/searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp
+++ b/searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp
@@ -228,9 +228,8 @@ DiskIndexTest::requireThatWeCanReadPostingList()
{ // field 'f1'
LookupResult::UP r = _index->lookup(0, "w1");
PostingListHandle::UP h = _index->readPostingList(*r);
- SearchIterator * sb = h->createIterator(r->counts, mda);
+ auto sb = h->createIterator(r->counts, mda);
EXPECT_EQ(SimpleResult({1,3}), SimpleResult().search(*sb));
- delete sb;
}
}
diff --git a/searchlib/src/tests/query/streaming_query_large_test.cpp b/searchlib/src/tests/query/streaming_query_large_test.cpp
index b39dad43a7b..13af3774e7d 100644
--- a/searchlib/src/tests/query/streaming_query_large_test.cpp
+++ b/searchlib/src/tests/query/streaming_query_large_test.cpp
@@ -29,15 +29,11 @@ namespace {
// a stack overflow if the stack usage increases.
TEST("testveryLongQueryResultingInBug6850778") {
uint32_t NUMITEMS=20000;
-#ifdef VESPA_USE_ADDRESS_SANITIZER
- setMaxStackSize(12_Mi);
-#else
-#ifdef VESPA_USE_THREAD_SANITIZER
+#if defined(VESPA_USE_THREAD_SANITIZER) || defined(VESPA_USE_ADDRESS_SANITIZER)
NUMITEMS = 10000;
#else
setMaxStackSize(4_Mi);
#endif
-#endif
QueryBuilder<SimpleQueryNodeTypes> builder;
for (uint32_t i=0; i <= NUMITEMS; i++) {
builder.addAnd(2);
diff --git a/searchlib/src/tests/queryeval/filter_search/filter_search_test.cpp b/searchlib/src/tests/queryeval/filter_search/filter_search_test.cpp
index ea4753ab847..8f2f8f2e96b 100644
--- a/searchlib/src/tests/queryeval/filter_search/filter_search_test.cpp
+++ b/searchlib/src/tests/queryeval/filter_search/filter_search_test.cpp
@@ -273,7 +273,9 @@ struct WeightedSetTermAdapter {
WeightedSetTermAdapter();
~WeightedSetTermAdapter();
void addChild(std::unique_ptr<Blueprint> child) {
- blueprint.addTerm(std::move(child), 100);
+ Blueprint::HitEstimate estimate = blueprint.getState().estimate();
+ blueprint.addTerm(std::move(child), 100, estimate);
+ blueprint.complete(estimate);
}
auto createFilterSearch(bool strict, Constraint constraint) const {
return blueprint.createFilterSearch(strict, constraint);
@@ -292,7 +294,9 @@ struct DotProductAdapter {
void addChild(std::unique_ptr<Blueprint> child) {
auto child_field = blueprint.getNextChildField(field);
auto term = std::make_unique<LeafProxy>(child_field, std::move(child));
- blueprint.addTerm(std::move(term), 100);
+ Blueprint::HitEstimate estimate = blueprint.getState().estimate();
+ blueprint.addTerm(std::move(term), 100, estimate);
+ blueprint.complete(estimate);
}
auto createFilterSearch(bool strict, Constraint constraint) const {
return blueprint.createFilterSearch(strict, constraint);
@@ -310,7 +314,9 @@ struct ParallelWeakAndAdapter {
void addChild(std::unique_ptr<Blueprint> child) {
auto child_field = blueprint.getNextChildField(field);
auto term = std::make_unique<LeafProxy>(child_field, std::move(child));
- blueprint.addTerm(std::move(term), 100);
+ Blueprint::HitEstimate estimate = blueprint.getState().estimate();
+ blueprint.addTerm(std::move(term), 100, estimate);
+ blueprint.complete(estimate);
}
auto createFilterSearch(bool strict, Constraint constraint) const {
return blueprint.createFilterSearch(strict, constraint);
diff --git a/searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp b/searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp
index 90e16d4feff..f93aa537625 100644
--- a/searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp
+++ b/searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp
@@ -312,9 +312,11 @@ TEST("require that children get a common (yet separate) term field match data")
auto top_handle = layout.allocTermField(42);
FieldSpec top_spec("foo", 42, top_handle);
WeightedSetTermBlueprint blueprint(top_spec);
+ queryeval::Blueprint::HitEstimate estimate;
for (size_t i = 0; i < 5; ++i) {
- blueprint.addTerm(vmd.create(blueprint.getNextChildField(top_spec)), 1);
+ blueprint.addTerm(vmd.create(blueprint.getNextChildField(top_spec)), 1, estimate);
}
+ blueprint.complete(estimate);
auto match_data = layout.createMatchData();
auto search = blueprint.createSearch(*match_data, true);
auto top_tfmd = match_data->resolveTermField(top_handle);
diff --git a/searchlib/src/vespa/searchcommon/common/schema.cpp b/searchlib/src/vespa/searchcommon/common/schema.cpp
index 1f2f924a4cd..7a3e15dbd6d 100644
--- a/searchlib/src/vespa/searchcommon/common/schema.cpp
+++ b/searchlib/src/vespa/searchcommon/common/schema.cpp
@@ -59,7 +59,7 @@ template <typename T>
uint32_t
getFieldId(vespalib::stringref name, const T &map)
{
- typename T::const_iterator it = map.find(name);
+ auto it = map.find(name);
return (it != map.end()) ? it->second : Schema::UNKNOWN_FIELD_ID;
}
@@ -433,13 +433,12 @@ struct IntersectHelper {
void intersect(const std::vector<T> &set1, const std::vector<T> &set2,
const Map &set2_map,
std::vector<T> &intersection, Map &intersection_map) {
- for (typename std::vector<T>::const_iterator
- it = set1.begin(); it != set1.end(); ++it) {
- typename Map::const_iterator it2 = set2_map.find(it->getName());
+ for (const auto& elem : set1) {
+ auto it2 = set2_map.find(elem.getName());
if (it2 != set2_map.end()) {
- if (is_matching(*it, set2[it2->second])) {
- intersection_map[it->getName()] = intersection.size();
- intersection.push_back(*it);
+ if (is_matching(elem, set2[it2->second])) {
+ intersection_map[elem.getName()] = intersection.size();
+ intersection.push_back(elem);
}
}
}
diff --git a/searchlib/src/vespa/searchlib/aggregation/group.cpp b/searchlib/src/vespa/searchlib/aggregation/group.cpp
index 5a16756f0d7..60afcc96ef5 100644
--- a/searchlib/src/vespa/searchlib/aggregation/group.cpp
+++ b/searchlib/src/vespa/searchlib/aggregation/group.cpp
@@ -98,7 +98,7 @@ Group::Value::groupSingle(const ResultNode & selectResult, HitRank rank, const G
}
GroupHash & childMap = *_childInfo._childMap;
Group * group(nullptr);
- GroupHash::iterator found = childMap.find(selectResult);
+ auto found = childMap.find(selectResult);
if (found == childMap.end()) { // group not present in child map
if (level.allowMoreGroups(childMap.size())) {
group = new Group(level.getGroupPrototype());
diff --git a/searchlib/src/vespa/searchlib/aggregation/grouping.cpp b/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
index e9df10d3a61..96cfb29a693 100644
--- a/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
+++ b/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
@@ -309,7 +309,7 @@ bool
Grouping::needResort() const
{
bool resort(_root.needResort());
- for (GroupingLevelList::const_iterator it(_levels.begin()), mt(_levels.end()); !resort && (it != mt); ++it) {
+ for (auto it(_levels.begin()), mt(_levels.end()); !resort && (it != mt); ++it) {
resort = it->needResort();
}
return (resort && getTopN() <= 0);
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
index 532d645524b..152fcef5e8b 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
@@ -412,7 +412,6 @@ template <typename SearchType>
class DirectWeightedSetBlueprint : public ComplexLeafBlueprint
{
private:
- HitEstimate _estimate;
std::vector<int32_t> _weights;
std::vector<IDocumentWeightAttribute::LookupResult> _terms;
const IAttributeVector &_iattr;
@@ -422,7 +421,6 @@ private:
public:
DirectWeightedSetBlueprint(const FieldSpec &field, const IAttributeVector &iattr, const IDocumentWeightAttribute &attr, size_t size_hint)
: ComplexLeafBlueprint(field),
- _estimate(),
_weights(),
_terms(),
_iattr(iattr),
@@ -435,20 +433,22 @@ public:
}
~DirectWeightedSetBlueprint() override;
- void addTerm(const IDocumentWeightAttribute::LookupKey & key, int32_t weight) {
+ void addTerm(const IDocumentWeightAttribute::LookupKey & key, int32_t weight, HitEstimate & estimate) {
IDocumentWeightAttribute::LookupResult result = _attr.lookup(key, _dictionary_snapshot);
HitEstimate childEst(result.posting_size, (result.posting_size == 0));
if (!childEst.empty) {
- if (_estimate.empty) {
- _estimate = childEst;
+ if (estimate.empty) {
+ estimate = childEst;
} else {
- _estimate.estHits += childEst.estHits;
+ estimate.estHits += childEst.estHits;
}
- setEstimate(_estimate);
_weights.push_back(weight);
_terms.push_back(result);
}
}
+ void complete(HitEstimate estimate) {
+ setEstimate(estimate);
+ }
SearchIterator::UP createLeafSearch(const TermFieldMatchDataArray &tfmda, bool) const override;
@@ -506,7 +506,6 @@ DirectWeightedSetBlueprint<SearchType>::createFilterSearch(bool, FilterConstrain
class DirectWandBlueprint : public queryeval::ComplexLeafBlueprint
{
private:
- HitEstimate _estimate;
mutable queryeval::SharedWeakAndPriorityQueue _scores;
const queryeval::wand::score_t _scoreThreshold;
double _thresholdBoostFactor;
@@ -520,7 +519,6 @@ public:
DirectWandBlueprint(const FieldSpec &field, const IDocumentWeightAttribute &attr, uint32_t scoresToTrack,
queryeval::wand::score_t scoreThreshold, double thresholdBoostFactor, size_t size_hint)
: ComplexLeafBlueprint(field),
- _estimate(),
_scores(scoresToTrack),
_scoreThreshold(scoreThreshold),
_thresholdBoostFactor(thresholdBoostFactor),
@@ -536,20 +534,22 @@ public:
~DirectWandBlueprint() override;
- void addTerm(const IDocumentWeightAttribute::LookupKey & key, int32_t weight) {
+ void addTerm(const IDocumentWeightAttribute::LookupKey & key, int32_t weight, HitEstimate & estimate) {
IDocumentWeightAttribute::LookupResult result = _attr.lookup(key, _dictionary_snapshot);
HitEstimate childEst(result.posting_size, (result.posting_size == 0));
if (!childEst.empty) {
- if (_estimate.empty) {
- _estimate = childEst;
+ if (estimate.empty) {
+ estimate = childEst;
} else {
- _estimate.estHits += childEst.estHits;
+ estimate.estHits += childEst.estHits;
}
- setEstimate(_estimate);
_weights.push_back(weight);
_terms.push_back(result);
}
}
+ void complete(HitEstimate estimate) {
+ setEstimate(estimate);
+ }
SearchIterator::UP createLeafSearch(const TermFieldMatchDataArray &tfmda, bool strict) const override {
assert(tfmda.size() == 1);
@@ -857,9 +857,11 @@ template <typename WS>
void
CreateBlueprintVisitor::createDirectWeightedSet(WS *bp, MultiTerm &n) {
Blueprint::UP result(bp);
+ Blueprint::HitEstimate estimate;
for (uint32_t i(0); i < n.getNumTerms(); i++) {
- bp->addTerm(LookupKey(n, i), n.weight(i).percent());
+ bp->addTerm(LookupKey(n, i), n.weight(i).percent(), estimate);
}
+ bp->complete(estimate);
setResult(std::move(result));
}
@@ -869,11 +871,13 @@ CreateBlueprintVisitor::createShallowWeightedSet(WS *bp, MultiTerm &n, const Fie
Blueprint::UP result(bp);
SearchContextParams scParams = createContextParams();
bp->reserve(n.getNumTerms());
+ Blueprint::HitEstimate estimate;
for (uint32_t i(0); i < n.getNumTerms(); i++) {
FieldSpec childfs = bp->getNextChildField(fs);
auto term = n.getAsString(i);
- bp->addTerm(std::make_unique<AttributeFieldBlueprint>(childfs, _attr, extractTerm(term.first, isInteger), scParams.useBitVector(childfs.isFilter())), term.second.percent());
+ bp->addTerm(std::make_unique<AttributeFieldBlueprint>(childfs, _attr, extractTerm(term.first, isInteger), scParams.useBitVector(childfs.isFilter())), term.second.percent(), estimate);
}
+ bp->complete(estimate);
setResult(std::move(result));
}
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
index c467590fe69..2fac2350735 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
@@ -99,12 +99,12 @@ public:
}
}
void and_hits_into(BitVector & result,uint32_t begin_id) override {
- typename Map::iterator end = _map.end();
+ auto end = _map.end();
result.foreach_truebit([&, end](uint32_t key) { if ( _map.find(_attr.getToken(key)) == end) { result.clearBit(key); }}, begin_id);
}
void doSeek(uint32_t docId) override {
- typename Map::const_iterator pos = _map.find(_attr.getToken(docId));
+ auto pos = _map.find(_attr.getToken(docId));
if (pos != _map.end()) {
_weight = pos->second;
setDocId(docId);
diff --git a/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp b/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
index 97a7dc8bcb1..443fc8369d3 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
@@ -12,7 +12,7 @@ namespace search {
const IAttributeVector *
AttributeContext::getAttribute(AttributeMap & map, const string & name, bool stableEnum) const
{
- AttributeMap::const_iterator itr = map.find(name);
+ auto itr = map.find(name);
if (itr != map.end()) {
if (itr->second) {
return itr->second->attribute();
diff --git a/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp b/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
index 8c1b453c354..c85d77ff70a 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
@@ -33,7 +33,7 @@ waitBaseDir(const string &baseDir)
std::unique_lock<std::mutex> guard(baseDirLock);
bool waited = false;
- BaseDirSet::iterator it = baseDirSet.find(baseDir);
+ auto it = baseDirSet.find(baseDir);
while (it != baseDirSet.end()) {
if (!waited) {
waited = true;
@@ -57,7 +57,7 @@ dropBaseDir(const string &baseDir)
return;
std::lock_guard<std::mutex> guard(baseDirLock);
- BaseDirSet::iterator it = baseDirSet.find(baseDir);
+ auto it = baseDirSet.find(baseDir);
if (it == baseDirSet.end()) {
LOG(error, "AttributeManager: Cannot drop basedir %s, already dropped", baseDir.c_str());
} else {
@@ -114,8 +114,8 @@ AttributeManager::~AttributeManager()
uint64_t AttributeManager::getMemoryFootprint() const
{
uint64_t sum(0);
- for(AttributeMap::const_iterator it(_attributes.begin()), mt(_attributes.end()); it != mt; it++) {
- sum += it->second->getStatus().getAllocated();
+ for (const auto& elem : _attributes) {
+ sum += elem.second->getStatus().getAllocated();
}
return sum;
@@ -125,7 +125,7 @@ const AttributeManager::VectorHolder *
AttributeManager::findAndLoadAttribute(const string & name) const
{
const VectorHolder * loadedVector(nullptr);
- AttributeMap::const_iterator found = _attributes.find(name);
+ auto found = _attributes.find(name);
if (found != _attributes.end()) {
AttributeVector & vec = *found->second;
if ( ! vec.isLoaded() ) {
@@ -173,7 +173,7 @@ bool
AttributeManager::add(const AttributeManager::VectorHolder & vector)
{
bool retval(true);
- AttributeMap::iterator found = _attributes.find(vector->getName());
+ auto found = _attributes.find(vector->getName());
if (found == _attributes.end()) {
vector->setInterlock(_interlock);
_attributes[vector->getName()] = vector;
@@ -186,8 +186,8 @@ void
AttributeManager::getAttributeList(AttributeList & list) const
{
list.reserve(_attributes.size());
- for(AttributeMap::const_iterator it(_attributes.begin()), mt(_attributes.end()); it != mt; it++) {
- list.push_back(AttributeGuard(it->second));
+ for (const auto& elem : _attributes) {
+ list.push_back(AttributeGuard(elem.second));
}
}
@@ -224,7 +224,7 @@ AttributeManager::addVector(const string & name, const Config & config)
LOG(error, "Attribute Vector '%s' has type conflict", name.c_str());
}
} else {
- AttributeMap::iterator found = _attributes.find(name);
+ auto found = _attributes.find(name);
if (found != _attributes.end()) {
const VectorHolder & vh(found->second);
if ( vh.get() &&
diff --git a/searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp b/searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp
index 4aa9ad01f2c..62eaf05a2e3 100644
--- a/searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp
@@ -64,7 +64,7 @@ PostingListMerger<DataT>::merge(PostingVector &v, PostingVector &temp, const Sta
size_t aEnd = startPos[i * 2 + 1];
size_t bStart = aEnd;
size_t bEnd = startPos[i * 2 + 2];
- typename PostingVector::const_iterator it = v.begin();
+ auto it = v.begin();
std::merge(it + aStart, it + aEnd,
it + bStart, it + bEnd,
temp.begin() + aStart);
diff --git a/searchlib/src/vespa/searchlib/attribute/postingchange.cpp b/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
index a49a17470d7..dca79f045a0 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
@@ -25,15 +25,14 @@ struct CompareValue {
void
removeDupAdditions(PostingChange<AttributePosting>::A &additions)
{
- using Iterator = PostingChange<AttributePosting>::A::iterator;
if (additions.empty())
return;
if (additions.size() == 1)
return;
std::sort(additions.begin(), additions.end());
- Iterator i = additions.begin();
- Iterator ie = additions.end();
- Iterator d = i;
+ auto i = additions.begin();
+ auto ie = additions.end();
+ auto d = i;
for (++i; i != ie; ++i, ++d) {
if (d->_key == i->_key)
break;
@@ -53,15 +52,14 @@ removeDupAdditions(PostingChange<AttributePosting>::A &additions)
void
removeDupAdditions(PostingChange<AttributeWeightPosting>::A &additions)
{
- using Iterator = PostingChange<AttributeWeightPosting>::A::iterator;
if (additions.empty())
return;
if (additions.size() == 1u)
return;
std::sort(additions.begin(), additions.end());
- Iterator i = additions.begin();
- Iterator ie = additions.end();
- Iterator d = i;
+ auto i = additions.begin();
+ auto ie = additions.end();
+ auto d = i;
for (++i; i != ie; ++i, ++d) {
if (d->_key == i->_key)
break;
@@ -85,15 +83,14 @@ removeDupAdditions(PostingChange<AttributeWeightPosting>::A &additions)
void
removeDupRemovals(std::vector<uint32_t> &removals)
{
- using Iterator = std::vector<uint32_t>::iterator;
if (removals.empty())
return;
if (removals.size() == 1u)
return;
std::sort(removals.begin(), removals.end());
- Iterator i = removals.begin();
- Iterator ie = removals.end();
- Iterator d = i;
+ auto i = removals.begin();
+ auto ie = removals.end();
+ auto d = i;
for (++i; i != ie; ++i, ++d) {
if (*d == *i)
break;
diff --git a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
index 835aaadc559..444c935b6f8 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
@@ -1184,8 +1184,7 @@ lookupOverflow(uint64_t wordNum) const
assert(!_overflows.empty());
- OverflowVector::const_iterator lb =
- std::lower_bound(_overflows.begin(), _overflows.end(), wordNum);
+ auto lb = std::lower_bound(_overflows.begin(), _overflows.end(), wordNum);
assert(lb != _overflows.end());
assert(lb->_wordNum == wordNum);
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
index ea7fd5ee76c..022bc789e38 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
@@ -32,9 +32,9 @@ BitVectorCache::computeCountVector(KeySet & keys, CountVector & v) const
{
std::shared_lock guard(_mutex);
keySets.resize(_chunks.size());
- Key2Index::const_iterator end(_keys.end());
+ auto end = _keys.end();
for (Key k : keys) {
- Key2Index::const_iterator found = _keys.find(k);
+ auto found = _keys.find(k);
if (found != end) {
const KeyMeta & m = found->second;
keySets[m.chunkId()].insert(m.chunkIndex());
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp b/searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp
index f6dd5a318ae..efdbedd0941 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp
@@ -86,8 +86,7 @@ BitVectorDictionary::lookup(uint64_t wordNum)
{
WordSingleKey key;
key._wordNum = wordNum;
- std::vector<WordSingleKey>::const_iterator itr =
- std::lower_bound(_entries.begin(), _entries.end(), key);
+ auto itr = std::lower_bound(_entries.begin(), _entries.end(), key);
if (itr == _entries.end() || key < *itr) {
return BitVector::UP();
}
diff --git a/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp b/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp
index 90bcaabc7a5..b1757c0e831 100644
--- a/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp
@@ -342,7 +342,7 @@ public:
}
const DiskIndex::LookupResult &
lookup(const vespalib::string & word, uint32_t fieldId) {
- Cache::const_iterator it = _cache.find(word);
+ auto it = _cache.find(word);
if (it == _cache.end()) {
_cache[word] = _diskIndex.lookup(_fieldIds, word);
it = _cache.find(word);
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp b/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
index d01030ee975..20a5a76905f 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
@@ -317,8 +317,6 @@ FieldReaderStripInfo::scan_element_lengths(uint32_t scan_chunk)
void
FieldReaderStripInfo::read()
{
- using Element = search::index::WordDocElementFeatures;
-
for (;;) {
FieldReader::read();
DocIdAndFeatures &features = _docIdAndFeatures;
@@ -328,8 +326,7 @@ FieldReaderStripInfo::read()
assert(!features.has_raw_data());
uint32_t numElements = features.elements().size();
assert(numElements > 0);
- std::vector<Element>::iterator element =
- features.elements().begin();
+ auto element = features.elements().begin();
if (_hasElements) {
if (!_hasElementWeights) {
for (uint32_t elementDone = 0; elementDone < numElements; ++elementDone, ++element) {
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
index 3a1b7928c93..07d31e16f66 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
@@ -179,19 +179,16 @@ Zc4PostingWriter<bigEndian>::flush_word_no_skip()
const uint64_t *features = _featureWriteContext.getComprBuf();
uint64_t featureOffset = 0;
- std::vector<DocIdAndFeatureSize>::const_iterator dit = _docIds.begin();
- std::vector<DocIdAndFeatureSize>::const_iterator dite = _docIds.end();
-
- for (; dit != dite; ++dit) {
- uint32_t docId = dit->_doc_id;
- uint32_t featureSize = dit->_features_size;
+ for (const auto& elem : _docIds) {
+ uint32_t docId = elem._doc_id;
+ uint32_t featureSize = elem._features_size;
e.encodeExpGolomb(docId - baseDocId, docIdK);
baseDocId = docId + 1;
if (_encode_interleaved_features) {
- assert(dit->_field_length > 0);
- e.encodeExpGolomb(dit->_field_length - 1, K_VALUE_ZCPOSTING_FIELD_LENGTH);
- assert(dit->_num_occs > 0);
- e.encodeExpGolomb(dit->_num_occs - 1, K_VALUE_ZCPOSTING_NUM_OCCS);
+ assert(elem._field_length > 0);
+ e.encodeExpGolomb(elem._field_length - 1, K_VALUE_ZCPOSTING_FIELD_LENGTH);
+ assert(elem._num_occs > 0);
+ e.encodeExpGolomb(elem._num_occs - 1, K_VALUE_ZCPOSTING_NUM_OCCS);
}
if (featureSize != 0) {
e.writeBits(features + (featureOffset >> 6),
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp
index 4ed72c2f8c3..8a987c49544 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp
@@ -53,7 +53,7 @@ ZcPosOccRandRead::~ZcPosOccRandRead()
}
-search::queryeval::SearchIterator *
+std::unique_ptr<search::queryeval::SearchIterator>
ZcPosOccRandRead::
createIterator(const PostingListCounts &counts,
const PostingListHandle &handle,
@@ -67,7 +67,7 @@ createIterator(const PostingListCounts &counts,
assert(handle._bitOffsetMem <= handle._bitOffset);
if (handle._bitLength == 0) {
- return new search::queryeval::EmptySearch;
+ return std::make_unique<search::queryeval::EmptySearch>();
}
const char *cmem = static_cast<const char *>(handle._mem);
@@ -80,7 +80,7 @@ createIterator(const PostingListCounts &counts,
handle._bitOffsetMem) & 63;
Position start(mem, bitOffset);
- return create_zc_posocc_iterator(true, counts, start, handle._bitLength, _posting_params, _fieldsParams, matchData).release();
+ return create_zc_posocc_iterator(true, counts, start, handle._bitLength, _posting_params, _fieldsParams, matchData);
}
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.h b/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.h
index f23af15f72d..db7806beadd 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.h
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.h
@@ -33,7 +33,7 @@ public:
* Create iterator for single word. Semantic lifetime of counts and
* handle must exceed lifetime of iterator.
*/
- queryeval::SearchIterator *
+ std::unique_ptr<queryeval::SearchIterator>
createIterator(const PostingListCounts &counts, const PostingListHandle &handle,
const fef::TermFieldMatchDataArray &matchData, bool usebitVector) const override;
diff --git a/searchlib/src/vespa/searchlib/docstore/chunk.cpp b/searchlib/src/vespa/searchlib/docstore/chunk.cpp
index 35166cf8d78..60255af3521 100644
--- a/searchlib/src/vespa/searchlib/docstore/chunk.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/chunk.cpp
@@ -102,17 +102,17 @@ vespalib::ConstBufferRef
Chunk::getLid(uint32_t lid) const
{
vespalib::ConstBufferRef buf;
- for (LidList::const_iterator it(_lids.begin()), mt(_lids.end()); it != mt; it++) {
- if (it->getLid() == lid) {
+ for (const auto& elem : _lids) {
+ if (elem.getLid() == lid) {
#if 1
uint32_t bLid(0), bLen(0);
- vespalib::nbostream is(getData().data() + it->getOffset(), it->size());
+ vespalib::nbostream is(getData().data() + elem.getOffset(), elem.size());
is >> bLid >> bLen;
assert(bLid == lid);
- assert(bLen == it->netSize());
- assert((bLen + 2*sizeof(uint32_t)) == it->size());
+ assert(bLen == elem.netSize());
+ assert((bLen + 2*sizeof(uint32_t)) == elem.size());
#endif
- buf = vespalib::ConstBufferRef(getData().data() + it->getNetOffset(), it->netSize());
+ buf = vespalib::ConstBufferRef(getData().data() + elem.getNetOffset(), elem.netSize());
}
}
return buf;
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
index bde7492f485..7f63cb4c2d4 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
@@ -777,8 +777,7 @@ LogDataStore::preload()
if (!partList.empty()) {
verifyModificationTime(partList);
partList = scanDir(getBaseDir(), ".idx");
- using It = NameIdSet::const_iterator;
- for (It it(partList.begin()), mt(--partList.end()); it != mt; it++) {
+ for (auto it(partList.begin()), mt(--partList.end()); it != mt; it++) {
_fileChunks.push_back(createReadOnlyFile(FileId(_fileChunks.size()), *it));
}
_fileChunks.push_back(isReadOnly()
@@ -824,7 +823,7 @@ LogDataStore::NameIdSet
LogDataStore::findIncompleteCompactedFiles(const NameIdSet & partList) {
NameIdSet incomplete;
if ( !partList.empty()) {
- NameIdSet::const_iterator it = partList.begin();
+ auto it = partList.begin();
for (FileChunk::NameId prev = *it++; it != partList.end(); it++) {
if (prev.next() == *it) {
if (!incomplete.empty() && (*incomplete.rbegin() == prev)) {
@@ -869,15 +868,13 @@ LogDataStore::eraseIncompleteCompactedFiles(NameIdSet partList)
void
LogDataStore::eraseDanglingDatFiles(const NameIdSet &partList, const NameIdSet &datPartList)
{
- using IT = NameIdSet::const_iterator;
-
- IT iib(partList.begin());
- IT ii(iib);
- IT iie(partList.end());
- IT dib(datPartList.begin());
- IT di(dib);
- IT die(datPartList.end());
- IT dirb(die);
+ auto iib = partList.begin();
+ auto ii = iib;
+ auto iie = partList.end();
+ auto dib = datPartList.begin();
+ auto di = dib;
+ auto die = datPartList.end();
+ auto dirb = die;
NameId endMarker(NameId::last());
if (dirb != dib) {
diff --git a/searchlib/src/vespa/searchlib/engine/propertiesmap.cpp b/searchlib/src/vespa/searchlib/engine/propertiesmap.cpp
index 5989f952f17..48c95ba92b9 100644
--- a/searchlib/src/vespa/searchlib/engine/propertiesmap.cpp
+++ b/searchlib/src/vespa/searchlib/engine/propertiesmap.cpp
@@ -26,7 +26,7 @@ PropertiesMap::lookupCreate(vespalib::stringref name)
const fef::Properties &
PropertiesMap::lookup(vespalib::stringref name) const
{
- PropsMap::const_iterator pos = _propertiesMap.find(name);
+ auto pos = _propertiesMap.find(name);
if (pos == _propertiesMap.end()) {
return _emptyProperties;
}
diff --git a/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp b/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
index 16f5ee04be4..254d9d030af 100644
--- a/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
+++ b/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
@@ -115,7 +115,7 @@ DocumentFieldNode::onPrepare(bool preserveAccurateTypes)
if ( !_fieldPath.empty() ) {
bool nestedMultiValue(false);
- for(document::FieldPath::const_iterator it(_fieldPath.begin()), mt(_fieldPath.end()); !nestedMultiValue && (it != mt); it++) {
+ for (auto it(_fieldPath.begin()), mt(_fieldPath.end()); !nestedMultiValue && (it != mt); it++) {
const FieldPathEntry & fpe = **it;
if (fpe.getType() == document::FieldPathEntry::STRUCT_FIELD) {
const FieldValue & fv = fpe.getFieldValueToSet();
diff --git a/searchlib/src/vespa/searchlib/expression/resultvector.h b/searchlib/src/vespa/searchlib/expression/resultvector.h
index 22fac0b214b..0c71f2f79e6 100644
--- a/searchlib/src/vespa/searchlib/expression/resultvector.h
+++ b/searchlib/src/vespa/searchlib/expression/resultvector.h
@@ -174,8 +174,8 @@ size_t
ResultNodeVectorT<B, C, G>::hash() const
{
size_t h(0);
- for(typename Vector::const_iterator it(_result.begin()), mt(_result.end()); it != mt; it++) {
- h ^= it->hash();
+ for(const auto& elem : _result) {
+ h ^= elem.hash();
}
return h;
}
@@ -184,8 +184,8 @@ template <typename B, typename C, typename G>
void
ResultNodeVectorT<B, C, G>::negate()
{
- for(typename Vector::iterator it(_result.begin()), mt(_result.end()); it != mt; it++) {
- it->negate();
+ for (auto& elem : _result) {
+ elem.negate();
}
}
@@ -194,7 +194,7 @@ const ResultNode *
ResultNodeVectorT<B, C, G>::find(const ResultNode & key) const
{
G getter;
- typename Vector::const_iterator found = std::lower_bound(_result.begin(), _result.end(), getter(key), typename C::less() );
+ auto found = std::lower_bound(_result.begin(), _result.end(), getter(key), typename C::less() );
if (found != _result.end()) {
typename C::equal equal;
return equal(*found, getter(key)) ? &(*found) : nullptr;
diff --git a/searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp b/searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp
index 55daf6ed5ff..91aca0a19fe 100644
--- a/searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp
@@ -33,11 +33,8 @@ FieldLengthExecutor::execute(uint32_t docId)
{
uint32_t val = 0;
bool validVal = false;
- for (std::vector<TermFieldHandle>::const_iterator
- hi = _fieldHandles.begin(), hie = _fieldHandles.end();
- hi != hie; ++hi)
- {
- const TermFieldMatchData &tfmd = *_md->resolveTermField(*hi);
+ for (auto handle : _fieldHandles) {
+ const TermFieldMatchData &tfmd = *_md->resolveTermField(handle);
if (tfmd.getDocId() == docId) {
FieldPositionsIterator it = tfmd.getIterator();
if (it.valid()) {
diff --git a/searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp b/searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp
index 166280b289d..bd5d5ca952b 100644
--- a/searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp
@@ -103,7 +103,7 @@ struct State {
Path nextP = firstP;
uint32_t pos = edges[j];
nextP.path.push_back(pos);
- TermIdxMap::const_iterator it = matchedTermForPos.find(pos);
+ auto it = matchedTermForPos.find(pos);
if (it == matchedTermForPos.end()) {
return nextP;
} else {
@@ -158,7 +158,7 @@ struct State {
uint32_t pos = positionsForTerm[tix][0];
assert(pos < posLimit);
- TermIdxMap::const_iterator it = matchedTermForPos.find(pos);
+ auto it = matchedTermForPos.find(pos);
if (it == matchedTermForPos.end()) {
++found;
matchedTermForPos[pos] = tix;
diff --git a/searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp b/searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp
index 5caa2bd577e..d2948ad3185 100644
--- a/searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp
@@ -38,10 +38,8 @@ void
QueryCompletenessExecutor::execute(uint32_t docId)
{
uint32_t hit = 0, miss = 0;
- for (std::vector<search::fef::TermFieldHandle>::iterator it = _fieldHandles.begin();
- it != _fieldHandles.end(); ++it)
- {
- const fef::TermFieldMatchData &tfmd = *_md->resolveTermField(*it);
+ for (const auto& handle : _fieldHandles) {
+ const fef::TermFieldMatchData &tfmd = *_md->resolveTermField(handle);
if (tfmd.getDocId() == docId) {
search::fef::FieldPositionsIterator field = tfmd.getIterator();
while (field.valid() && field.getPosition() < _config.fieldBegin) {
diff --git a/searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp b/searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp
index af7c83cdc9a..dcb1227d6b5 100644
--- a/searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp
+++ b/searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp
@@ -35,7 +35,7 @@ BlueprintFactory::visitDumpFeatures(const IIndexEnvironment &indexEnv,
Blueprint::SP
BlueprintFactory::createBlueprint(const vespalib::string &name) const
{
- BlueprintMap::const_iterator itr = _blueprintMap.find(name);
+ auto itr = _blueprintMap.find(name);
if (itr == _blueprintMap.end()) {
return {};
}
diff --git a/searchlib/src/vespa/searchlib/fef/objectstore.cpp b/searchlib/src/vespa/searchlib/fef/objectstore.cpp
index 4cf185ad55e..c7ef7aa0316 100644
--- a/searchlib/src/vespa/searchlib/fef/objectstore.cpp
+++ b/searchlib/src/vespa/searchlib/fef/objectstore.cpp
@@ -20,7 +20,7 @@ ObjectStore::~ObjectStore()
void
ObjectStore::add(const vespalib::string & key, Anything::UP value)
{
- ObjectMap::iterator found = _objectMap.find(key);
+ auto found = _objectMap.find(key);
if (found != _objectMap.end()) {
delete found->second;
found->second = NULL;
@@ -31,7 +31,7 @@ ObjectStore::add(const vespalib::string & key, Anything::UP value)
const Anything *
ObjectStore::get(const vespalib::string & key) const
{
- ObjectMap::const_iterator found = _objectMap.find(key);
+ auto found = _objectMap.find(key);
return (found != _objectMap.end()) ? found->second : NULL;
}
diff --git a/searchlib/src/vespa/searchlib/fef/properties.cpp b/searchlib/src/vespa/searchlib/fef/properties.cpp
index 70cfe802ad2..2cc4e50b593 100644
--- a/searchlib/src/vespa/searchlib/fef/properties.cpp
+++ b/searchlib/src/vespa/searchlib/fef/properties.cpp
@@ -62,7 +62,7 @@ uint32_t
Properties::count(vespalib::stringref key) const noexcept
{
if (!key.empty()) {
- Map::const_iterator node = _data.find(key);
+ auto node = _data.find(key);
if (node != _data.end()) {
return node->second.size();
}
@@ -74,7 +74,7 @@ Properties &
Properties::remove(vespalib::stringref key)
{
if (!key.empty()) {
- Map::iterator node = _data.find(key);
+ auto node = _data.find(key);
if (node != _data.end()) {
_numValues -= node->second.size();
_data.erase(node);
@@ -86,15 +86,13 @@ Properties::remove(vespalib::stringref key)
Properties &
Properties::import(const Properties &src)
{
- Map::const_iterator itr = src._data.begin();
- Map::const_iterator end = src._data.end();
- for (; itr != end; ++itr) {
- Map::insert_result res = _data.insert(Map::value_type(itr->first, itr->second));
+ for (const auto& elem : src._data) {
+ Map::insert_result res = _data.insert(Map::value_type(elem.first, elem.second));
if ( ! res.second) {
_numValues -= res.first->second.size();
- res.first->second = itr->second;
+ res.first->second = elem.second;
}
- _numValues += itr->second.size();
+ _numValues += elem.second.size();
}
return *this;
}
@@ -124,16 +122,12 @@ uint32_t
Properties::hashCode() const noexcept
{
uint32_t hash = numKeys() + numValues();
- Map::const_iterator itr = _data.begin();
- Map::const_iterator end = _data.end();
- for (; itr != end; ++itr) {
- const Key &key = itr->first;
- const Value &value = itr->second;
- Value::const_iterator v_itr = value.begin();
- Value::const_iterator v_end = value.end();
+ for (const auto& elem : _data) {
+ const Key &key = elem.first;
+ const Value &value = elem.second;
hash += rawHash(key.data(), key.size());
- for (; v_itr != v_end; ++v_itr) {
- hash += rawHash(v_itr->data(), v_itr->size());
+ for (const auto& velem : value) {
+ hash += rawHash(velem.data(), velem.size());
}
}
return hash;
@@ -142,10 +136,8 @@ Properties::hashCode() const noexcept
void
Properties::visitProperties(IPropertiesVisitor &visitor) const
{
- Map::const_iterator itr = _data.begin();
- Map::const_iterator end = _data.end();
- for (; itr != end; ++itr) {
- visitor.visitProperty(itr->first, Property(itr->second));
+ for (const auto& elem : _data) {
+ visitor.visitProperty(elem.first, Property(elem.second));
}
}
@@ -155,15 +147,13 @@ Properties::visitNamespace(vespalib::stringref ns,
{
vespalib::string tmp;
vespalib::string prefix = ns + ".";
- Map::const_iterator itr = _data.begin();
- Map::const_iterator end = _data.end();
- for (; itr != end; ++itr) {
- if ((itr->first.find(prefix) == 0) &&
- (itr->first.size() > prefix.size()))
+ for (const auto& elem : _data) {
+ if ((elem.first.find(prefix) == 0) &&
+ (elem.first.size() > prefix.size()))
{
- tmp = vespalib::stringref(itr->first.data() + prefix.size(),
- itr->first.size() - prefix.size());
- visitor.visitProperty(tmp, Property(itr->second));
+ tmp = vespalib::stringref(elem.first.data() + prefix.size(),
+ elem.first.size() - prefix.size());
+ visitor.visitProperty(tmp, Property(elem.second));
}
}
}
@@ -174,7 +164,7 @@ Properties::lookup(vespalib::stringref key) const noexcept
if (key.empty()) {
return Property();
}
- Map::const_iterator node = _data.find(key);
+ auto node = _data.find(key);
if (node == _data.end()) {
return Property();
}
diff --git a/searchlib/src/vespa/searchlib/fef/tablemanager.cpp b/searchlib/src/vespa/searchlib/fef/tablemanager.cpp
index 6169e712c37..59bc0b5f600 100644
--- a/searchlib/src/vespa/searchlib/fef/tablemanager.cpp
+++ b/searchlib/src/vespa/searchlib/fef/tablemanager.cpp
@@ -12,7 +12,7 @@ const Table *
TableManager::getTable(const vespalib::string & name) const
{
std::lock_guard guard(_lock);
- TableCache::const_iterator itr = _cache.find(name);
+ auto itr = _cache.find(name);
if (itr != _cache.end()) {
return itr->second.get();
}
diff --git a/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp b/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
index 4709c17408a..beebc8b78a0 100644
--- a/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
+++ b/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
@@ -114,15 +114,11 @@ bool
MatchDataBuilder::apply(uint32_t docId)
{
// For each term, do
- for (TermMap::const_iterator term_iter = _match.begin();
- term_iter != _match.end(); ++term_iter)
- {
- uint32_t termId = term_iter->first;
-
- for (FieldPositions::const_iterator field_iter = term_iter->second.begin();
- field_iter != term_iter->second.end(); ++field_iter)
- {
- uint32_t fieldId = field_iter->first;
+ for (const auto& term_elem : _match) {
+ uint32_t termId = term_elem.first;
+
+ for (const auto& field_elem : term_elem.second) {
+ uint32_t fieldId = field_elem.first;
TermFieldMatchData *match = getTermFieldMatchData(termId, fieldId);
// Make sure there is a corresponding term field match data object.
@@ -134,7 +130,7 @@ MatchDataBuilder::apply(uint32_t docId)
// find field data
MyField field;
- IndexData::const_iterator idxItr = _index.find(fieldId);
+ auto idxItr = _index.find(fieldId);
if (idxItr != _index.end()) {
field = idxItr->second;
}
@@ -144,11 +140,8 @@ MatchDataBuilder::apply(uint32_t docId)
vespalib::string name = info != nullptr ? info->name() : vespalib::make_string("%d", fieldId).c_str();
// For each occurence of that term, in that field, do
- for (Positions::const_iterator occ_iter = field_iter->second.begin();
- occ_iter != field_iter->second.end(); occ_iter++)
- {
+ for (const auto& occ : field_elem.second) {
// Append a term match position to the term match data.
- Position occ = *occ_iter;
match->appendPosition(TermFieldMatchDataPosition(
occ.eid,
occ.pos,
diff --git a/searchlib/src/vespa/searchlib/fef/test/rankresult.cpp b/searchlib/src/vespa/searchlib/fef/test/rankresult.cpp
index 4de4c56e3ac..3ac7b857173 100644
--- a/searchlib/src/vespa/searchlib/fef/test/rankresult.cpp
+++ b/searchlib/src/vespa/searchlib/fef/test/rankresult.cpp
@@ -25,7 +25,7 @@ RankResult::addScore(const vespalib::string & featureName, feature_t score)
feature_t
RankResult::getScore(const vespalib::string & featureName) const
{
- RankScores::const_iterator itr = _rankScores.find(featureName);
+ auto itr = _rankScores.find(featureName);
if (itr != _rankScores.end()) {
return itr->second;
}
@@ -43,19 +43,18 @@ RankResult::includes(const RankResult & rhs) const
{
double epsilon = std::max(_epsilon, rhs._epsilon);
- RankScores::const_iterator findItr;
- for (RankScores::const_iterator itr = rhs._rankScores.begin(); itr != rhs._rankScores.end(); ++itr) {
- findItr = _rankScores.find(itr->first);
+ for (const auto& score : rhs._rankScores) {
+ auto findItr = _rankScores.find(score.first);
if (findItr == _rankScores.end()) {
- LOG(info, "Did not find expected feature '%s' in this rank result", itr->first.c_str());
+ LOG(info, "Did not find expected feature '%s' in this rank result", score.first.c_str());
return false;
}
- if (itr->second < findItr->second - epsilon ||
- itr->second > findItr->second + epsilon ||
- (std::isnan(findItr->second) && !std::isnan(itr->second)))
+ if (score.second < findItr->second - epsilon ||
+ score.second > findItr->second + epsilon ||
+ (std::isnan(findItr->second) && !std::isnan(score.second)))
{
- LOG(info, "Feature '%s' did not have expected score.", itr->first.c_str());
- LOG(info, "Expected: %f ~ %f", itr->second, epsilon);
+ LOG(info, "Feature '%s' did not have expected score.", score.first.c_str());
+ LOG(info, "Expected: %f ~ %f", score.second, epsilon);
LOG(info, "Actual : %f", findItr->second);
return false;
}
@@ -73,8 +72,8 @@ RankResult::clear()
std::vector<vespalib::string> &
RankResult::getKeys(std::vector<vespalib::string> &ret)
{
- for (RankScores::const_iterator it = _rankScores.begin(); it != _rankScores.end(); ++it) {
- ret.push_back(it->first);
+ for (const auto& score : _rankScores) {
+ ret.push_back(score.first);
}
return ret;
}
@@ -99,8 +98,8 @@ RankResult::getEpsilon() const {
std::ostream & operator<<(std::ostream & os, const RankResult & rhs) {
os << "[";
- for (RankResult::RankScores::const_iterator itr = rhs._rankScores.begin(); itr != rhs._rankScores.end(); ++itr) {
- os << "['" << itr->first << "' = " << itr->second << "]";
+ for (const auto& score : rhs._rankScores) {
+ os << "['" << score.first << "' = " << score.second << "]";
}
return os << "]";
}
diff --git a/searchlib/src/vespa/searchlib/grouping/collect.h b/searchlib/src/vespa/searchlib/grouping/collect.h
index 34906e90324..198daed2e18 100644
--- a/searchlib/src/vespa/searchlib/grouping/collect.h
+++ b/searchlib/src/vespa/searchlib/grouping/collect.h
@@ -23,7 +23,7 @@ protected:
int diff(0);
size_t aOff(getAggrBase(a));
size_t bOff(getAggrBase(b));
- for(std::vector<SortInfo>::const_iterator it(_sortInfo.begin()), mt(_sortInfo.end()); (diff == 0) && (it != mt); it++) {
+ for (auto it(_sortInfo.begin()), mt(_sortInfo.end()); (diff == 0) && (it != mt); it++) {
diff = _aggregator[it->getIndex()].cmp(&_aggrBacking[aOff], &_aggrBacking[bOff]) * it->getSign();
}
return diff;
diff --git a/searchlib/src/vespa/searchlib/grouping/groupengine.cpp b/searchlib/src/vespa/searchlib/grouping/groupengine.cpp
index 853548b47f7..5039082434b 100644
--- a/searchlib/src/vespa/searchlib/grouping/groupengine.cpp
+++ b/searchlib/src/vespa/searchlib/grouping/groupengine.cpp
@@ -50,7 +50,7 @@ GroupRef GroupEngine::group(Children & children, uint32_t docId, double rank)
throw std::runtime_error("Does not know how to handle failed select statements");
}
const ResultNode &selectResult = *selector.getResult();
- Children::iterator found = children.find(selectResult);
+ auto found = children.find(selectResult);
GroupRef gr;
if (found == children.end()) {
if (_request->allowMoreGroups(children.size())) {
@@ -158,8 +158,8 @@ Group::UP GroupEngine::getGroup(GroupRef ref) const
std::vector<GroupRef> v(ch.size());
{
size_t i(0);
- for (Children::const_iterator it(ch.begin()), mt(ch.end()); it != mt; it++) {
- v[i++] = *it;
+ for (const auto& elem : ch) {
+ v[i++] = elem;
}
}
uint64_t maxN(_nextEngine->_request->getPrecision());
diff --git a/searchlib/src/vespa/searchlib/index/postinglistfile.cpp b/searchlib/src/vespa/searchlib/index/postinglistfile.cpp
index acb1d40e353..7bb724f0fe6 100644
--- a/searchlib/src/vespa/searchlib/index/postinglistfile.cpp
+++ b/searchlib/src/vespa/searchlib/index/postinglistfile.cpp
@@ -3,6 +3,7 @@
#include "postinglistfile.h"
#include "postinglistparams.h"
#include <vespa/fastos/file.h>
+#include <vespa/searchlib/queryeval/searchiterator.h>
namespace search::index {
@@ -94,7 +95,7 @@ PostingListFileRandReadPassThrough::~PostingListFileRandReadPassThrough()
}
}
-search::queryeval::SearchIterator *
+std::unique_ptr<search::queryeval::SearchIterator>
PostingListFileRandReadPassThrough::
createIterator(const PostingListCounts &counts,
const PostingListHandle &handle,
diff --git a/searchlib/src/vespa/searchlib/index/postinglistfile.h b/searchlib/src/vespa/searchlib/index/postinglistfile.h
index 17bfb47b812..93d0dd362f7 100644
--- a/searchlib/src/vespa/searchlib/index/postinglistfile.h
+++ b/searchlib/src/vespa/searchlib/index/postinglistfile.h
@@ -147,7 +147,7 @@ public:
* didn't cover the whole word, probably need access to higher level
* API above caches.
*/
- virtual search::queryeval::SearchIterator *
+ virtual std::unique_ptr<search::queryeval::SearchIterator>
createIterator(const PostingListCounts &counts,
const PostingListHandle &handle,
const search::fef::TermFieldMatchDataArray &matchData,
@@ -194,7 +194,7 @@ public:
PostingListFileRandReadPassThrough(PostingListFileRandRead *lower, bool ownLower);
~PostingListFileRandReadPassThrough();
- search::queryeval::SearchIterator *
+ std::unique_ptr<search::queryeval::SearchIterator>
createIterator(const PostingListCounts &counts,
const PostingListHandle &handle,
const search::fef::TermFieldMatchDataArray &matchData,
diff --git a/searchlib/src/vespa/searchlib/index/postinglisthandle.cpp b/searchlib/src/vespa/searchlib/index/postinglisthandle.cpp
index 82737531d69..c8cccd89207 100644
--- a/searchlib/src/vespa/searchlib/index/postinglisthandle.cpp
+++ b/searchlib/src/vespa/searchlib/index/postinglisthandle.cpp
@@ -2,10 +2,11 @@
#include "postinglisthandle.h"
#include "postinglistfile.h"
+#include <vespa/searchlib/queryeval/searchiterator.h>
namespace search::index {
-search::queryeval::SearchIterator *
+std::unique_ptr<search::queryeval::SearchIterator>
PostingListHandle::createIterator(const PostingListCounts &counts,
const search::fef::TermFieldMatchDataArray &matchData,
bool useBitVector) const
diff --git a/searchlib/src/vespa/searchlib/index/postinglisthandle.h b/searchlib/src/vespa/searchlib/index/postinglisthandle.h
index 9a4ec212636..1f3a72a876f 100644
--- a/searchlib/src/vespa/searchlib/index/postinglisthandle.h
+++ b/searchlib/src/vespa/searchlib/index/postinglisthandle.h
@@ -61,7 +61,7 @@ public:
* didn't cover the whole word, probably need access to higher level
* API above caches.
*/
- search::queryeval::SearchIterator *
+ std::unique_ptr<search::queryeval::SearchIterator>
createIterator(const PostingListCounts &counts,
const search::fef::TermFieldMatchDataArray &matchData,
bool useBitVector=false) const;
diff --git a/searchlib/src/vespa/searchlib/index/postinglistparams.cpp b/searchlib/src/vespa/searchlib/index/postinglistparams.cpp
index 6275399c498..27f2d60d420 100644
--- a/searchlib/src/vespa/searchlib/index/postinglistparams.cpp
+++ b/searchlib/src/vespa/searchlib/index/postinglistparams.cpp
@@ -14,9 +14,7 @@ namespace search::index {
bool
PostingListParams::isSet(const vespalib::string &key) const
{
- Map::const_iterator it;
-
- it = _map.find(key);
+ auto it = _map.find(key);
if (it != _map.end()) {
return true;
}
@@ -33,9 +31,7 @@ PostingListParams::setStr(const vespalib::string &key,
const vespalib::string &
PostingListParams::getStr(const vespalib::string &key) const
{
- Map::const_iterator it;
-
- it = _map.find(key);
+ auto it = _map.find(key);
if (it != _map.end()) {
return it->second;
}
@@ -81,9 +77,7 @@ void
PostingListParams::get(const vespalib::string &key, TYPE &val) const
{
std::istringstream is;
- Map::const_iterator it;
-
- it = _map.find(key);
+ auto it = _map.find(key);
if (it != _map.end()) {
is.str(it->second);
is >> val;
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp
index 25aff06b5ef..206b92c85d0 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp
@@ -138,12 +138,12 @@ FieldInverter::processAnnotations(const StringFieldValue &value)
}
}
std::sort(_terms.begin(), _terms.end());
- SpanTermVector::const_iterator it = _terms.begin();
- SpanTermVector::const_iterator ite = _terms.end();
+ auto it = _terms.begin();
+ auto ite = _terms.end();
uint32_t wordRef;
bool mustStep = false;
for (; it != ite; ) {
- SpanTermVector::const_iterator it_begin = it;
+ auto it_begin = it;
for (; it != ite && it->first == it_begin->first; ++it) {
if (it->second) { // it->second is a const FieldValue *.
wordRef = saveWord(*it->second);
diff --git a/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp b/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
index a2d244250cf..bb44eaa0f3d 100644
--- a/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/create_blueprint_visitor_helper.cpp
@@ -76,12 +76,14 @@ template <typename WS, typename NODE>
void
CreateBlueprintVisitorHelper::createWeightedSet(std::unique_ptr<WS> bp, NODE &n) {
bp->reserve(n.getNumTerms());
+ Blueprint::HitEstimate estimate;
for (size_t i = 0; i < n.getNumTerms(); ++i) {
auto term = n.getAsString(i);
query::SimpleStringTerm node(term.first, n.getView(), 0, term.second); // TODO Temporary
FieldSpec field = bp->getNextChildField(_field);
- bp->addTerm(_searchable.createBlueprint(_requestContext, field, node), term.second.percent());
+ bp->addTerm(_searchable.createBlueprint(_requestContext, field, node), term.second.percent(), estimate);
}
+ bp->complete(estimate);
setResult(std::move(bp));
}
void
diff --git a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
index de5bdc33e3c..3e85ae4d00a 100644
--- a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.cpp
@@ -9,12 +9,10 @@ namespace search::queryeval {
DotProductBlueprint::DotProductBlueprint(const FieldSpec &field)
: ComplexLeafBlueprint(field),
- _estimate(),
_layout(),
_weights(),
_terms()
-{
-}
+{ }
DotProductBlueprint::~DotProductBlueprint() = default;
@@ -32,16 +30,15 @@ DotProductBlueprint::reserve(size_t num_children) {
}
void
-DotProductBlueprint::addTerm(Blueprint::UP term, int32_t weight)
+DotProductBlueprint::addTerm(Blueprint::UP term, int32_t weight, HitEstimate & estimate)
{
HitEstimate childEst = term->getState().estimate();
if (! childEst.empty) {
- if (_estimate.empty) {
- _estimate = childEst;
+ if (estimate.empty) {
+ estimate = childEst;
} else {
- _estimate.estHits += childEst.estHits;
+ estimate.estHits += childEst.estHits;
}
- setEstimate(_estimate);
}
_weights.push_back(weight);
_terms.push_back(std::move(term));
diff --git a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
index 2975958b5af..18770691350 100644
--- a/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/dot_product_blueprint.h
@@ -11,7 +11,6 @@ namespace search::queryeval {
class DotProductBlueprint : public ComplexLeafBlueprint
{
- HitEstimate _estimate;
fef::MatchDataLayout _layout;
std::vector<int32_t> _weights;
std::vector<Blueprint::UP> _terms;
@@ -27,7 +26,10 @@ public:
// used by create visitor
void reserve(size_t num_children);
- void addTerm(Blueprint::UP term, int32_t weight);
+ void addTerm(Blueprint::UP term, int32_t weight, HitEstimate & estimate);
+ void complete(HitEstimate estimate) {
+ setEstimate(estimate);
+ }
SearchIteratorUP createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
SearchIteratorUP createFilterSearch(bool strict, FilterConstraint constraint) const override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp b/searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp
index 5297646d7f8..1ac715ca92d 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp
@@ -21,10 +21,8 @@ void setup_fields(uint32_t window, std::vector<T> &matchers, const TermFieldMatc
for (size_t i = 0; i < in.size(); ++i) {
fields.insert(in[i]->getFieldId());
}
- std::set<uint32_t>::const_iterator pos = fields.begin();
- std::set<uint32_t>::const_iterator end = fields.end();
- for (; pos != end; ++pos) {
- matchers.push_back(T(window, *pos, in));
+ for (const auto& elem : fields) {
+ matchers.push_back(T(window, elem, in));
}
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
index b4b55098eaa..e303e0b16d9 100644
--- a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.cpp
@@ -22,7 +22,6 @@ ParallelWeakAndBlueprint::ParallelWeakAndBlueprint(const FieldSpec &field,
_scoreThreshold(scoreThreshold),
_thresholdBoostFactor(thresholdBoostFactor),
_scoresAdjustFrequency(DEFAULT_PARALLEL_WAND_SCORES_ADJUST_FREQUENCY),
- _estimate(),
_layout(),
_weights(),
_terms()
@@ -40,7 +39,6 @@ ParallelWeakAndBlueprint::ParallelWeakAndBlueprint(const FieldSpec &field,
_scoreThreshold(scoreThreshold),
_thresholdBoostFactor(thresholdBoostFactor),
_scoresAdjustFrequency(scoresAdjustFrequency),
- _estimate(),
_layout(),
_weights(),
_terms()
@@ -62,20 +60,18 @@ ParallelWeakAndBlueprint::reserve(size_t num_children) {
}
void
-ParallelWeakAndBlueprint::addTerm(Blueprint::UP term, int32_t weight)
+ParallelWeakAndBlueprint::addTerm(Blueprint::UP term, int32_t weight, HitEstimate & estimate)
{
HitEstimate childEst = term->getState().estimate();
if (!childEst.empty) {
- if (_estimate.empty) {
- _estimate = childEst;
+ if (estimate.empty) {
+ estimate = childEst;
} else {
- _estimate.estHits += childEst.estHits;
+ estimate.estHits += childEst.estHits;
}
- setEstimate(_estimate);
}
_weights.push_back(weight);
_terms.push_back(std::move(term));
- set_tree_size(_terms.size() + 1);
}
SearchIterator::UP
diff --git a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
index a2c13f12485..cb4d44f4497 100644
--- a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_blueprint.h
@@ -26,7 +26,6 @@ private:
const wand::score_t _scoreThreshold;
double _thresholdBoostFactor;
const uint32_t _scoresAdjustFrequency;
- HitEstimate _estimate;
fef::MatchDataLayout _layout;
std::vector<int32_t> _weights;
std::vector<Blueprint::UP> _terms;
@@ -57,7 +56,11 @@ public:
// Used by create visitor
void reserve(size_t num_children);
- void addTerm(Blueprint::UP term, int32_t weight);
+ void addTerm(Blueprint::UP term, int32_t weight, HitEstimate & estimate);
+ void complete(HitEstimate estimate) {
+ setEstimate(estimate);
+ set_tree_size(_terms.size() + 1);
+ }
SearchIterator::UP createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
std::unique_ptr<SearchIterator> createFilterSearch(bool strict, FilterConstraint constraint) const override;
diff --git a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_search.cpp b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_search.cpp
index 1a7e91b2d1a..8540752e320 100644
--- a/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/wand/parallel_weak_and_search.cpp
@@ -79,12 +79,12 @@ public:
_localScores()
{
}
- virtual size_t get_num_terms() const override { return _terms.size(); }
- virtual int32_t get_term_weight(size_t idx) const override { return _terms.weight(idx); }
- virtual score_t get_max_score(size_t idx) const override { return _terms.maxScore(idx); }
- virtual const MatchParams &getMatchParams() const override { return _matchParams; }
+ size_t get_num_terms() const override { return _terms.size(); }
+ int32_t get_term_weight(size_t idx) const override { return _terms.weight(idx); }
+ score_t get_max_score(size_t idx) const override { return _terms.maxScore(idx); }
+ const MatchParams &getMatchParams() const override { return _matchParams; }
- virtual void doSeek(uint32_t docid) override {
+ void doSeek(uint32_t docid) override {
updateThreshold(_matchParams.scores.getMinScore());
if (IS_STRICT) {
seek_strict(docid);
@@ -92,7 +92,7 @@ public:
seek_unstrict(docid);
}
}
- virtual void doUnpack(uint32_t docid) override {
+ void doUnpack(uint32_t docid) override {
score_t score = _algo.get_full_score(_terms, _heaps, DotProductScorer());
_localScores.push_back(score);
if (_localScores.size() == _matchParams.scoresAdjustFrequency) {
@@ -101,14 +101,14 @@ public:
}
_tfmd.setRawScore(docid, score);
}
- virtual void visitMembers(vespalib::ObjectVisitor &visitor) const override {
+ void visitMembers(vespalib::ObjectVisitor &visitor) const override {
_terms.visit_members(visitor);
}
void initRange(uint32_t begin, uint32_t end) override {
ParallelWeakAndSearch::initRange(begin, end);
_algo.init_range(_terms, _heaps, begin, end);
}
- Trinary is_strict() const override { return IS_STRICT ? Trinary::True : Trinary::False; }
+ Trinary is_strict() const final { return IS_STRICT ? Trinary::True : Trinary::False; }
};
namespace {
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
index ee55a89dcdc..4e06f170253 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
@@ -62,7 +62,6 @@ WeightedSetTermMatchingElementsSearch::initRange(uint32_t begin_id, uint32_t end
WeightedSetTermBlueprint::WeightedSetTermBlueprint(const FieldSpec &field)
: ComplexLeafBlueprint(field),
- _estimate(),
_layout(),
_children_field(field.getName(), field.getFieldId(), _layout.allocTermField(field.getFieldId()), field.isFilter()),
_weights(),
@@ -81,16 +80,15 @@ WeightedSetTermBlueprint::reserve(size_t num_children) {
}
void
-WeightedSetTermBlueprint::addTerm(Blueprint::UP term, int32_t weight)
+WeightedSetTermBlueprint::addTerm(Blueprint::UP term, int32_t weight, HitEstimate & estimate)
{
HitEstimate childEst = term->getState().estimate();
if (! childEst.empty) {
- if (_estimate.empty) {
- _estimate = childEst;
+ if (estimate.empty) {
+ estimate = childEst;
} else {
- _estimate.estHits += childEst.estHits;
+ estimate.estHits += childEst.estHits;
}
- setEstimate(_estimate);
}
_weights.push_back(weight);
_terms.push_back(std::move(term));
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
index 3827dc8a35f..b40ab421890 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
@@ -12,7 +12,6 @@ namespace search::queryeval {
class WeightedSetTermBlueprint : public ComplexLeafBlueprint
{
- HitEstimate _estimate;
fef::MatchDataLayout _layout;
FieldSpec _children_field;
std::vector<int32_t> _weights;
@@ -31,7 +30,10 @@ public:
// used by create visitor
void reserve(size_t num_children);
- void addTerm(Blueprint::UP term, int32_t weight);
+ void addTerm(Blueprint::UP term, int32_t weight, HitEstimate & estimate);
+ void complete(HitEstimate estimate) {
+ setEstimate(estimate);
+ }
SearchIteratorUP createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
SearchIteratorUP createFilterSearch(bool strict, FilterConstraint constraint) const override;
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp
index 95f07bc3191..bb55593f8e3 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp
@@ -32,7 +32,7 @@ public:
_tfmda.add(&_md);
_md.setNeedNormalFeatures(posting.enable_unpack_normal_features());
_md.setNeedInterleavedFeatures(posting.enable_unpack_interleaved_features());
- _itr.reset(posting.createIterator(_tfmda));
+ _itr = posting.createIterator(_tfmda);
}
~IteratorState() {}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp
index 6bba9d96d02..358008f389a 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp
@@ -155,12 +155,8 @@ setupT(const FakeWord &fw)
uint64_t lastL4SkipL3SkipPos = 0;
unsigned int l4SkipCnt = 0;
-
- using FW = FakeWord;
- using DWFL = FW::DocWordFeatureList;
-
- DWFL::const_iterator d(fw._postings.begin());
- DWFL::const_iterator de(fw._postings.end());
+ auto d = fw._postings.begin();
+ auto de = fw._postings.end();
if (d != de) {
// Prefix support needs counts embedded in posting list
@@ -721,23 +717,16 @@ FakeFilterOccEGCompressed64ArrayIterator<bigEndian>::doUnpack(uint32_t docId)
}
-search::queryeval::SearchIterator *
+std::unique_ptr<search::queryeval::SearchIterator>
FakeEGCompr64FilterOcc::
createIterator(const fef::TermFieldMatchDataArray &matchData) const
{
const uint64_t *arr = _compressed.first;
- if (_bigEndian)
- return new FakeFilterOccEGCompressed64ArrayIterator<true>(arr,
- 0,
- _hitDocs,
- _lastDocId,
- matchData);
- else
- return new FakeFilterOccEGCompressed64ArrayIterator<false>(arr,
- 0,
- _hitDocs,
- _lastDocId,
- matchData);
+ if (_bigEndian) {
+ return std::make_unique<FakeFilterOccEGCompressed64ArrayIterator<true>>(arr, 0, _hitDocs, _lastDocId, matchData);
+ } else {
+ return std::make_unique<FakeFilterOccEGCompressed64ArrayIterator<false>>(arr, 0, _hitDocs, _lastDocId, matchData);
+ }
}
@@ -770,7 +759,7 @@ class FakeEGCompr64SkipFilterOcc : public FakeEGCompr64FilterOcc
public:
FakeEGCompr64SkipFilterOcc(const FakeWord &fw);
~FakeEGCompr64SkipFilterOcc();
- search::queryeval::SearchIterator *createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<search::queryeval::SearchIterator> createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
};
@@ -1455,7 +1444,7 @@ FakeFilterOccEGCompressed64SkipArrayIterator<doSkip>::doUnpack(uint32_t docId)
template <bool doSkip>
-search::queryeval::SearchIterator *
+std::unique_ptr<search::queryeval::SearchIterator>
FakeEGCompr64SkipFilterOcc<doSkip>::
createIterator(const fef::TermFieldMatchDataArray &matchData) const
{
@@ -1482,15 +1471,16 @@ createIterator(const fef::TermFieldMatchDataArray &matchData) const
const uint64_t *l2SkipArr = _l2SkipCompressed.first;
const uint64_t *l3SkipArr = _l3SkipCompressed.first;
const uint64_t *l4SkipArr = _l4SkipCompressed.first;
- return new FakeFilterOccEGCompressed64SkipArrayIterator<doSkip>(docIdBits.getCompr(),
- docIdBits.getBitOffset(),
- _lastDocId,
- l1SkipArr, 0,
- l2SkipArr, 0,
- l3SkipArr, 0,
- l4SkipArr, 0,
- getName(),
- matchData);
+ return std::make_unique<FakeFilterOccEGCompressed64SkipArrayIterator<doSkip>>
+ (docIdBits.getCompr(),
+ docIdBits.getBitOffset(),
+ _lastDocId,
+ l1SkipArr, 0,
+ l2SkipArr, 0,
+ l3SkipArr, 0,
+ l4SkipArr, 0,
+ getName(),
+ matchData);
}
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.h b/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.h
index 6e398a2f0b0..2ef91c70921 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.h
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.h
@@ -58,7 +58,7 @@ public:
int lowLevelSinglePostingScanUnpack() const override;
int lowLevelAndPairPostingScan(const FakePosting &rhs) const override;
int lowLevelAndPairPostingScanUnpack(const FakePosting &rhs) const override;
- queryeval::SearchIterator *createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<queryeval::SearchIterator> createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
};
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp
index 5bd1d2044fe..54710a85a04 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp
@@ -21,27 +21,18 @@ FakeFilterOcc::FakeFilterOcc(const FakeWord &fw)
{
std::vector<uint32_t> fake;
- using FW = FakeWord;
- using DWFL = FW::DocWordFeatureList;
-
- DWFL::const_iterator d(fw._postings.begin());
- DWFL::const_iterator de(fw._postings.end());
-
- while (d != de) {
- fake.push_back(d->_docId);
- ++d;
+ for (const auto& elem : fw._postings) {
+ fake.push_back(elem._docId);
}
std::swap(_uncompressed, fake);
_docIdLimit = fw._docIdLimit;
_hitDocs = fw._postings.size();
}
-
FakeFilterOcc::~FakeFilterOcc()
{
}
-
void
FakeFilterOcc::forceLink()
{
@@ -185,13 +176,11 @@ FakeFilterOccArrayIterator::doUnpack(uint32_t docId)
}
-search::queryeval::SearchIterator *
+std::unique_ptr<search::queryeval::SearchIterator>
FakeFilterOcc::
createIterator(const fef::TermFieldMatchDataArray &matchData) const
{
- return new FakeFilterOccArrayIterator(&*_uncompressed.begin(),
- &*_uncompressed.end(),
- matchData);
+ return std::make_unique<FakeFilterOccArrayIterator>(&*_uncompressed.begin(), &*_uncompressed.end(), matchData);
}
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.h b/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.h
index ed0855cfb87..c05dc9db342 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.h
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.h
@@ -30,7 +30,7 @@ public:
int lowLevelSinglePostingScanUnpack() const override;
int lowLevelAndPairPostingScan(const FakePosting &rhs) const override;
int lowLevelAndPairPostingScanUnpack(const FakePosting &rhs) const override;
- queryeval::SearchIterator * createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<queryeval::SearchIterator> createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
};
} // namespace fakedata
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
index ec476c4f6cf..48820b58a7c 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
@@ -129,14 +129,14 @@ lowLevelAndPairPostingScanUnpack(const FakePosting &rhs) const
}
-search::queryeval::SearchIterator *
+std::unique_ptr<search::queryeval::SearchIterator>
FakeMemTreeOcc::
createIterator(const fef::TermFieldMatchDataArray &matchData) const
{
return memoryindex::make_search_iterator<false>(_tree.begin(_allocator),
_mgr._featureStore,
_packedIndex,
- matchData).release();
+ matchData);
}
@@ -228,10 +228,9 @@ FakeMemTreeOccMgr::remove(uint32_t wordIdx, uint32_t docId)
void
FakeMemTreeOccMgr::sortUnflushed()
{
- using I = std::vector<PendingOp>::iterator;
uint32_t seq = 0;
- for (I i(_unflushed.begin()), ie(_unflushed.end()); i != ie; ++i) {
- i->setSeq(++seq);
+ for (auto& elem : _unflushed) {
+ elem.setSeq(++seq);
}
std::sort(_unflushed.begin(), _unflushed.end());
}
@@ -241,16 +240,15 @@ void
FakeMemTreeOccMgr::flush()
{
using Aligner = FeatureStore::Aligner;
- using I = std::vector<PendingOp>::iterator;
if (_unflushed.empty())
return;
uint32_t lastWord = std::numeric_limits<uint32_t>::max();
sortUnflushed();
- for (I i(_unflushed.begin()), ie(_unflushed.end()); i != ie; ++i) {
- uint32_t wordIdx = i->getWordIdx();
- uint32_t docId = i->getDocId();
+ for (auto& elem : _unflushed) {
+ uint32_t wordIdx = elem.getWordIdx();
+ uint32_t docId = elem.getDocId();
PostingIdx &pidx(*_postingIdxs[wordIdx].get());
Tree &tree = pidx._tree;
Tree::Iterator &itr = pidx._iterator;
@@ -261,7 +259,7 @@ FakeMemTreeOccMgr::flush()
itr.linearSeek(docId);
}
lastWord = wordIdx;
- if (i->getRemove()) {
+ if (elem.getRemove()) {
if (itr.valid() && itr.getKey() == docId) {
uint64_t bits = _featureStore.bitSize(fw->getPackedIndex(), EntryRef(itr.getData().get_features_relaxed()));
_featureSizes[wordIdx] -= Aligner::align((bits + 7) / 8) * 8;
@@ -269,7 +267,7 @@ FakeMemTreeOccMgr::flush()
}
} else {
if (!itr.valid() || docId < itr.getKey()) {
- tree.insert(itr, docId, PostingListEntryType(i->getFeatureRef(), 0, 1));
+ tree.insert(itr, docId, PostingListEntryType(elem.getFeatureRef(), 0, 1));
}
}
}
@@ -320,13 +318,12 @@ FakeMemTreeOccFactory::~FakeMemTreeOccFactory()
FakePosting::SP
FakeMemTreeOccFactory::make(const FakeWord &fw)
{
- std::map<const FakeWord *, uint32_t>::const_iterator
- i(_mgr._fw2WordIdx.find(&fw));
+ auto itr = _mgr._fw2WordIdx.find(&fw);
- if (i == _mgr._fw2WordIdx.end())
+ if (itr == _mgr._fw2WordIdx.end())
LOG_ABORT("should not be reached");
- uint32_t wordIdx = i->second;
+ uint32_t wordIdx = itr->second;
assert(_mgr._postingIdxs.size() > wordIdx);
@@ -341,8 +338,8 @@ FakeMemTreeOccFactory::setup(const std::vector<const FakeWord *> &fws)
using PostingIdx = FakeMemTreeOccMgr::PostingIdx;
std::vector<FakeWord::RandomizedReader> r;
uint32_t wordIdx = 0;
- std::vector<const FakeWord *>::const_iterator fwi(fws.begin());
- std::vector<const FakeWord *>::const_iterator fwe(fws.end());
+ auto fwi = fws.begin();
+ auto fwe = fws.end();
while (fwi != fwe) {
_mgr._fakeWords.push_back(*fwi);
_mgr._featureSizes.push_back(0);
@@ -355,8 +352,8 @@ FakeMemTreeOccFactory::setup(const std::vector<const FakeWord *> &fws)
}
PostingPriorityQueueMerger<FakeWord::RandomizedReader, FakeWord::RandomizedWriter> heap;
- std::vector<FakeWord::RandomizedReader>::iterator i(r.begin());
- std::vector<FakeWord::RandomizedReader>::iterator ie(r.end());
+ auto i = r.begin();
+ auto ie = r.end();
FlushToken flush_token;
while (i != ie) {
i->read();
@@ -386,8 +383,7 @@ FakeMemTreeOcc2Factory::~FakeMemTreeOcc2Factory() = default;
FakePosting::SP
FakeMemTreeOcc2Factory::make(const FakeWord &fw)
{
- std::map<const FakeWord *, uint32_t>::const_iterator
- i(_mgr._fw2WordIdx.find(&fw));
+ auto i = _mgr._fw2WordIdx.find(&fw);
if (i == _mgr._fw2WordIdx.end())
LOG_ABORT("should not be reached");
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h
index ea8699d94b2..28698e29cf9 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h
@@ -176,7 +176,7 @@ public:
int lowLevelSinglePostingScanUnpack() const override;
int lowLevelAndPairPostingScan(const FakePosting &rhs) const override;
int lowLevelAndPairPostingScanUnpack(const FakePosting &rhs) const override;
- queryeval::SearchIterator *createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<queryeval::SearchIterator> createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
};
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h b/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h
index 7e32fcc31ad..56a54b2cf85 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h
@@ -72,7 +72,7 @@ public:
/*
* Iterator factory, for current query evaluation framework.
*/
- virtual search::queryeval::SearchIterator *
+ virtual std::unique_ptr<search::queryeval::SearchIterator>
createIterator(const fef::TermFieldMatchDataArray &matchData) const = 0;
const std::string &getName() const
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
index f68fb4a9037..4242f71bd60 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
@@ -246,11 +246,11 @@ FakeWord::fakeup(search::BitVector &bitmap,
}
uint32_t field_len = 0;
do {
- DocWordPosFeatureList::iterator ie(wpf.end());
- DocWordPosFeatureList::iterator i(wpf.begin());
+ auto ie = wpf.end();
+ auto i = wpf.begin();
while (i != ie) {
uint32_t lastwordpos = i->_wordPos;
- DocWordPosFeatureList::iterator pi(i);
+ auto pi = i;
++i;
while (i != ie &&
pi->_elementId == i->_elementId) {
@@ -287,11 +287,8 @@ FakeWord::fakeup(search::BitVector &bitmap,
dwf._accPositions = wordPosFeatures.size();
assert(dwf._positions == wpf.size());
postings.push_back(dwf);
- DocWordPosFeatureList::iterator ie(wpf.end());
- DocWordPosFeatureList::iterator i(wpf.begin());
- while (i != ie) {
- wordPosFeatures.push_back(*i);
- ++i;
+ for (const auto& elem : wpf) {
+ wordPosFeatures.push_back(elem);
}
++idx;
if (idx >= docIdLimit)
@@ -318,12 +315,11 @@ FakeWord::fakeupTemps(vespalib::Rand48 &rnd,
void
FakeWord::setupRandomizer(vespalib::Rand48 &rnd)
{
- using DWFL = DocWordFeatureList;
Randomizer randomAdd;
Randomizer randomRem;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
int32_t ref = 0;
while (d != de) {
@@ -338,8 +334,8 @@ FakeWord::setupRandomizer(vespalib::Rand48 &rnd)
++ref;
}
- DWFL::const_iterator ed(_extraPostings.begin());
- DWFL::const_iterator ede(_extraPostings.end());
+ auto ed = _extraPostings.begin();
+ auto ede = _extraPostings.end();
int32_t eref = -1;
uint32_t tref = 0;
@@ -378,9 +374,8 @@ FakeWord::setupRandomizer(vespalib::Rand48 &rnd)
void
FakeWord::addDocIdBias(uint32_t docIdBias)
{
- using DWFL = DocWordFeatureList;
- DWFL::iterator d(_postings.begin());
- DWFL::iterator de(_postings.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
for (; d != de; ++d) {
d->_docId += docIdBias;
}
@@ -404,14 +399,12 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator,
iterator->initFullRange();
uint32_t docId = 0;
- using DWFL = DocWordFeatureList;
- using DWPFL = DocWordPosFeatureList;
using TMDPI = TermFieldMatchData::PositionsIterator;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DWPFL::const_iterator p(_wordPosFeatures.begin());
- DWPFL::const_iterator pe(_wordPosFeatures.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto p = _wordPosFeatures.begin();
+ auto pe = _wordPosFeatures.end();
if (verbose)
printf("Start validate word '%s'\n", _name.c_str());
@@ -484,14 +477,12 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator,
iterator->initFullRange();
uint32_t docId = 1;
- using DWFL = DocWordFeatureList;
- using DWPFL = DocWordPosFeatureList;
using TMDPI = TermFieldMatchData::PositionsIterator;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DWPFL::const_iterator p(_wordPosFeatures.begin());
- DWPFL::const_iterator pe(_wordPosFeatures.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto p = _wordPosFeatures.begin();
+ auto pe = _wordPosFeatures.end();
if (verbose)
printf("Start validate word '%s'\n", _name.c_str());
@@ -556,10 +547,8 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator, bool verbose) co
iterator->initFullRange();
uint32_t docId = 1;
- using DWFL = DocWordFeatureList;
-
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
if (verbose)
printf("Start validate word '%s'\n", _name.c_str());
@@ -599,14 +588,12 @@ FakeWord::validate(FieldReader &fieldReader,
uint32_t presidue;
bool unpres;
- using DWFL = DocWordFeatureList;
- using DWPFL = DocWordPosFeatureList;
using TMDPI = TermFieldMatchData::PositionsIterator;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DWPFL::const_iterator p(_wordPosFeatures.begin());
- DWPFL::const_iterator pe(_wordPosFeatures.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto p = _wordPosFeatures.begin();
+ auto pe = _wordPosFeatures.end();
if (verbose)
printf("Start validate word '%s'\n", _name.c_str());
@@ -633,13 +620,8 @@ FakeWord::validate(FieldReader &fieldReader,
#else
(void) unpres;
- using Elements = WordDocElementFeatures;
- using Positions = WordDocElementWordPosFeatures;
-
- std::vector<Elements>::const_iterator element =
- features.elements().begin();
- std::vector<Positions>::const_iterator position =
- features.word_positions().begin();
+ auto element = features.elements().begin();
+ auto position = features.word_positions().begin();
TermFieldMatchData *tfmd = matchData[0];
assert(tfmd != 0);
@@ -701,12 +683,10 @@ FakeWord::validate(FieldReader &fieldReader,
void
FakeWord::validate(const std::vector<uint32_t> &docIds) const
{
- using DWFL = DocWordFeatureList;
- using DL = std::vector<uint32_t>;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DL::const_iterator di(docIds.begin());
- DL::const_iterator die(docIds.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto di = docIds.begin();
+ auto die = docIds.end();
while (d != de) {
assert(di != die);
@@ -721,9 +701,8 @@ FakeWord::validate(const std::vector<uint32_t> &docIds) const
void
FakeWord::validate(const search::BitVector &bv) const
{
- using DWFL = DocWordFeatureList;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
uint32_t bitHits = bv.countTrueBits();
assert(bitHits == _postings.size());
(void) bitHits;
@@ -745,13 +724,10 @@ FakeWord::dump(FieldWriter &fieldWriter,
uint32_t residue;
DocIdAndPosOccFeatures features;
- using DWFL = DocWordFeatureList;
- using DWPFL = DocWordPosFeatureList;
-
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DWPFL::const_iterator p(_wordPosFeatures.begin());
- DWPFL::const_iterator pe(_wordPosFeatures.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto p = _wordPosFeatures.begin();
+ auto pe = _wordPosFeatures.end();
if (verbose)
printf("Start dumping word '%s'\n", _name.c_str());
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp
index 3caca05669c..2f9714f1638 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp
@@ -64,12 +64,8 @@ FakeZcbFilterOcc::FakeZcbFilterOcc(const FakeWord &fw)
std::vector<uint8_t> bytes;
uint32_t lastDocId = 0u;
-
- using FW = FakeWord;
- using DWFL = FW::DocWordFeatureList;
-
- DWFL::const_iterator d(fw._postings.begin());
- DWFL::const_iterator de(fw._postings.end());
+ auto d = fw._postings.begin();
+ auto de = fw._postings.end();
while (d != de) {
if (lastDocId == 0u) {
@@ -241,14 +237,12 @@ FakeFilterOccZCBArrayIterator::doUnpack(uint32_t docId)
}
-search::queryeval::SearchIterator *
+std::unique_ptr<search::queryeval::SearchIterator>
FakeZcbFilterOcc::
createIterator(const fef::TermFieldMatchDataArray &matchData) const
{
const uint8_t *arr = &*_compressed.begin();
- return new FakeFilterOccZCBArrayIterator(arr,
- _hitDocs,
- matchData);
+ return std::make_unique<FakeFilterOccZCBArrayIterator>(arr, _hitDocs, matchData);
}
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.h b/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.h
index 87d25cb6761..599b9c83d76 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.h
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.h
@@ -29,7 +29,7 @@ public:
int lowLevelSinglePostingScanUnpack() const override;
int lowLevelAndPairPostingScan(const FakePosting &rhs) const override;
int lowLevelAndPairPostingScanUnpack(const FakePosting &rhs) const override;
- queryeval::SearchIterator *createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<queryeval::SearchIterator> createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
};
} // namespace fakedata
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp
index 24c2f82279e..dc2791fa4f6 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp
@@ -149,14 +149,10 @@ FakeZcFilterOcc::setupT(const FakeWord &fw)
PostingListCounts counts;
Zc4PostingWriter<bigEndian> writer(counts);
- using FW = FakeWord;
- using DWFL = FW::DocWordFeatureList;
- using DWPFL = FW::DocWordPosFeatureList;
-
- DWFL::const_iterator d(fw._postings.begin());
- DWFL::const_iterator de(fw._postings.end());
- DWPFL::const_iterator p(fw._wordPosFeatures.begin());
- DWPFL::const_iterator pe(fw._wordPosFeatures.end());
+ auto d = fw._postings.begin();
+ auto de = fw._postings.end();
+ auto p = fw._wordPosFeatures.begin();
+ auto pe = fw._wordPosFeatures.end();
DocIdAndPosOccFeatures features;
EGPosOccEncodeContext<bigEndian> f1(&_fieldsParams);
EG2PosOccEncodeContext<bigEndian> f0(&_fieldsParams);
@@ -506,11 +502,11 @@ FakeFilterOccZCArrayIterator::doUnpack(uint32_t docId)
}
-SearchIterator *
+std::unique_ptr<SearchIterator>
FakeZcFilterOcc::
createIterator(const TermFieldMatchDataArray &matchData) const
{
- return new FakeFilterOccZCArrayIterator(_compressed.first, 0, _posting_params._doc_id_limit, matchData);
+ return std::make_unique<FakeFilterOccZCArrayIterator>(_compressed.first, 0, _posting_params._doc_id_limit, matchData);
}
class FakeZcSkipFilterOcc : public FakeZcFilterOcc
@@ -520,7 +516,7 @@ public:
FakeZcSkipFilterOcc(const FakeWord &fw);
~FakeZcSkipFilterOcc() override;
- SearchIterator *createIterator(const TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<SearchIterator> createIterator(const TermFieldMatchDataArray &matchData) const override;
};
static FPFactoryInit
@@ -538,10 +534,10 @@ FakeZcSkipFilterOcc::FakeZcSkipFilterOcc(const FakeWord &fw)
FakeZcSkipFilterOcc::~FakeZcSkipFilterOcc() = default;
-SearchIterator *
+std::unique_ptr<SearchIterator>
FakeZcSkipFilterOcc::createIterator(const TermFieldMatchDataArray &matchData) const
{
- return create_zc_posocc_iterator(true, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData).release();
+ return create_zc_posocc_iterator(true, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData);
}
@@ -554,7 +550,7 @@ public:
~FakeEGCompr64PosOcc() override;
size_t bitSize() const override;
bool hasWordPositions() const override;
- SearchIterator *createIterator(const TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<SearchIterator> createIterator(const TermFieldMatchDataArray &matchData) const override;
};
@@ -591,11 +587,11 @@ FakeEGCompr64PosOcc<bigEndian>::hasWordPositions() const
template <bool bigEndian>
-SearchIterator *
+std::unique_ptr<SearchIterator>
FakeEGCompr64PosOcc<bigEndian>::
createIterator(const TermFieldMatchDataArray &matchData) const
{
- return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData).release();
+ return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData);
}
@@ -608,7 +604,7 @@ public:
~FakeEG2Compr64PosOcc() override;
size_t bitSize() const override;
bool hasWordPositions() const override;
- SearchIterator *createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<SearchIterator> createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
};
@@ -646,11 +642,11 @@ FakeEG2Compr64PosOcc<bigEndian>::hasWordPositions() const
template <bool bigEndian>
-SearchIterator *
+std::unique_ptr<SearchIterator>
FakeEG2Compr64PosOcc<bigEndian>::
createIterator(const TermFieldMatchDataArray &matchData) const
{
- return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData).release();
+ return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData);
}
@@ -664,7 +660,7 @@ public:
size_t bitSize() const override;
bool hasWordPositions() const override;
- SearchIterator *createIterator(const TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<SearchIterator> createIterator(const TermFieldMatchDataArray &matchData) const override;
};
@@ -703,11 +699,11 @@ FakeZcSkipPosOcc<bigEndian>::hasWordPositions() const
template <bool bigEndian>
-SearchIterator *
+std::unique_ptr<SearchIterator>
FakeZcSkipPosOcc<bigEndian>::
createIterator(const TermFieldMatchDataArray &matchData) const
{
- return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData).release();
+ return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData);
}
@@ -724,7 +720,7 @@ public:
~FakeZc4SkipPosOcc() override;
size_t bitSize() const override;
bool hasWordPositions() const override;
- SearchIterator *createIterator(const TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<SearchIterator> createIterator(const TermFieldMatchDataArray &matchData) const override;
bool enable_unpack_normal_features() const override { return _unpack_normal_features; }
bool enable_unpack_interleaved_features() const override { return _unpack_interleaved_features; }
};
@@ -770,7 +766,7 @@ FakeZc4SkipPosOcc<bigEndian>::hasWordPositions() const
template <bool bigEndian>
-SearchIterator *
+std::unique_ptr<SearchIterator>
FakeZc4SkipPosOcc<bigEndian>::
createIterator(const TermFieldMatchDataArray &matchData) const
{
@@ -781,7 +777,7 @@ createIterator(const TermFieldMatchDataArray &matchData) const
assert(!_unpack_normal_features);
assert(!_unpack_interleaved_features);
}
-return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData).release();
+ return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData);
}
template <bool bigEndian>
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h
index bcdd780e1e4..7d0670f993b 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h
@@ -65,7 +65,7 @@ public:
int lowLevelSinglePostingScanUnpack() const override;
int lowLevelAndPairPostingScan(const FakePosting &rhs) const override;
int lowLevelAndPairPostingScanUnpack(const FakePosting &rhs) const override;
- queryeval::SearchIterator *createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
+ std::unique_ptr<queryeval::SearchIterator> createIterator(const fef::TermFieldMatchDataArray &matchData) const override;
};
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp
index d705541b3fc..150d8a3af32 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp
@@ -49,7 +49,7 @@ getFPFactory(const std::string &name, const Schema &schema)
if (fpFactoryMap == nullptr)
return nullptr;
- FPFactoryMap::const_iterator i(fpFactoryMap->find(name));
+ auto i = fpFactoryMap->find(name);
if (i != fpFactoryMap->end())
return i->second(schema);
@@ -64,10 +64,9 @@ getPostingTypes()
std::vector<std::string> res;
if (fpFactoryMap != nullptr)
- for (FPFactoryMap::const_iterator i(fpFactoryMap->begin());
- i != fpFactoryMap->end();
- ++i)
- res.push_back(i->first);
+ for (const auto& elem : *fpFactoryMap) {
+ res.push_back(elem.first);
+ }
return res;
}
diff --git a/searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp b/searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp
index 933e14fe379..4197dee6cba 100644
--- a/searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp
+++ b/searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp
@@ -21,17 +21,13 @@ MockAttributeContext::getAttributeStableEnum(const string &name) const {
}
void
MockAttributeContext::getAttributeList(std::vector<const IAttributeVector *> & list) const {
- Map::const_iterator pos = _vectors.begin();
- Map::const_iterator end = _vectors.end();
- for (; pos != end; ++pos) {
- list.push_back(pos->second);
+ for (const auto& elem : _vectors) {
+ list.push_back(elem.second);
}
}
MockAttributeContext::~MockAttributeContext() {
- Map::iterator pos = _vectors.begin();
- Map::iterator end = _vectors.end();
- for (; pos != end; ++pos) {
- delete pos->second;
+ for (auto& elem : _vectors) {
+ delete elem.second;
}
}
diff --git a/searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp b/searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp
index bc5e8356957..15cb3065b75 100644
--- a/searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp
+++ b/searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp
@@ -6,7 +6,7 @@ namespace search::attribute::test {
AttributeVector::SP
MockAttributeManager::findAttribute(const vespalib::string &name) const {
- AttributeMap::const_iterator itr = _attributes.find(name);
+ auto itr = _attributes.find(name);
if (itr != _attributes.end()) {
return itr->second;
}
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
index 8916a4cf0b5..9ec186f92c0 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
@@ -164,7 +164,7 @@ Session *
TransLogClient::findSession(const vespalib::string & domainName, int sessionId)
{
SessionKey key(domainName, sessionId);
- SessionMap::iterator found(_sessions.find(key));
+ auto found = _sessions.find(key);
Session * session((found != _sessions.end()) ? found->second : nullptr);
return session;
}
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
index db02f4f037e..ac9c6318fb5 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
@@ -519,8 +519,8 @@ TransLogServer::listDomains(FRT_RPCRequest *req)
vespalib::string domains;
ReadGuard domainGuard(_domainMutex);
- for(DomainList::const_iterator it(_domains.begin()), mt(_domains.end()); it != mt; it++) {
- domains += it->second->name();
+ for (const auto& elem : _domains) {
+ domains += elem.second->name();
domains += "\n";
}
ret.AddInt32(0);
diff --git a/searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp b/searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp
index 7f4733d8a70..b7fc9cfab05 100644
--- a/searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp
+++ b/searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp
@@ -10,7 +10,6 @@ template <class Reader>
void
PostingPriorityQueue<Reader>::adjust()
{
- using VIT = typename Vector::iterator;
if (!_vec.front().get()->isValid()) {
_vec.erase(_vec.begin()); // Iterator no longer valid
return;
@@ -19,9 +18,9 @@ PostingPriorityQueue<Reader>::adjust()
return;
}
// Peform binary search to find first element higher than changed value
- VIT gt = std::upper_bound(_vec.begin() + 1, _vec.end(), _vec.front());
- VIT to = _vec.begin();
- VIT from = to;
+ auto gt = std::upper_bound(_vec.begin() + 1, _vec.end(), _vec.front());
+ auto to = _vec.begin();
+ auto from = to;
++from;
Ref changed = *to; // Remember changed value
while (from != gt) { // Shift elements to make space for changed value
diff --git a/socket_test/pom.xml b/socket_test/pom.xml
index f68b04d01c8..50965f05cdf 100644
--- a/socket_test/pom.xml
+++ b/socket_test/pom.xml
@@ -22,8 +22,8 @@
<artifactId>maven-compiler-plugin</artifactId>
</plugin>
<plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
- <version>2.4</version>
<configuration>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
diff --git a/streamingvisitors/src/vespa/searchvisitor/rankmanager.h b/streamingvisitors/src/vespa/searchvisitor/rankmanager.h
index 71910c65242..54414f80512 100644
--- a/streamingvisitors/src/vespa/searchvisitor/rankmanager.h
+++ b/streamingvisitors/src/vespa/searchvisitor/rankmanager.h
@@ -44,7 +44,7 @@ public:
bool initRankSetup(const search::fef::BlueprintFactory & factory);
bool setup(const RankManager & manager);
int getIndex(const vespalib::string & key) const {
- Map::const_iterator found(_rpmap.find(key));
+ auto found = _rpmap.find(key);
return (found != _rpmap.end()) ? found->second : 0;
}
@@ -60,7 +60,7 @@ public:
return _indexEnv[getIndex(rankProfile)];
}
const View *getView(const vespalib::string & index) const {
- ViewMap::const_iterator itr = _views.find(index);
+ auto itr = _views.find(index);
if (itr != _views.end()) {
return &itr->second;
}
diff --git a/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp b/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
index 92e1ff0e460..78d72102fe9 100644
--- a/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
@@ -81,10 +81,8 @@ RankProcessor::initQueryEnvironment()
vespalib::string expandedIndexName = vsm::FieldSearchSpecMap::stripNonFields(term.getTerm()->index());
const RankManager::View *view = _rankManagerSnapshot->getView(expandedIndexName);
if (view != nullptr) {
- RankManager::View::const_iterator iter = view->begin();
- RankManager::View::const_iterator endp = view->end();
- for (; iter != endp; ++iter) {
- qtd.getTermData().addField(*iter).setHandle(_mdLayout.allocTermField(*iter));
+ for (auto field_id : *view) {
+ qtd.getTermData().addField(field_id).setHandle(_mdLayout.allocTermField(field_id));
}
} else {
LOG(warning, "Could not find a view for index '%s'. Ranking no fields.",
diff --git a/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp b/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
index 75e07615bd9..2119364c2bc 100644
--- a/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
@@ -131,10 +131,10 @@ SearchEnvironment::getEnv(const vespalib::string & searchCluster)
std::lock_guard guard(_lock);
_threadLocals.emplace_back(std::move(envMap));
}
- EnvMap::iterator localFound = _localEnvMap->find(searchCluster);
+ auto localFound = _localEnvMap->find(searchCluster);
if (localFound == _localEnvMap->end()) {
std::lock_guard guard(_lock);
- EnvMap::iterator found = _envMap.find(searchCluster);
+ auto found = _envMap.find(searchCluster);
if (found == _envMap.end()) {
LOG(debug, "Init VSMAdapter with config id = '%s'", searchCluster.c_str());
Env::SP env = std::make_shared<Env>(searchClusterUri, _wordFolder, _transport, _file_distributor_connection_spec);
diff --git a/streamingvisitors/src/vespa/vsm/common/document.cpp b/streamingvisitors/src/vespa/vsm/common/document.cpp
index a345c82ce2d..167a54a75ea 100644
--- a/streamingvisitors/src/vespa/vsm/common/document.cpp
+++ b/streamingvisitors/src/vespa/vsm/common/document.cpp
@@ -23,8 +23,8 @@ vespalib::asciistream & operator << (vespalib::asciistream & os, const FieldRef
vespalib::asciistream & operator << (vespalib::asciistream & os, const StringFieldIdTMap & f)
{
- for (StringFieldIdTMapT::const_iterator it=f._map.begin(), mt=f._map.end(); it != mt; it++) {
- os << it->first << " = " << it->second << '\n';
+ for (const auto& elem : f._map) {
+ os << elem.first << " = " << elem.second << '\n';
}
return os;
}
@@ -49,7 +49,7 @@ void StringFieldIdTMap::add(const vespalib::string & s)
FieldIdT StringFieldIdTMap::fieldNo(const vespalib::string & fName) const
{
- StringFieldIdTMapT::const_iterator found = _map.find(fName);
+ auto found = _map.find(fName);
FieldIdT fNo((found != _map.end()) ? found->second : npos);
return fNo;
}
diff --git a/streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp b/streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp
index 7886c44b2e0..71b48495f5e 100644
--- a/streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp
+++ b/streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp
@@ -43,7 +43,7 @@ void DocumentTypeMapping::init(const vespalib::string & defaultDocumentType,
bool DocumentTypeMapping::prepareBaseDoc(SharedFieldPathMap & map) const
{
- FieldPathMapMapT::const_iterator found = _fieldMap.find(_defaultDocumentTypeName);
+ auto found = _fieldMap.find(_defaultDocumentTypeName);
if (found != _fieldMap.end()) {
map = std::make_shared<FieldPathMapT>(found->second);
LOG(debug, "Found FieldPathMap for default document type '%s' with %zd elements",
@@ -64,8 +64,8 @@ void DocumentTypeMapping::buildFieldMap(
docTypePtr->getName().c_str(), fieldList.size(), typeId.c_str());
const document::DocumentType & docType = *docTypePtr;
size_t highestFNo(0);
- for (StringFieldIdTMapT::const_iterator it = fieldList.begin(), mt = fieldList.end(); it != mt; it++) {
- highestFNo = std::max(highestFNo, size_t(it->second));
+ for (const auto& elem : fieldList) {
+ highestFNo = std::max(highestFNo, size_t(elem.second));
}
highestFNo++;
FieldPathMapT & fieldMap = _fieldMap[typeId];
@@ -73,20 +73,20 @@ void DocumentTypeMapping::buildFieldMap(
fieldMap.resize(highestFNo);
size_t validCount(0);
- for (StringFieldIdTMapT::const_iterator it = fieldList.begin(), mt = fieldList.end(); it != mt; it++) {
- vespalib::string fname = it->first;
- LOG(debug, "Handling %s -> %d", fname.c_str(), it->second);
+ for (const auto& elem : fieldList) {
+ vespalib::string fname = elem.first;
+ LOG(debug, "Handling %s -> %d", fname.c_str(), elem.second);
try {
- if ((it->first[0] != '[') && (it->first != "summaryfeatures") && (it->first != "rankfeatures") && (it->first != "ranklog") && (it->first != "sddocname") && (it->first != "documentid")) {
+ if ((elem.first[0] != '[') && (elem.first != "summaryfeatures") && (elem.first != "rankfeatures") && (elem.first != "ranklog") && (elem.first != "sddocname") && (elem.first != "documentid")) {
FieldPath fieldPath;
docType.buildFieldPath(fieldPath, fname);
- fieldMap[it->second] = std::move(fieldPath);
+ fieldMap[elem.second] = std::move(fieldPath);
validCount++;
- LOG(spam, "Found %s -> %d in document", fname.c_str(), it->second);
+ LOG(spam, "Found %s -> %d in document", fname.c_str(), elem.second);
}
} catch (const std::exception & e) {
LOG(debug, "Could not get field info for '%s' in documenttype '%s' (id = '%s') : %s",
- it->first.c_str(), docType.getName().c_str(), typeId.c_str(), e.what());
+ elem.first.c_str(), docType.getName().c_str(), typeId.c_str(), e.what());
}
}
_documentTypeFreq.insert(std::make_pair(validCount, docTypePtr));
diff --git a/streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp b/streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp
index b39afd83b5a..93a071deade 100644
--- a/streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp
+++ b/streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp
@@ -14,9 +14,9 @@ FieldModifierMap::~FieldModifierMap() { }
FieldModifier *
FieldModifierMap::getModifier(FieldIdT fId) const
{
- FieldModifierMapT::const_iterator itr = _map.find(fId);
+ auto itr = _map.find(fId);
if (itr == _map.end()) {
- return NULL;
+ return nullptr;
}
return itr->second.get();
}
diff --git a/streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp b/streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp
index 3495d46b85b..ee1b3f79aed 100644
--- a/streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp
+++ b/streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp
@@ -32,12 +32,11 @@ UTF8SuffixStringFieldSearcher::matchTerms(const FieldRef & f, const size_t mints
++srcbuf;
}
srcbuf = tokenize(srcbuf, _buf->capacity(), dstbuf, tokenlen);
- for (QueryTermList::iterator it = _qtl.begin(), mt = _qtl.end(); it != mt; ++it) {
- QueryTerm & qt = **it;
+ for (auto qt : _qtl) {
const cmptype_t * term;
- termsize_t tsz = qt.term(term);
+ termsize_t tsz = qt->term(term);
if (matchTermSuffix(term, tsz, dstbuf, tokenlen)) {
- addHit(qt, words);
+ addHit(*qt, words);
}
}
words++;
diff --git a/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp b/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
index f6ac3a6c88a..5f0be889621 100644
--- a/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
+++ b/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
@@ -193,7 +193,7 @@ bool FieldSearchSpecMap::buildFieldsInQuery(const Query & query, StringFieldIdTM
const IndexFieldMapT & fim = dtm.second;
vespalib::string rawIndex(term->index());
vespalib::string index(stripNonFields(rawIndex));
- IndexFieldMapT::const_iterator fIt = fim.find(index);
+ auto fIt = fim.find(index);
if (fIt != fim.end()) {
for(FieldIdT fid : fIt->second) {
const FieldSearchSpec & spec = specMap().find(fid)->second;
@@ -286,7 +286,7 @@ FieldSearchSpecMap::reconfigFromQuery(const Query & query)
for (const auto & termA : qtl) {
for (const auto & ifm : documentTypeMap()) {
- IndexFieldMapT::const_iterator itc = ifm.second.find(termA->index());
+ auto itc = ifm.second.find(termA->index());
if (itc != ifm.second.end()) {
for (FieldIdT fid : itc->second) {
FieldSearchSpec & spec = _specMap.find(fid)->second;
diff --git a/streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp b/streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp
index 2d2d3f24bc6..5d29ca993f2 100644
--- a/streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp
+++ b/streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp
@@ -18,7 +18,7 @@ namespace {
void
addIfNotPresent(FieldQueryTermMap & map, vsm::FieldIdT fId, QueryTerm * qt)
{
- FieldQueryTermMap::iterator itr = map.find(fId);
+ auto itr = map.find(fId);
if (itr != map.end()) {
QueryTermList & qtl = itr->second;
if (std::find(qtl.begin(), qtl.end(), qt) == qtl.end()) {
@@ -108,16 +108,14 @@ SnippetModifierManager::setup(const QueryTermList& queryTerms,
FieldQueryTermMap fqtm;
// setup modifiers
- for (QueryTermList::const_iterator i = queryTerms.begin(); i != queryTerms.end(); ++i) {
- QueryTerm * qt = *i;
- IndexFieldMapT::const_iterator j = indexMap.find(qt->index());
- if (j != indexMap.end()) {
- for (FieldIdTList::const_iterator k = j->second.begin(); k != j->second.end(); ++k) {
- FieldIdT fId = *k;
+ for (auto qt : queryTerms) {
+ auto itr = indexMap.find(qt->index());
+ if (itr != indexMap.end()) {
+ for (auto fId : itr->second) {
const FieldSearchSpec & spec = specMap.find(fId)->second;
if (spec.searcher().substring() || qt->isSubstring()) { // we need a modifier for this field id
addIfNotPresent(fqtm, fId, qt);
- if (_modifiers.getModifier(fId) == NULL) {
+ if (_modifiers.getModifier(fId) == nullptr) {
LOG(debug, "Create snippet modifier for field id '%u'", fId);
UTF8SubstringSnippetModifier::SP searcher
(new UTF8SubstringSnippetModifier(fId, _searchModifyBuf, _searchOffsetBuf));
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/BucketDistribution.java b/vdslib/src/main/java/com/yahoo/vdslib/BucketDistribution.java
index c49fdb93d20..ba17b947bb8 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/BucketDistribution.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/BucketDistribution.java
@@ -4,6 +4,7 @@ package com.yahoo.vdslib;
import com.yahoo.document.BucketId;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -14,16 +15,16 @@ import java.util.logging.Logger;
public class BucketDistribution {
// A logger object to enable proper logging.
- private static Logger log = Logger.getLogger(BucketDistribution.class.getName());
+ private static final Logger log = Logger.getLogger(BucketDistribution.class.getName());
// A map from bucket id to column index.
- private int[] bucketToColumn;
+ private final int[] bucketToColumn;
// The number of columns to distribute to.
private int numColumns;
// The number of bits to use for bucket identification.
- private int numBucketBits;
+ private final int numBucketBits;
/**
* Constructs a new bucket distribution object with a given number of columns and buckets.
@@ -68,7 +69,7 @@ public class BucketDistribution {
* @return The bucket distribution.
*/
private static List<Integer> getBucketCount(int numColumns, int numBucketBits) {
- List<Integer> ret = new ArrayList<Integer>(numColumns);
+ List<Integer> ret = new ArrayList<>(numColumns);
int cnt = getNumBuckets(numBucketBits) / numColumns;
int rst = getNumBuckets(numBucketBits) % numColumns;
for (int i = 0; i < numColumns; ++i) {
@@ -100,9 +101,7 @@ public class BucketDistribution {
* that it all buckets point to that single column.
*/
public void reset() {
- for (int i = 0; i < bucketToColumn.length; ++i) {
- bucketToColumn[i] = 0;
- }
+ Arrays.fill(bucketToColumn, 0);
numColumns = 1;
}
@@ -152,32 +151,6 @@ public class BucketDistribution {
}
/**
- * Sets the number of buckets to use for this document distribution object. This will reset and setup this object
- * from scratch. The original number of columns is maintained.
- *
- * @param numBucketBits The new number of bits to use for bucket id.
- */
- public synchronized void setNumBucketBits(int numBucketBits) {
- if (numBucketBits == this.numBucketBits) {
- return;
- }
- this.numBucketBits = numBucketBits;
- bucketToColumn = new int[getNumBuckets(numBucketBits)];
- int numColumns = this.numColumns;
- reset();
- setNumColumns(numColumns);
- }
-
- /**
- * Returns the number of bits used for bucket identifiers.
- *
- * @return The number of bits.
- */
- public int getNumBucketBits() {
- return numBucketBits;
- }
-
- /**
* Returns the number of buckets available using the configured number of bucket bits.
*
* @return The number of buckets.
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/DocumentSummary.java b/vdslib/src/main/java/com/yahoo/vdslib/DocumentSummary.java
index ab5fe0d9a86..4371e19d090 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/DocumentSummary.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/DocumentSummary.java
@@ -5,7 +5,8 @@ import com.yahoo.vespa.objects.BufferSerializer;
import com.yahoo.vespa.objects.Deserializer;
import java.nio.ByteOrder;
-import java.io.UnsupportedEncodingException;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
public class DocumentSummary {
@@ -27,23 +28,14 @@ public class DocumentSummary {
int summarySize = buf.getInt(null);
int end = start;
while (cArr[end++] != 0);
- try {
- byte [] sb = new byte [summarySize];
- System.arraycopy(cArr, end, sb, 0, summarySize);
- summaries[i] = new Summary(new String(cArr, start, end-start-1, "utf-8"), sb);
- start = end + summarySize;
- } catch (UnsupportedEncodingException e) {
- throw new RuntimeException("UTF-8 apparently not supported");
- }
+ byte [] sb = new byte [summarySize];
+ System.arraycopy(cArr, end, sb, 0, summarySize);
+ summaries[i] = new Summary(new String(cArr, start, end-start-1, UTF_8), sb);
+ start = end + summarySize;
}
}
}
- /** Constructs a new message from a byte buffer. */
- public DocumentSummary(byte[] buffer) {
- this(BufferSerializer.wrap(buffer));
- }
-
final public int getSummaryCount() { return summaries.length; }
final public Summary getSummary(int hitNo) { return summaries[hitNo]; }
@@ -63,7 +55,6 @@ public class DocumentSummary {
final public String getDocId() { return docId; }
final public byte [] getSummary() { return summary; }
- final public void setSummary(byte [] summary) { this.summary = summary; }
public int compareTo(Summary s) {
return getDocId().compareTo(s.getDocId());
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/MetaEntry.java b/vdslib/src/main/java/com/yahoo/vdslib/MetaEntry.java
deleted file mode 100644
index a05f746fe48..00000000000
--- a/vdslib/src/main/java/com/yahoo/vdslib/MetaEntry.java
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vdslib;
-
-import com.yahoo.io.GrowableByteBuffer;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-
-public class MetaEntry {
- public static int REMOVE_ENTRY = 1;
- public static int BODY_STRIPPED = 2;
- public static int BODY_IN_HEADER = 4;
- public static int UPDATE_ENTRY = 8;
- public static int COMPRESSED = 16;
-
- public static int SIZE = 32;
-
- public long timestamp = 0;
- public int headerPos = 0;
- public int headerLen = 0;
- public int bodyPos = 0;
- public int bodyLen = 0;
- public byte flags = 0;
-
- public MetaEntry() {
- }
-
- public MetaEntry(byte[] buffer, int position) {
- ByteBuffer buf = ByteBuffer.wrap(buffer, position, SIZE);
- buf.order(ByteOrder.LITTLE_ENDIAN);
-
- timestamp = buf.getLong();
- headerPos = buf.getInt();
- headerLen = buf.getInt();
- bodyPos = buf.getInt();
- bodyLen = buf.getInt();
- flags = buf.get();
- }
-
- public void serialize(GrowableByteBuffer buf) {
- ByteOrder originalOrder = buf.order();
- buf.order(ByteOrder.LITTLE_ENDIAN);
- buf.putLong(timestamp); // 8
- buf.putInt(headerPos); // 12
- buf.putInt(headerLen); // 16
- buf.putInt(bodyPos); // 20
- buf.putInt(bodyLen); // 24
- buf.putInt(flags); // 28 (written as little-endian int, this is on purpose)
- buf.putInt(0); // 32
- buf.order(originalOrder);
- }
-}
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/SearchResult.java b/vdslib/src/main/java/com/yahoo/vdslib/SearchResult.java
index b7c9b1b71b5..c89abf87970 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/SearchResult.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/SearchResult.java
@@ -1,19 +1,22 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vdslib;
+
import com.yahoo.data.access.helpers.MatchFeatureData;
import com.yahoo.vespa.objects.BufferSerializer;
import com.yahoo.vespa.objects.Deserializer;
-import java.io.UnsupportedEncodingException;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Map;
import java.util.Optional;
import java.util.TreeMap;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
public class SearchResult {
+
public static class Hit implements Comparable<Hit> {
- private String docId;
+ private final String docId;
private double rank;
private MatchFeatureData.HitValue matchFeatures;
public Hit(Hit h) {
@@ -40,7 +43,7 @@ public class SearchResult {
}
}
public static class HitWithSortBlob extends Hit {
- private byte [] sortBlob;
+ private final byte [] sortBlob;
public HitWithSortBlob(Hit h, byte [] sb) {
super(h);
sortBlob = sb;
@@ -57,12 +60,12 @@ public class SearchResult {
return sortBlob.length - b.sortBlob.length;
}
}
- private int totalHits;
- private Hit[] hits;
- private TreeMap<Integer, byte []> aggregatorList;
- private TreeMap<Integer, byte []> groupingList;
- private static int EXTENSION_FLAGS_PRESENT = -1;
- private static int MATCH_FEATURES_PRESENT_MASK = 1;
+ private final int totalHits;
+ private final Hit[] hits;
+ private final TreeMap<Integer, byte []> aggregatorList;
+ private final TreeMap<Integer, byte []> groupingList;
+ private static final int EXTENSION_FLAGS_PRESENT = -1;
+ private static final int MATCH_FEATURES_PRESENT_MASK = 1;
public SearchResult(Deserializer buf) {
BufferSerializer bser = (BufferSerializer) buf; // TODO: dirty cast. must do this differently
@@ -76,17 +79,13 @@ public class SearchResult {
}
hits = new Hit[numHits];
if (numHits != 0) {
- int docIdBufferLength = buf.getInt(null);
+ int docIdBufferLength = buf.getInt(null); // Unused, but need to call getInt() to advance buffer
byte[] cArr = bser.getBuf().array();
int start = bser.getBuf().arrayOffset() + bser.position();
for(int i=0; i < numHits; i++) {
int end = start;
while (cArr[end++] != 0);
- try {
- hits[i] = new Hit(new String(cArr, start, end-start-1, "utf-8"), 0);
- } catch (UnsupportedEncodingException e) {
- throw new RuntimeException("UTF-8 apparently not supported");
- }
+ hits[i] = new Hit(new String(cArr, start, end-start-1, UTF_8), 0);
start = end;
}
bser.position(start - bser.getBuf().arrayOffset());
@@ -104,8 +103,9 @@ public class SearchResult {
hits[i] = new HitWithSortBlob(hits[i], buf.getBytes(null, size[i]));
}
+ // Unused, but need to call getInt() to advance buffer
int numAggregators = buf.getInt(null);
- aggregatorList = new TreeMap<Integer, byte []>();
+ aggregatorList = new TreeMap<>();
for (int i = 0; i < numAggregators; i++) {
int aggrId = buf.getInt(null);
int aggrLength = buf.getInt(null);
@@ -113,7 +113,7 @@ public class SearchResult {
}
int numGroupings = buf.getInt(null);
- groupingList = new TreeMap<Integer, byte []>();
+ groupingList = new TreeMap<>();
for (int i = 0; i < numGroupings; i++) {
int aggrId = buf.getInt(null);
int aggrLength = buf.getInt(null);
@@ -159,18 +159,8 @@ public class SearchResult {
return featureType == 0;
}
- /**
- * Constructs a new message from a byte buffer.
- *
- * @param buffer A byte buffer that contains a serialized message.
- */
- public SearchResult(byte[] buffer) {
- this(BufferSerializer.wrap(buffer));
- }
-
final public int getHitCount() { return hits.length; }
final public int getTotalHitCount() { return (totalHits != 0) ? totalHits : getHitCount(); }
final public Hit getHit(int hitNo) { return hits[hitNo]; }
- final public Map<Integer, byte []> getAggregatorList() { return aggregatorList; }
final public Map<Integer, byte []> getGroupingList() { return groupingList; }
}
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/distribution/ConfiguredNode.java b/vdslib/src/main/java/com/yahoo/vdslib/distribution/ConfiguredNode.java
index 92cf8b025e9..965dd018c4f 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/distribution/ConfiguredNode.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/distribution/ConfiguredNode.java
@@ -7,31 +7,28 @@ package com.yahoo.vdslib.distribution;
*
* @author bratseth
*/
-public class ConfiguredNode implements Comparable<ConfiguredNode> {
+public record ConfiguredNode(int index, boolean retired) implements Comparable<ConfiguredNode> {
- private final int index;
-
- private final boolean retired;
-
- public ConfiguredNode(int index, boolean retired) {
- this.index = index;
- this.retired = retired;
- }
-
- /** Return the index (distribution key) of this node */
- public int index() { return index; }
+ /**
+ * Return the index (distribution key) of this node
+ */
+ @Override
+ public int index() {return index;}
- /** Returns whether the node is configured to be retired */
- public boolean retired() { return retired; }
+ /**
+ * Returns whether the node is configured to be retired
+ */
+ @Override
+ public boolean retired() {return retired;}
@Override
- public int hashCode() { return index; }
+ public int hashCode() {return index;}
@Override
public boolean equals(Object other) {
if (other == this) return true;
- if ( ! (other instanceof ConfiguredNode)) return false;
- return ((ConfiguredNode)other).index == this.index;
+ if (! (other instanceof ConfiguredNode)) return false;
+ return ((ConfiguredNode) other).index == this.index;
}
@Override
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/distribution/Distribution.java b/vdslib/src/main/java/com/yahoo/vdslib/distribution/Distribution.java
index a83e2a4f89c..bfa7e919514 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/distribution/Distribution.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/distribution/Distribution.java
@@ -26,14 +26,7 @@ import java.util.concurrent.atomic.AtomicReference;
public class Distribution {
- private static class Config {
- Config(Group nodeGraph, int redundancy) {
- this.nodeGraph = nodeGraph;
- this.redundancy = redundancy;
- }
-
- private final Group nodeGraph;
- private final int redundancy;
+ private record Config(Group nodeGraph, int redundancy) {
}
private ConfigSubscriber configSub;
@@ -197,8 +190,8 @@ public class Distribution {
}
private static class ScoredGroup implements Comparable<ScoredGroup> {
- Group group;
- double score;
+ final Group group;
+ final double score;
ScoredGroup(Group g, double score) { this.group = g; this.score = score; }
@@ -266,8 +259,8 @@ public class Distribution {
}
private static class ResultGroup implements Comparable<ResultGroup> {
- Group group;
- int redundancy;
+ final Group group;
+ final int redundancy;
ResultGroup(Group group, int redundancy) {
this.group = group;
@@ -489,14 +482,11 @@ public class Distribution {
public Set<ConfiguredNode> getNodes() {
final Set<ConfiguredNode> nodes = new HashSet<>();
- GroupVisitor visitor = new GroupVisitor() {
- @Override
- public boolean visitGroup(Group g) {
- if (g.isLeafGroup()) {
- nodes.addAll(g.getNodes());
- }
- return true;
+ GroupVisitor visitor = g -> {
+ if (g.isLeafGroup()) {
+ nodes.addAll(g.getNodes());
}
+ return true;
};
visitGroups(visitor);
return nodes;
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/distribution/Group.java b/vdslib/src/main/java/com/yahoo/vdslib/distribution/Group.java
index b926ee3be8d..c1c2eef5c8f 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/distribution/Group.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/distribution/Group.java
@@ -9,13 +9,13 @@ import java.text.ParseException;
*/
public class Group implements Comparable<Group> {
- private String name;
+ private final String name;
private Group parent = null;
- private int index;
+ private final int index;
private int distributionHash;
- private Distribution distribution = null;
+ private final Distribution distribution;
private double capacity;
- private Map<Integer, Group> subgroups;
+ private final Map<Integer, Group> subgroups;
private List<ConfiguredNode> nodes;
public Group(int index, String name) {
@@ -63,8 +63,7 @@ public class Group implements Comparable<Group> {
@Override
public boolean equals(Object o) {
if (o == this) return true;
- if ( ! (o instanceof Group)) { return false; }
- Group other = (Group) o;
+ if ( ! (o instanceof Group other)) { return false; }
if ( ! name.equals(other.name)
|| index != other.index
|| (distribution == null ^ other.distribution == null)
@@ -210,7 +209,7 @@ public class Group implements Comparable<Group> {
for (int i=0; i<distributionSpec.length; ++i) {
String token = st.nextToken();
try{
- distributionSpec[i] = (token.equals("*") ? 0 : Integer.valueOf(token));
+ distributionSpec[i] = (token.equals("*") ? 0 : Integer.parseInt(token));
} catch (NumberFormatException e) {
throw new ParseException("Illegal distribution spec \"" + serialized + "\". Copy counts must be integer values in the range 1-255.", i);
}
@@ -243,9 +242,9 @@ public class Group implements Comparable<Group> {
int asterixCount = distributionSpec.length - firstAsterix;
int[][] preCalculations = new int[maxRedundancy + 1][];
for (int i=1; i<=maxRedundancy; ++i) {
- List<Integer> spec = new ArrayList<Integer>();
- for (int j=0; j<distributionSpec.length; ++j) {
- spec.add(distributionSpec[j]);
+ List<Integer> spec = new ArrayList<>();
+ for (int k : distributionSpec) {
+ spec.add(k);
}
int remainingRedundancy = i;
for (int j=0; j<firstAsterix; ++j) {
@@ -277,8 +276,7 @@ public class Group implements Comparable<Group> {
@Override
public boolean equals(Object o) {
if (o == this) return true;
- if ( ! (o instanceof Distribution)) return false;
- Distribution other = (Distribution) o;
+ if ( ! (o instanceof Distribution other)) return false;
return (distributionSpec == other.distributionSpec && preCalculatedResults.length == other.preCalculatedResults.length);
}
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/distribution/GroupVisitor.java b/vdslib/src/main/java/com/yahoo/vdslib/distribution/GroupVisitor.java
index df5a6e5a9d1..1108ce7507d 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/distribution/GroupVisitor.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/distribution/GroupVisitor.java
@@ -3,6 +3,6 @@ package com.yahoo.vdslib.distribution;
public interface GroupVisitor {
- public boolean visitGroup(Group g);
+ boolean visitGroup(Group g);
}
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java b/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java
index 4bf305e65e0..30a209b6754 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java
@@ -24,7 +24,7 @@ public class ClusterState implements Cloneable {
/**
* Maintains a bitset where all non-down nodes have a bit set. All nodes that differ from defaultUp
- * and defaultDown are store explicit in a hash map.
+ * and defaultDown are stored explicitly in a hash map.
*/
private static class Nodes {
private int logicalNodeCount;
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/state/Diff.java b/vdslib/src/main/java/com/yahoo/vdslib/state/Diff.java
index f4eb9ff8dde..8a4bedddaeb 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/state/Diff.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/state/Diff.java
@@ -8,8 +8,9 @@ import java.util.List;
* TODO: document this
*/
public class Diff {
+
public static class Entry {
- String id;
+ final String id;
// Values set for entries that contain diff themselves
String preContent;
String postContent;
@@ -32,22 +33,22 @@ public class Diff {
public Entry bold() { bold = true; return this; }
public Entry splitLine() { splitLine = true; return this; }
}
- private List<Entry> diff = new LinkedList<Entry>();
+ private final List<Entry> diff = new LinkedList<>();
public void add(Entry e) { diff.add(e); }
public boolean differs() { return (!diff.isEmpty()); }
- class PrintProperties {
+ static class PrintProperties {
boolean insertLineBreaks = false;
- boolean ommitGroupForSingleEntries = true;
+ final boolean ommitGroupForSingleEntries = true;
String lineBreak = "\n";
- String entrySeparator = ", ";
- String idValueSeparator = ": ";
+ final String entrySeparator = ", ";
+ final String idValueSeparator = ": ";
String keyValueSeparator = " => ";
- String singleGroupSeparator = "";
- String groupStart = "[";
- String groupStop = "]";
+ final String singleGroupSeparator = "";
+ final String groupStart = "[";
+ final String groupStop = "]";
String indent = " ";
String boldStart = "";
String boldStop = "";
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/BucketDistributionTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/BucketDistributionTestCase.java
index 7257bf0cc7f..59b5a7ae55a 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/BucketDistributionTestCase.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/BucketDistributionTestCase.java
@@ -25,7 +25,7 @@ public class BucketDistributionTestCase {
BucketDistribution bd = new BucketDistribution(NUM_COLUMNS, numBucketBits);
for (int i = 0; i < bd.getNumBuckets(); ++i) {
if (i % 32 == 0) {
- System.out.println("");
+ System.out.println();
System.out.print(" ");
}
System.out.print(bd.getColumn(new BucketId(16, i)));
@@ -37,7 +37,7 @@ public class BucketDistributionTestCase {
if (numBucketBits < MAX_BUCKETBITS) {
System.out.print(",");
}
- System.out.println("");
+ System.out.println();
}
System.out.println(" };");
}
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/SearchResultTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/SearchResultTestCase.java
index 3f3e8fd0f8b..b675798b374 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/SearchResultTestCase.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/SearchResultTestCase.java
@@ -17,12 +17,12 @@ public class SearchResultTestCase {
SearchResult.Hit b = new SearchResult.Hit("b", 0.1);
SearchResult.Hit c = new SearchResult.Hit("c", 1.0);
SearchResult.Hit bb = new SearchResult.Hit("b2", 0.1);
- assertTrue(a.compareTo(a) == 0);
+ assertEquals(0, a.compareTo(a));
assertTrue(a.compareTo(b) > 0);
assertTrue(a.compareTo(c) > 0);
assertTrue(b.compareTo(a) < 0);
- assertTrue(b.compareTo(bb) == 0);
- assertTrue(bb.compareTo(b) == 0);
+ assertEquals(0, b.compareTo(bb));
+ assertEquals(0, bb.compareTo(b));
assertTrue(b.compareTo(c) > 0);
assertTrue(c.compareTo(a) < 0);
assertTrue(c.compareTo(b) < 0);
@@ -47,7 +47,7 @@ public class SearchResultTestCase {
SearchResult.Hit h5 = new SearchResult.HitWithSortBlob(a, b5);
SearchResult.Hit h6 = new SearchResult.HitWithSortBlob(a, b6);
- assertTrue(h1.compareTo(h1) == 0);
+ assertEquals(0, h1.compareTo(h1));
assertTrue(h1.compareTo(h2) < 0);
assertTrue(h1.compareTo(h3) < 0);
assertTrue(h1.compareTo(h4) < 0);
@@ -55,7 +55,7 @@ public class SearchResultTestCase {
assertTrue(h1.compareTo(h6) < 0);
assertTrue(h2.compareTo(h1) > 0);
- assertTrue(h2.compareTo(h2) == 0);
+ assertEquals(0, h2.compareTo(h2));
assertTrue(h2.compareTo(h3) < 0);
assertTrue(h2.compareTo(h4) < 0);
assertTrue(h2.compareTo(h5) < 0);
@@ -63,7 +63,7 @@ public class SearchResultTestCase {
assertTrue(h3.compareTo(h1) > 0);
assertTrue(h3.compareTo(h2) > 0);
- assertTrue(h3.compareTo(h3) == 0);
+ assertEquals(0, h3.compareTo(h3));
assertTrue(h3.compareTo(h4) < 0);
assertTrue(h3.compareTo(h5) < 0);
assertTrue(h3.compareTo(h6) < 0);
@@ -71,7 +71,7 @@ public class SearchResultTestCase {
assertTrue(h4.compareTo(h1) > 0);
assertTrue(h4.compareTo(h2) > 0);
assertTrue(h4.compareTo(h3) > 0);
- assertTrue(h4.compareTo(h4) == 0);
+ assertEquals(0, h4.compareTo(h4));
assertTrue(h4.compareTo(h5) < 0);
assertTrue(h4.compareTo(h6) < 0);
@@ -79,7 +79,7 @@ public class SearchResultTestCase {
assertTrue(h5.compareTo(h2) > 0);
assertTrue(h5.compareTo(h3) > 0);
assertTrue(h5.compareTo(h4) > 0);
- assertTrue(h5.compareTo(h5) == 0);
+ assertEquals(0, h5.compareTo(h5));
assertTrue(h5.compareTo(h6) < 0);
assertTrue(h6.compareTo(h1) > 0);
@@ -87,6 +87,6 @@ public class SearchResultTestCase {
assertTrue(h6.compareTo(h3) > 0);
assertTrue(h6.compareTo(h4) > 0);
assertTrue(h6.compareTo(h5) > 0);
- assertTrue(h6.compareTo(h6) == 0);
+ assertEquals(0, h6.compareTo(h6));
}
}
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/distribution/CrossPlatformTestFactory.java b/vdslib/src/test/java/com/yahoo/vdslib/distribution/CrossPlatformTestFactory.java
index 70a11ff530f..90128c7c04b 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/distribution/CrossPlatformTestFactory.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/distribution/CrossPlatformTestFactory.java
@@ -20,36 +20,29 @@ public abstract class CrossPlatformTestFactory {
public String getName() { return name; }
- public boolean loadTestResults() throws Exception {
+ public void loadTestResults() throws Exception {
File reference = new File(directory, name + ".reference.results");
if (!reference.exists()) {
- return false;
+ return;
}
- BufferedReader br = new BufferedReader(new FileReader(reference));
- StringBuilder sb = new StringBuilder();
- try{
- while(true) {
+ try (BufferedReader br = new BufferedReader(new FileReader(reference))) {
+ StringBuilder sb = new StringBuilder();
+ while (true) {
String line = br.readLine();
if (line == null) break;
sb.append(line);
}
parse(sb.toString());
- } finally {
- br.close();
}
- return true;
}
public void recordTestResults() throws Exception {
File results = new File(directory, name + ".java.results");
- FileWriter fw = new FileWriter(results);
- try{
+ try (FileWriter fw = new FileWriter(results)) {
fw.write(serialize());
- } finally {
- fw.close();
}
}
- public abstract String serialize() throws Exception;
+ public abstract String serialize();
public abstract void parse(String serialized) throws Exception;
}
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestCase.java
index 19c9c79522d..6dfffa23aed 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestCase.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestCase.java
@@ -30,26 +30,27 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class DistributionTestCase {
+
+ private static final int minUsedBits = 16;
+
private DistributionTestFactory test;
/** Build a set of buckets to test that should represent the entire bucket space well. */
- private static List<BucketId> getTestBuckets() { return getTestBuckets(16); }
- private static List<BucketId> getTestBuckets(int minUsedBits) {
+ private static List<BucketId> getTestBuckets() {
List<BucketId> buckets = new ArrayList<>();
- assertTrue(minUsedBits <= 16);
- // Get a set of buckets from the same split level
- for (int i=16; i<=18; ++i) {
- for (int j=0; j<20; ++j) {
+ // Get a set of buckets from the same split level
+ for (int i = 16; i <= 18; ++ i) {
+ for (int j = 0; j < 20; ++ j) {
buckets.add(new BucketId(i, j));
}
}
- // Get a few random buckets at every split level.
+ // Get a few random buckets at every split level.
Random randomized = new Random(413);
long randValue = randomized.nextLong();
- for (int i=minUsedBits; i<58; ++i) {
+ for (int i = minUsedBits; i < 58; ++ i) {
buckets.add(new BucketId(i, randValue));
}
randValue = randomized.nextLong();
- for (int i=minUsedBits; i<58; ++i) {
+ for (int i = minUsedBits; i < 58; ++ i) {
buckets.add(new BucketId(i, randValue));
}
return Collections.unmodifiableList(buckets);
@@ -230,7 +231,7 @@ public class DistributionTestCase {
}
@Test
- public void testSplitBeyondSplitBitDoesntAffectDistribution() throws Exception {
+ public void testSplitBeyondSplitBitDoesntAffectDistribution() {
Random randomized = new Random(7123161);
long val = randomized.nextLong();
test = new DistributionTestFactory("abovesplitbit");
@@ -325,7 +326,7 @@ public class DistributionTestCase {
}
@Test
- public void testHierarchicalDistribution() throws Exception {
+ public void testHierarchicalDistribution() {
test = new DistributionTestFactory("hierarchical-grouping")
.setDistribution(buildHierarchicalConfig(6, 3, 1, "1|2|*", 3));
for (BucketId bucket : getTestBuckets()) {
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestFactory.java b/vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestFactory.java
index 78b548e5925..e94e4f04199 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestFactory.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/distribution/DistributionTestFactory.java
@@ -21,7 +21,7 @@ import static org.junit.Assert.assertTrue;
// TODO: Use config builder instead of ConfigGetter to create test config.
public class DistributionTestFactory extends CrossPlatformTestFactory {
- ObjectMapper mapper = new ObjectMapper();
+ final ObjectMapper mapper = new ObjectMapper();
private static final String testDirectory = "src/tests/distribution/testdata";
private int redundancy;
@@ -32,14 +32,14 @@ public class DistributionTestFactory extends CrossPlatformTestFactory {
private String upStates;
private int testsRecorded = 0;
- private List<Test> results = new ArrayList<>();
+ private final List<Test> results = new ArrayList<>();
private int testsVerified = 0;
- enum Failure { NONE, TOO_FEW_BITS, NO_DISTRIBUTORS_AVAILABLE };
+ enum Failure { NONE, TOO_FEW_BITS, NO_DISTRIBUTORS_AVAILABLE }
static public class Test {
- private BucketId bucket;
- private List<Integer> nodes;
+ private final BucketId bucket;
+ private final List<Integer> nodes;
private Failure failure;
public Test(BucketId bucket) {
@@ -50,8 +50,7 @@ public class DistributionTestFactory extends CrossPlatformTestFactory {
@Override
public boolean equals(Object other) {
- if (!(other instanceof Test)) return false;
- Test t = (Test) other;
+ if (!(other instanceof Test t)) return false;
return (bucket.equals(t.bucket)
&& nodes.equals(t.nodes)
&& failure.equals(t.failure));
@@ -81,19 +80,14 @@ public class DistributionTestFactory extends CrossPlatformTestFactory {
return nodes;
}
- public Test assertFailure(Failure f) {
- assertEquals(f, failure);
- return this;
- }
public Test assertNodeCount(int count) {
if (count > 0) assertEquals(toString(), Failure.NONE, failure);
assertEquals(toString(), count, nodes.size());
return this;
}
- public Test assertNodeUsed(int node) {
+ public void assertNodeUsed(int node) {
assertEquals(toString(), Failure.NONE, failure);
assertTrue(toString(), nodes.contains(node));
- return this;
}
}
@@ -166,9 +160,7 @@ public class DistributionTestFactory extends CrossPlatformTestFactory {
int node = d.getIdealDistributorNode(state, bucket, upStates);
t.nodes.add(node);
} else {
- for (int i : d.getIdealStorageNodes(state, bucket, upStates)) {
- t.nodes.add(i);
- }
+ t.nodes.addAll(d.getIdealStorageNodes(state, bucket, upStates));
}
} catch (Distribution.TooFewBucketBitsInUseException e) {
t.failure = Failure.TOO_FEW_BITS;
@@ -184,7 +176,7 @@ public class DistributionTestFactory extends CrossPlatformTestFactory {
return t;
}
- public String serialize() throws Exception {
+ public String serialize() {
ObjectNode test = new ObjectNode(mapper.getNodeFactory())
.put("cluster-state", state.toString())
.put("distribution", new StorDistributionConfig(distributionConfig).toString())
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/distribution/GroupTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/distribution/GroupTestCase.java
index 353f2bf4ebc..ce9d4dcedff 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/distribution/GroupTestCase.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/distribution/GroupTestCase.java
@@ -31,7 +31,7 @@ public class GroupTestCase {
private void assertDistributionFailure(String spec, int redundancy, String expectedError) {
try{
- Group.Distribution distribution = new Group.Distribution(spec, redundancy);
+ new Group.Distribution(spec, redundancy);
fail("Failed to fail parsing of spec \"" + spec + "\", redundancy " + redundancy + " with failure: " + expectedError);
} catch (Exception e) {
assertEquals(expectedError, e.getMessage());
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java
index 77dd37b3ebf..c4ff28b75b1 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java
@@ -9,7 +9,9 @@ import java.util.function.BiFunction;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
public class ClusterStateTestCase{
@@ -55,16 +57,16 @@ public class ClusterStateTestCase{
assertEquals(state, new ClusterState("storage:0"));
assertEquals(state, new ClusterState("distributor:0"));
- assertFalse(state.equals(new ClusterState("version:1")));
- assertFalse(state.equals(new ClusterState("cluster:d")));
- assertFalse(state.equals(new ClusterState("bits:20")));
- assertFalse(state.equals(new ClusterState("storage:1")));
- assertFalse(state.equals(new ClusterState("distributor:1")));
+ assertNotEquals(state, new ClusterState("version:1"));
+ assertNotEquals(state, new ClusterState("cluster:d"));
+ assertNotEquals(state, new ClusterState("bits:20"));
+ assertNotEquals(state, new ClusterState("storage:1"));
+ assertNotEquals(state, new ClusterState("distributor:1"));
{
ClusterState state1 = new ClusterState("distributor:3 .1.s:d .2.s:m storage:3 .1.s:i .2.s:r");
ClusterState state2 = new ClusterState("distributor:3 .1.s:d .2.s:m storage:3 .1.s:i .2.s:m");
- assertFalse(state1.equals(state2));
+ assertNotEquals(state1, state2);
assertFalse(state1.similarTo(state2));
assertFalse(state1.similarToIgnoringInitProgress(state2));
}
@@ -72,7 +74,7 @@ public class ClusterStateTestCase{
{
ClusterState state1 = new ClusterState("cluster:d");
ClusterState state2 = new ClusterState("cluster:d version:1 bits:20 distributor:1 storage:1 .0.s:d");
- assertFalse(state1.equals(state2));
+ assertNotEquals(state1, state2);
assertTrue(state1.similarTo(state2));
assertTrue(state1.similarToIgnoringInitProgress(state2));
}
@@ -80,12 +82,12 @@ public class ClusterStateTestCase{
{
ClusterState state1 = new ClusterState("distributor:3 .1.s:d .2.s:m storage:3 .1.s:i .2.s:r");
ClusterState state2 = new ClusterState("distributor:3 storage:3");
- assertFalse(state1.equals(state2));
+ assertNotEquals(state1, state2);
assertFalse(state1.similarTo(state2));
assertFalse(state1.similarToIgnoringInitProgress(state2));
}
- assertFalse(state.equals("class not instance of ClusterState"));
+ assertNotEquals("class not instance of ClusterState", state);
assertFalse(state.similarTo("class not instance of ClusterState"));
assertEquals(state, state);
@@ -200,36 +202,39 @@ public class ClusterStateTestCase{
ClusterState state3 = new ClusterState("distributor:9 storage:2");
assertEquals("storage: [4: Down => Up, 5: Down => Up], distributor: [7: Up => Down, 8: Up => Down]", state1.getTextualDifference(state2));
- assertEquals("storage: [<br>\n" +
- "&nbsp;4: <b>Down</b> =&gt; <b>Up</b>, <br>\n" +
- "&nbsp;5: <b>Down</b> =&gt; <b>Up</b><br>\n" +
- "], distributor: [<br>\n" +
- "&nbsp;7: <b>Up</b> =&gt; <b>Down</b>, <br>\n" +
- "&nbsp;8: <b>Up</b> =&gt; <b>Down</b><br>\n" +
- "]", state1.getHtmlDifference(state2));
+ assertEquals("""
+ storage: [<br>
+ &nbsp;4: <b>Down</b> =&gt; <b>Up</b>, <br>
+ &nbsp;5: <b>Down</b> =&gt; <b>Up</b><br>
+ ], distributor: [<br>
+ &nbsp;7: <b>Up</b> =&gt; <b>Down</b>, <br>
+ &nbsp;8: <b>Up</b> =&gt; <b>Down</b><br>
+ ]""", state1.getHtmlDifference(state2));
assertEquals("storage: [2: Up => Down, 3: Up => Down, 4: Up => Down, 5: Up => Down], distributor: [7: Down => Up, 8: Down => Up]", state2.getTextualDifference(state3));
- assertEquals("storage: [<br>\n" +
- "&nbsp;2: <b>Up</b> =&gt; <b>Down</b>, <br>\n" +
- "&nbsp;3: <b>Up</b> =&gt; <b>Down</b>, <br>\n" +
- "&nbsp;4: <b>Up</b> =&gt; <b>Down</b>, <br>\n" +
- "&nbsp;5: <b>Up</b> =&gt; <b>Down</b><br>\n" +
- "], distributor: [<br>\n" +
- "&nbsp;7: <b>Down</b> =&gt; <b>Up</b>, <br>\n" +
- "&nbsp;8: <b>Down</b> =&gt; <b>Up</b><br>\n" +
- "]", state2.getHtmlDifference(state3));
+ assertEquals("""
+ storage: [<br>
+ &nbsp;2: <b>Up</b> =&gt; <b>Down</b>, <br>
+ &nbsp;3: <b>Up</b> =&gt; <b>Down</b>, <br>
+ &nbsp;4: <b>Up</b> =&gt; <b>Down</b>, <br>
+ &nbsp;5: <b>Up</b> =&gt; <b>Down</b><br>
+ ], distributor: [<br>
+ &nbsp;7: <b>Down</b> =&gt; <b>Up</b>, <br>
+ &nbsp;8: <b>Down</b> =&gt; <b>Up</b><br>
+ ]""", state2.getHtmlDifference(state3));
state1.setVersion(123);
state1.setNodeState(new Node(NodeType.STORAGE, 2), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.2f).setDescription("Booting"));
state2.setDistributionBits(21);
assertEquals("version: 123 => 0, bits: 16 => 21, storage: [2: [Initializing => Up, description: Booting => ], 4: Down => Up, 5: Down => Up], distributor: [7: Up => Down, 8: Up => Down]", state1.getTextualDifference(state2));
- assertEquals("version: 123 =&gt; 0, bits: 16 =&gt; 21, storage: [<br>\n" +
- "&nbsp;2: [<b>Initializing</b> =&gt; <b>Up</b>, description: Booting =&gt; ], <br>\n" +
- "&nbsp;4: <b>Down</b> =&gt; <b>Up</b>, <br>\n" +
- "&nbsp;5: <b>Down</b> =&gt; <b>Up</b><br>\n" +
- "], distributor: [<br>\n" +
- "&nbsp;7: <b>Up</b> =&gt; <b>Down</b>, <br>\n" +
- "&nbsp;8: <b>Up</b> =&gt; <b>Down</b><br>\n" +
- "]", state1.getHtmlDifference(state2));
+ assertEquals("""
+ version: 123 =&gt; 0, bits: 16 =&gt; 21, storage: [<br>
+ &nbsp;2: [<b>Initializing</b> =&gt; <b>Up</b>, description: Booting =&gt; ], <br>
+ &nbsp;4: <b>Down</b> =&gt; <b>Up</b>, <br>
+ &nbsp;5: <b>Down</b> =&gt; <b>Up</b><br>
+ ], distributor: [<br>
+ &nbsp;7: <b>Up</b> =&gt; <b>Down</b>, <br>
+ &nbsp;8: <b>Up</b> =&gt; <b>Down</b><br>
+ ]""", state1.getHtmlDifference(state2));
}
@Test
@@ -254,40 +259,40 @@ public class ClusterStateTestCase{
try {
new ClusterState("badtokenwithoutcolon");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
new ClusterState(".0.s:d");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
new ClusterState("cluster:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
new ClusterState("cluster:m");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
new ClusterState("version:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
new ClusterState("distributor:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
new ClusterState("storage:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
new ClusterState("distributor:2 .3.s:d");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
new ClusterState("storage:2 .3.s:d");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
}
@Test
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java
index 1ce3655b394..3eff07e80b9 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java
@@ -4,11 +4,12 @@ package com.yahoo.vdslib.state;
import org.junit.Test;
import java.text.ParseException;
-import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
public class NodeStateTestCase {
@@ -56,36 +57,36 @@ public class NodeStateTestCase {
assertEquals(ns, NodeState.deserialize(NodeType.STORAGE, ": s:m sbadkey:u bbadkey:2 cbadkey:2.0 rbadkey:2 ibadkey:0.5 tbadkey:2 mbadkey:message dbadkey:2 unknownkey:somevalue"));
try {
NodeState.deserialize(NodeType.STORAGE, "s:m badtokenwithoutcolon");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
NodeState.deserialize(NodeType.STORAGE, "s:m c:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
NodeState.deserialize(NodeType.STORAGE, "s:m i:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
NodeState.deserialize(NodeType.STORAGE, "s:m t:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
NodeState.deserialize(NodeType.STORAGE, "s:m t:-1");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
NodeState.deserialize(NodeType.STORAGE, "s:m d:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
NodeState.deserialize(NodeType.STORAGE, "s:m d.badkey:badvalue");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
try {
NodeState.deserialize(NodeType.STORAGE, "s:m d.1:badindex");
- assertTrue("Should fail", false);
- } catch (Exception e) {}
+ fail("Should fail");
+ } catch (Exception ignored) {}
ns = new NodeState(NodeType.STORAGE, State.UP).setDescription("Foo bar");
assertEquals("", ns.serialize(2, false));
@@ -127,11 +128,11 @@ public class NodeStateTestCase {
assertTrue(ns1.similarToIgnoringInitProgress(ns4));
assertTrue(ns2.similarToIgnoringInitProgress(ns4));
- assertFalse(ns1.equals(ns2));
- assertFalse(ns2.equals(ns3));
- assertFalse(ns3.equals(ns4));
+ assertNotEquals(ns1, ns2);
+ assertNotEquals(ns2, ns3);
+ assertNotEquals(ns3, ns4);
- assertFalse(ns1.equals("class not instance of NodeState"));
+ assertNotEquals("class not instance of NodeState", ns1);
assertFalse(ns1.similarTo("class not instance of NodeState"));
}
{
@@ -139,7 +140,7 @@ public class NodeStateTestCase {
NodeState ns2 = new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(18);
assertTrue(ns1.similarTo(ns2));
assertTrue(ns1.similarToIgnoringInitProgress(ns2));
- assertFalse(ns1.equals(ns2));
+ assertNotEquals(ns1, ns2);
}
}
@@ -163,12 +164,12 @@ public class NodeStateTestCase {
public void testValidInClusterState() {
try{
new NodeState(NodeType.DISTRIBUTOR, State.UNKNOWN).verifyValidInSystemState(NodeType.DISTRIBUTOR);
- assertTrue("Should not be valid", false);
- } catch (Exception e) {}
+ fail("Should not be valid");
+ } catch (Exception ignored) {}
try{
new NodeState(NodeType.DISTRIBUTOR, State.UP).setCapacity(3).verifyValidInSystemState(NodeType.DISTRIBUTOR);
- assertTrue("Should not be valid", false);
- } catch (Exception e) {}
+ fail("Should not be valid");
+ } catch (Exception ignored) {}
}
}
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/state/NodeTest.java b/vdslib/src/test/java/com/yahoo/vdslib/state/NodeTest.java
index 971b4782ab0..e3fc0faecd5 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/state/NodeTest.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/state/NodeTest.java
@@ -5,7 +5,9 @@ import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
public class NodeTest {
@@ -29,23 +31,23 @@ public class NodeTest {
assertEquals(n3, n3);
assertEquals(n4, n4);
- assertFalse(n1.equals(n2));
- assertFalse(n1.equals(n3));
- assertFalse(n1.equals(n4));
+ assertNotEquals(n1, n2);
+ assertNotEquals(n1, n3);
+ assertNotEquals(n1, n4);
- assertFalse(n2.equals(n1));
- assertFalse(n2.equals(n3));
- assertFalse(n2.equals(n4));
+ assertNotEquals(n2, n1);
+ assertNotEquals(n2, n3);
+ assertNotEquals(n2, n4);
- assertFalse(n3.equals(n1));
- assertFalse(n3.equals(n2));
- assertFalse(n3.equals(n4));
+ assertNotEquals(n3, n1);
+ assertNotEquals(n3, n2);
+ assertNotEquals(n3, n4);
- assertFalse(n4.equals(n1));
- assertFalse(n4.equals(n2));
- assertFalse(n4.equals(n3));
+ assertNotEquals(n4, n1);
+ assertNotEquals(n4, n2);
+ assertNotEquals(n4, n3);
- assertFalse(n1.equals("class not instance of Node"));
+ assertNotEquals("class not instance of Node", n1);
}
@Test
@@ -62,19 +64,19 @@ public class NodeTest {
try {
new Node("nodewithoutdot");
- assertTrue("Method expected to throw IllegalArgumentException", false);
+ fail("Method expected to throw IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertEquals("Not a legal node string 'nodewithoutdot'.", e.getMessage());
}
try {
new Node("fleetcontroller.0");
- assertTrue("Method expected to throw IllegalArgumentException", false);
+ fail("Method expected to throw IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertEquals("Unknown node type 'fleetcontroller'. Legal values are 'storage' and 'distributor'.", e.getMessage());
}
try {
new Node("storage.badindex");
- assertTrue("Method expected to throw NumberFormatException", false);
+ fail("Method expected to throw NumberFormatException");
} catch (NumberFormatException e) {
assertEquals("For input string: \"badindex\"", e.getMessage());
}
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java
index 33991ef1a3b..41f54255d9d 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java
@@ -20,9 +20,7 @@ import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
-import java.time.Instant;
import java.util.Base64;
-import java.util.List;
import java.util.Optional;
import static com.yahoo.vespa.athenz.identityprovider.api.VespaUniqueInstanceId.fromDottedString;
@@ -150,7 +148,7 @@ public class EntityBindingsMapper {
docEntity.unknownAttributes());
}
- public static String toIdentityDocmentData(IdentityDocument identityDocument) {
+ public static String toIdentityDocumentData(IdentityDocument identityDocument) {
IdentityDocumentEntity documentEntity = new IdentityDocumentEntity(
identityDocument.providerUniqueId().asDottedString(),
identityDocument.providerService().getFullName(),
@@ -160,7 +158,7 @@ public class EntityBindingsMapper {
identityDocument.ipAddresses(),
identityDocument.identityType().id(),
Optional.ofNullable(identityDocument.clusterType()).map(ClusterType::toConfigValue).orElse(null),
- identityDocument.ztsUrl(),
+ identityDocument.ztsUrl().toString(),
identityDocument.serviceIdentity().getFullName());
try {
byte[] bytes = mapper.writeValueAsBytes(documentEntity);
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java
index 00d82177367..4bfff58b928 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.athenz.identityprovider.api;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
+import java.net.URI;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
@@ -14,7 +15,7 @@ import java.util.Set;
*/
public record IdentityDocument(VespaUniqueInstanceId providerUniqueId, AthenzIdentity providerService, String configServerHostname,
String instanceHostname, Instant createdAt, Set<String> ipAddresses,
- IdentityType identityType, ClusterType clusterType, String ztsUrl,
+ IdentityType identityType, ClusterType clusterType, URI ztsUrl,
AthenzIdentity serviceIdentity, Map<String, Object> unknownAttributes) {
public IdentityDocument {
@@ -30,7 +31,7 @@ public record IdentityDocument(VespaUniqueInstanceId providerUniqueId, AthenzIde
public IdentityDocument(VespaUniqueInstanceId providerUniqueId, AthenzIdentity providerService, String configServerHostname,
String instanceHostname, Instant createdAt, Set<String> ipAddresses,
- IdentityType identityType, ClusterType clusterType, String ztsUrl,
+ IdentityType identityType, ClusterType clusterType, URI ztsUrl,
AthenzIdentity serviceIdentity) {
this(providerUniqueId, providerService, configServerHostname, instanceHostname, createdAt, ipAddresses, identityType, clusterType, ztsUrl, serviceIdentity, Map.of());
}
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java
index 194854cfc3b..8970a74934a 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java
@@ -7,9 +7,9 @@ import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.net.URI;
import java.time.Instant;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -20,7 +20,7 @@ import java.util.Set;
@JsonInclude(JsonInclude.Include.NON_NULL)
public record IdentityDocumentEntity(String providerUniqueId, String providerService,
String configServerHostname, String instanceHostname, Instant createdAt, Set<String> ipAddresses,
- String identityType, String clusterType, String ztsUrl, String serviceIdentity, Map<String, Object> unknownAttributes) {
+ String identityType, String clusterType, URI ztsUrl, String serviceIdentity, Map<String, Object> unknownAttributes) {
@JsonCreator
public IdentityDocumentEntity(@JsonProperty("provider-unique-id") String providerUniqueId,
@@ -34,7 +34,7 @@ public record IdentityDocumentEntity(String providerUniqueId, String providerSer
@JsonProperty("zts-url") String ztsUrl,
@JsonProperty("service-identity") String serviceIdentity) {
this(providerUniqueId, providerService, configServerHostname,
- instanceHostname, createdAt, ipAddresses, identityType, clusterType, ztsUrl, serviceIdentity, new HashMap<>());
+ instanceHostname, createdAt, ipAddresses, identityType, clusterType, URI.create(ztsUrl), serviceIdentity, new HashMap<>());
}
@JsonProperty("provider-unique-id") @Override public String providerUniqueId() { return providerUniqueId; }
@@ -45,7 +45,7 @@ public record IdentityDocumentEntity(String providerUniqueId, String providerSer
@JsonProperty("ip-addresses") @Override public Set<String> ipAddresses() { return ipAddresses; }
@JsonProperty("identity-type") @Override public String identityType() { return identityType; }
@JsonProperty("cluster-type") @Override public String clusterType() { return clusterType; }
- @JsonProperty("zts-url") @Override public String ztsUrl() { return ztsUrl; }
+ @JsonProperty("zts-url") @Override public URI ztsUrl() { return ztsUrl; }
@JsonProperty("service-identity") @Override public String serviceIdentity() { return serviceIdentity; }
@JsonAnyGetter @Override public Map<String, Object> unknownAttributes() { return unknownAttributes; }
@JsonAnySetter public void set(String name, Object value) { unknownAttributes.put(name, value); }
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java
index e00ab9978f6..9bf91eff60a 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java
@@ -7,6 +7,7 @@ import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.net.URI;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
@@ -19,7 +20,7 @@ import java.util.Set;
public record LegacySignedIdentityDocumentEntity (
String signature, int signingKeyVersion, String providerUniqueId, String providerService, int documentVersion,
String configServerHostname, String instanceHostname, Instant createdAt, Set<String> ipAddresses,
- String identityType, String clusterType, String ztsUrl, String serviceIdentity, Map<String, Object> unknownAttributes) implements SignedIdentityDocumentEntity {
+ String identityType, String clusterType, URI ztsUrl, String serviceIdentity, Map<String, Object> unknownAttributes) implements SignedIdentityDocumentEntity {
@JsonCreator
public LegacySignedIdentityDocumentEntity(@JsonProperty("signature") String signature,
@@ -36,7 +37,7 @@ public record LegacySignedIdentityDocumentEntity (
@JsonProperty("zts-url") String ztsUrl,
@JsonProperty("service-identity") String serviceIdentity) {
this(signature, signingKeyVersion, providerUniqueId, providerService, documentVersion, configServerHostname,
- instanceHostname, createdAt, ipAddresses, identityType, clusterType, ztsUrl, serviceIdentity, new HashMap<>());
+ instanceHostname, createdAt, ipAddresses, identityType, clusterType, URI.create(ztsUrl), serviceIdentity, new HashMap<>());
}
@JsonProperty("signature") @Override public String signature() { return signature; }
@@ -50,7 +51,7 @@ public record LegacySignedIdentityDocumentEntity (
@JsonProperty("ip-addresses") @Override public Set<String> ipAddresses() { return ipAddresses; }
@JsonProperty("identity-type") @Override public String identityType() { return identityType; }
@JsonProperty("cluster-type") @Override public String clusterType() { return clusterType; }
- @JsonProperty("zts-url") @Override public String ztsUrl() { return ztsUrl; }
+ @JsonProperty("zts-url") @Override public URI ztsUrl() { return ztsUrl; }
@JsonProperty("service-identity") @Override public String serviceIdentity() { return serviceIdentity; }
@JsonAnyGetter @Override public Map<String, Object> unknownAttributes() { return unknownAttributes; }
@JsonAnySetter public void set(String name, Object value) { unknownAttributes.put(name, value); }
diff --git a/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java b/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java
index 276815f263d..45963aaaeb3 100644
--- a/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java
+++ b/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java
@@ -15,15 +15,15 @@ import com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument;
import com.yahoo.vespa.athenz.identityprovider.api.VespaUniqueInstanceId;
import org.junit.jupiter.api.Test;
+import java.net.URI;
import java.security.KeyPair;
import java.time.Instant;
import java.util.Arrays;
import java.util.HashSet;
-import java.util.List;
import static com.yahoo.vespa.athenz.identityprovider.api.IdentityType.TENANT;
-import static com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument.LEGACY_DEFAULT_DOCUMENT_VERSION;
import static com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument.DEFAULT_DOCUMENT_VERSION;
+import static com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument.LEGACY_DEFAULT_DOCUMENT_VERSION;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -43,7 +43,7 @@ public class IdentityDocumentSignerTest {
private static final Instant createdAt = Instant.EPOCH;
private static final HashSet<String> ipAddresses = new HashSet<>(Arrays.asList("1.2.3.4", "::1"));
private static final ClusterType clusterType = ClusterType.CONTAINER;
- private static final String ztsUrl = "https://foo";
+ private static final URI ztsUrl = URI.create("https://foo");
private static final AthenzIdentity serviceIdentity = new AthenzService("vespa", "node");
@Test
@@ -67,7 +67,7 @@ public class IdentityDocumentSignerTest {
IdentityDocument identityDocument = new IdentityDocument(
id, providerService, configserverHostname,
instanceHostname, createdAt, ipAddresses, identityType, clusterType, ztsUrl, serviceIdentity);
- String data = EntityBindingsMapper.toIdentityDocmentData(identityDocument);
+ String data = EntityBindingsMapper.toIdentityDocumentData(identityDocument);
String signature =
signer.generateSignature(data, keyPair.getPrivate());
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index a66aec154d9..9b3dbf64be3 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -89,7 +89,7 @@ org.apache.commons:commons-math3:3.6.1
org.apache.curator:curator-client:5.4.0
org.apache.curator:curator-framework:5.4.0
org.apache.curator:curator-recipes:5.4.0
-org.apache.felix:org.apache.felix.framework:7.0.1
+org.apache.felix:org.apache.felix.framework:7.0.5
org.apache.felix:org.apache.felix.log:1.0.1
org.apache.httpcomponents:httpclient:4.5.14
org.apache.httpcomponents:httpcore:4.4.16
diff --git a/vespa-documentgen-plugin/etc/complex/book.sd b/vespa-documentgen-plugin/etc/complex/book.sd
index addc556fc9b..8f071ab63d1 100644
--- a/vespa-documentgen-plugin/etc/complex/book.sd
+++ b/vespa-documentgen-plugin/etc/complex/book.sd
@@ -47,18 +47,15 @@ search book {
field author type string {
bolding: on
- # index-to: default, author
indexing: summary | index
}
field isbn type string {
- # index-to: default, isbn
indexing: summary | index
}
field year type int {
indexing: summary | index
}
field description type string {
- # index-to: default, description
indexing: summary | index
summary: dynamic
}
diff --git a/vespa-documentgen-plugin/etc/complex/common.sd b/vespa-documentgen-plugin/etc/complex/common.sd
index d07f8ca9281..3ab6e18c267 100644
--- a/vespa-documentgen-plugin/etc/complex/common.sd
+++ b/vespa-documentgen-plugin/etc/complex/common.sd
@@ -6,7 +6,6 @@ search common {
}
field title type string {
bolding: on
- # index-to: default, title
indexing: index|summary
summary-to: smallsum
}
diff --git a/vespa-documentgen-plugin/etc/complex/music2.sd b/vespa-documentgen-plugin/etc/complex/music2.sd
index 327fbeec04b..17736ab4e79 100644
--- a/vespa-documentgen-plugin/etc/complex/music2.sd
+++ b/vespa-documentgen-plugin/etc/complex/music2.sd
@@ -3,19 +3,16 @@ search music2 {
document music2 inherits common {
field artist type string {
bolding: on
- # index-to: default, artist
indexing: index|summary
}
field disp_song type string {
indexing: summary
}
field song type string {
- # index-to: default, song
indexing: index
}
field isbn type string {
bolding: on
- # index-to: default, isbn
indexing: index|summary
}
field year type int {
diff --git a/vespa-documentgen-plugin/etc/localapp/book.sd b/vespa-documentgen-plugin/etc/localapp/book.sd
index a6f1ed9286f..8d7dd1e054e 100644
--- a/vespa-documentgen-plugin/etc/localapp/book.sd
+++ b/vespa-documentgen-plugin/etc/localapp/book.sd
@@ -46,18 +46,15 @@ search book {
field author type string {
bolding: on
- # index-to: default, author
indexing: summary | index
}
field isbn type string {
- # index-to: default, isbn
indexing: summary | index
}
field year type int {
indexing: summary | index
}
field description type string {
- # index-to: default, description
indexing: summary | index
summary: dynamic
}
diff --git a/vespa-documentgen-plugin/etc/localapp/common.sd b/vespa-documentgen-plugin/etc/localapp/common.sd
index ab55e08af0f..a7828432c6b 100644
--- a/vespa-documentgen-plugin/etc/localapp/common.sd
+++ b/vespa-documentgen-plugin/etc/localapp/common.sd
@@ -6,7 +6,6 @@ search common {
}
field title type string {
bolding: on
- # index-to: default, title
indexing: index|summary
summary-to: smallsum
}
diff --git a/vespa-documentgen-plugin/etc/localapp/music.sd b/vespa-documentgen-plugin/etc/localapp/music.sd
index cdcfdea6b75..0ce004857c2 100644
--- a/vespa-documentgen-plugin/etc/localapp/music.sd
+++ b/vespa-documentgen-plugin/etc/localapp/music.sd
@@ -3,19 +3,16 @@ search music {
document music inherits common {
field artist type string {
bolding: on
- # index-to: default, artist
indexing: index|summary
}
field disp_song type string {
indexing: summary
}
field song type string {
- # index-to: default, song
indexing: index
}
field isbn type string {
bolding: on
- # index-to: default, isbn
indexing: index|summary
}
field year type int {
diff --git a/vespa-documentgen-plugin/etc/localapp/video.sd b/vespa-documentgen-plugin/etc/localapp/video.sd
index 2f1f1f84512..03c5c82ed64 100644
--- a/vespa-documentgen-plugin/etc/localapp/video.sd
+++ b/vespa-documentgen-plugin/etc/localapp/video.sd
@@ -3,26 +3,21 @@ search video {
document video inherits common {
field director type string {
bolding: on
- # index-to: default, director
indexing: index|summary
}
field disp_actor type string {
bolding: on
- # index-to: default, disp_actor
indexing: index|summary
}
field actor type string {
bolding: on
- # index-to: default, actor
indexing: index|summary
}
field fmt type string {
- # index-to: default, fmt
indexing: index|summary
}
field isbn type string {
bolding: on
- # index-to: default, isbn
indexing: index|summary
}
field year type int {
diff --git a/vespa-enforcer-extensions/pom.xml b/vespa-enforcer-extensions/pom.xml
index 96df2c2ac33..4c006cb393f 100644
--- a/vespa-enforcer-extensions/pom.xml
+++ b/vespa-enforcer-extensions/pom.xml
@@ -16,15 +16,13 @@
<dependencies>
<dependency>
- <groupId>org.apache.maven.enforcer</groupId>
- <artifactId>enforcer-api</artifactId>
- <version>3.0.0</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
<groupId>org.apache.maven.shared</groupId>
<artifactId>maven-dependency-tree</artifactId>
- <version>3.1.1</version>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.maven.enforcer</groupId>
+ <artifactId>enforcer-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
@@ -50,6 +48,22 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-shade-plugin</artifactId>
+ <executions>
+ <execution>
+ <phase>package</phase>
+ <goals>
+ <goal>shade</goal>
+ </goals>
+ <configuration>
+ <createDependencyReducedPom>false</createDependencyReducedPom>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
</plugins>
</build>
diff --git a/vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependencies.java b/vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependencies.java
index 4ff363a3e4e..05ae7005c1f 100644
--- a/vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependencies.java
+++ b/vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependencies.java
@@ -31,6 +31,7 @@ import java.util.regex.Pattern;
*
* @author bjorncs
*/
+@SuppressWarnings("deprecation")
public class EnforceDependencies implements EnforcerRule {
private List<String> allowedDependencies = List.of();
diff --git a/vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependenciesAllProjects.java b/vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependenciesAllProjects.java
index 3787d84e4be..3db1019a2b1 100644
--- a/vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependenciesAllProjects.java
+++ b/vespa-enforcer-extensions/src/main/java/com/yahoo/vespa/maven/plugin/enforcer/EnforceDependenciesAllProjects.java
@@ -38,6 +38,7 @@ import java.util.stream.Stream;
/**
* @author bjorncs
*/
+@SuppressWarnings("deprecation")
public class EnforceDependenciesAllProjects implements EnforcerRule {
private static final String WRITE_SPEC_PROP = "dependencyEnforcer.writeSpec";
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
index 7b7d8712308..197b7721eca 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
@@ -42,7 +42,7 @@ public class FeedClientBuilderImpl implements FeedClientBuilder {
SSLContext sslContext;
HostnameVerifier hostnameVerifier;
int connectionsPerEndpoint = 8;
- int maxStreamsPerConnection = 32;
+ int maxStreamsPerConnection = 128;
FeedClient.RetryStrategy retryStrategy = defaultRetryStrategy;
FeedClient.CircuitBreaker circuitBreaker = new GracePeriodCircuitBreaker(Duration.ofSeconds(10));
Path certificateFile;
@@ -201,7 +201,6 @@ public class FeedClientBuilderImpl implements FeedClientBuilder {
@Override
public FeedClientBuilderImpl setProxy(URI uri) {
- log.warning("Proxy configuration ignored - not supported yet");
this.proxy = uri;
return this;
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
index c2181821de6..f228717eba5 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpFeedClient.java
@@ -321,7 +321,7 @@ class HttpFeedClient implements FeedClient {
.map(Boolean::parseBoolean)
.orElse(Optional.ofNullable(System.getProperty(name))
.map(Boolean::parseBoolean)
- .orElse(false));
+ .orElse(true));
}
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java
index 9f5523b062c..30dc1ab0d07 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java
@@ -5,11 +5,15 @@ package ai.vespa.feed.client.impl;
import ai.vespa.feed.client.FeedClientBuilder.Compression;
import ai.vespa.feed.client.HttpResponse;
import org.eclipse.jetty.client.HttpClient;
+import org.eclipse.jetty.client.HttpProxy;
+import org.eclipse.jetty.client.MultiplexConnectionPool;
+import org.eclipse.jetty.client.Origin;
+import org.eclipse.jetty.client.api.Authentication;
import org.eclipse.jetty.client.api.Request;
import org.eclipse.jetty.client.api.Response;
import org.eclipse.jetty.client.api.Result;
-import org.eclipse.jetty.client.util.AbstractRequestContent;
import org.eclipse.jetty.client.util.BufferingResponseListener;
+import org.eclipse.jetty.client.util.BytesRequestContent;
import org.eclipse.jetty.http.HttpField;
import org.eclipse.jetty.http.HttpHeader;
import org.eclipse.jetty.http.HttpMethod;
@@ -17,8 +21,8 @@ import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.http2.client.HTTP2Client;
import org.eclipse.jetty.http2.client.http.HttpClientTransportOverHTTP2;
import org.eclipse.jetty.io.ClientConnector;
-import org.eclipse.jetty.util.Callback;
import org.eclipse.jetty.util.HttpCookieStore;
+import org.eclipse.jetty.util.Pool;
import org.eclipse.jetty.util.Promise;
import org.eclipse.jetty.util.SocketAddressResolver;
import org.eclipse.jetty.util.component.AbstractLifeCycle;
@@ -31,12 +35,14 @@ import java.io.UncheckedIOException;
import java.net.Inet4Address;
import java.net.InetSocketAddress;
import java.net.URI;
-import java.nio.ByteBuffer;
import java.time.Duration;
+import java.util.Collections;
import java.util.List;
-import java.util.Optional;
+import java.util.Map;
+import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.zip.GZIPOutputStream;
@@ -67,25 +73,46 @@ class JettyCluster implements Cluster {
@Override
public void dispatch(HttpRequest req, CompletableFuture<HttpResponse> vessel) {
- Endpoint endpoint = findLeastBusyEndpoint(endpoints);
- long reqTimeoutMillis = req.timeout() != null
- ? req.timeout().toMillis() * 11 / 10 + 1000 : IDLE_TIMEOUT.toMillis();
- Request jettyReq = client.newRequest(URI.create(endpoint.uri + req.path()))
- .version(HttpVersion.HTTP_2)
- .method(HttpMethod.fromString(req.method()))
- .headers(hs -> req.headers().forEach((k, v) -> hs.add(k, v.get())))
- .idleTimeout(IDLE_TIMEOUT.toMillis(), MILLISECONDS)
- .timeout(reqTimeoutMillis, MILLISECONDS);
- if (req.body() != null) {
- FeedContent content = new FeedContent(compression, req.body());
- content.contentEncoding().ifPresent(ce -> jettyReq.headers(hs -> hs.add(ce)));
- jettyReq.body(content);
- }
- jettyReq.send(new BufferingResponseListener() {
- @Override
- public void onComplete(Result result) {
- if (result.isFailed()) vessel.completeExceptionally(result.getFailure());
- else vessel.complete(new JettyResponse(result.getResponse(), getContent()));
+ client.getExecutor().execute(() -> {
+ Endpoint endpoint = findLeastBusyEndpoint(endpoints);
+ try {
+ endpoint.inflight.incrementAndGet();
+ long reqTimeoutMillis = req.timeout() != null
+ ? req.timeout().toMillis() * 11 / 10 + 1000 : IDLE_TIMEOUT.toMillis();
+ Request jettyReq = client.newRequest(URI.create(endpoint.uri + req.path()))
+ .version(HttpVersion.HTTP_2)
+ .method(HttpMethod.fromString(req.method()))
+ .headers(hs -> req.headers().forEach((k, v) -> {
+ if (!isProxyHeader(k)) hs.add(k, v.get());
+ }))
+ .idleTimeout(IDLE_TIMEOUT.toMillis(), MILLISECONDS)
+ .timeout(reqTimeoutMillis, MILLISECONDS);
+ if (req.body() != null) {
+ boolean shouldCompress = compression == gzip || compression == auto && req.body().length > 512;
+ byte[] bytes;
+ if (shouldCompress) {
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10);
+ try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) {
+ zip.write(req.body());
+ } catch (IOException e) { throw new UncheckedIOException(e); }
+ bytes = buffer.toByteArray();
+ jettyReq.headers(hs -> hs.add(HttpHeader.CONTENT_ENCODING, "gzip"));
+ } else {
+ bytes = req.body();
+ }
+ jettyReq.body(new BytesRequestContent(APPLICATION_JSON.asString(), bytes));
+ }
+ jettyReq.send(new BufferingResponseListener() {
+ @Override
+ public void onComplete(Result result) {
+ endpoint.inflight.decrementAndGet();
+ if (result.isFailed()) vessel.completeExceptionally(result.getFailure());
+ else vessel.complete(new JettyResponse(result.getResponse(), getContent()));
+ }
+ });
+ } catch (Exception e) {
+ endpoint.inflight.decrementAndGet();
+ vessel.completeExceptionally(e);
}
});
}
@@ -106,31 +133,68 @@ class JettyCluster implements Cluster {
clientSslCtxFactory.setEndpointIdentificationAlgorithm(null);
}
ClientConnector connector = new ClientConnector();
- int threads = Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 16), 4);
+ int threads = Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 32), 8);
connector.setExecutor(new QueuedThreadPool(threads));
connector.setSslContextFactory(clientSslCtxFactory);
+ connector.setIdleTimeout(IDLE_TIMEOUT);
+ connector.setConnectTimeout(Duration.ofSeconds(10));
HTTP2Client h2Client = new HTTP2Client(connector);
h2Client.setMaxConcurrentPushedStreams(b.maxStreamsPerConnection);
- HttpClient httpClient = new HttpClient(new HttpClientTransportOverHTTP2(h2Client));
- httpClient.setMaxConnectionsPerDestination(b.connectionsPerEndpoint);
+ // Set the HTTP/2 flow control windows very large to cause TCP congestion instead of HTTP/2 flow control congestion.
+ int initialWindow = Integer.MAX_VALUE;
+ h2Client.setInitialSessionRecvWindow(initialWindow);
+ h2Client.setInitialStreamRecvWindow(initialWindow);
+ HttpClientTransportOverHTTP2 transport = new HttpClientTransportOverHTTP2(h2Client);
+ transport.setConnectionPoolFactory(dest -> {
+ MultiplexConnectionPool pool = new MultiplexConnectionPool(
+ dest, Pool.StrategyType.RANDOM, b.connectionsPerEndpoint, false, dest, Integer.MAX_VALUE);
+ pool.preCreateConnections(b.connectionsPerEndpoint);
+ return pool;
+ });
+ HttpClient httpClient = new HttpClient(transport);
httpClient.setFollowRedirects(false);
httpClient.setUserAgentField(
new HttpField(HttpHeader.USER_AGENT, String.format("vespa-feed-client/%s (Jetty)", Vespa.VERSION)));
- httpClient.setMaxRequestsQueuedPerDestination(Integer.MAX_VALUE);
- httpClient.setConnectTimeout(Duration.ofSeconds(10).toMillis());
// Stop client from trying different IP address when TLS handshake fails
httpClient.setSocketAddressResolver(new Ipv4PreferringResolver(httpClient, Duration.ofSeconds(10)));
httpClient.setCookieStore(new HttpCookieStore.Empty());
- httpClient.setIdleTimeout(IDLE_TIMEOUT.toMillis());
- try {
- httpClient.start();
- } catch (Exception e) {
- throw new IOException(e);
- }
+ if (b.proxy != null) addProxyConfiguration(b, httpClient);
+ try { httpClient.start(); } catch (Exception e) { throw new IOException(e); }
return httpClient;
}
+ private static void addProxyConfiguration(FeedClientBuilderImpl b, HttpClient httpClient) throws IOException {
+ Origin.Address address = new Origin.Address(b.proxy.getHost(), b.proxy.getPort());
+ if (b.proxy.getScheme().equals("https")) {
+ SslContextFactory.Client proxySslCtxFactory = new SslContextFactory.Client();
+ if (b.hostnameVerifier != null) proxySslCtxFactory.setHostnameVerifier(b.hostnameVerifier);
+ // Disable built-in hostname verification in the JDK's TLS implementation
+ proxySslCtxFactory.setEndpointIdentificationAlgorithm(null);
+ try { proxySslCtxFactory.start(); } catch (Exception e) { throw new IOException(e); }
+ httpClient.getProxyConfiguration().addProxy(
+ new HttpProxy(address, proxySslCtxFactory, new Origin.Protocol(Collections.singletonList("h2"), false)));
+ } else {
+ httpClient.getProxyConfiguration().addProxy(
+ new HttpProxy(address, false, new Origin.Protocol(Collections.singletonList("h2c"), false)));
+ }
+ Map<String, Supplier<String>> proxyHeaders = new TreeMap<>();
+ b.requestHeaders.forEach((k, v) -> { if (isProxyHeader(k)) proxyHeaders.put(k, v); });
+ if (!proxyHeaders.isEmpty()) {
+ for (URI endpoint : b.endpoints) {
+ httpClient.getAuthenticationStore().addAuthenticationResult(new Authentication.Result() {
+ @Override public URI getURI() { return URI.create(endpointUri(endpoint)); }
+ @Override public void apply(Request r) {
+ r.headers(hs -> proxyHeaders.forEach((k, v) -> hs.add(k, v.get())));
+ }
+ });
+
+ }
+ }
+ }
+
+ private static boolean isProxyHeader(String h) { return h.equalsIgnoreCase(HttpHeader.PROXY_AUTHORIZATION.asString()); }
+
private static Endpoint findLeastBusyEndpoint(List<Endpoint> endpoints) {
Endpoint leastBusy = endpoints.get(0);
int minInflight = leastBusy.inflight.get();
@@ -149,6 +213,10 @@ class JettyCluster implements Cluster {
return u.getPort() == -1 ? u.getScheme().equals("http") ? 80 : 443 : u.getPort();
}
+ private static String endpointUri(URI uri) {
+ return String.format("%s://%s:%s", uri.getScheme(), uri.getHost(), portOf(uri));
+ }
+
private static class JettyResponse implements HttpResponse {
final Response response;
final byte[] content;
@@ -163,50 +231,7 @@ class JettyCluster implements Cluster {
private static class Endpoint {
final AtomicInteger inflight = new AtomicInteger();
final String uri;
- Endpoint(URI uri) { this.uri = String.format("%s://%s:%s", uri.getScheme(), uri.getHost(), portOf(uri)); }
- }
-
- private static class FeedContent extends AbstractRequestContent {
- final Compression compression;
- final byte[] body;
-
- FeedContent(Compression compression, byte[] body) {
- super(APPLICATION_JSON.asString());
- this.compression = compression;
- this.body = body;
- }
-
- @Override public boolean isReproducible() { return true; }
- @Override public long getLength() { return shouldCompress() ? -1 : body.length; }
- Optional<HttpField> contentEncoding() {
- return shouldCompress() ? Optional.of(new HttpField(HttpHeader.CONTENT_ENCODING, "gzip")) : Optional.empty();
- }
-
- @Override
- public Subscription newSubscription(Consumer consumer, boolean emitInitialContent) {
- return new SubscriptionImpl(consumer, emitInitialContent);
- }
-
- boolean shouldCompress() { return compression == gzip || compression == auto && body.length > 512; }
-
- class SubscriptionImpl extends AbstractSubscription {
- SubscriptionImpl(Consumer consumer, boolean emitInitialContent) { super(consumer, emitInitialContent); }
-
- @Override
- protected boolean produceContent(Producer producer) {
- byte[] bytes;
- if (shouldCompress()) {
- ByteArrayOutputStream buffer = new ByteArrayOutputStream(1 << 10);
- try (GZIPOutputStream zip = new GZIPOutputStream(buffer)) {
- zip.write(body);
- } catch (IOException e) { throw new UncheckedIOException(e); }
- bytes = buffer.toByteArray();
- } else {
- bytes = body;
- }
- return producer.produce(ByteBuffer.wrap(bytes), true, Callback.NOOP);
- }
- }
+ Endpoint(URI uri) { this.uri = endpointUri(uri); }
}
private static class Ipv4PreferringResolver extends AbstractLifeCycle implements SocketAddressResolver {
diff --git a/vespaclient-core/src/main/java/com/yahoo/clientmetrics/MessageTypeMetricSet.java b/vespaclient-core/src/main/java/com/yahoo/clientmetrics/MessageTypeMetricSet.java
index 46ad5ebfab6..8b798d4b76e 100644
--- a/vespaclient-core/src/main/java/com/yahoo/clientmetrics/MessageTypeMetricSet.java
+++ b/vespaclient-core/src/main/java/com/yahoo/clientmetrics/MessageTypeMetricSet.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.clientmetrics;
+import com.yahoo.concurrent.Timer;
import com.yahoo.documentapi.messagebus.protocol.DocumentIgnoredReply;
import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol;
import com.yahoo.messagebus.Reply;
@@ -12,21 +13,24 @@ import java.util.Map;
import java.util.stream.Stream;
/**
-* @author thomasg
+* @author Thomas Gundersen
*/
public class MessageTypeMetricSet {
+
public long latency_total;
public long latency_min = Long.MAX_VALUE;
public long latency_max = Long.MIN_VALUE;
public long count = 0;
public long ignored = 0;
public long errorCount = 0;
+ public final Timer timer;
private final Map<String, Long> errorCounts = new HashMap<>();
private final String msgName;
- public MessageTypeMetricSet(String msgName) {
+ MessageTypeMetricSet(String msgName, Timer timer) {
this.msgName = msgName;
+ this.timer = timer;
}
public String getMessageName() {
@@ -55,7 +59,7 @@ public class MessageTypeMetricSet {
private void updateSuccessMetrics(Reply r) {
if (!(r instanceof DocumentIgnoredReply)) {
if (r.getMessage().getTimeReceived() != 0) {
- long latency = (SystemTimer.INSTANCE.milliTime() - r.getMessage().getTimeReceived());
+ long latency = (timer.milliTime() - r.getMessage().getTimeReceived());
latency_max = Math.max(latency_max, latency);
latency_min = Math.min(latency_min, latency);
latency_total += latency;
diff --git a/vespaclient-core/src/main/java/com/yahoo/clientmetrics/RouteMetricSet.java b/vespaclient-core/src/main/java/com/yahoo/clientmetrics/RouteMetricSet.java
index ebf6246b034..61fd5dfdca3 100644
--- a/vespaclient-core/src/main/java/com/yahoo/clientmetrics/RouteMetricSet.java
+++ b/vespaclient-core/src/main/java/com/yahoo/clientmetrics/RouteMetricSet.java
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.clientmetrics;
+import com.yahoo.concurrent.SystemTimer;
+import com.yahoo.concurrent.Timer;
import com.yahoo.messagebus.Reply;
import java.util.HashMap;
@@ -12,6 +14,7 @@ import java.util.Map;
public class RouteMetricSet {
private final String route;
+ private final Timer timer;
private final ProgressCallback callback;
private final Map<Integer, MessageTypeMetricSet> typeMap = new HashMap<>();
@@ -20,18 +23,23 @@ public class RouteMetricSet {
void done(RouteMetricSet route);
}
- public RouteMetricSet(String route, ProgressCallback callback) {
+ public RouteMetricSet(String route, Timer timer, ProgressCallback callback) {
this.route = route;
+ this.timer = timer;
this.callback = callback;
}
+ public RouteMetricSet(String route, ProgressCallback callback) {
+ this(route, SystemTimer.INSTANCE, callback);
+ }
+
public Map<Integer, MessageTypeMetricSet> getMetrics() { return typeMap; }
public void addReply(Reply r) {
MessageTypeMetricSet type = typeMap.get(r.getMessage().getType());
if (type == null) {
String msgName = r.getMessage().getClass().getSimpleName().replace("Message", "");
- type = new MessageTypeMetricSet(msgName);
+ type = new MessageTypeMetricSet(msgName, timer);
typeMap.put(r.getMessage().getType(), type);
}
diff --git a/vespaclient-java/src/test/java/com/yahoo/vespafeeder/BenchmarkProgressPrinterTest.java b/vespaclient-java/src/test/java/com/yahoo/vespafeeder/BenchmarkProgressPrinterTest.java
index 6eba29fe9cb..d5244e97118 100644
--- a/vespaclient-java/src/test/java/com/yahoo/vespafeeder/BenchmarkProgressPrinterTest.java
+++ b/vespaclient-java/src/test/java/com/yahoo/vespafeeder/BenchmarkProgressPrinterTest.java
@@ -20,11 +20,11 @@ public class BenchmarkProgressPrinterTest {
ByteArrayOutputStream output = new ByteArrayOutputStream();
ManualTimer timer = new ManualTimer();
BenchmarkProgressPrinter printer = new BenchmarkProgressPrinter(timer, new PrintStream(output));
- RouteMetricSet metrics = new RouteMetricSet("foobar", printer);
+ RouteMetricSet metrics = new RouteMetricSet("foobar", timer, printer);
{
EmptyReply reply = new EmptyReply();
- reply.setMessage(PutDocumentMessage.createEmpty().setTimeReceived(1));
+ reply.setMessage(PutDocumentMessage.createEmpty().setTimeReceived(-1));
metrics.addReply(reply);
}
@@ -32,13 +32,13 @@ public class BenchmarkProgressPrinterTest {
{
EmptyReply reply = new EmptyReply();
- reply.setMessage(PutDocumentMessage.createEmpty().setTimeReceived(2));
+ reply.setMessage(PutDocumentMessage.createEmpty().setTimeReceived(-1));
metrics.addReply(reply);
}
{
EmptyReply reply = new EmptyReply();
- reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(3));
+ reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(-1));
metrics.addReply(reply);
}
@@ -46,7 +46,7 @@ public class BenchmarkProgressPrinterTest {
{
EmptyReply reply = new EmptyReply();
- reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(4));
+ reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(-1));
reply.addError(new com.yahoo.messagebus.Error(32, "foo"));
metrics.addReply(reply);
}
@@ -55,7 +55,7 @@ public class BenchmarkProgressPrinterTest {
{
EmptyReply reply = new EmptyReply();
- reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(5));
+ reply.setMessage(UpdateDocumentMessage.createEmpty().setTimeReceived(-1));
reply.addError(new com.yahoo.messagebus.Error(64, "bar"));
metrics.addReply(reply);
}
diff --git a/vespajlib/src/main/java/com/yahoo/compress/Compressor.java b/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
index fcbc89307b8..3e9d704e11c 100644
--- a/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
+++ b/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
@@ -194,14 +194,14 @@ public class Compressor {
public long warmup(double seconds) {
byte [] input = new byte[0x4000];
new Random().nextBytes(input);
- long timeDone = System.nanoTime() + (long)(seconds*1000000000);
+ long startTime = System.nanoTime();
long compressedBytes = 0;
byte [] decompressed = new byte [input.length];
LZ4FastDecompressor fastDecompressor = factory.fastDecompressor();
LZ4SafeDecompressor safeDecompressor = factory.safeDecompressor();
LZ4Compressor fastCompressor = factory.fastCompressor();
LZ4Compressor highCompressor = factory.highCompressor();
- while (System.nanoTime() < timeDone) {
+ while (System.nanoTime() - startTime < seconds * 1e9) {
byte [] compressedFast = fastCompressor.compress(input);
byte [] compressedHigh = highCompressor.compress(input);
fastDecompressor.decompress(compressedFast, decompressed);
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/SystemTimer.java b/vespajlib/src/main/java/com/yahoo/concurrent/SystemTimer.java
index 8111d52a10f..c2fca806a85 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/SystemTimer.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/SystemTimer.java
@@ -42,21 +42,18 @@ public enum SystemTimer implements Timer {
SystemTimer() {
long napTime = adjustTimeoutByDetectedHz(Duration.ofMillis(1)).toMillis();
- millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
- Thread thread = new Thread() {
-
- @Override
- public void run() {
- while (true) {
- millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
- try {
- Thread.sleep(napTime);
- } catch (InterruptedException e) {
- break;
- }
+ long creationNanos = System.nanoTime();
+ millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - creationNanos);
+ Thread thread = new Thread(() -> {
+ while (true) {
+ millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - creationNanos);
+ try {
+ Thread.sleep(napTime);
+ } catch (InterruptedException e) {
+ break;
}
}
- };
+ });
thread.setDaemon(true);
thread.setName("vespa-system-timer");
thread.start();
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/Timer.java b/vespajlib/src/main/java/com/yahoo/concurrent/Timer.java
index c41c762c989..9328039aae6 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/Timer.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/Timer.java
@@ -20,16 +20,8 @@ public interface Timer {
* @return The current value of the timer, in milliseconds.
*/
long milliTime();
- Timer monotonic = () -> TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
- static Timer wrap(Clock original) {
- return new Timer() {
- private final Clock clock = original;
-
- @Override
- public long milliTime() {
- return clock.millis();
- }
- }; }
-
+ long creationNanos = System.nanoTime(); // Avoid monotonic timer overflow for the first 146 years of JVM uptime.
+ Timer monotonic = () -> TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - creationNanos);
+ static Timer wrap(Clock original) { return original::millis; }
default Instant instant() { return Instant.ofEpochMilli(milliTime()); }
}
diff --git a/vespajlib/src/main/java/com/yahoo/yolean/Exceptions.java b/vespajlib/src/main/java/com/yahoo/yolean/Exceptions.java
index 4f3f048eb0c..c8564a9dac5 100644
--- a/vespajlib/src/main/java/com/yahoo/yolean/Exceptions.java
+++ b/vespajlib/src/main/java/com/yahoo/yolean/Exceptions.java
@@ -27,6 +27,7 @@ public class Exceptions {
for (; t != null; t = t.getCause()) {
message = getMessage(t);
if (message == null) continue;
+ if (message.isEmpty()) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
diff --git a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
index b842d009ce8..2c5af5c02ec 100644
--- a/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
+++ b/vespalib/src/tests/util/rcuvector/rcuvector_test.cpp
@@ -10,6 +10,7 @@
#include <vespa/vespalib/util/size_literals.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
#include <random>
+#include <thread>
using namespace vespalib;
@@ -20,6 +21,18 @@ using vespalib::makeLambdaTask;
using MyMemoryAllocator = vespalib::alloc::test::MemoryAllocatorObserver;
using AllocStats = MyMemoryAllocator::Stats;
+namespace {
+
+void consider_yield(uint32_t i)
+{
+ if ((i % 1111) == 0) {
+ // Need to yield sometimes to avoid livelock when running unit test with valgrind
+ std::this_thread::yield();
+ }
+}
+
+}
+
bool
assertUsage(const MemoryUsage & exp, const MemoryUsage & act)
{
@@ -452,12 +465,15 @@ StressFixture::read_work()
std::mt19937 gen(rd());
std::uniform_int_distribution<uint32_t> distrib(0, read_area - 1);
std::vector<int> old(read_area);
+ uint32_t i = 0;
while (!stop_read.load(std::memory_order_relaxed)) {
uint32_t idx = distrib(gen);
auto guard = generation_handler.takeGuard();
int value = arr.acquire_elem_ref(idx).load_acquire();
EXPECT_LE(old[idx], value);
old[idx] = value;
+ consider_yield(i);
+ ++i;
}
}
@@ -478,6 +494,7 @@ StressFixture::write_work(uint32_t cnt)
uint32_t idx = distrib(gen);
arr[idx].store_release(arr[idx].load_relaxed() + 1);
commit();
+ consider_yield(i);
}
}