-rw-r--r--  bundle-plugin-test/test-bundles/main/pom.xml | 1
-rw-r--r--  client/js/app/yarn.lock | 253
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java | 5
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java | 19
-rw-r--r--  config-model/src/main/javacc/SchemaParser.jj | 10
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java | 38
-rw-r--r--  config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java | 2
-rw-r--r--  config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java | 10
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java | 13
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java | 5
-rw-r--r--  container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java | 1
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java | 96
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java | 21
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificates.java | 7
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java | 3
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java | 7
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java | 18
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java | 16
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java | 17
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java | 102
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 6
-rw-r--r--  jdisc_core/pom.xml | 2
-rw-r--r--  jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java | 129
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java | 53
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java | 17
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java | 19
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java | 23
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/InMemoryProvisionLogger.java | 35
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java | 101
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java | 3
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java | 10
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java | 8
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java | 40
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java | 10
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java | 8
-rw-r--r--  parent/pom.xml | 2
-rw-r--r--  renovate.json | 20
-rw-r--r--  searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp | 72
-rw-r--r--  searchlib/src/tests/query/streaming_query_large_test.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchcommon/common/schema.cpp | 13
-rw-r--r--  searchlib/src/vespa/searchlib/aggregation/group.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/aggregation/grouping.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp | 4
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/attributecontext.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/attributemanager.cpp | 18
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/postingchange.cpp | 21
-rw-r--r--  searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp | 3
-rw-r--r--  searchlib/src/vespa/searchlib/common/bitvectorcache.cpp | 4
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp | 3
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/diskindex.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp | 5
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp | 17
-rw-r--r--  searchlib/src/vespa/searchlib/docstore/chunk.cpp | 12
-rw-r--r--  searchlib/src/vespa/searchlib/docstore/logdatastore.cpp | 21
-rw-r--r--  searchlib/src/vespa/searchlib/engine/propertiesmap.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/expression/resultvector.h | 10
-rw-r--r--  searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp | 7
-rw-r--r--  searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp | 4
-rw-r--r--  searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/fef/objectstore.cpp | 4
-rw-r--r--  searchlib/src/vespa/searchlib/fef/properties.cpp | 50
-rw-r--r--  searchlib/src/vespa/searchlib/fef/tablemanager.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp | 21
-rw-r--r--  searchlib/src/vespa/searchlib/fef/test/rankresult.cpp | 27
-rw-r--r--  searchlib/src/vespa/searchlib/grouping/collect.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/grouping/groupengine.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/index/postinglistparams.cpp | 12
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp | 8
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp | 13
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp | 34
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp | 98
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp | 8
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp | 12
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp | 9
-rw-r--r--  searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp | 12
-rw-r--r--  searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp | 4
-rw-r--r--  searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp | 7
-rw-r--r--  socket_test/pom.xml | 2
-rw-r--r--  streamingvisitors/src/vespa/searchvisitor/rankmanager.h | 4
-rw-r--r--  streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp | 6
-rw-r--r--  streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp | 4
-rw-r--r--  streamingvisitors/src/vespa/vsm/common/document.cpp | 6
-rw-r--r--  streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp | 20
-rw-r--r--  streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp | 4
-rw-r--r--  streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp | 7
-rw-r--r--  streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp | 4
-rw-r--r--  streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp | 14
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java | 6
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java | 5
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java | 8
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java | 7
-rw-r--r--  vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java | 8
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java | 63
-rw-r--r--  vespajlib/src/main/java/com/yahoo/compress/Compressor.java | 4
110 files changed, 1140 insertions, 777 deletions
diff --git a/bundle-plugin-test/test-bundles/main/pom.xml b/bundle-plugin-test/test-bundles/main/pom.xml
index 21399291442..c4dd3407607 100644
--- a/bundle-plugin-test/test-bundles/main/pom.xml
+++ b/bundle-plugin-test/test-bundles/main/pom.xml
@@ -24,7 +24,6 @@
<!-- Added to verify that module-info.class can be handled by bundle-plugin without throwing an exception. -->
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
- <version>2.3.0</version>
</dependency>
</dependencies>
<build>
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index 3e180a63226..ccade9c00cd 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -17,12 +17,24 @@
dependencies:
"@babel/highlight" "^7.18.6"
+"@babel/code-frame@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.22.5.tgz#234d98e1551960604f1246e6475891a570ad5658"
+ integrity sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==
+ dependencies:
+ "@babel/highlight" "^7.22.5"
+
"@babel/compat-data@^7.22.0":
version "7.22.3"
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.3.tgz#cd502a6a0b6e37d7ad72ce7e71a7160a3ae36f7e"
integrity sha512-aNtko9OPOwVESUFp3MZfD8Uzxl7JzSeJpd7npIoxCasU37PFbAQRpKglkaKwlHOyeJdrREpo8TW8ldrkYWwvIQ==
-"@babel/core@^7.1.0", "@babel/core@^7.11.6", "@babel/core@^7.12.17", "@babel/core@^7.12.3", "@babel/core@^7.21.4":
+"@babel/compat-data@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.6.tgz#15606a20341de59ba02cd2fcc5086fcbe73bf544"
+ integrity sha512-29tfsWTq2Ftu7MXmimyC0C5FDZv5DYxOZkh3XD3+QW4V/BYuv/LyEsjj3c0hqedEaDt6DBfDvexMKU8YevdqFg==
+
+"@babel/core@^7.1.0", "@babel/core@^7.11.6", "@babel/core@^7.12.17", "@babel/core@^7.12.3":
version "7.22.1"
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.1.tgz#5de51c5206f4c6f5533562838337a603c1033cfd"
integrity sha512-Hkqu7J4ynysSXxmAahpN1jjRwVJ+NdpraFLIWflgjpVob3KNyK3/tIUc7Q7szed8WMp0JNa7Qtd1E9Oo22F9gA==
@@ -43,6 +55,27 @@
json5 "^2.2.2"
semver "^6.3.0"
+"@babel/core@^7.22.5":
+ version "7.22.8"
+ resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.8.tgz#386470abe884302db9c82e8e5e87be9e46c86785"
+ integrity sha512-75+KxFB4CZqYRXjx4NlR4J7yGvKumBuZTmV4NV6v09dVXXkuYVYLT68N6HCzLvfJ+fWCxQsntNzKwwIXL4bHnw==
+ dependencies:
+ "@ampproject/remapping" "^2.2.0"
+ "@babel/code-frame" "^7.22.5"
+ "@babel/generator" "^7.22.7"
+ "@babel/helper-compilation-targets" "^7.22.6"
+ "@babel/helper-module-transforms" "^7.22.5"
+ "@babel/helpers" "^7.22.6"
+ "@babel/parser" "^7.22.7"
+ "@babel/template" "^7.22.5"
+ "@babel/traverse" "^7.22.8"
+ "@babel/types" "^7.22.5"
+ "@nicolo-ribaudo/semver-v6" "^6.3.3"
+ convert-source-map "^1.7.0"
+ debug "^4.1.0"
+ gensync "^1.0.0-beta.2"
+ json5 "^2.2.2"
+
"@babel/generator@^7.22.0", "@babel/generator@^7.22.3", "@babel/generator@^7.7.2":
version "7.22.3"
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.22.3.tgz#0ff675d2edb93d7596c5f6728b52615cfc0df01e"
@@ -53,6 +86,16 @@
"@jridgewell/trace-mapping" "^0.3.17"
jsesc "^2.5.1"
+"@babel/generator@^7.22.7":
+ version "7.22.7"
+ resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.22.7.tgz#a6b8152d5a621893f2c9dacf9a4e286d520633d5"
+ integrity sha512-p+jPjMG+SI8yvIaxGgeW24u7q9+5+TGpZh8/CuB7RhBKd7RCy8FayNEFNNKrNK/eUcY/4ExQqLmyrvBXKsIcwQ==
+ dependencies:
+ "@babel/types" "^7.22.5"
+ "@jridgewell/gen-mapping" "^0.3.2"
+ "@jridgewell/trace-mapping" "^0.3.17"
+ jsesc "^2.5.1"
+
"@babel/helper-compilation-targets@^7.22.1":
version "7.22.1"
resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.1.tgz#bfcd6b7321ffebe33290d68550e2c9d7eb7c7a58"
@@ -64,11 +107,27 @@
lru-cache "^5.1.1"
semver "^6.3.0"
+"@babel/helper-compilation-targets@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.6.tgz#e30d61abe9480aa5a83232eb31c111be922d2e52"
+ integrity sha512-534sYEqWD9VfUm3IPn2SLcH4Q3P86XL+QvqdC7ZsFrzyyPF3T4XGiVghF6PTYNdWg6pXuoqXxNQAhbYeEInTzA==
+ dependencies:
+ "@babel/compat-data" "^7.22.6"
+ "@babel/helper-validator-option" "^7.22.5"
+ "@nicolo-ribaudo/semver-v6" "^6.3.3"
+ browserslist "^4.21.9"
+ lru-cache "^5.1.1"
+
"@babel/helper-environment-visitor@^7.22.1":
version "7.22.1"
resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.1.tgz#ac3a56dbada59ed969d712cf527bd8271fe3eba8"
integrity sha512-Z2tgopurB/kTbidvzeBrc2To3PUP/9i5MUe+fU6QJCQDyPwSH2oRapkLw3KGECDYSjhQZCNxEvNvZlLw8JjGwA==
+"@babel/helper-environment-visitor@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz#f06dd41b7c1f44e1f8da6c4055b41ab3a09a7e98"
+ integrity sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==
+
"@babel/helper-function-name@^7.21.0":
version "7.21.0"
resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz#d552829b10ea9f120969304023cd0645fa00b1b4"
@@ -77,6 +136,14 @@
"@babel/template" "^7.20.7"
"@babel/types" "^7.21.0"
+"@babel/helper-function-name@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz#ede300828905bb15e582c037162f99d5183af1be"
+ integrity sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==
+ dependencies:
+ "@babel/template" "^7.22.5"
+ "@babel/types" "^7.22.5"
+
"@babel/helper-hoist-variables@^7.18.6":
version "7.18.6"
resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz#d4d2c8fb4baeaa5c68b99cc8245c56554f926678"
@@ -84,6 +151,13 @@
dependencies:
"@babel/types" "^7.18.6"
+"@babel/helper-hoist-variables@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb"
+ integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
"@babel/helper-module-imports@^7.16.7", "@babel/helper-module-imports@^7.21.4":
version "7.21.4"
resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.21.4.tgz#ac88b2f76093637489e718a90cec6cf8a9b029af"
@@ -91,6 +165,13 @@
dependencies:
"@babel/types" "^7.21.4"
+"@babel/helper-module-imports@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz#1a8f4c9f4027d23f520bd76b364d44434a72660c"
+ integrity sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
"@babel/helper-module-transforms@^7.21.5", "@babel/helper-module-transforms@^7.22.1":
version "7.22.1"
resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.22.1.tgz#e0cad47fedcf3cae83c11021696376e2d5a50c63"
@@ -105,11 +186,30 @@
"@babel/traverse" "^7.22.1"
"@babel/types" "^7.22.0"
-"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.19.0", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.21.5", "@babel/helper-plugin-utils@^7.8.0":
+"@babel/helper-module-transforms@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz#0f65daa0716961b6e96b164034e737f60a80d2ef"
+ integrity sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==
+ dependencies:
+ "@babel/helper-environment-visitor" "^7.22.5"
+ "@babel/helper-module-imports" "^7.22.5"
+ "@babel/helper-simple-access" "^7.22.5"
+ "@babel/helper-split-export-declaration" "^7.22.5"
+ "@babel/helper-validator-identifier" "^7.22.5"
+ "@babel/template" "^7.22.5"
+ "@babel/traverse" "^7.22.5"
+ "@babel/types" "^7.22.5"
+
+"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.21.5", "@babel/helper-plugin-utils@^7.8.0":
version "7.21.5"
resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.21.5.tgz#345f2377d05a720a4e5ecfa39cbf4474a4daed56"
integrity sha512-0WDaIlXKOX/3KfBK/dwP1oQGiPh6rjMkT7HIRv7i5RR2VUMwrx5ZL0dwBkKx7+SW1zwNdgjHd34IMk5ZjTeHVg==
+"@babel/helper-plugin-utils@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz#dd7ee3735e8a313b9f7b05a773d892e88e6d7295"
+ integrity sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==
+
"@babel/helper-simple-access@^7.21.5":
version "7.21.5"
resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.21.5.tgz#d697a7971a5c39eac32c7e63c0921c06c8a249ee"
@@ -117,6 +217,13 @@
dependencies:
"@babel/types" "^7.21.5"
+"@babel/helper-simple-access@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz#4938357dc7d782b80ed6dbb03a0fba3d22b1d5de"
+ integrity sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
"@babel/helper-split-export-declaration@^7.18.6":
version "7.18.6"
resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz#7367949bc75b20c6d5a5d4a97bba2824ae8ef075"
@@ -124,21 +231,43 @@
dependencies:
"@babel/types" "^7.18.6"
+"@babel/helper-split-export-declaration@^7.22.5", "@babel/helper-split-export-declaration@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz#322c61b7310c0997fe4c323955667f18fcefb91c"
+ integrity sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==
+ dependencies:
+ "@babel/types" "^7.22.5"
+
"@babel/helper-string-parser@^7.21.5":
version "7.21.5"
resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.21.5.tgz#2b3eea65443c6bdc31c22d037c65f6d323b6b2bd"
integrity sha512-5pTUx3hAJaZIdW99sJ6ZUUgWq/Y+Hja7TowEnLNMm1VivRgZQL3vpBY3qUACVsvw+yQU6+YgfBVmcbLaZtrA1w==
+"@babel/helper-string-parser@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz#533f36457a25814cf1df6488523ad547d784a99f"
+ integrity sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==
+
"@babel/helper-validator-identifier@^7.18.6", "@babel/helper-validator-identifier@^7.19.1":
version "7.19.1"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz#7eea834cf32901ffdc1a7ee555e2f9c27e249ca2"
integrity sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==
+"@babel/helper-validator-identifier@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz#9544ef6a33999343c8740fa51350f30eeaaaf193"
+ integrity sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==
+
"@babel/helper-validator-option@^7.21.0":
version "7.21.0"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz#8224c7e13ace4bafdc4004da2cf064ef42673180"
integrity sha512-rmL/B8/f0mKS2baE9ZpyTcTavvEuWhTTW8amjzXNvYG4AwBsqTLikfXsEofsJEfKHf+HQVQbFOHy6o+4cnC/fQ==
+"@babel/helper-validator-option@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz#de52000a15a177413c8234fa3a8af4ee8102d0ac"
+ integrity sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==
+
"@babel/helpers@^7.22.0":
version "7.22.3"
resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.22.3.tgz#53b74351da9684ea2f694bf0877998da26dd830e"
@@ -148,6 +277,15 @@
"@babel/traverse" "^7.22.1"
"@babel/types" "^7.22.3"
+"@babel/helpers@^7.22.6":
+ version "7.22.6"
+ resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.22.6.tgz#8e61d3395a4f0c5a8060f309fb008200969b5ecd"
+ integrity sha512-YjDs6y/fVOYFV8hAf1rxd1QvR9wJe1pDBZ2AREKq/SDayfPzgk0PBnVuTCE5X1acEpMMNOVUqoe+OwiZGJ+OaA==
+ dependencies:
+ "@babel/template" "^7.22.5"
+ "@babel/traverse" "^7.22.6"
+ "@babel/types" "^7.22.5"
+
"@babel/highlight@^7.18.6":
version "7.18.6"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf"
@@ -157,11 +295,25 @@
chalk "^2.0.0"
js-tokens "^4.0.0"
+"@babel/highlight@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.5.tgz#aa6c05c5407a67ebce408162b7ede789b4d22031"
+ integrity sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==
+ dependencies:
+ "@babel/helper-validator-identifier" "^7.22.5"
+ chalk "^2.0.0"
+ js-tokens "^4.0.0"
+
"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.21.9", "@babel/parser@^7.22.0", "@babel/parser@^7.22.4":
version "7.22.4"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.4.tgz#a770e98fd785c231af9d93f6459d36770993fb32"
integrity sha512-VLLsx06XkEYqBtE5YGPwfSGwfrjnyPP5oiGty3S8pQLFDFLaS8VwWSIxkTXpcvr5zeYLE6+MBNl2npl/YnfofA==
+"@babel/parser@^7.22.5", "@babel/parser@^7.22.7":
+ version "7.22.7"
+ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.7.tgz#df8cf085ce92ddbdbf668a7f186ce848c9036cae"
+ integrity sha512-7NF8pOkHP5o2vpmGgNGcfAeCvOYhGLyA3Z4eBQkT1RJlWu47n63bCs93QfJ2hIAFCil7L5P2IWhs1oToVgrL0Q==
+
"@babel/plugin-syntax-async-generators@^7.8.4":
version "7.8.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d"
@@ -269,19 +421,19 @@
"@babel/helper-plugin-utils" "^7.21.5"
"@babel/helper-simple-access" "^7.21.5"
-"@babel/plugin-transform-react-jsx-self@^7.21.0":
- version "7.21.0"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.21.0.tgz#ec98d4a9baafc5a1eb398da4cf94afbb40254a54"
- integrity sha512-f/Eq+79JEu+KUANFks9UZCcvydOOGMgF7jBrcwjHa5jTZD8JivnhCJYvmlhR/WTXBWonDExPoW0eO/CR4QJirA==
+"@babel/plugin-transform-react-jsx-self@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.22.5.tgz#ca2fdc11bc20d4d46de01137318b13d04e481d8e"
+ integrity sha512-nTh2ogNUtxbiSbxaT4Ds6aXnXEipHweN9YRgOX/oNXdf0cCrGn/+2LozFa3lnPV5D90MkjhgckCPBrsoSc1a7g==
dependencies:
- "@babel/helper-plugin-utils" "^7.20.2"
+ "@babel/helper-plugin-utils" "^7.22.5"
-"@babel/plugin-transform-react-jsx-source@^7.19.6":
- version "7.19.6"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.19.6.tgz#88578ae8331e5887e8ce28e4c9dc83fb29da0b86"
- integrity sha512-RpAi004QyMNisst/pvSanoRdJ4q+jMCWyk9zdw/CyLB9j8RXEahodR6l2GyttDRyEVWZtbN+TpLiHJ3t34LbsQ==
+"@babel/plugin-transform-react-jsx-source@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.22.5.tgz#49af1615bfdf6ed9d3e9e43e425e0b2b65d15b6c"
+ integrity sha512-yIiRO6yobeEIaI0RTbIr8iAK9FcBHLtZq0S89ZPjDLQXBA4xvghaKqI0etp/tF3htTM0sazJKKLz9oEiGRtu7w==
dependencies:
- "@babel/helper-plugin-utils" "^7.19.0"
+ "@babel/helper-plugin-utils" "^7.22.5"
"@babel/runtime@^7.10.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.18.3", "@babel/runtime@^7.5.5", "@babel/runtime@^7.8.7":
version "7.22.3"
@@ -299,6 +451,15 @@
"@babel/parser" "^7.21.9"
"@babel/types" "^7.21.5"
+"@babel/template@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.22.5.tgz#0c8c4d944509875849bd0344ff0050756eefc6ec"
+ integrity sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==
+ dependencies:
+ "@babel/code-frame" "^7.22.5"
+ "@babel/parser" "^7.22.5"
+ "@babel/types" "^7.22.5"
+
"@babel/traverse@^7.22.1", "@babel/traverse@^7.7.2":
version "7.22.4"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.22.4.tgz#c3cf96c5c290bd13b55e29d025274057727664c0"
@@ -315,6 +476,22 @@
debug "^4.1.0"
globals "^11.1.0"
+"@babel/traverse@^7.22.5", "@babel/traverse@^7.22.6", "@babel/traverse@^7.22.8":
+ version "7.22.8"
+ resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.22.8.tgz#4d4451d31bc34efeae01eac222b514a77aa4000e"
+ integrity sha512-y6LPR+wpM2I3qJrsheCTwhIinzkETbplIgPBbwvqPKc+uljeA5gP+3nP8irdYt1mjQaDnlIcG+dw8OjAco4GXw==
+ dependencies:
+ "@babel/code-frame" "^7.22.5"
+ "@babel/generator" "^7.22.7"
+ "@babel/helper-environment-visitor" "^7.22.5"
+ "@babel/helper-function-name" "^7.22.5"
+ "@babel/helper-hoist-variables" "^7.22.5"
+ "@babel/helper-split-export-declaration" "^7.22.6"
+ "@babel/parser" "^7.22.7"
+ "@babel/types" "^7.22.5"
+ debug "^4.1.0"
+ globals "^11.1.0"
+
"@babel/types@^7.0.0", "@babel/types@^7.18.6", "@babel/types@^7.20.7", "@babel/types@^7.21.0", "@babel/types@^7.21.4", "@babel/types@^7.21.5", "@babel/types@^7.22.0", "@babel/types@^7.22.3", "@babel/types@^7.22.4", "@babel/types@^7.3.3":
version "7.22.4"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.4.tgz#56a2653ae7e7591365dabf20b76295410684c071"
@@ -324,6 +501,15 @@
"@babel/helper-validator-identifier" "^7.19.1"
to-fast-properties "^2.0.0"
+"@babel/types@^7.22.5":
+ version "7.22.5"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.5.tgz#cd93eeaab025880a3a47ec881f4b096a5b786fbe"
+ integrity sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==
+ dependencies:
+ "@babel/helper-string-parser" "^7.22.5"
+ "@babel/helper-validator-identifier" "^7.22.5"
+ to-fast-properties "^2.0.0"
+
"@bcoe/v8-coverage@^0.2.3":
version "0.2.3"
resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39"
@@ -961,6 +1147,11 @@
resolved "https://registry.yarnpkg.com/@mantine/utils/-/utils-5.10.5.tgz#ad620d714e545c6efb7f69d94ce46e3fd2fe01fb"
integrity sha512-FGMq4dGs5HhDAtI0z46uzxzKKPmZ3h5uKUyKg1ZHoFR1mBtcUMbB6FylFmHqKFRWlJ5IXqX9dwmiVrLYUOfTmA==
+"@nicolo-ribaudo/semver-v6@^6.3.3":
+ version "6.3.3"
+ resolved "https://registry.yarnpkg.com/@nicolo-ribaudo/semver-v6/-/semver-v6-6.3.3.tgz#ea6d23ade78a325f7a52750aab1526b02b628c29"
+ integrity sha512-3Yc1fUTs69MG/uZbJlLSI3JISMn2UV2rg+1D/vROUqZyh3l6iYHCs7GMp+M40ZD7yOdDbYjJcU1oTJhrc+dGKg==
+
"@nodelib/fs.scandir@2.1.5":
version "2.1.5"
resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5"
@@ -1205,13 +1396,13 @@
"@types/yargs-parser" "*"
"@vitejs/plugin-react@^4":
- version "4.0.0"
- resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.0.0.tgz#46d1c37c507447d10467be1c111595174555ef28"
- integrity sha512-HX0XzMjL3hhOYm+0s95pb0Z7F8O81G7joUHgfDd/9J/ZZf5k4xX6QAMFkKsHFxaHlf6X7GD7+XuaZ66ULiJuhQ==
+ version "4.0.2"
+ resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.0.2.tgz#cd25adc113c4c6f504b2e32e28230d399bfba334"
+ integrity sha512-zbnVp3Esfg33zDaoLrjxG+p/dPiOtpvJA+1oOEQwSxMMTRL9zi1eghIcd2WtLjkcKnPsa3S15LzS/OzDn2BOCA==
dependencies:
- "@babel/core" "^7.21.4"
- "@babel/plugin-transform-react-jsx-self" "^7.21.0"
- "@babel/plugin-transform-react-jsx-source" "^7.19.6"
+ "@babel/core" "^7.22.5"
+ "@babel/plugin-transform-react-jsx-self" "^7.22.5"
+ "@babel/plugin-transform-react-jsx-source" "^7.22.5"
react-refresh "^0.14.0"
acorn-jsx@^5.3.2:
@@ -1560,6 +1751,16 @@ browserslist@^4.21.3:
node-releases "^2.0.12"
update-browserslist-db "^1.0.11"
+browserslist@^4.21.9:
+ version "4.21.9"
+ resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.9.tgz#e11bdd3c313d7e2a9e87e8b4b0c7872b13897635"
+ integrity sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==
+ dependencies:
+ caniuse-lite "^1.0.30001503"
+ electron-to-chromium "^1.4.431"
+ node-releases "^2.0.12"
+ update-browserslist-db "^1.0.11"
+
bser@2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05"
@@ -1615,6 +1816,11 @@ caniuse-lite@^1.0.30001489:
resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001495.tgz#64a0ccef1911a9dcff647115b4430f8eff1ef2d9"
integrity sha512-F6x5IEuigtUfU5ZMQK2jsy5JqUUlEFRVZq8bO2a+ysq5K7jD6PPc9YXZj78xDNS3uNchesp1Jw47YXEqr+Viyg==
+caniuse-lite@^1.0.30001503:
+ version "1.0.30001513"
+ resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001513.tgz#382fe5fbfb0f7abbaf8c55ca3ac71a0307a752e9"
+ integrity sha512-pnjGJo7SOOjAGytZZ203Em95MRM8Cr6jhCXNF/FAXTpCTRTECnqQWLpiTRqrFtdYcth8hf4WECUpkezuYsMVww==
+
capture-exit@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/capture-exit/-/capture-exit-2.0.0.tgz#fb953bfaebeb781f62898239dabb426d08a509a4"
@@ -1907,6 +2113,11 @@ electron-to-chromium@^1.4.411:
resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.423.tgz#99567f3a0563fe0d1d0931e9ce851bca239f6658"
integrity sha512-y4A7YfQcDGPAeSWM1IuoWzXpg9RY1nwHzHSwRtCSQFp9FgAVDgdWlFf0RbdWfLWQ2WUI+bddUgk5RgTjqRE6FQ==
+electron-to-chromium@^1.4.431:
+ version "1.4.453"
+ resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.453.tgz#0a81fdc1943db202e8724d9f61369a71f0dd51e8"
+ integrity sha512-BU8UtQz6CB3T7RIGhId4BjmjJVXQDujb0+amGL8jpcluFJr6lwspBOvkUbnttfpZCm4zFMHmjrX1QrdPWBBMjQ==
+
emittery@^0.13.1:
version "0.13.1"
resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad"
@@ -3774,9 +3985,9 @@ node-int64@^0.4.0:
integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==
node-releases@^2.0.12:
- version "2.0.12"
- resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.12.tgz#35627cc224a23bfb06fb3380f2b3afaaa7eb1039"
- integrity sha512-QzsYKWhXTWx8h1kIvqfnC++o0pEmpRQA/aenALsL2F4pqNVr7YzcdMlDij5WBnwftRbJCNJL/O7zdKaxKPHqgQ==
+ version "2.0.13"
+ resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.13.tgz#d5ed1627c23e3461e819b02e57b75e4899b1c81d"
+ integrity sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==
normalize-path@^2.1.1:
version "2.1.1"
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
index 00a1078b294..098d917c4e0 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
@@ -161,6 +161,11 @@ public class HostSystem extends TreeConfigProducer<Host> {
deployLogger.log(level, message);
}
+ @Override
+ public void logApplicationPackage(Level level, String message) {
+ deployLogger.logApplicationPackage(level, message);
+ }
+
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
index 5bb73643de5..ea4988f3029 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
@@ -290,28 +290,9 @@ public class NodesSpecification {
.loadBalancerSettings(zoneEndpoint)
.stateful(stateful)
.build();
- logInsufficientDiskResources(clusterId, clusterType, logger);
return hostSystem.allocateHosts(cluster, Capacity.from(min, max, groupSize, required, canFail, cloudAccount, info), logger);
}
- /** Log a message if requested disk may not fit core/heap dumps */
- private void logInsufficientDiskResources(ClusterSpec.Id clusterId, ClusterSpec.Type clusterType, DeployLogger deployLogger) {
- NodeResources resources = min.nodeResources();
- if (resources.diskGbIsUnspecified() || resources.memoryGbIsUnspecified()) return;
- double minDiskGb = resources.memoryGb() * switch (clusterType) {
- case combined, content -> 3;
- case container -> 2;
- default -> 0; // No constraint on other types
- };
- if (resources.diskGb() < minDiskGb) {
- // TODO(mpolden): Consider enforcing this on Vespa 9
- deployLogger.logApplicationPackage(Level.WARNING, "Requested disk (" + resources.diskGb() +
- "Gb) in " + clusterId + " is not large enough to fit " +
- "core/heap dumps. Minimum recommended disk resources " +
- "is " + minDiskGb + "Gb");
- }
- }
-
private static Pair<NodeResources, NodeResources> nodeResources(ModelElement nodesElement) {
ModelElement resources = nodesElement.child("resources");
if (resources != null) {
diff --git a/config-model/src/main/javacc/SchemaParser.jj b/config-model/src/main/javacc/SchemaParser.jj
index 9a38fdc673e..b2cb258c0ab 100644
--- a/config-model/src/main/javacc/SchemaParser.jj
+++ b/config-model/src/main/javacc/SchemaParser.jj
@@ -992,6 +992,11 @@ void attribute(ParsedField field) :
{
<ATTRIBUTE> [name = identifier()]
{
+ // TODO: Remove support for attribute with different name than field name in Vespa 9
+ if ( ! name.equals(field.name()))
+ deployLogger.logApplicationPackage(Level.WARNING, "Creating an attribute for field '" + field.name() +
+ "' with a different name '" + name + "' than the field name" +
+ " is deprecated, and support will be removed in Vespa 9. Define a field with the wanted name outside the document instead.");
ParsedAttribute attr = field.attributeFor(name);
}
( (<COLON> attributeSetting(attr))
@@ -1506,6 +1511,11 @@ void indexInsideField(ParsedField field) :
{
<INDEX> [indexName = identifier()]
{
+ // TODO: Remove support for index with different name than field name in Vespa 9
+ if ( ! indexName.equals(field.name()))
+ deployLogger.logApplicationPackage(Level.WARNING, "Creating an index for field '" + field.name() +
+ "' with a different name '" + indexName + "' than the field name" +
+ " is deprecated, and support will be removed in Vespa 9. Define a field with the wanted name outside the document instead.");
op = new ParsedIndex(indexName);
}
( (<COLON> indexBody(op) (<COMMA> indexBody(op))*) |
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index b1f47c54d54..c128b9af6e0 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -2577,44 +2577,6 @@ public class ModelProvisioningTest {
assertEquals((long) ((128 - memoryOverheadGb) * GB * 0.08), cfg.flush().memory().each().maxmemory()); // from default node flavor tuning
}
- @Test
- public void warn_on_insufficient_disk_resources() {
- String services = """
- <?xml version='1.0' encoding='utf-8' ?>
- <services>
- <container version='1.0' id='c1'>
- <nodes count='1'>
- <resources vcpu='1' memory='24Gb' disk='40Gb'/>
- </nodes>
- </container>
- <container version='1.0' id='c2'>
- <nodes count='1'>
- <resources vcpu='1' memory='24Gb' disk='50Gb'/>
- </nodes>
- </container>
- <content version='1.0' id='c3'>
- <redundancy>1</redundancy>
- <documents>
- <document type='type1' mode='index'/>
- </documents>
- <nodes count='1'>
- <resources vcpu='1' memory='24Gb' disk='50Gb'/>
- </nodes>
- </content>
- </services>
- """;
- VespaModelTester tester = new VespaModelTester();
- tester.addHosts(new NodeResources(1, 24, 50, 1, DiskSpeed.fast), 10);
- TestLogger testLogger = new TestLogger();
- VespaModel model = tester.createModel(services, true, new DeployState.Builder().deployLogger(testLogger));
- assertEquals(1, model.getContainerClusters().get("c1").getContainers().size());
- assertEquals(1, model.getContainerClusters().get("c2").getContainers().size());
- assertEquals(1, model.getContentClusters().get("c3").getSearch().getSearchNodes().size());
- assertEquals(List.of(new TestLogger.LogMessage(Level.WARNING, "Requested disk (40.0Gb) in cluster 'c1' is not large enough to fit core/heap dumps. Minimum recommended disk resources is 48.0Gb"),
- new TestLogger.LogMessage(Level.WARNING, "Requested disk (50.0Gb) in cluster 'c3' is not large enough to fit core/heap dumps. Minimum recommended disk resources is 72.0Gb")),
- testLogger.msgs());
- }
-
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
model.getConfig(builder, configId);
diff --git a/config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java b/config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java
index b1d502dec36..7990d76d023 100644
--- a/config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/IndexSettingsTestCase.java
@@ -37,7 +37,7 @@ public class IndexSettingsTestCase extends AbstractSchemaTestCase {
}
@Test
- void requireThatInterlavedFeaturesAreSetOnExtraField() throws ParseException {
+ void requireThatInterleavedFeaturesAreSetOnExtraField() throws ParseException {
ApplicationBuilder builder = ApplicationBuilder.createFromString(joinLines(
"search test {",
" document test {",
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java
index 5a22056de1b..9d72f274419 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ProvisionLogger.java
@@ -10,6 +10,16 @@ import java.util.logging.Level;
*/
public interface ProvisionLogger {
+ /** Log a message unrelated to the application package, e.g. internal error/status. */
void log(Level level, String message);
+ /**
+ * Log a message related to the application package. These messages should be actionable by the user, f.ex. to
+ * signal usage of invalid/deprecated syntax.
+ * This default implementation just forwards to {@link #log(Level, String)}
+ */
+ default void logApplicationPackage(Level level, String message) {
+ log(level, message);
+ }
+
}
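
The default method above keeps existing ProvisionLogger implementations source-compatible while letting callers (such as the HostSystem change earlier in this diff) route application-package messages to a separate channel. A minimal sketch of an implementation that keeps the two channels apart; the class name and the list-based storage are illustrative, not taken from the repository:

    import com.yahoo.config.provision.ProvisionLogger;

    import java.util.ArrayList;
    import java.util.List;
    import java.util.logging.Level;

    // Hypothetical logger separating internal status messages from user-actionable ones.
    public class RecordingProvisionLogger implements ProvisionLogger {

        private final List<String> systemMessages = new ArrayList<>();
        private final List<String> applicationPackageMessages = new ArrayList<>();

        @Override
        public void log(Level level, String message) {                   // internal error/status
            systemMessages.add(level.getName() + ": " + message);
        }

        @Override
        public void logApplicationPackage(Level level, String message) { // actionable by the user
            applicationPackageMessages.add(level.getName() + ": " + message);
        }

        public List<String> systemMessages() { return List.copyOf(systemMessages); }

        public List<String> applicationPackageMessages() { return List.copyOf(applicationPackageMessages); }

    }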
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java b/container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java
index fbc179a10fa..5ac7705471c 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/AdaptiveTimeoutHandler.java
@@ -54,7 +54,7 @@ class AdaptiveTimeoutHandler implements TimeoutHandler {
slopedWait += ((adaptiveTimeoutMax - adaptiveTimeoutMin) * (pendingQueries - 1)) / missWidth;
}
long nextAdaptive = (long) slopedWait;
- if (now + nextAdaptive >= deadline) {
+ if (nextAdaptive >= deadline - now) {
return deadline - now;
}
deadline = now + nextAdaptive;
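
The rewritten condition compares durations (nextAdaptive against deadline - now) rather than summing absolute timestamps, which is the overflow-safe idiom for monotonic clock values; whether overflow can actually occur here depends on how the deadline is produced, so treat this as the presumed motivation. A standalone illustration of the difference (not Vespa code):

    public class DeadlineCheckDemo {

        // Mirrors the rewritten condition: true when the adaptive wait meets or
        // exceeds the remaining time budget. Subtracting first stays correct even
        // if the raw clock values wrap around.
        static boolean exceedsRemaining(long now, long deadline, long nextAdaptive) {
            return nextAdaptive >= deadline - now;
        }

        public static void main(String[] args) {
            long now = Long.MAX_VALUE - 5;   // clock value close to wrap-around
            long deadline = now + 10;        // wraps negative, but the difference is still 10

            System.out.println(now + 3 >= deadline);                  // true: the summed form is fooled by overflow
            System.out.println(exceedsRemaining(now, deadline, 3));   // false: 3 < 10 remaining
            System.out.println(exceedsRemaining(now, deadline, 15));  // true: 15 >= 10 remaining
        }
    }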
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
index 4e4b77422c1..db7e80a95e5 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
@@ -35,11 +35,11 @@ import java.util.Set;
/**
* A dispatcher communicates with search nodes to perform queries and fill hits.
- *
+ * <p>
* This class allocates {@link SearchInvoker} and {@link FillInvoker} objects based
* on query properties and general system status. The caller can then use the provided
* invocation object to execute the search or fill.
- *
+ * <p>
* This class is multithread safe.
*
* @author bratseth
@@ -111,6 +111,7 @@ public class Dispatcher extends AbstractComponent {
searchCluster.addMonitoring(clusterMonitor);
return items;
}
+
private void initialWarmup(double warmupTime) {
Thread warmup = new Thread(() -> warmup(warmupTime));
warmup.start();
@@ -130,10 +131,10 @@ public class Dispatcher extends AbstractComponent {
private static LoadBalancer.Policy toLoadBalancerPolicy(DispatchConfig.DistributionPolicy.Enum policy) {
return switch (policy) {
- case ROUNDROBIN: yield LoadBalancer.Policy.ROUNDROBIN;
- case BEST_OF_RANDOM_2: yield LoadBalancer.Policy.BEST_OF_RANDOM_2;
- case ADAPTIVE,LATENCY_AMORTIZED_OVER_REQUESTS: yield LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_REQUESTS;
- case LATENCY_AMORTIZED_OVER_TIME: yield LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_TIME;
+ case ROUNDROBIN -> LoadBalancer.Policy.ROUNDROBIN;
+ case BEST_OF_RANDOM_2 -> LoadBalancer.Policy.BEST_OF_RANDOM_2;
+ case ADAPTIVE,LATENCY_AMORTIZED_OVER_REQUESTS -> LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_REQUESTS;
+ case LATENCY_AMORTIZED_OVER_TIME -> LoadBalancer.Policy.LATENCY_AMORTIZED_OVER_TIME;
};
}
private static List<Node> toNodes(DispatchNodesConfig nodesConfig) {
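
toLoadBalancerPolicy keeps the same mapping but moves from the colon-and-yield form to arrow labels; both are exhaustive switch expressions over the enum, and the arrow form simply yields the expression after the arrow. A standalone comparison of the two forms, using a stand-in enum rather than the actual DispatchConfig types:

    public class SwitchFormsDemo {

        enum Policy { ROUNDROBIN, BEST_OF_RANDOM_2, ADAPTIVE }

        // Colon form: each case yields its value explicitly.
        static String colonForm(Policy p) {
            return switch (p) {
                case ROUNDROBIN: yield "round-robin";
                case BEST_OF_RANDOM_2: yield "best-of-2";
                case ADAPTIVE: yield "adaptive";
            };
        }

        // Arrow form: the expression after '->' is the yielded value.
        static String arrowForm(Policy p) {
            return switch (p) {
                case ROUNDROBIN -> "round-robin";
                case BEST_OF_RANDOM_2 -> "best-of-2";
                case ADAPTIVE -> "adaptive";
            };
        }

        public static void main(String[] args) {
            System.out.println(colonForm(Policy.ADAPTIVE) + " / " + arrowForm(Policy.ADAPTIVE));
        }
    }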
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
index 1be45b01367..9c65cb3d4c0 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
@@ -99,7 +99,7 @@ public class SearchCluster implements NodeManager<Node> {
private Collection<Group> groups() { return groups.groups(); }
public int groupsWithSufficientCoverage() {
- return (int)groups().stream().filter(Group::hasSufficientCoverage).count();
+ return (int) groups().stream().filter(Group::hasSufficientCoverage).count();
}
/**
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
index 3e6e092ea70..514f0de4fec 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchGroupsImpl.java
@@ -3,6 +3,7 @@ package com.yahoo.search.dispatch.searchcluster;
import com.google.common.math.Quantiles;
import java.util.Collection;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
@@ -38,7 +39,7 @@ public class SearchGroupsImpl implements SearchGroups {
public long medianDocumentsPerGroup() {
if (isEmpty()) return 0;
- var activeDocuments = groups().stream().map(Group::activeDocuments).toList();
- return (long) Quantiles.median().compute(activeDocuments);
+ double[] activeDocuments = groups().stream().mapToDouble(Group::activeDocuments).toArray();
+ return (long) Quantiles.median().computeInPlace(activeDocuments);
}
}
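
medianDocumentsPerGroup now hands Guava's Quantiles a primitive double[] and calls computeInPlace, which avoids boxing each count into a List and skips the defensive copy that compute(Collection) would make; the trade-off is that computeInPlace may reorder the array, which is harmless for a locally built array like this one. A small standalone usage sketch of that Guava API with made-up numbers:

    import com.google.common.math.Quantiles;

    public class MedianDemo {

        public static void main(String[] args) {
            double[] activeDocuments = {120.0, 80.0, 200.0, 150.0};   // sample values, not real data

            // computeInPlace works directly on the given array (and may reorder it),
            // so it should only be used when the caller owns the array.
            double median = Quantiles.median().computeInPlace(activeDocuments);

            System.out.println((long) median);   // prints 135 for this sample
        }
    }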
diff --git a/container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java b/container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java
index 63655da0784..450239f7b12 100644
--- a/container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/test/QueryTestCase.java
@@ -326,7 +326,6 @@ public class QueryTestCase {
@Test
void testBooleanParameterNoQueryProfile() {
- QueryProfile profile = new QueryProfile("myProfile");
Query query = new Query("/?query=something&ranking.softtimeout.enable=false");
assertFalse(query.properties().getBoolean("ranking.softtimeout.enable"));
assertFalse(query.getRanking().getSoftTimeout().getEnable());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index ceac681255b..65f4c851e3c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -17,6 +17,7 @@ import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordData;
@@ -29,6 +30,7 @@ import com.yahoo.vespa.hosted.controller.application.EndpointList;
import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.certificate.AssignedCertificate;
import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue.Priority;
import com.yahoo.vespa.hosted.controller.routing.RoutingId;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicies;
@@ -118,13 +120,22 @@ public class RoutingController {
/** Read and return zone-scoped endpoints for given deployment */
public EndpointList readEndpointsOf(DeploymentId deployment) {
- boolean addTokenEndpoint = createTokenEndpoint.with(FetchVector.Dimension.APPLICATION_ID, deployment.applicationId().serializedForm()).value();
+ boolean addTokenEndpoint = tokenEndpointEnabled(deployment.applicationId());
Set<Endpoint> endpoints = new LinkedHashSet<>();
- // To discover the cluster name for a zone-scoped endpoint, we need to read routing policies
+ // To discover the cluster name for a zone-scoped endpoint, we need to read the routing policy
for (var policy : routingPolicies.read(deployment)) {
RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(policy.id().zone());
endpoints.addAll(policy.zoneEndpointsIn(controller.system(), routingMethod, addTokenEndpoint));
- endpoints.add(policy.regionEndpointIn(controller.system(), routingMethod));
+ endpoints.add(policy.regionEndpointIn(controller.system(), routingMethod, Optional.empty()));
+ for (var ge : policy.generatedEndpoints()) {
+ boolean include = switch (ge.authMethod()) {
+ case token -> addTokenEndpoint;
+ case mtls -> true;
+ };
+ if (include) {
+ endpoints.add(policy.regionEndpointIn(controller.system(), routingMethod, Optional.of(ge)));
+ }
+ }
}
return EndpointList.copyOf(endpoints);
}
@@ -142,7 +153,7 @@ public class RoutingController {
/** Returns endpoints declared in {@link DeploymentSpec} for given application */
public EndpointList declaredEndpointsOf(Application application) {
- // TODO(mpolden): Add generated endpoints for global and application scopes. Requires reading routing polices here
+ List<GeneratedEndpoint> generatedEndpoints = readGeneratedEndpoints(application);
Set<Endpoint> endpoints = new LinkedHashSet<>();
DeploymentSpec deploymentSpec = application.deploymentSpec();
for (var spec : deploymentSpec.instances()) {
@@ -154,7 +165,7 @@ public class RoutingController {
.map(zone -> new DeploymentId(instance, ZoneId.from(Environment.prod, zone.region().get())))
.toList();
RoutingId routingId = RoutingId.of(instance, EndpointId.defaultId());
- endpoints.addAll(computeGlobalEndpoints(routingId, ClusterSpec.Id.from(clusterId), deployments));
+ endpoints.addAll(computeGlobalEndpoints(routingId, ClusterSpec.Id.from(clusterId), deployments, generatedEndpoints));
});
// Add endpoints declared with current syntax
spec.endpoints().forEach(declaredEndpoint -> {
@@ -163,7 +174,7 @@ public class RoutingController {
.map(region -> new DeploymentId(instance,
ZoneId.from(Environment.prod, region)))
.toList();
- endpoints.addAll(computeGlobalEndpoints(routingId, ClusterSpec.Id.from(declaredEndpoint.containerId()), deployments));
+ endpoints.addAll(computeGlobalEndpoints(routingId, ClusterSpec.Id.from(declaredEndpoint.containerId()), deployments, generatedEndpoints));
});
}
// Add application endpoints
@@ -175,13 +186,16 @@ public class RoutingController {
ZoneId zone = deployments.keySet().iterator().next().zoneId(); // Where multiple zones are possible, they all have the same routing method.
RoutingMethod routingMethod = usesSharedRouting(zone) ? RoutingMethod.sharedLayer4 : RoutingMethod.exclusive;
- endpoints.add(Endpoint.of(application.id())
- .targetApplication(EndpointId.of(declaredEndpoint.endpointId()),
- ClusterSpec.Id.from(declaredEndpoint.containerId()),
- deployments)
- .routingMethod(routingMethod)
- .on(Port.fromRoutingMethod(routingMethod))
- .in(controller.system()));
+ Endpoint.EndpointBuilder builder = Endpoint.of(application.id())
+ .targetApplication(EndpointId.of(declaredEndpoint.endpointId()),
+ ClusterSpec.Id.from(declaredEndpoint.containerId()),
+ deployments)
+ .routingMethod(routingMethod)
+ .on(Port.fromRoutingMethod(routingMethod));
+ endpoints.add(builder.in(controller.system()));
+ for (var ge : generatedEndpoints) {
+ endpoints.add(builder.generatedFrom(ge).in(controller.system()));
+ }
}
return EndpointList.copyOf(endpoints);
}
@@ -196,6 +210,10 @@ public class RoutingController {
if (!directEndpoints.isEmpty()) {
zoneEndpoints = directEndpoints; // Use only direct endpoints if we have any
}
+ EndpointList generatedEndpoints = zoneEndpoints.generated();
+ if (!generatedEndpoints.isEmpty()) {
+ zoneEndpoints = generatedEndpoints; // Use generated endpoints if we have any
+ }
if ( ! zoneEndpoints.isEmpty()) {
endpoints.put(deployment.zoneId(), zoneEndpoints.asList());
}
@@ -353,7 +371,7 @@ public class RoutingController {
.map(region -> new DeploymentId(instance.id(), ZoneId.from(Environment.prod, region)))
.toList();
endpointsToRemove.addAll(computeGlobalEndpoints(RoutingId.of(instance.id(), rotation.endpointId()),
- rotation.clusterId(), deployments));
+ rotation.clusterId(), deployments, readGeneratedEndpoints(application)));
}
endpointsToRemove.forEach(endpoint -> controller.nameServiceForwarder()
.removeRecords(Record.Type.CNAME,
@@ -362,12 +380,16 @@ public class RoutingController {
Optional.of(application.id())));
}
- /** Generate endpoints for all authenticaiton methods, using given application part */
+ /** Generate endpoints for all authentication methods, using given application part */
public List<GeneratedEndpoint> generateEndpoints(String applicationPart, ApplicationId instance) {
- boolean enabled = randomizedEndpoints.with(FetchVector.Dimension.APPLICATION_ID, instance.serializedForm()).value();
- if (!enabled) {
+ if (!randomizedEndpointsEnabled(instance)) {
return List.of();
}
+ return generateEndpoints(applicationPart);
+ }
+
+
+ private List<GeneratedEndpoint> generateEndpoints(String applicationPart) {
return Arrays.stream(Endpoint.AuthMethod.values())
.map(method -> new GeneratedEndpoint(GeneratedEndpoint.createPart(controller.random(true)),
applicationPart,
@@ -375,6 +397,23 @@ public class RoutingController {
.toList();
}
+ /** This is only suitable for use in declared endpoints, which ignore the randomly generated cluster part */
+ private List<GeneratedEndpoint> readGeneratedEndpoints(Application application) {
+ boolean includeTokenEndpoint = application.productionInstances().values().stream()
+ .map(Instance::id)
+ .anyMatch(this::tokenEndpointEnabled);
+ Optional<String> randomizedId = controller.curator().readAssignedCertificate(application.id(), Optional.empty())
+ .map(AssignedCertificate::certificate)
+ .flatMap(EndpointCertificate::randomizedId);
+ if (randomizedId.isEmpty()) {
+ return List.of();
+ }
+ return generateEndpoints(randomizedId.get()).stream().filter(endpoint -> switch (endpoint.authMethod()) {
+ case token -> includeTokenEndpoint;
+ case mtls -> true;
+ }).toList();
+ }
+
/**
* Assigns one or more global rotations to given application, if eligible. The given application is implicitly
* stored, ensuring that the assigned rotation(s) are persisted when this returns.
@@ -412,7 +451,7 @@ public class RoutingController {
}
/** Compute global endpoints for given routing ID, application and deployments */
- private List<Endpoint> computeGlobalEndpoints(RoutingId routingId, ClusterSpec.Id cluster, List<DeploymentId> deployments) {
+ private List<Endpoint> computeGlobalEndpoints(RoutingId routingId, ClusterSpec.Id cluster, List<DeploymentId> deployments, List<GeneratedEndpoint> generatedEndpoints) {
var endpoints = new ArrayList<Endpoint>();
var directMethods = 0;
var availableRoutingMethods = routingMethodsOfAll(deployments);
@@ -421,15 +460,26 @@ public class RoutingController {
throw new IllegalArgumentException("Invalid routing methods for " + routingId + ": Exceeded maximum " +
"direct methods");
}
- endpoints.add(Endpoint.of(routingId.instance())
- .target(routingId.endpointId(), cluster, deployments)
- .on(Port.fromRoutingMethod(method))
- .routingMethod(method)
- .in(controller.system()));
+ Endpoint.EndpointBuilder builder = Endpoint.of(routingId.instance())
+ .target(routingId.endpointId(), cluster, deployments)
+ .on(Port.fromRoutingMethod(method))
+ .routingMethod(method);
+ endpoints.add(builder.in(controller.system()));
+ for (var ge : generatedEndpoints) {
+ endpoints.add(builder.generatedFrom(ge).in(controller.system()));
+ }
}
return endpoints;
}
+ public boolean tokenEndpointEnabled(ApplicationId instance) {
+ return createTokenEndpoint.with(FetchVector.Dimension.APPLICATION_ID, instance.serializedForm()).value();
+ }
+
+ public boolean randomizedEndpointsEnabled(ApplicationId instance) {
+ return randomizedEndpoints.with(FetchVector.Dimension.APPLICATION_ID, instance.serializedForm()).value();
+ }
+
/** Whether legacy global DNS names should be available for given application */
private static boolean requiresLegacyNames(DeploymentSpec deploymentSpec, InstanceName instanceName) {
return deploymentSpec.instance(instanceName)
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
index a3381819778..1a4095001ff 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
@@ -45,11 +45,11 @@ public class Endpoint {
private final boolean legacy;
private final RoutingMethod routingMethod;
private final AuthMethod authMethod;
- private final boolean generated;
+ private final Optional<GeneratedEndpoint> generated;
private Endpoint(TenantAndApplicationId application, Optional<InstanceName> instanceName, EndpointId id,
ClusterSpec.Id cluster, URI url, List<Target> targets, Scope scope, Port port, boolean legacy,
- RoutingMethod routingMethod, boolean certificateName, AuthMethod authMethod, boolean generated) {
+ RoutingMethod routingMethod, boolean certificateName, AuthMethod authMethod, Optional<GeneratedEndpoint> generated) {
Objects.requireNonNull(application, "application must be non-null");
Objects.requireNonNull(instanceName, "instanceName must be non-null");
Objects.requireNonNull(cluster, "cluster must be non-null");
@@ -59,6 +59,7 @@ public class Endpoint {
Objects.requireNonNull(port, "port must be non-null");
Objects.requireNonNull(routingMethod, "routingMethod must be non-null");
Objects.requireNonNull(authMethod, "authMethod must be non-null");
+ Objects.requireNonNull(generated, "generated must be non-null");
this.id = requireEndpointId(id, scope, certificateName);
this.cluster = requireCluster(cluster, certificateName);
this.instance = requireInstance(instanceName, scope);
@@ -139,7 +140,7 @@ public class Endpoint {
}
/** Returns whether this endpoint is generated by the system */
- public boolean generated() {
+ public Optional<GeneratedEndpoint> generated() {
return generated;
}
@@ -198,11 +199,11 @@ public class Endpoint {
}
private static String generatedPart(GeneratedEndpoint generated, String name, Scope scope, String separator) {
- if (scope.multiDeployment()) {
+ return switch (scope) {
// Endpoints with these scopes have a name part that is explicitly configured through deployment.xml
- return sanitize(namePart(name, separator)) + generated.applicationPart();
- }
- return generated.clusterPart() + separator + generated.applicationPart();
+ case weighted, global, application -> sanitize(namePart(name, separator)) + generated.applicationPart();
+ case zone -> generated.clusterPart() + separator + generated.applicationPart();
+ };
}
private static String sanitize(String part) { // TODO: Reject reserved words
@@ -218,7 +219,7 @@ public class Endpoint {
String scopeSymbol = scopeSymbol(scope, system, generated);
if (scope == Scope.global) return scopeSymbol;
if (scope == Scope.application) return scopeSymbol;
- if (generated.isPresent()) return scopeSymbol;
+ if (scope == Scope.zone && generated.isPresent()) return scopeSymbol;
ZoneId zone = targets.stream().map(target -> target.deployment.zoneId()).min(comparing(ZoneId::value)).get();
String region = zone.region().value();
@@ -596,7 +597,7 @@ public class Endpoint {
}
/** Sets the generated ID to use when building this */
- public EndpointBuilder generatedEndpoint(GeneratedEndpoint generated) {
+ public EndpointBuilder generatedFrom(GeneratedEndpoint generated) {
this.generated = Optional.of(generated);
this.authMethod = generated.authMethod();
return this;
@@ -633,7 +634,7 @@ public class Endpoint {
routingMethod,
certificateName,
authMethod,
- generated.isPresent());
+ generated);
}
private Scope requireUnset(Scope scope) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java
index 5026fea7847..e554bb2361a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java
@@ -49,7 +49,7 @@ public class EndpointList extends AbstractFilteringList<Endpoint, EndpointList>
endpoint.instance().get().equals(instance));
}
- /** Returns the subset of endpoints which target all of the given deployments */
+ /** Returns the subset of endpoints which target all the given deployments */
public EndpointList targets(List<DeploymentId> deployments) {
return matching(endpoint -> endpoint.deployments().containsAll(deployments));
}
@@ -66,7 +66,7 @@ public class EndpointList extends AbstractFilteringList<Endpoint, EndpointList>
/** Returns the subset of endpoints generated by the system */
public EndpointList generated() {
- return matching(Endpoint::generated);
+ return matching(endpoint -> endpoint.generated().isPresent());
}
/** Returns the subset of endpoints that require a rotation */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificates.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificates.java
index 12beaa635ac..bc4dc74afff 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificates.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificates.java
@@ -11,7 +11,6 @@ import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -36,7 +35,7 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
-import static com.yahoo.vespa.hosted.controller.certificate.UnassignedCertificate.*;
+import static com.yahoo.vespa.hosted.controller.certificate.UnassignedCertificate.State;
/**
* Looks up stored endpoint certificate, provisions new certificates if none is found,
@@ -56,7 +55,6 @@ public class EndpointCertificates {
private final Clock clock;
private final EndpointCertificateProvider certificateProvider;
private final EndpointCertificateValidator certificateValidator;
- private final BooleanFlag useRandomizedCert;
private final BooleanFlag useAlternateCertProvider;
private final StringFlag endpointCertificateAlgo;
private final static Duration GCP_CERTIFICATE_EXPIRY_TIME = Duration.ofDays(100); // 100 days, 10 more than notAfter time
@@ -64,7 +62,6 @@ public class EndpointCertificates {
public EndpointCertificates(Controller controller, EndpointCertificateProvider certificateProvider,
EndpointCertificateValidator certificateValidator) {
this.controller = controller;
- this.useRandomizedCert = Flags.RANDOMIZED_ENDPOINT_NAMES.bindTo(controller.flagSource());
this.useAlternateCertProvider = PermanentFlags.USE_ALTERNATIVE_ENDPOINT_CERTIFICATE_PROVIDER.bindTo(controller.flagSource());
this.endpointCertificateAlgo = PermanentFlags.ENDPOINT_CERTIFICATE_ALGORITHM.bindTo(controller.flagSource());
this.curator = controller.curator();
@@ -140,7 +137,7 @@ public class EndpointCertificates {
}
private Optional<EndpointCertificate> getOrProvision(Instance instance, ZoneId zone, DeploymentSpec deploymentSpec) {
- if (useRandomizedCert.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value()) {
+ if (controller.routing().randomizedEndpointsEnabled(instance.id())) {
return Optional.of(assignFromPool(instance, zone));
}
Optional<AssignedCertificate> assignedCertificate = curator.readAssignedCertificate(TenantAndApplicationId.from(instance.id()), Optional.of(instance.id().instance()));
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
index 9e2933f60fd..8f9b9b70639 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
@@ -1,7 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
-import com.yahoo.config.provision.SystemName;
import com.yahoo.container.jdisc.secretstore.SecretNotFoundException;
import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.jdisc.Metric;
@@ -50,7 +49,7 @@ public class CertificatePoolMaintainer extends ControllerMaintainer {
private final BooleanFlag useAlternateCertProvider;
public CertificatePoolMaintainer(Controller controller, Metric metric, Duration interval) {
- super(controller, interval, null, Set.of(SystemName.Public, SystemName.PublicCd));
+ super(controller, interval);
this.controller = controller;
this.secretStore = controller.secretStore();
this.certPoolSize = Flags.CERT_POOL_SIZE.bindTo(controller.flagSource());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java
index d1311006cde..6c7ee4d0d85 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/certificate/EndpointCertificatesHandler.java
@@ -8,7 +8,6 @@ import com.yahoo.restapi.RestApiException;
import com.yahoo.restapi.StringResponse;
import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -42,15 +41,15 @@ public class EndpointCertificatesHandler extends ThreadedHttpRequestHandler {
private final CuratorDb curator;
private final BooleanFlag useAlternateCertProvider;
private final StringFlag endpointCertificateAlgo;
- private final BooleanFlag useRandomizedCert;
+ private final Controller controller;
public EndpointCertificatesHandler(Executor executor, ServiceRegistry serviceRegistry, CuratorDb curator, Controller controller) {
super(executor);
this.endpointCertificateProvider = serviceRegistry.endpointCertificateProvider();
this.curator = curator;
+ this.controller = controller;
this.useAlternateCertProvider = PermanentFlags.USE_ALTERNATIVE_ENDPOINT_CERTIFICATE_PROVIDER.bindTo(controller.flagSource());
this.endpointCertificateAlgo = PermanentFlags.ENDPOINT_CERTIFICATE_ALGORITHM.bindTo(controller.flagSource());
- this.useRandomizedCert = Flags.RANDOMIZED_ENDPOINT_NAMES.bindTo(controller.flagSource());
}
public HttpResponse handle(HttpRequest request) {
@@ -74,7 +73,7 @@ public class EndpointCertificatesHandler extends ThreadedHttpRequestHandler {
public StringResponse reRequestEndpointCertificateFor(String instanceId, boolean ignoreExisting) {
ApplicationId applicationId = ApplicationId.fromFullString(instanceId);
- if (useRandomizedCert.with(FetchVector.Dimension.APPLICATION_ID, instanceId).value()) {
+ if (controller.routing().randomizedEndpointsEnabled(applicationId)) {
throw new IllegalArgumentException("Cannot re-request certificate. " + instanceId + " is assigned certificate from a pool");
}
try (var lock = curator.lock(TenantAndApplicationId.from(applicationId))) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
index a8fc0b8dffb..30c832a7747 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
@@ -7,9 +7,6 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.transaction.Mutex;
-import com.yahoo.vespa.flags.BooleanFlag;
-import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.identifiers.ClusterId;
@@ -63,12 +60,10 @@ public class RoutingPolicies {
private final Controller controller;
private final CuratorDb db;
- private final BooleanFlag createTokenEndpoint;
public RoutingPolicies(Controller controller) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
this.db = controller.curator();
- this.createTokenEndpoint = Flags.ENABLE_DATAPLANE_PROXY.bindTo(controller.flagSource());
try (var lock = db.lockRoutingPolicies()) { // Update serialized format
for (var policy : db.readRoutingPolicies().entrySet()) {
db.writeRoutingPolicies(policy.getKey(), policy.getValue());
@@ -191,7 +186,7 @@ public class RoutingPolicies {
if (endpoint.scope() != Endpoint.Scope.global) throw new IllegalArgumentException("Endpoint " + endpoint + " is not global");
if (deployment.isPresent() && !endpoint.deployments().contains(deployment.get())) return;
- Collection<RegionEndpoint> regionEndpoints = computeRegionEndpoints(policies, inactiveZones);
+ Collection<RegionEndpoint> regionEndpoints = computeRegionEndpoints(endpoint, policies, inactiveZones);
Set<AliasTarget> latencyTargets = new LinkedHashSet<>();
Set<AliasTarget> inactiveLatencyTargets = new LinkedHashSet<>();
for (var regionEndpoint : regionEndpoints) {
@@ -243,12 +238,15 @@ public class RoutingPolicies {
}
/** Compute region endpoints and their targets from given policies */
- private Collection<RegionEndpoint> computeRegionEndpoints(List<RoutingPolicy> policies, Set<ZoneId> inactiveZones) {
+ private Collection<RegionEndpoint> computeRegionEndpoints(Endpoint parent, List<RoutingPolicy> policies, Set<ZoneId> inactiveZones) {
+ if (!parent.scope().multiDeployment()) {
+ throw new IllegalArgumentException(parent + " has unexpected scope");
+ }
Map<Endpoint, RegionEndpoint> endpoints = new LinkedHashMap<>();
for (var policy : policies) {
if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent()) continue;
if (controller.zoneRegistry().routingMethod(policy.id().zone()) != RoutingMethod.exclusive) continue;
- Endpoint endpoint = policy.regionEndpointIn(controller.system(), RoutingMethod.exclusive);
+ Endpoint endpoint = policy.regionEndpointIn(controller.system(), RoutingMethod.exclusive, parent.generated());
var zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
long weight = 1;
if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
@@ -396,7 +394,7 @@ public class RoutingPolicies {
/** Update zone DNS record for given policy */
private void updateZoneDnsOf(RoutingPolicy policy, LoadBalancer loadBalancer, DeploymentId deploymentId) {
- boolean addTokenEndpoint = createTokenEndpoint.with(FetchVector.Dimension.APPLICATION_ID, deploymentId.applicationId().serializedForm()).value();
+ boolean addTokenEndpoint = controller.routing().tokenEndpointEnabled(deploymentId.applicationId());
for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, addTokenEndpoint)) {
var name = RecordName.from(endpoint.dnsName());
var record = policy.canonicalName().isPresent() ?
@@ -471,7 +469,7 @@ public class RoutingPolicies {
* @return the updated policies
*/
private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
- boolean addTokenEndpoint = createTokenEndpoint.with(FetchVector.Dimension.APPLICATION_ID, allocation.deployment.applicationId().serializedForm()).value();
+ boolean addTokenEndpoint = controller.routing().tokenEndpointEnabled(allocation.deployment.applicationId());
Map<RoutingPolicyId, RoutingPolicy> newPolicies = new LinkedHashMap<>(instancePolicies.asMap());
Set<RoutingPolicyId> activeIds = allocation.asPolicyIds();
RoutingPolicyList removable = instancePolicies.deployment(allocation.deployment)
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
index e1ce7c2c451..0233e7502ef 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
@@ -128,20 +128,22 @@ public record RoutingPolicy(RoutingPolicyId id,
endpoints.add(tokenEndpoint);
}
for (var generatedEndpoint : generatedEndpoints) {
- GeneratedEndpoint endpointToInclude = switch (generatedEndpoint.authMethod()) {
- case token -> includeTokenEndpoint ? generatedEndpoint : null;
- case mtls -> generatedEndpoint;
+ boolean include = switch (generatedEndpoint.authMethod()) {
+ case token -> includeTokenEndpoint;
+ case mtls -> true;
};
- if (endpointToInclude != null) {
- endpoints.add(builder.generatedEndpoint(endpointToInclude).in(system));
+ if (include) {
+ endpoints.add(builder.generatedFrom(generatedEndpoint).in(system));
}
}
return endpoints;
}
/** Returns the region endpoint of this */
- public Endpoint regionEndpointIn(SystemName system, RoutingMethod routingMethod) {
- return endpoint(routingMethod).targetRegion(id.cluster(), id.zone()).in(system);
+ public Endpoint regionEndpointIn(SystemName system, RoutingMethod routingMethod, Optional<GeneratedEndpoint> generated) {
+ Endpoint.EndpointBuilder builder = endpoint(routingMethod).targetRegion(id.cluster(), id.zone());
+ generated.ifPresent(builder::generatedFrom);
+ return builder.in(system);
}
@Override
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
index 23c029845bb..477aca86b9c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
@@ -264,6 +264,13 @@ public class EndpointTest {
.targetRegion(ClusterSpec.Id.from("c1"), prodZone)
.routingMethod(RoutingMethod.exclusive)
.on(Port.tls())
+ .in(SystemName.Public),
+ "https://c1.cafed00d.us-north-2.w.vespa-app.cloud/",
+ Endpoint.of(instance1)
+ .targetRegion(ClusterSpec.Id.from("c1"), prodZone)
+ .routingMethod(RoutingMethod.exclusive)
+ .generatedFrom(new GeneratedEndpoint("deadbeef", "cafed00d", Endpoint.AuthMethod.mtls))
+ .on(Port.tls())
.in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString()));
@@ -351,26 +358,26 @@ public class EndpointTest {
var tests = Map.of(
// Zone endpoint in main, unlike named endpoints, this includes the scope symbol 'z'
"cafed00d.deadbeef.z.vespa.oath.cloud",
- Endpoint.of(instance1).target(ClusterSpec.Id.from("c1"), deployment).generatedEndpoint(ge)
+ Endpoint.of(instance1).target(ClusterSpec.Id.from("c1"), deployment).generatedFrom(ge)
.routingMethod(RoutingMethod.sharedLayer4).on(Port.tls()).in(SystemName.main),
// Zone endpoint in public
"cafed00d.deadbeef.z.vespa-app.cloud",
- Endpoint.of(instance1).target(ClusterSpec.Id.from("c1"), deployment).generatedEndpoint(ge)
+ Endpoint.of(instance1).target(ClusterSpec.Id.from("c1"), deployment).generatedFrom(ge)
.routingMethod(RoutingMethod.exclusive).on(Port.tls()).in(SystemName.Public),
// Global endpoint in public
"foo.deadbeef.g.vespa-app.cloud",
Endpoint.of(instance1).target(EndpointId.of("foo"), ClusterSpec.Id.from("c1"), List.of(deployment))
- .generatedEndpoint(ge)
+ .generatedFrom(ge)
.routingMethod(RoutingMethod.exclusive).on(Port.tls()).in(SystemName.Public),
// Global endpoint in public, with default ID
"deadbeef.g.vespa-app.cloud",
Endpoint.of(instance1).target(EndpointId.defaultId(), ClusterSpec.Id.from("c1"), List.of(deployment))
- .generatedEndpoint(ge)
+ .generatedFrom(ge)
.routingMethod(RoutingMethod.exclusive).on(Port.tls()).in(SystemName.Public),
// Application endpoint in public
"bar.deadbeef.a.vespa-app.cloud",
Endpoint.of(TenantAndApplicationId.from(instance1)).targetApplication(EndpointId.of("bar"), deployment)
- .generatedEndpoint(ge)
+ .generatedFrom(ge)
.routingMethod(RoutingMethod.exclusive).on(Port.tls()).in(SystemName.Public)
);
tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.dnsName()));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index f6ea43f9dd9..f6ed8fd7323 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -11,6 +11,7 @@ import com.yahoo.config.provision.AthenzService;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.RoutingMethod;
@@ -202,16 +203,21 @@ public class RoutingPoliciesTest {
context.flushDnsUpdates();
// Weight of inactive zone is set to zero
- tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, ImmutableMap.of(zone1, 1L,
- zone3, 1L,
- zone4, 0L));
+ ApplicationId application2 = context.instanceId();
+ EndpointId endpointId2 = EndpointId.of("r0");
+ Map<ZoneId, Long> zoneWeights1 = ImmutableMap.of(zone1, 1L,
+ zone3, 1L,
+ zone4, 0L);
+ tester.assertTargets(application2, endpointId2, ClusterSpec.Id.from("c0"), 0, zoneWeights1);
// Other zone in shared region is set out. Entire record group for the region is removed as all zones in the
// region are out (weight sum = 0)
tester.routingPolicies().setRoutingStatus(context.deploymentIdIn(zone3), RoutingStatus.Value.out,
RoutingStatus.Agent.tenant);
context.flushDnsUpdates();
- tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, ImmutableMap.of(zone1, 1L));
+ ApplicationId application1 = context.instanceId();
+ EndpointId endpointId1 = EndpointId.of("r0");
+ tester.assertTargets(application1, endpointId1, ClusterSpec.Id.from("c0"), 0, ImmutableMap.of(zone1, 1L));
// Everything is set back in
tester.routingPolicies().setRoutingStatus(context.deploymentIdIn(zone3), RoutingStatus.Value.in,
@@ -219,9 +225,12 @@ public class RoutingPoliciesTest {
tester.routingPolicies().setRoutingStatus(context.deploymentIdIn(zone4), RoutingStatus.Value.in,
RoutingStatus.Agent.tenant);
context.flushDnsUpdates();
- tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, ImmutableMap.of(zone1, 1L,
- zone3, 1L,
- zone4, 1L));
+ ApplicationId application = context.instanceId();
+ EndpointId endpointId = EndpointId.of("r0");
+ Map<ZoneId, Long> zoneWeights = ImmutableMap.of(zone1, 1L,
+ zone3, 1L,
+ zone4, 1L);
+ tester.assertTargets(application, endpointId, ClusterSpec.Id.from("c0"), 0, zoneWeights);
}
@Test
@@ -1009,7 +1018,7 @@ public class RoutingPoliciesTest {
}
@Test
- public void generated_zone_endpoints() {
+ public void generated_endpoints() {
var tester = new RoutingPoliciesTester(SystemName.Public);
var context = tester.newDeploymentContext("tenant1", "app1", "default");
tester.controllerTester().flagSource().withBooleanFlag(Flags.RANDOMIZED_ENDPOINT_NAMES.id(), true);
@@ -1020,24 +1029,47 @@ public class RoutingPoliciesTest {
var zone1 = ZoneId.from("prod", "aws-us-east-1c");
var zone2 = ZoneId.from("prod", "aws-eu-west-1a");
ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region())
- .region(zone2.region())
- .build();
+ .region(zone2.region())
+ .endpoint("foo", "c0")
+ .applicationEndpoint("bar", "c0", Map.of(zone1.region().value(), Map.of(InstanceName.defaultName(), 1)))
+ .build();
tester.provisionLoadBalancers(clustersPerZone, context.instanceId(), zone1, zone2);
context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
// Deployment creates generated zone names
List<String> expectedRecords = List.of(
- "a9c8c045.cafed00d.z.vespa-app.cloud",
+ // Expected records include declared and generated zone, weighted, global and application endpoints
+ "b22ab332.cafed00d.z.vespa-app.cloud",
+ "bar.app1.tenant1.a.vespa-app.cloud",
+ "bar.cafed00d.a.vespa-app.cloud",
+ "c0.app1.tenant1.aws-eu-west-1.w.vespa-app.cloud",
"c0.app1.tenant1.aws-eu-west-1a.z.vespa-app.cloud",
+ "c0.app1.tenant1.aws-us-east-1.w.vespa-app.cloud",
"c0.app1.tenant1.aws-us-east-1c.z.vespa-app.cloud",
- "e144a11b.cafed00d.z.vespa-app.cloud"
+ "c0.cafed00d.aws-eu-west-1.w.vespa-app.cloud",
+ "c0.cafed00d.aws-us-east-1.w.vespa-app.cloud",
+ "dd0971b4.cafed00d.z.vespa-app.cloud",
+ "foo.app1.tenant1.g.vespa-app.cloud",
+ "foo.cafed00d.g.vespa-app.cloud"
);
assertEquals(expectedRecords, tester.recordNames());
assertEquals(2, tester.policiesOf(context.instanceId()).size());
for (var zone : List.of(zone1, zone2)) {
- EndpointList endpoints = tester.controllerTester().controller().routing().readEndpointsOf(context.deploymentIdIn(zone));
+ EndpointList endpoints = tester.controllerTester().controller().routing().readEndpointsOf(context.deploymentIdIn(zone)).scope(Endpoint.Scope.zone);
assertEquals(1, endpoints.generated().size());
}
+ // Ordinary endpoints point to expected targets
+ tester.assertTargets(context.instanceId(), EndpointId.of("foo"), ClusterSpec.Id.from("c0"), 0,
+ Map.of(zone1, 1L, zone2, 1L));
+ tester.assertTargets(context.application().id(), EndpointId.of("bar"), ClusterSpec.Id.from("c0"), 0,
+ Map.of(context.deploymentIdIn(zone1), 1));
+ // Generated endpoints point to expected targets
+ tester.assertTargets(context.instanceId(), EndpointId.of("foo"), ClusterSpec.Id.from("c0"), 0,
+ Map.of(zone1, 1L, zone2, 1L),
+ true);
+ tester.assertTargets(context.application().id(), EndpointId.of("bar"), ClusterSpec.Id.from("c0"), 0,
+ Map.of(context.deploymentIdIn(zone1), 1),
+ true);
// Next deployment does not change generated names
context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
@@ -1191,15 +1223,25 @@ public class RoutingPoliciesTest {
tester.controllerTester().flagSource().withBooleanFlag(Flags.ENABLE_DATAPLANE_PROXY.id(), enabled);
}
- /** Assert that an application endpoint points to given targets and weights */
private void assertTargets(TenantAndApplicationId application, EndpointId endpointId, ClusterSpec.Id cluster,
int loadBalancerId, Map<DeploymentId, Integer> deploymentWeights) {
+ assertTargets(application, endpointId, cluster, loadBalancerId, deploymentWeights, false);
+ }
+
+ /** Assert that an application endpoint points to given targets and weights */
+ private void assertTargets(TenantAndApplicationId application, EndpointId endpointId, ClusterSpec.Id cluster,
+ int loadBalancerId, Map<DeploymentId, Integer> deploymentWeights, boolean generated) {
Map<String, List<DeploymentId>> deploymentsByDnsName = new HashMap<>();
for (var deployment : deploymentWeights.keySet()) {
EndpointList applicationEndpoints = tester.controller().routing().readDeclaredEndpointsOf(application)
.named(endpointId, Endpoint.Scope.application)
.targets(deployment)
.cluster(cluster);
+ if (generated) {
+ applicationEndpoints = applicationEndpoints.generated();
+ } else {
+ applicationEndpoints = applicationEndpoints.not().generated();
+ }
assertEquals(1,
applicationEndpoints.size(),
"Expected a single endpoint with ID '" + endpointId + "'");
@@ -1218,9 +1260,14 @@ public class RoutingPoliciesTest {
});
}
- /** Assert that an instance endpoint points to given targets and weights */
private void assertTargets(ApplicationId instance, EndpointId endpointId, ClusterSpec.Id cluster,
int loadBalancerId, Map<ZoneId, Long> zoneWeights) {
+ assertTargets(instance, endpointId, cluster, loadBalancerId, zoneWeights, false);
+ }
+
+ /** Assert that a global endpoint points to given zones and weights */
+ private void assertTargets(ApplicationId instance, EndpointId endpointId, ClusterSpec.Id cluster,
+ int loadBalancerId, Map<ZoneId, Long> zoneWeights, boolean generated) {
Set<String> latencyTargets = new HashSet<>();
Map<String, List<ZoneId>> zonesByRegionEndpoint = new HashMap<>();
for (var zone : zoneWeights.keySet()) {
@@ -1228,7 +1275,12 @@ public class RoutingPoliciesTest {
EndpointList regionEndpoints = tester.controller().routing().readEndpointsOf(deployment)
.cluster(cluster)
.scope(Endpoint.Scope.weighted);
- Endpoint regionEndpoint = regionEndpoints.first().orElseThrow(() -> new IllegalArgumentException("No region endpoint found for " + cluster + " in " + deployment));
+ if (generated) {
+ regionEndpoints = regionEndpoints.generated();
+ } else {
+ regionEndpoints = regionEndpoints.not().generated();
+ }
+ Endpoint regionEndpoint = regionEndpoints.first().orElseThrow(() -> new IllegalArgumentException("No" + (generated ? " generated" : "") + " region endpoint found for " + cluster + " in " + deployment));
zonesByRegionEndpoint.computeIfAbsent(regionEndpoint.dnsName(), (k) -> new ArrayList<>())
.add(zone);
}
@@ -1246,16 +1298,22 @@ public class RoutingPoliciesTest {
latencyTargets.add(latencyTarget);
});
List<DeploymentId> deployments = zoneWeights.keySet().stream().map(z -> new DeploymentId(instance, z)).toList();
- String globalEndpoint = tester.controller().routing().readDeclaredEndpointsOf(instance)
- .named(endpointId, Endpoint.Scope.global)
- .targets(deployments)
- .primary()
+ EndpointList global = tester.controller().routing().readDeclaredEndpointsOf(instance)
+ .named(endpointId, Endpoint.Scope.global)
+ .targets(deployments);
+ if (generated) {
+ global = global.generated();
+ } else {
+ global = global.not().generated();
+ }
+ String globalEndpoint = global.primary()
.map(Endpoint::dnsName)
.orElse("<none>");
assertEquals(latencyTargets, Set.copyOf(aliasDataOf(globalEndpoint)), "Global endpoint " + globalEndpoint + " points to expected latency targets");
}
+ /** Assert that a global endpoint points to given zones */
private void assertTargets(ApplicationId application, EndpointId endpointId, int loadBalancerId, ZoneId... zones) {
Map<ZoneId, Long> zoneWeights = new LinkedHashMap<>();
for (var zone : zones) {
@@ -1264,10 +1322,6 @@ public class RoutingPoliciesTest {
assertTargets(application, endpointId, ClusterSpec.Id.from("c" + loadBalancerId), loadBalancerId, zoneWeights);
}
- private void assertTargets(ApplicationId application, EndpointId endpointId, int loadBalancerId, Map<ZoneId, Long> zoneWeights) {
- assertTargets(application, endpointId, ClusterSpec.Id.from("c" + loadBalancerId), loadBalancerId, zoneWeights);
- }
-
}
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index ae281dc708f..c3788a20ddc 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -382,12 +382,6 @@ public class Flags {
"Takes effect at redeployment",
ZONE_ID, APPLICATION_ID);
- public static final UnboundBooleanFlag NEW_IDDOC_LAYOUT = defineFeatureFlag(
- "new_iddoc_layout", true, List.of("tokle", "bjorncs", "olaa"), "2023-04-24", "2023-12-30",
- "Whether to use new identity document layout",
- "Takes effect on node reboot",
- HOSTNAME, APPLICATION_ID, VESPA_VERSION);
-
public static final UnboundBooleanFlag RANDOMIZED_ENDPOINT_NAMES = defineFeatureFlag(
"randomized-endpoint-names", false, List.of("andreer"), "2023-04-26", "2023-07-30",
"Whether to use randomized endpoint names",
diff --git a/jdisc_core/pom.xml b/jdisc_core/pom.xml
index cdc1eb76b6d..fa9bf05fad3 100644
--- a/jdisc_core/pom.xml
+++ b/jdisc_core/pom.xml
@@ -39,7 +39,6 @@
<!-- Newer version than the one in rt.jar, including the ElementTraversal class needed by Xerces (Aug 2015, still valid Sep 2017) -->
<groupId>xml-apis</groupId>
<artifactId>xml-apis</artifactId>
- <version>1.4.01</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
@@ -262,7 +261,6 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
- <version>3.0.0-M6</version>
<executions>
<execution>
<goals>
diff --git a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java
index a2aade05059..4c33bbb563f 100644
--- a/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java
+++ b/jdisc_core/src/test/java/com/yahoo/jdisc/core/ExportPackagesIT.java
@@ -1,5 +1,6 @@
package com.yahoo.jdisc.core;
+import com.google.common.collect.Sets;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -7,11 +8,13 @@ import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Paths;
-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.HashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
-import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -49,6 +52,60 @@ public class ExportPackagesIT {
"javax.activation.jar"
).map(f -> JAR_PATH + f).toList();
+ private static final Pattern PACKAGE_PATTERN = Pattern.compile("([^;,]+);\\s*version=\"([^\"]*)\"(?:,\\s*([^;,]+);\\s*uses:=\"([^\"]*)\")?");
+
+ record PackageInfo(String packageName, String version, List<String> clauses) implements Comparable<PackageInfo> {
+
+ PackageInfo withoutVersion() {
+ return new PackageInfo(packageName, "", clauses);
+ }
+
+ @Override
+ public String toString() {
+ return packageName + ":" + version;
+ }
+
+ @Override
+ public int compareTo(PackageInfo o) {
+ int pkg = packageName.compareTo(o.packageName);
+ return (pkg != 0) ? pkg : version.compareTo(o.version);
+ }
+ }
+
+ record PackageSet(List<PackageInfo> packages) {
+ PackageSet removeJavaVersion() {
+ return new PackageSet(packages.stream()
+ .map(p -> p.version.contains(".JavaSE_") ? p.withoutVersion() : p)
+ .toList());
+ }
+
+ PackageSet removeNewPackageOnJava20() {
+ return new PackageSet(packages.stream()
+ .filter(p -> ! p.packageName.contains("java.lang.foreign"))
+ .filter(p -> ! p.packageName.contains("com.sun.jna"))
+ .toList());
+ }
+
+ boolean isEquivalentTo(PackageSet other) {
+ var thisPackages = new HashSet<>(removeJavaVersion().removeNewPackageOnJava20().packages);
+ var otherPackages = new HashSet<>(other.removeJavaVersion().removeNewPackageOnJava20().packages);
+ return thisPackages.equals(otherPackages);
+ }
+
+ PackageSet minus(PackageSet other) {
+ var thisPackages = new HashSet<>(removeJavaVersion().removeNewPackageOnJava20().packages);
+ var otherPackages = new HashSet<>(other.removeJavaVersion().removeNewPackageOnJava20().packages);
+ Set<PackageInfo> diff = Sets.difference(thisPackages, otherPackages);
+ return new PackageSet(diff.stream().sorted().toList());
+ }
+
+ @Override
+ public String toString() {
+ return packages.stream().map(PackageInfo::toString)
+ .collect(Collectors.joining(",\n ", " [", "]"));
+ }
+ }
+
@TempDir
public static File tempFolder;
@@ -62,60 +119,60 @@ public class ExportPackagesIT {
String expectedValue = expectedProperties.getProperty(ExportPackages.EXPORT_PACKAGES);
assertNotNull(expectedValue, "Missing exportPackages property in file.");
- Set<String> actualPackages = removeNewPackageOnJava20(removeJavaVersion(getPackages(actualValue)));
- Set<String> expectedPackages = removeNewPackageOnJava20(removeJavaVersion(getPackages(expectedValue)));
- if (!actualPackages.equals(expectedPackages)) {
+ var expectedPackages = parsePackages(expectedValue).removeJavaVersion();
+ var actualPackages = parsePackages(actualValue).removeJavaVersion()
+ .removeNewPackageOnJava20();
+
+ if (!actualPackages.isEquivalentTo(expectedPackages)) {
StringBuilder message = getDiff(actualPackages, expectedPackages);
message.append("\n\nIf this test fails due to an intentional change in exported packages, run the following command:\n")
.append("$ cp jdisc_core/target/classes/exportPackages.properties jdisc_core/src/test/resources/")
.append("\n\nNote that removing exported packages usually requires a new major version of Vespa.\n");
fail(message.toString());
}
+ // TODO: check that actualValue equals expectedValue. Problem is that exportPackages.properties is not deterministic.
}
- private static Set<String> removeJavaVersion(Set<String> packages) {
- return packages.stream().map(p -> p.replaceAll(".JavaSE_\\d+", "")).collect(Collectors.toSet());
- }
-
- private static Set<String> removeNewPackageOnJava20(Set<String> packages) {
- return packages.stream()
- .filter(p -> ! p.contains("java.lang.foreign"))
- .filter(p -> ! p.contains("com.sun.jna"))
- .collect(Collectors.toSet());
- }
-
- private static StringBuilder getDiff(Set<String> actual, Set<String> expected) {
+ private static StringBuilder getDiff(PackageSet actual, PackageSet expected) {
StringBuilder sb = new StringBuilder();
- Set<String> onlyInActual = onlyInSet1(actual, expected);
- if (! onlyInActual.isEmpty()) {
+
+ var onlyInActual = actual.minus(expected);
+ if (! onlyInActual.packages().isEmpty()) {
sb.append("\nexportPackages.properties contained ")
- .append(onlyInActual.size())
+ .append(onlyInActual.packages.size())
.append(" unexpected packages:\n")
- .append(onlyInActual.stream().collect(Collectors.joining(",\n ", " [", "]")));
+ .append(onlyInActual);
}
- Set<String> onlyInExpected = onlyInSet1(expected, actual);
- if (! onlyInExpected.isEmpty()) {
+ var onlyInExpected = expected.minus(actual);
+ if (! onlyInExpected.packages.isEmpty()) {
sb.append("\nexportPackages.properties did not contain ")
- .append(onlyInExpected.size())
+ .append(onlyInExpected.packages.size())
.append(" expected packages:\n")
- .append(onlyInExpected.stream().collect(Collectors.joining(",\n ", " [", "]")));
+ .append(onlyInExpected);
}
return sb;
}
- // Returns a sorted set for readability.
- private static Set<String> onlyInSet1(Set<String> set1, Set<String> set2) {
- return set1.stream()
- .filter(s -> ! set2.contains(s))
- .collect(Collectors.toCollection(TreeSet::new));
- }
+ public static PackageSet parsePackages(String input) {
+ List<PackageInfo> packages = new ArrayList<>();
- private static Set<String> getPackages(String propertyValue) {
- return Arrays.stream(propertyValue.split(","))
- .map(String::trim)
- .filter(s -> ! s.isEmpty())
- .collect(Collectors.toSet());
+ Matcher matcher = PACKAGE_PATTERN.matcher(input);
+ while (matcher.find()) {
+ String packageName = matcher.group(1);
+ String version = matcher.group(2);
+ String dependencyPackage = matcher.group(3);
+ String dependencyClause = matcher.group(4);
+
+ List<String> clauses = new ArrayList<>();
+ if (dependencyPackage != null && dependencyClause != null) {
+ clauses.add(dependencyPackage + ";" + dependencyClause);
+ }
+
+ PackageInfo packageInfo = new PackageInfo(packageName, version, clauses);
+ packages.add(packageInfo);
+ }
+ return new PackageSet(packages);
}
private static Properties getPropertiesFromFile(File file) throws IOException {
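For reference, a small example of what the new parser yields for a typical Export-Package-style value; the package names and versions are invented, and the call assumes code living in the same package since PackageSet is package-private:

    String value = "com.yahoo.jdisc.application;version=\"1.0.0\",com.yahoo.jdisc.handler;version=\"1.0.0\"";
    ExportPackagesIT.PackageSet set = ExportPackagesIT.parsePackages(value);
    // set.packages() holds two PackageInfo entries with empty uses-clauses, rendered by
    // toString() as com.yahoo.jdisc.application:1.0.0 and com.yahoo.jdisc.handler:1.0.0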
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
index 1d3fcb5fbf8..b6ec0ebbd94 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
@@ -43,7 +43,6 @@ import java.io.UncheckedIOException;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
import java.security.KeyPair;
import java.security.PrivateKey;
import java.security.cert.X509Certificate;
@@ -76,7 +75,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
private static final String CONTAINER_SIA_DIRECTORY = "/var/lib/sia";
private static final String LEGACY_SIA_DIRECTORY = "/opt/vespa/var/vespa/sia";
- private final URI ztsEndpoint;
private final Path ztsTrustStorePath;
private final Timer timer;
private final String certificateDnsSuffix;
@@ -87,14 +85,12 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
// Used as an optimization to ensure ZTS is not DDoS'ed on continuously failing refresh attempts
private final Map<ContainerName, Instant> lastRefreshAttempt = new ConcurrentHashMap<>();
- public AthenzCredentialsMaintainer(URI ztsEndpoint,
- Path ztsTrustStorePath,
+ public AthenzCredentialsMaintainer(Path ztsTrustStorePath,
ConfigServerInfo configServerInfo,
String certificateDnsSuffix,
ServiceIdentityProvider hostIdentityProvider,
FlagSource flagSource,
Timer timer) {
- this.ztsEndpoint = ztsEndpoint;
this.ztsTrustStorePath = ztsTrustStorePath;
this.certificateDnsSuffix = certificateDnsSuffix;
this.hostIdentityProvider = hostIdentityProvider;
@@ -231,14 +227,7 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
var keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA);
var athenzRole = AthenzRole.fromResourceNameString(role);
- var containerIdentitySslContext = new SslContextBuilder()
- .withKeyStore(privateKeyFile, certificateFile)
- .withTrustStore(ztsTrustStorePath)
- .build();
- try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(identityDocument))
- .withSslContext(containerIdentitySslContext)
- .withHostnameVerifier(ztsHostNameVerifier)
- .build()) {
+ try (ZtsClient ztsClient = ztsClient(identityDocument.ztsUrl(), privateKeyFile, certificateFile, ztsHostNameVerifier)) {
var csrGenerator = new CsrGenerator(certificateDnsSuffix, identityDocument.providerService().getFullName());
var csr = csrGenerator.generateRoleCsr(
identity, athenzRole, identityDocument.providerUniqueId(), identityDocument.clusterType(), keyPair);
@@ -318,7 +307,7 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
// Allow all zts hosts while removing SIS
HostnameVerifier ztsHostNameVerifier = (hostname, sslSession) -> true;
- try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(doc)).withIdentityProvider(hostIdentityProvider).withHostnameVerifier(ztsHostNameVerifier).build()) {
+ try (ZtsClient ztsClient = ztsClient(doc.ztsUrl(), hostIdentityProvider.privateKeyPath(), hostIdentityProvider.certificatePath(), ztsHostNameVerifier)) {
InstanceIdentity instanceIdentity =
ztsClient.registerInstance(
doc.providerService(),
@@ -331,15 +320,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
}
}
- /**
- * Return zts url from identity document, fallback to ztsEndpoint
- */
- private URI ztsEndpoint(IdentityDocument doc) {
- return Optional.ofNullable(doc.ztsUrl())
- .filter(s -> !s.isBlank())
- .map(URI::create)
- .orElse(ztsEndpoint);
- }
private void refreshIdentity(NodeAgentContext context, ContainerPath privateKeyFile, ContainerPath certificateFile,
ContainerPath identityDocumentFile, IdentityDocument doc, IdentityType identityType, AthenzIdentity identity) {
KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA);
@@ -347,14 +327,10 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
Pkcs10Csr csr = csrGenerator.generateInstanceCsr(
identity, doc.providerUniqueId(), doc.ipAddresses(), doc.clusterType(), keyPair);
- SSLContext containerIdentitySslContext = new SslContextBuilder().withKeyStore(privateKeyFile, certificateFile)
- .withTrustStore(ztsTrustStorePath)
- .build();
-
try {
// Allow all zts hosts while removing SIS
HostnameVerifier ztsHostNameVerifier = (hostname, sslSession) -> true;
- try (ZtsClient ztsClient = new DefaultZtsClient.Builder(ztsEndpoint(doc)).withSslContext(containerIdentitySslContext).withHostnameVerifier(ztsHostNameVerifier).build()) {
+ try (ZtsClient ztsClient = ztsClient(doc.ztsUrl(), privateKeyFile, certificateFile, ztsHostNameVerifier)) {
InstanceIdentity instanceIdentity =
ztsClient.refreshInstance(
doc.providerService(),
@@ -439,17 +415,26 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
var certsDirectory = legacySiaDirectory.resolve("certs");
Files.createDirectories(keysDirectory);
Files.createDirectories(certsDirectory);
- writeFile(certsDirectory.resolve(certificateFile.getFileName()), new String(Files.readAllBytes(certificateFile)));
- writeFile(keysDirectory.resolve(privateKeyFile.getFileName()), new String(Files.readAllBytes(privateKeyFile)));
+ writeFile(certsDirectory.resolve(certificateFile.getFileName()), Files.readString(certificateFile));
+ writeFile(keysDirectory.resolve(privateKeyFile.getFileName()), Files.readString(privateKeyFile));
}
- /*
- Get the document version to ask for
- */
+ /** Get the document version to ask for */
private int documentVersion(NodeAgentContext context) {
return SignedIdentityDocument.DEFAULT_DOCUMENT_VERSION;
}
+ private ZtsClient ztsClient(URI ztsEndpoint, Path privateKeyFile, Path certificateFile, HostnameVerifier hostnameVerifier) {
+ SSLContext sslContext = new SslContextBuilder()
+ .withKeyStore(privateKeyFile, certificateFile)
+ .withTrustStore(ztsTrustStorePath)
+ .build();
+ return new DefaultZtsClient.Builder(ztsEndpoint)
+ .withSslContext(sslContext)
+ .withHostnameVerifier(hostnameVerifier)
+ .build();
+ }
+
private List<String> getRoleList(NodeAgentContext context) {
try {
return identityDocumentClient.getNodeRoles(context.hostname().value());
@@ -463,7 +448,7 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
NODE("vespa-node-identity-document.json"),
TENANT("vespa-tenant-identity-document.json");
- private String identityDocument;
+ private final String identityDocument;
IdentityType(String identityDocument) {
this.identityDocument = identityDocument;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index 9da66413b9c..f3d69fdf103 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -205,7 +205,7 @@ public class NodeRepository extends AbstractComponent {
*/
public boolean exclusiveAllocation(ClusterSpec clusterSpec) {
return clusterSpec.isExclusive() ||
- ( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
+ ( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
( !zone().cloud().allowHostSharing() && !sharedHosts.value().isEnabled(clusterSpec.type().name()));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
index a2ef76e84d0..40d1d50e0e8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
@@ -195,6 +195,7 @@ public class AllocatableClusterResources {
else { // Return the cheapest flavor satisfying the requested resources, if any
NodeResources cappedWantedResources = applicationLimits.cap(wantedResources.nodeResources());
Optional<AllocatableClusterResources> best = Optional.empty();
+ Optional<AllocatableClusterResources> bestDisregardingDiskLimit = Optional.empty();
for (Flavor flavor : nodeRepository.flavors().getFlavors()) {
// Flavor decide resources: Real resources are the worst case real resources we'll get if we ask for these advertised resources
NodeResources advertisedResources = nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor);
@@ -202,7 +203,9 @@ public class AllocatableClusterResources {
// Adjust where we don't need exact match to the flavor
if (flavor.resources().storageType() == NodeResources.StorageType.remote) {
- double diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive).diskGb();
+ double diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive, true).diskGb();
+ if (diskGb > applicationLimits.max().nodeResources().diskGb() || diskGb < applicationLimits.min().nodeResources().diskGb()) // TODO: Remove when disk limit is enforced
+ diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive, false).diskGb();
advertisedResources = advertisedResources.withDiskGb(diskGb);
realResources = realResources.withDiskGb(diskGb);
}
@@ -213,14 +216,24 @@ public class AllocatableClusterResources {
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)) continue;
+
var candidate = new AllocatableClusterResources(wantedResources.with(realResources),
advertisedResources,
wantedResources,
clusterSpec);
+
+ if ( ! systemLimits.isWithinAdvertisedDiskLimits(advertisedResources, clusterSpec)) { // TODO: Remove when disk limit is enforced
+ if (bestDisregardingDiskLimit.isEmpty() || candidate.preferableTo(bestDisregardingDiskLimit.get())) {
+ bestDisregardingDiskLimit = Optional.of(candidate);
+ }
+ continue;
+ }
if (best.isEmpty() || candidate.preferableTo(best.get())) {
best = Optional.of(candidate);
}
}
+ if (best.isEmpty())
+ best = bestDisregardingDiskLimit;
return best;
}
}
@@ -234,7 +247,7 @@ public class AllocatableClusterResources {
boolean bestCase) {
var systemLimits = new NodeResourceLimits(nodeRepository);
var advertisedResources = nodeRepository.resourcesCalculator().realToRequest(wantedResources.nodeResources(), exclusive, bestCase);
- advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive); // Ask for something legal
+ advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive, true); // Ask for something legal
advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail
var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive, bestCase); // What we'll really get
if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)
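Since the selection logic above is interleaved with flavor iteration, here is a distilled sketch of what it does: candidates violating the (still advisory) advertised disk limit are tracked separately and used only when no candidate satisfies the limit. The names candidates and withinAdvertisedDiskLimit are illustrative stand-ins:

    Optional<AllocatableClusterResources> best = Optional.empty();
    Optional<AllocatableClusterResources> bestDisregardingDiskLimit = Optional.empty();
    for (AllocatableClusterResources candidate : candidates) {
        if ( ! withinAdvertisedDiskLimit(candidate)) {        // soft limit, see TODO above
            if (bestDisregardingDiskLimit.isEmpty() || candidate.preferableTo(bestDisregardingDiskLimit.get()))
                bestDisregardingDiskLimit = Optional.of(candidate);
            continue;
        }
        if (best.isEmpty() || candidate.preferableTo(best.get()))
            best = Optional.of(candidate);
    }
    if (best.isEmpty()) best = bestDisregardingDiskLimit;     // fall back only when nothing else fits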
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index b56e8d1b247..2287b768dee 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits;
import java.util.Optional;
@@ -63,9 +64,8 @@ public class AllocationOptimizer {
availableRealHostResources,
nodeRepository);
if (allocatableResources.isEmpty()) continue;
- if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get())) {
+ if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get()))
bestAllocation = allocatableResources;
- }
}
}
return bestAllocation;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index bfd06d744f6..8a39f309935 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -9,11 +9,11 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+
import java.util.Map;
import java.util.TreeMap;
@@ -115,10 +115,6 @@ public class CapacityPolicies {
return versioned(clusterSpec, Map.of(new Version(0), smallestSharedResources())).with(architecture);
}
- if (zone.environment() == Environment.dev && zone.system() == SystemName.cd) {
- return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 4, 50, 0.3)));
- }
-
if (clusterSpec.type() == ClusterSpec.Type.content) {
return zone.cloud().dynamicProvisioning()
? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2, 16, 300, 0.3)))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index ffd2805bcff..ad91bdef478 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -81,11 +81,10 @@ public class NodeRepositoryProvisioner implements Provisioner {
* The nodes are ordered by increasing index number.
*/
@Override
- public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested,
- ProvisionLogger logger) {
+ public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
log.log(Level.FINE, "Received deploy prepare request for " + requested +
" for application " + application + ", cluster " + cluster);
- validate(application, cluster, requested);
+ validate(application, cluster, requested, logger);
int groups;
NodeResources resources;
@@ -113,7 +112,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
requireCompatibleResources(resources, cluster));
}
- private void validate(ApplicationId application, ClusterSpec cluster, Capacity requested) {
+ private void validate(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
nodeResourceLimits.ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), application, cluster);
@@ -121,6 +120,18 @@ public class NodeRepositoryProvisioner implements Provisioner {
if ( ! requested.minResources().nodeResources().gpuResources().equals(requested.maxResources().nodeResources().gpuResources()))
throw new IllegalArgumentException(requested + " is invalid: Gpu capacity cannot have ranges");
+
+ logInsufficientDiskResources(cluster, requested, logger);
+ }
+
+ private void logInsufficientDiskResources(ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
+ var resources = requested.minResources().nodeResources();
+ if ( ! nodeResourceLimits.isWithinAdvertisedDiskLimits(resources, cluster)) {
+ logger.logApplicationPackage(Level.WARNING, "Requested disk (" + resources.diskGb() +
+ "Gb) in " + cluster.id() + " is not large enough to fit " +
+ "core/heap dumps. Minimum recommended disk resources " +
+ "is 2x memory for containers and 3x memory for content");
+ }
}
private NodeResources getNodeResources(ClusterSpec cluster, NodeResources nodeResources, ApplicationId applicationId) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index 9ded1a2735c..8c5a7b6c61e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -2,14 +2,17 @@
package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.ProvisionLogger;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.util.Locale;
+import java.util.logging.Level;
/**
* Defines the resource limits for nodes in various zones
@@ -35,6 +38,12 @@ public class NodeResourceLimits {
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, cluster.isExclusive()));
}
+ // TODO: Remove this when we are ready to fail, not just warn on this.
+ public boolean isWithinAdvertisedDiskLimits(NodeResources requested, ClusterSpec cluster) {
+ if (requested.diskGbIsUnspecified() || requested.memoryGbIsUnspecified()) return true;
+ return requested.diskGb() >= minAdvertisedDiskGb(requested, cluster);
+ }
+
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
public boolean isWithinRealLimits(NodeCandidate candidateNode, ApplicationId applicationId, ClusterSpec cluster) {
if (candidateNode.type() != NodeType.tenant) return true; // Resource limits only apply to tenant nodes
@@ -52,9 +61,12 @@ public class NodeResourceLimits {
return true;
}
- public NodeResources enlargeToLegal(NodeResources requested, ApplicationId applicationId, ClusterSpec cluster, boolean exclusive) {
+ public NodeResources enlargeToLegal(NodeResources requested, ApplicationId applicationId, ClusterSpec cluster, boolean exclusive, boolean followRecommendations) {
if (requested.isUnspecified()) return requested;
+ if (followRecommendations) // TODO: Do unconditionally when we enforce this limit
+ requested = requested.withDiskGb(Math.max(minAdvertisedDiskGb(requested, cluster), requested.diskGb()));
+
return requested.withVcpu(Math.max(minAdvertisedVcpu(applicationId, cluster), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(cluster), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested, exclusive), requested.diskGb()));
@@ -78,6 +90,15 @@ public class NodeResourceLimits {
return minRealDiskGb() + reservedDiskSpaceGb(requested.storageType(), exclusive);
}
+ // TODO: Move this check into the above when we are ready to fail, not just warn on this.
+ private double minAdvertisedDiskGb(NodeResources requested, ClusterSpec cluster) {
+ return requested.memoryGb() * switch (cluster.type()) {
+ case combined, content -> 3;
+ case container -> 2;
+ default -> 0; // No constraint on other types
+ };
+ }
+
// Note: Assumes node type 'host'
private long reservedDiskSpaceGb(NodeResources.StorageType storageType, boolean exclusive) {
if (storageType == NodeResources.StorageType.local && ! zone().cloud().allowHostSharing())
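The recommendation enforced above is plain arithmetic: advertised disk should be at least 3x memory for content and combined clusters and 2x memory for container clusters, with no constraint on other cluster types. A minimal standalone sketch of that rule, under illustrative names (the real logic is the minAdvertisedDiskGb(NodeResources, ClusterSpec) overload added in this hunk):

    // Illustrative sketch only; ClusterType and the method name are invented for this example.
    final class DiskRecommendationSketch {

        enum ClusterType { CONTAINER, CONTENT, COMBINED, ADMIN }

        // Minimum recommended disk (Gb) for a node with the given memory, per the rule above.
        static double minRecommendedDiskGb(double memoryGb, ClusterType type) {
            return memoryGb * switch (type) {
                case COMBINED, CONTENT -> 3; // content/combined: 3x memory
                case CONTAINER -> 2;         // container: 2x memory, to fit core/heap dumps
                default -> 0;                // no constraint on other cluster types
            };
        }

        public static void main(String[] args) {
            // 10 Gb memory with 19 Gb disk falls below both the 2x and the 3x recommendation,
            // which is what the too_small_disk_compared_to_memory test further down relies on.
            System.out.println(minRecommendedDiskGb(10, ClusterType.CONTENT)); // 30.0
        }
    }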
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index b6c7324c75c..42b9e53dd8a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.yolean.Exceptions;
import java.util.ArrayList;
import java.util.List;
@@ -39,9 +40,10 @@ class Preparer {
return nodes;
}
catch (NodeAllocationException e) {
+ e.printStackTrace();
throw new NodeAllocationException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
- " in " + application + " " + cluster + ": " + e.getMessage(),
+ " in " + application + " " + cluster + ": " + Exceptions.toMessageString(e),
e.retryable());
}
}
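The switch from e.getMessage() to Exceptions.toMessageString(e) above is intended to make the rethrown NodeAllocationException carry the messages of the whole cause chain rather than only the outermost one. A rough, hypothetical sketch of that kind of message flattening (not the actual com.yahoo.yolean.Exceptions implementation):

    // Hypothetical re-implementation of cause-chain message flattening, for illustration only.
    final class ExceptionsSketch {

        static String toMessageString(Throwable t) {
            StringBuilder b = new StringBuilder();
            for (Throwable current = t; current != null; current = current.getCause()) {
                if (b.length() > 0) b.append(": ");
                String message = current.getMessage();
                b.append(message != null ? message : current.getClass().getSimpleName());
            }
            return b.toString();
        }

        public static void main(String[] args) {
            var inner = new IllegalArgumentException("insufficient capacity in host flavor");
            var outer = new RuntimeException("Could not satisfy request", inner);
            System.out.println(toMessageString(outer));
            // Could not satisfy request: insufficient capacity in host flavor
        }
    }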
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/InMemoryProvisionLogger.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/InMemoryProvisionLogger.java
new file mode 100644
index 00000000000..65abcbef698
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/InMemoryProvisionLogger.java
@@ -0,0 +1,35 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.testutils;
+
+import com.yahoo.config.provision.ProvisionLogger;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Level;
+
+/**
+ * A logger which remembers all messages logged in addition to writing them to standard out.
+ *
+ * @author bratseth
+ */
+public class InMemoryProvisionLogger implements ProvisionLogger {
+
+ private final List<String> systemLog = new ArrayList<>();
+ private final List<String> applicationLog = new ArrayList<>();
+
+ @Override
+ public void log(Level level, String message) {
+ System.out.println("ProvisionLogger system " + level + ": " + message);
+ systemLog.add(level + ": " + message);
+ }
+
+ @Override
+ public void logApplicationPackage(Level level, String message) {
+ System.out.println("ProvisionLogger application " + level + ": " + message);
+ applicationLog.add(level + ": " + message);
+ }
+
+ public List<String> systemLog() { return systemLog; }
+ public List<String> applicationLog() { return applicationLog; }
+
+}
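A minimal direct-usage sketch of this test logger (standalone illustration; the tests below obtain it through ProvisioningTester.provisionLogger() instead):

    // Assumes the InMemoryProvisionLogger class introduced above is on the classpath.
    import com.yahoo.vespa.hosted.provision.testutils.InMemoryProvisionLogger;

    import java.util.logging.Level;

    class InMemoryProvisionLoggerExample {
        public static void main(String[] args) {
            InMemoryProvisionLogger logger = new InMemoryProvisionLogger();
            logger.logApplicationPackage(Level.WARNING, "Requested disk is below the recommended minimum");
            // Entries are stored as "<LEVEL>: <message>", which is what the tests assert on.
            System.out.println(logger.applicationLog());
            // [WARNING: Requested disk is below the recommended minimum]
        }
    }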
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
index dcde521bfda..3ed01e00ee6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
@@ -261,7 +261,7 @@ public class MockDeployer implements Deployer {
public ClusterSpec cluster() { return cluster; }
private List<HostSpec> prepare(NodeRepositoryProvisioner provisioner) {
- return provisioner.prepare(id, cluster, capacity, null);
+ return provisioner.prepare(id, cluster, capacity, new InMemoryProvisionLogger());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 64c5dff0718..bd31c7578b9 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -57,11 +57,15 @@ public class AutoscalingTest {
@Test
public void test_autoscaling_single_content_group() {
- var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
+ var now = new ClusterResources(5, 1, new NodeResources(2, 16, 750, 1));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(true)
+ .initialResources(Optional.of(now))
+ .build();
fixture.loader().applyCpuLoad(0.7f, 10);
var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high",
- 8, 1, 4.0, 9.3, 36.2,
+ 9, 1, 3.6, 8.5, 360.9,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
@@ -83,7 +87,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
- 7, 1, 1.1, 8.7, 25.4,
+ 8, 1, 1.0, 8.3, 338.4,
fixture.autoscale());
}
@@ -210,7 +214,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 9, 1, 4, 16.0, 25.5,
+ 9, 1, 4, 16.0, 150,
fixture.autoscale());
}
@@ -227,9 +231,9 @@ public class AutoscalingTest {
@Test
public void test_autoscaling_without_traffic_exclusive() {
- var min = new ClusterResources(1, 1, new NodeResources(0.5, 4, 10, 0.3));
- var now = new ClusterResources(4, 1, new NodeResources(8, 16, 10, 0.3));
- var max = new ClusterResources(4, 1, new NodeResources(16, 32, 50, 0.3));
+ var min = new ClusterResources(1, 1, new NodeResources(0.5, 4, 100, 0.3));
+ var now = new ClusterResources(4, 1, new NodeResources(8, 16, 100, 0.3));
+ var max = new ClusterResources(4, 1, new NodeResources(16, 32, 500, 0.3));
var fixture = DynamicProvisioningTester.fixture(min, now, max)
.clusterType(ClusterSpec.Type.container)
.awsProdSetup(false)
@@ -238,7 +242,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(duration.negated());
fixture.loader().zeroTraffic(20, 1);
fixture.tester().assertResources("Scaled down",
- 2, 1, 2, 16, 10,
+ 2, 1, 2, 16, 100,
fixture.autoscale());
}
@@ -256,7 +260,7 @@ public class AutoscalingTest {
fixture.completeLastScaling();
fixture.loader().applyCpuLoad(0.1f, 120);
fixture.tester().assertResources("Scaling down since cpu usage has gone down",
- 3, 1, 2, 16, 27.2,
+ 3, 1, 2, 16, 75.0,
fixture.autoscale());
}
@@ -283,7 +287,7 @@ public class AutoscalingTest {
new NodeResources(100, 1000, 1000, 1, DiskSpeed.any));
var capacity = Capacity.from(min, max);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up",
- 13, 1, 1.5, 29.1, 26.7,
+ 13, 1, 1.5, 29.1, 87.3,
fixture.autoscale(capacity));
assertEquals("Disk speed from new capacity is used",
DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
@@ -312,7 +316,6 @@ public class AutoscalingTest {
fixture.deactivateRetired(capacity);
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyCpuLoad(0.8, 120);
- System.out.println("Autoscaling ----------");
assertEquals(DiskSpeed.any, fixture.autoscale(capacity).resources().get().nodeResources().diskSpeed());
}
@@ -384,15 +387,15 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.4, 240);
fixture.tester().assertResources("Scaling cpu up",
- 6, 6, 5.0, 7.4, 10.0,
+ 6, 6, 5.0, 7.4, 22.3,
fixture.autoscale());
}
@Test
public void autoscaling_respects_group_size_limit() {
- var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
- var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 10, 1));
- var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1));
+ var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 10, 1));
+ var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 100, 1));
+ var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 10000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
@@ -401,7 +404,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.4, 240);
fixture.tester().assertResources("Scaling cpu up",
- 8, 4, 4.6, 4.2, 10.0,
+ 12, 6, 2.8, 4.2, 27.5,
fixture.autoscale());
}
@@ -457,7 +460,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
- 13, 1, 4, 8, 13.6,
+ 13, 1, 4, 8, 100.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@@ -480,9 +483,61 @@ public class AutoscalingTest {
}
@Test
+ public void too_small_disk_compared_to_memory() {
+ var resources = new ClusterResources(2, 1, new NodeResources(1, 10, 19, 1));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(true)
+ .initialResources(Optional.of(resources))
+ .build();
+ assertEquals(2, fixture.tester().provisionLogger().applicationLog().size()); // tester deploys twice
+ assertEquals("WARNING: Requested disk (19.0Gb) in cluster 'cluster1' is not large enough to fit core/heap dumps. Minimum recommended disk resources is 2x memory for containers and 3x memory for content",
+ fixture.tester().provisionLogger().applicationLog().get(0));
+ }
+
+ @Test
+ public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory() {
+ var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1));
+ var now = new ClusterResources(10, 1, new NodeResources(5, 50, 150, 1));
+ var max = new ClusterResources(10, 1, new NodeResources(10, 100, 200, 1));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(true)
+ .initialResources(Optional.of(now))
+ .capacity(Capacity.from(min, max))
+ .build();
+ fixture.tester().clock().advance(Duration.ofDays(2));
+ fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
+ fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
+ 11, 1, 13.0, 60.0, 179.9,
+ fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
+ fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
+ 10, 1, 10.0, 66.2, 198.6,
+ fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
+ }
+
+ @Test
+ public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory_exclusive() {
+ var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1, DiskSpeed.any, StorageType.remote));
+ var now = new ClusterResources(10, 1, new NodeResources(16, 64, 192, 1, DiskSpeed.any, StorageType.remote));
+ var max = new ClusterResources(10, 1, new NodeResources(30, 200, 500, 1, DiskSpeed.any, StorageType.remote));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(false)
+ .initialResources(Optional.of(now))
+ .capacity(Capacity.from(min, max))
+ .build();
+ fixture.tester().clock().advance(Duration.ofDays(2));
+ fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
+ fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
+ 13, 1, 36.0, 72.0, 900.0,
+ fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
+ fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
+ 10, 1, 16.0, 64, 247.5,
+ fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
+ }
+
+ @Test
public void test_autoscaling_group_size_unconstrained() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
- var now = new ClusterResources(5, 5, new NodeResources(3, 100, 100, 1));
+ var now = new ClusterResources(5, 5, new NodeResources(3, 100, 300, 1));
var max = new ClusterResources(20, 20, new NodeResources(10, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
@@ -492,7 +547,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
- 10, 5, 7.7, 41.5, 38.5,
+ 10, 5, 7.7, 41.5, 124.6,
fixture.autoscale());
}
@@ -509,7 +564,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
- 7, 7, 9.4, 78.6, 77.0,
+ 7, 7, 9.4, 78.6, 235.8,
fixture.autoscale());
}
@@ -528,7 +583,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
fixture.tester().assertResources("Scaling up cpu, others down, changing to 1 group is cheaper",
- 7, 1, 3.2, 43.3, 40.1,
+ 7, 1, 3.2, 43.3, 129.8,
fixture.autoscale());
}
@@ -548,7 +603,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
- 5, 1, 1.0, 62.6, 60.1,
+ 5, 1, 1.0, 62.6, 187.7,
fixture.autoscale());
}
@@ -565,7 +620,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyMemLoad(1.0, 1000);
fixture.tester().assertResources("Increase group size to reduce memory load",
- 8, 2, 13.9, 96.3, 60.1,
+ 8, 2, 13.9, 96.3, 288.8,
fixture.autoscale());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
index 4d2816cb14f..00ae9ac5a9d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
@@ -30,6 +30,7 @@ import com.yahoo.vespa.hosted.provision.testutils.MockDeployer;
import com.yahoo.vespa.hosted.provision.testutils.ServiceMonitorStub;
import com.yahoo.vespa.service.duper.InfraApplication;
import com.yahoo.vespa.service.duper.TenantHostApplication;
+import com.yahoo.vespa.hosted.provision.testutils.InMemoryProvisionLogger;
import java.time.Clock;
import java.time.Duration;
@@ -270,7 +271,7 @@ public class NodeFailTester {
}
public void activate(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
- List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, capacity, null);
+ List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, capacity, new InMemoryProvisionLogger());
try (var lock = provisioner.lock(applicationId)) {
NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
provisioner.activate(hosts, new ActivationContext(0), new ApplicationTransaction(lock, transaction));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 1b677224295..8aaf0eb20e7 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -75,7 +75,7 @@ public class ScalingSuggestionsMaintainerTest {
assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
- assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 11.8 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 14.2 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app2, cluster2, tester).resources().get().toString());
// Utilization goes way down
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
index e1e83ad2fb3..c99728b714b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
@@ -9,6 +9,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeResources.Architecture;
@@ -316,13 +317,6 @@ public class DynamicProvisioningTest {
tester.assertNodes("Allocation specifies memory in the advertised amount",
2, 1, 2, 20, 40,
app1, cluster1);
-
- // Redeploy the same
- tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 20, 40),
- resources(4, 1, 2, 20, 40)));
- tester.assertNodes("Allocation specifies memory in the advertised amount",
- 2, 1, 2, 20, 40,
- app1, cluster1);
}
@Test
@@ -505,7 +499,7 @@ public class DynamicProvisioningTest {
}
@Test
- public void gpu_host() {
+ public void gpu_host() {
List<Flavor> flavors = List.of(new Flavor("gpu", new NodeResources(4, 16, 125, 10, fast, local,
Architecture.x86_64, new NodeResources.GpuResources(1, 16))));
ProvisioningTester tester = new ProvisioningTester.Builder().dynamicProvisioning(true, false)
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
index b043a1cfb0f..de9b3a4db33 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
@@ -24,9 +24,7 @@ import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.Fixture;
import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import com.yahoo.vespa.hosted.provision.node.IP;
-import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
-import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
-import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.InMemoryProvisionLogger;
import java.time.Duration;
import java.util.ArrayList;
@@ -44,7 +42,7 @@ import static org.junit.Assert.assertTrue;
* - Supports autoscaling testing.
*
* TODO: All provisioning testing should migrate to use this, and then the provisioning tester should be collapsed
- * into this.
+ * into this. ... or we should just use autoscalingtester for everything.
*
* @author bratseth
*/
@@ -89,6 +87,8 @@ public class DynamicProvisioningTester {
return flavors;
}
+ public InMemoryProvisionLogger provisionLogger() { return provisioningTester.provisionLogger(); }
+
public static Fixture.Builder fixture() { return new Fixture.Builder(); }
public static Fixture.Builder fixture(ClusterResources min, ClusterResources now, ClusterResources max) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 477101e10e2..a76b576e430 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -498,7 +498,7 @@ public class ProvisioningTest {
@Test
public void test_changing_limits() {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
+ Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 1000, 4));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
.flavors(List.of(hostFlavor))
.build();
@@ -508,52 +508,52 @@ public class ProvisioningTest {
ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
// Initial deployment
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20),
- resources(8, 4, 4, 20, 40)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200),
+ resources(8, 4, 4, 20, 400)));
tester.assertNodes("Initial allocation at min",
- 4, 2, 2, 10, 20,
+ 4, 2, 2, 10, 200,
app1, cluster1);
// Move window above current allocation
- tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 4, 21, 40),
- resources(10, 5, 5, 25, 50)));
+ tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 4, 21, 400),
+ resources(10, 5, 5, 25, 500)));
tester.assertNodes("New allocation at new min",
- 8, 4, 4, 21, 40,
+ 8, 4, 4, 21, 400,
app1, cluster1);
// Move window below current allocation
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20),
- resources(6, 3, 3, 15, 25)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200),
+ resources(6, 3, 3, 15, 250)));
tester.assertNodes("Allocation preserving resources within new limits",
- 6, 2, 3, 14.57, 25,
+ 6, 2, 3, 14.57, 250,
app1, cluster1);
// Widening window does not change allocation
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 15),
- resources(8, 4, 4, 21, 30)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 150),
+ resources(8, 4, 4, 21, 300)));
tester.assertNodes("Same allocation",
- 6, 2, 3, 14.57, 25,
+ 6, 2, 3, 14.57, 250,
app1, cluster1);
// Changing limits in opposite directions cause a mixture of min and max
- tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 10, 30, 10),
- resources(4, 2, 14, 40, 13)));
+ tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 10, 30, 100),
+ resources(4, 2, 14, 40, 130)));
tester.assertNodes("A mix of min and max",
- 4, 1, 10, 30, 13,
+ 4, 1, 10, 30, 130,
app1, cluster1);
// Changing group size
- tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 8, 25, 10),
- resources(9, 3, 12, 35, 15)));
+ tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 8, 25, 100),
+ resources(9, 3, 12, 35, 150)));
tester.assertNodes("Groups changed",
- 9, 3, 8, 30, 13,
+ 9, 3, 8, 30, 130,
app1, cluster1);
// Stop specifying node resources
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(6, 3, NodeResources.unspecified()),
new ClusterResources(9, 3, NodeResources.unspecified())));
tester.assertNodes("No change",
- 9, 3, 8, 30, 13,
+ 9, 3, 8, 30, 130,
app1, cluster1);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 7e85131eaf4..a3a90d58c2c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -47,6 +47,7 @@ import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.node.filter.NodeHostFilter;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
+import com.yahoo.vespa.hosted.provision.testutils.InMemoryProvisionLogger;
import com.yahoo.vespa.hosted.provision.testutils.MockNameResolver;
import com.yahoo.vespa.hosted.provision.testutils.MockProvisionServiceProvider;
import com.yahoo.vespa.hosted.provision.testutils.OrchestratorMock;
@@ -93,7 +94,7 @@ public class ProvisioningTester {
private final HostProvisioner hostProvisioner;
private final NodeRepositoryProvisioner provisioner;
private final CapacityPolicies capacityPolicies;
- private final ProvisionLogger provisionLogger;
+ private final InMemoryProvisionLogger provisionLogger;
private final LoadBalancerServiceMock loadBalancerService;
private int nextHost = 0;
@@ -132,7 +133,7 @@ public class ProvisioningTester {
1000);
this.provisioner = new NodeRepositoryProvisioner(nodeRepository, zone, provisionServiceProvider);
this.capacityPolicies = new CapacityPolicies(nodeRepository);
- this.provisionLogger = new NullProvisionLogger();
+ this.provisionLogger = new InMemoryProvisionLogger();
this.loadBalancerService = loadBalancerService;
}
@@ -162,6 +163,7 @@ public class ProvisioningTester {
public CapacityPolicies capacityPolicies() { return capacityPolicies; }
public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); }
public InMemoryFlagSource flagSource() { return (InMemoryFlagSource) nodeRepository.flagSource(); }
+ public InMemoryProvisionLogger provisionLogger() { return provisionLogger; }
public Node node(String hostname) { return nodeRepository.nodes().node(hostname).get(); }
public int decideSize(Capacity capacity, ApplicationId application) {
@@ -773,10 +775,6 @@ public class ProvisioningTester {
}
- private static class NullProvisionLogger implements ProvisionLogger {
- @Override public void log(Level level, String message) { }
- }
-
static class MockResourcesCalculator implements HostResourcesCalculator {
private final int memoryTaxGb;
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index 62f42b0d035..a6a988052e6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -513,18 +513,18 @@ public class VirtualNodeProvisioningTest {
2, 1, 20, 16, 50, 1.0,
app1, cluster1);
- var newMinResources = new NodeResources( 5, 6, 11, 1);
- var newMaxResources = new NodeResources(20, 10, 30, 1);
+ var newMinResources = new NodeResources( 5, 6, 18, 1);
+ var newMaxResources = new NodeResources(20, 10, 90, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("New allocation preserves total (redundancy adjusted) resources",
- 7, 1, 5, 6.0, 11, 1.0,
+ 7, 1, 5, 6.0, 18, 1.0,
app1, cluster1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("Redeploying does not cause changes",
- 7, 1, 5, 6.0, 11, 1.0,
+ 7, 1, 5, 6.0, 18, 1.0,
app1, cluster1);
}
diff --git a/parent/pom.xml b/parent/pom.xml
index 62ab23614be..6b855ce637f 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -1157,7 +1157,7 @@
<junit.version>5.8.1</junit.version>
<maven-archiver.version>3.6.0</maven-archiver.version>
<maven-assembly-plugin.version>3.3.0</maven-assembly-plugin.version>
- <maven-bundle-plugin.version>5.1.2</maven-bundle-plugin.version>
+ <maven-bundle-plugin.version>5.1.9</maven-bundle-plugin.version>
<maven-compiler-plugin.version>3.10.1</maven-compiler-plugin.version>
<maven-core.version>3.8.7</maven-core.version>
<maven-dependency-plugin.version>3.6.0</maven-dependency-plugin.version> <!-- NOTE: When upgrading, also update explicit versions in tenant base poms! -->
diff --git a/renovate.json b/renovate.json
new file mode 100644
index 00000000000..3c8eace23da
--- /dev/null
+++ b/renovate.json
@@ -0,0 +1,20 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:base"
+ ],
+ "transitiveRemediation": true,
+ "prHourlyLimit": 10,
+ "prConcurrentLimit": 10,
+ "ignorePaths": [],
+ "ignoreDeps": [
+ "com.github.spotbugs:spotbugs-annotations",
+ "com.yahoo.vespa.bundle-plugin:test-bundles",
+ "com.yahoo.vespa.jdisc_core:test_bundles",
+ "com.yahoo.vespa:cloud-tenant-base",
+ "com.yahoo.vespa:parent",
+ "com.yahoo.vespa:zookeeper-server-parent",
+ "github.com/go-json-experiment/json",
+ "javax.servlet:javax.servlet-api"
+ ]
+}
diff --git a/searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp b/searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp
index 6278b216b52..75c49c3a003 100644
--- a/searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp
+++ b/searchlib/src/apps/vespa-index-inspect/vespa-index-inspect.cpp
@@ -151,12 +151,10 @@ FieldOptions::~FieldOptions() = default;
void
FieldOptions::validateFields(const Schema &schema)
{
- for (std::vector<vespalib::string>::const_iterator
- i = _fields.begin(), ie = _fields.end();
- i != ie; ++i) {
- uint32_t fieldId = schema.getIndexFieldId(*i);
+ for (const auto& field : _fields) {
+ uint32_t fieldId = schema.getIndexFieldId(field);
if (fieldId == Schema::UNKNOWN_FIELD_ID) {
- LOG(error, "No such field: %s", i->c_str());
+ LOG(error, "No such field: %s", field.c_str());
std::_Exit(1);
}
_ids.push_back(fieldId);
@@ -399,10 +397,8 @@ ShowPostingListSubApp::readWordList(const Schema &schema)
_wmv.resize(numFields);
if (!_fieldOptions.empty()) {
- for (std::vector<uint32_t>::const_iterator
- i = _fieldOptions._ids.begin(), ie = _fieldOptions._ids.end();
- i != ie; ++i) {
- SchemaUtil::IndexIterator index(schema, *i);
+ for (auto id : _fieldOptions._ids) {
+ SchemaUtil::IndexIterator index(schema, id);
if (!readWordList(index))
return false;
}
@@ -462,10 +458,8 @@ ShowPostingListSubApp::showTransposedPostingList()
return;
std::vector<PosEntry> entries;
if (!_fieldOptions.empty()) {
- for (std::vector<uint32_t>::const_iterator
- i = _fieldOptions._ids.begin(), ie = _fieldOptions._ids.end();
- i != ie; ++i) {
- SchemaUtil::IndexIterator index(schema, *i);
+ for (auto id : _fieldOptions._ids) {
+ SchemaUtil::IndexIterator index(schema, id);
readPostings(index, entries);
}
} else {
@@ -481,35 +475,34 @@ ShowPostingListSubApp::showTransposedPostingList()
uint32_t prevElemId = static_cast<uint32_t>(-1);
uint32_t prevElementLen = 0;
int32_t prevElementWeight = 0;
- for (std::vector<PosEntry>::const_iterator
- i = entries.begin(), ie = entries.end(); i != ie; ++i) {
- if (i->_docId != prevDocId) {
- std::cout << "docId = " << i->_docId << '\n';
- prevDocId = i->_docId;
+ for (const auto& entry : entries) {
+ if (entry._docId != prevDocId) {
+ std::cout << "docId = " << entry._docId << '\n';
+ prevDocId = entry._docId;
prevFieldId = static_cast<uint32_t>(-1);
}
- if (i->_fieldId != prevFieldId) {
- std::cout << " field = " << i->_fieldId <<
- " \"" << schema.getIndexField(i->_fieldId).getName() <<
+ if (entry._fieldId != prevFieldId) {
+ std::cout << " field = " << entry._fieldId <<
+ " \"" << schema.getIndexField(entry._fieldId).getName() <<
"\"\n";
- prevFieldId = i->_fieldId;
+ prevFieldId = entry._fieldId;
prevElemId = static_cast<uint32_t>(-1);
}
- if (i->_elementId != prevElemId ||
- i->_elementLen != prevElementLen ||
- i->_elementWeight != prevElementWeight) {
- std::cout << " element = " << i->_elementId <<
- ", elementLen = " << i->_elementLen <<
- ", elementWeight = " << i->_elementWeight <<
+ if (entry._elementId != prevElemId ||
+ entry._elementLen != prevElementLen ||
+ entry._elementWeight != prevElementWeight) {
+ std::cout << " element = " << entry._elementId <<
+ ", elementLen = " << entry._elementLen <<
+ ", elementWeight = " << entry._elementWeight <<
'\n';
- prevElemId = i->_elementId;
- prevElementLen = i->_elementLen;
- prevElementWeight = i->_elementWeight;
+ prevElemId = entry._elementId;
+ prevElementLen = entry._elementLen;
+ prevElementWeight = entry._elementWeight;
}
- assert(i->_wordNum != 0);
- assert(i->_wordNum < _wordsv[i->_fieldId].size());
- std::cout << " pos = " << i->_wordPos <<
- ", word = \"" << _wordsv[i->_fieldId][i->_wordNum] << "\"";
+ assert(entry._wordNum != 0);
+ assert(entry._wordNum < _wordsv[entry._fieldId].size());
+ std::cout << " pos = " << entry._wordPos <<
+ ", word = \"" << _wordsv[entry._fieldId][entry._wordNum] << "\"";
std::cout << '\n';
}
}
@@ -588,13 +581,10 @@ ShowPostingListSubApp::showPostingList()
handle->second);
std::vector<TermFieldMatchData> tfmdv(numFields);
TermFieldMatchDataArray tfmda;
- for (std::vector<TermFieldMatchData>::iterator
- tfit = tfmdv.begin(), tfite = tfmdv.end();
- tfit != tfite; ++tfit) {
- tfmda.add(&*tfit);
+ for (auto& tfmd : tfmdv) {
+ tfmda.add(&tfmd);
}
- std::unique_ptr<SearchIterator> sb(handle->second.createIterator(
- handle->first, tfmda));
+ auto sb = handle->second.createIterator(handle->first, tfmda);
sb->initFullRange();
uint32_t docId = 0;
bool first = true;
diff --git a/searchlib/src/tests/query/streaming_query_large_test.cpp b/searchlib/src/tests/query/streaming_query_large_test.cpp
index b39dad43a7b..13af3774e7d 100644
--- a/searchlib/src/tests/query/streaming_query_large_test.cpp
+++ b/searchlib/src/tests/query/streaming_query_large_test.cpp
@@ -29,15 +29,11 @@ namespace {
// a stack overflow if the stack usage increases.
TEST("testveryLongQueryResultingInBug6850778") {
uint32_t NUMITEMS=20000;
-#ifdef VESPA_USE_ADDRESS_SANITIZER
- setMaxStackSize(12_Mi);
-#else
-#ifdef VESPA_USE_THREAD_SANITIZER
+#if defined(VESPA_USE_THREAD_SANITIZER) || defined(VESPA_USE_ADDRESS_SANITIZER)
NUMITEMS = 10000;
#else
setMaxStackSize(4_Mi);
#endif
-#endif
QueryBuilder<SimpleQueryNodeTypes> builder;
for (uint32_t i=0; i <= NUMITEMS; i++) {
builder.addAnd(2);
diff --git a/searchlib/src/vespa/searchcommon/common/schema.cpp b/searchlib/src/vespa/searchcommon/common/schema.cpp
index 1f2f924a4cd..7a3e15dbd6d 100644
--- a/searchlib/src/vespa/searchcommon/common/schema.cpp
+++ b/searchlib/src/vespa/searchcommon/common/schema.cpp
@@ -59,7 +59,7 @@ template <typename T>
uint32_t
getFieldId(vespalib::stringref name, const T &map)
{
- typename T::const_iterator it = map.find(name);
+ auto it = map.find(name);
return (it != map.end()) ? it->second : Schema::UNKNOWN_FIELD_ID;
}
@@ -433,13 +433,12 @@ struct IntersectHelper {
void intersect(const std::vector<T> &set1, const std::vector<T> &set2,
const Map &set2_map,
std::vector<T> &intersection, Map &intersection_map) {
- for (typename std::vector<T>::const_iterator
- it = set1.begin(); it != set1.end(); ++it) {
- typename Map::const_iterator it2 = set2_map.find(it->getName());
+ for (const auto& elem : set1) {
+ auto it2 = set2_map.find(elem.getName());
if (it2 != set2_map.end()) {
- if (is_matching(*it, set2[it2->second])) {
- intersection_map[it->getName()] = intersection.size();
- intersection.push_back(*it);
+ if (is_matching(elem, set2[it2->second])) {
+ intersection_map[elem.getName()] = intersection.size();
+ intersection.push_back(elem);
}
}
}
diff --git a/searchlib/src/vespa/searchlib/aggregation/group.cpp b/searchlib/src/vespa/searchlib/aggregation/group.cpp
index 5a16756f0d7..60afcc96ef5 100644
--- a/searchlib/src/vespa/searchlib/aggregation/group.cpp
+++ b/searchlib/src/vespa/searchlib/aggregation/group.cpp
@@ -98,7 +98,7 @@ Group::Value::groupSingle(const ResultNode & selectResult, HitRank rank, const G
}
GroupHash & childMap = *_childInfo._childMap;
Group * group(nullptr);
- GroupHash::iterator found = childMap.find(selectResult);
+ auto found = childMap.find(selectResult);
if (found == childMap.end()) { // group not present in child map
if (level.allowMoreGroups(childMap.size())) {
group = new Group(level.getGroupPrototype());
diff --git a/searchlib/src/vespa/searchlib/aggregation/grouping.cpp b/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
index e9df10d3a61..96cfb29a693 100644
--- a/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
+++ b/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
@@ -309,7 +309,7 @@ bool
Grouping::needResort() const
{
bool resort(_root.needResort());
- for (GroupingLevelList::const_iterator it(_levels.begin()), mt(_levels.end()); !resort && (it != mt); ++it) {
+ for (auto it(_levels.begin()), mt(_levels.end()); !resort && (it != mt); ++it) {
resort = it->needResort();
}
return (resort && getTopN() <= 0);
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
index c467590fe69..2fac2350735 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
@@ -99,12 +99,12 @@ public:
}
}
void and_hits_into(BitVector & result,uint32_t begin_id) override {
- typename Map::iterator end = _map.end();
+ auto end = _map.end();
result.foreach_truebit([&, end](uint32_t key) { if ( _map.find(_attr.getToken(key)) == end) { result.clearBit(key); }}, begin_id);
}
void doSeek(uint32_t docId) override {
- typename Map::const_iterator pos = _map.find(_attr.getToken(docId));
+ auto pos = _map.find(_attr.getToken(docId));
if (pos != _map.end()) {
_weight = pos->second;
setDocId(docId);
diff --git a/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp b/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
index 97a7dc8bcb1..443fc8369d3 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attributecontext.cpp
@@ -12,7 +12,7 @@ namespace search {
const IAttributeVector *
AttributeContext::getAttribute(AttributeMap & map, const string & name, bool stableEnum) const
{
- AttributeMap::const_iterator itr = map.find(name);
+ auto itr = map.find(name);
if (itr != map.end()) {
if (itr->second) {
return itr->second->attribute();
diff --git a/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp b/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
index 8c1b453c354..c85d77ff70a 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attributemanager.cpp
@@ -33,7 +33,7 @@ waitBaseDir(const string &baseDir)
std::unique_lock<std::mutex> guard(baseDirLock);
bool waited = false;
- BaseDirSet::iterator it = baseDirSet.find(baseDir);
+ auto it = baseDirSet.find(baseDir);
while (it != baseDirSet.end()) {
if (!waited) {
waited = true;
@@ -57,7 +57,7 @@ dropBaseDir(const string &baseDir)
return;
std::lock_guard<std::mutex> guard(baseDirLock);
- BaseDirSet::iterator it = baseDirSet.find(baseDir);
+ auto it = baseDirSet.find(baseDir);
if (it == baseDirSet.end()) {
LOG(error, "AttributeManager: Cannot drop basedir %s, already dropped", baseDir.c_str());
} else {
@@ -114,8 +114,8 @@ AttributeManager::~AttributeManager()
uint64_t AttributeManager::getMemoryFootprint() const
{
uint64_t sum(0);
- for(AttributeMap::const_iterator it(_attributes.begin()), mt(_attributes.end()); it != mt; it++) {
- sum += it->second->getStatus().getAllocated();
+ for (const auto& elem : _attributes) {
+ sum += elem.second->getStatus().getAllocated();
}
return sum;
@@ -125,7 +125,7 @@ const AttributeManager::VectorHolder *
AttributeManager::findAndLoadAttribute(const string & name) const
{
const VectorHolder * loadedVector(nullptr);
- AttributeMap::const_iterator found = _attributes.find(name);
+ auto found = _attributes.find(name);
if (found != _attributes.end()) {
AttributeVector & vec = *found->second;
if ( ! vec.isLoaded() ) {
@@ -173,7 +173,7 @@ bool
AttributeManager::add(const AttributeManager::VectorHolder & vector)
{
bool retval(true);
- AttributeMap::iterator found = _attributes.find(vector->getName());
+ auto found = _attributes.find(vector->getName());
if (found == _attributes.end()) {
vector->setInterlock(_interlock);
_attributes[vector->getName()] = vector;
@@ -186,8 +186,8 @@ void
AttributeManager::getAttributeList(AttributeList & list) const
{
list.reserve(_attributes.size());
- for(AttributeMap::const_iterator it(_attributes.begin()), mt(_attributes.end()); it != mt; it++) {
- list.push_back(AttributeGuard(it->second));
+ for (const auto& elem : _attributes) {
+ list.push_back(AttributeGuard(elem.second));
}
}
@@ -224,7 +224,7 @@ AttributeManager::addVector(const string & name, const Config & config)
LOG(error, "Attribute Vector '%s' has type conflict", name.c_str());
}
} else {
- AttributeMap::iterator found = _attributes.find(name);
+ auto found = _attributes.find(name);
if (found != _attributes.end()) {
const VectorHolder & vh(found->second);
if ( vh.get() &&
diff --git a/searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp b/searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp
index 4aa9ad01f2c..62eaf05a2e3 100644
--- a/searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/posting_list_merger.cpp
@@ -64,7 +64,7 @@ PostingListMerger<DataT>::merge(PostingVector &v, PostingVector &temp, const Sta
size_t aEnd = startPos[i * 2 + 1];
size_t bStart = aEnd;
size_t bEnd = startPos[i * 2 + 2];
- typename PostingVector::const_iterator it = v.begin();
+ auto it = v.begin();
std::merge(it + aStart, it + aEnd,
it + bStart, it + bEnd,
temp.begin() + aStart);
diff --git a/searchlib/src/vespa/searchlib/attribute/postingchange.cpp b/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
index a49a17470d7..dca79f045a0 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingchange.cpp
@@ -25,15 +25,14 @@ struct CompareValue {
void
removeDupAdditions(PostingChange<AttributePosting>::A &additions)
{
- using Iterator = PostingChange<AttributePosting>::A::iterator;
if (additions.empty())
return;
if (additions.size() == 1)
return;
std::sort(additions.begin(), additions.end());
- Iterator i = additions.begin();
- Iterator ie = additions.end();
- Iterator d = i;
+ auto i = additions.begin();
+ auto ie = additions.end();
+ auto d = i;
for (++i; i != ie; ++i, ++d) {
if (d->_key == i->_key)
break;
@@ -53,15 +52,14 @@ removeDupAdditions(PostingChange<AttributePosting>::A &additions)
void
removeDupAdditions(PostingChange<AttributeWeightPosting>::A &additions)
{
- using Iterator = PostingChange<AttributeWeightPosting>::A::iterator;
if (additions.empty())
return;
if (additions.size() == 1u)
return;
std::sort(additions.begin(), additions.end());
- Iterator i = additions.begin();
- Iterator ie = additions.end();
- Iterator d = i;
+ auto i = additions.begin();
+ auto ie = additions.end();
+ auto d = i;
for (++i; i != ie; ++i, ++d) {
if (d->_key == i->_key)
break;
@@ -85,15 +83,14 @@ removeDupAdditions(PostingChange<AttributeWeightPosting>::A &additions)
void
removeDupRemovals(std::vector<uint32_t> &removals)
{
- using Iterator = std::vector<uint32_t>::iterator;
if (removals.empty())
return;
if (removals.size() == 1u)
return;
std::sort(removals.begin(), removals.end());
- Iterator i = removals.begin();
- Iterator ie = removals.end();
- Iterator d = i;
+ auto i = removals.begin();
+ auto ie = removals.end();
+ auto d = i;
for (++i; i != ie; ++i, ++d) {
if (*d == *i)
break;
diff --git a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
index 835aaadc559..444c935b6f8 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
@@ -1184,8 +1184,7 @@ lookupOverflow(uint64_t wordNum) const
assert(!_overflows.empty());
- OverflowVector::const_iterator lb =
- std::lower_bound(_overflows.begin(), _overflows.end(), wordNum);
+ auto lb = std::lower_bound(_overflows.begin(), _overflows.end(), wordNum);
assert(lb != _overflows.end());
assert(lb->_wordNum == wordNum);
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
index ea7fd5ee76c..022bc789e38 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
@@ -32,9 +32,9 @@ BitVectorCache::computeCountVector(KeySet & keys, CountVector & v) const
{
std::shared_lock guard(_mutex);
keySets.resize(_chunks.size());
- Key2Index::const_iterator end(_keys.end());
+ auto end = _keys.end();
for (Key k : keys) {
- Key2Index::const_iterator found = _keys.find(k);
+ auto found = _keys.find(k);
if (found != end) {
const KeyMeta & m = found->second;
keySets[m.chunkId()].insert(m.chunkIndex());
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp b/searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp
index f6dd5a318ae..efdbedd0941 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectordictionary.cpp
@@ -86,8 +86,7 @@ BitVectorDictionary::lookup(uint64_t wordNum)
{
WordSingleKey key;
key._wordNum = wordNum;
- std::vector<WordSingleKey>::const_iterator itr =
- std::lower_bound(_entries.begin(), _entries.end(), key);
+ auto itr = std::lower_bound(_entries.begin(), _entries.end(), key);
if (itr == _entries.end() || key < *itr) {
return BitVector::UP();
}
diff --git a/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp b/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp
index 90bcaabc7a5..b1757c0e831 100644
--- a/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp
@@ -342,7 +342,7 @@ public:
}
const DiskIndex::LookupResult &
lookup(const vespalib::string & word, uint32_t fieldId) {
- Cache::const_iterator it = _cache.find(word);
+ auto it = _cache.find(word);
if (it == _cache.end()) {
_cache[word] = _diskIndex.lookup(_fieldIds, word);
it = _cache.find(word);
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp b/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
index d01030ee975..20a5a76905f 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
@@ -317,8 +317,6 @@ FieldReaderStripInfo::scan_element_lengths(uint32_t scan_chunk)
void
FieldReaderStripInfo::read()
{
- using Element = search::index::WordDocElementFeatures;
-
for (;;) {
FieldReader::read();
DocIdAndFeatures &features = _docIdAndFeatures;
@@ -328,8 +326,7 @@ FieldReaderStripInfo::read()
assert(!features.has_raw_data());
uint32_t numElements = features.elements().size();
assert(numElements > 0);
- std::vector<Element>::iterator element =
- features.elements().begin();
+ auto element = features.elements().begin();
if (_hasElements) {
if (!_hasElementWeights) {
for (uint32_t elementDone = 0; elementDone < numElements; ++elementDone, ++element) {
diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
index 3a1b7928c93..07d31e16f66 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp
@@ -179,19 +179,16 @@ Zc4PostingWriter<bigEndian>::flush_word_no_skip()
const uint64_t *features = _featureWriteContext.getComprBuf();
uint64_t featureOffset = 0;
- std::vector<DocIdAndFeatureSize>::const_iterator dit = _docIds.begin();
- std::vector<DocIdAndFeatureSize>::const_iterator dite = _docIds.end();
-
- for (; dit != dite; ++dit) {
- uint32_t docId = dit->_doc_id;
- uint32_t featureSize = dit->_features_size;
+ for (const auto& elem : _docIds) {
+ uint32_t docId = elem._doc_id;
+ uint32_t featureSize = elem._features_size;
e.encodeExpGolomb(docId - baseDocId, docIdK);
baseDocId = docId + 1;
if (_encode_interleaved_features) {
- assert(dit->_field_length > 0);
- e.encodeExpGolomb(dit->_field_length - 1, K_VALUE_ZCPOSTING_FIELD_LENGTH);
- assert(dit->_num_occs > 0);
- e.encodeExpGolomb(dit->_num_occs - 1, K_VALUE_ZCPOSTING_NUM_OCCS);
+ assert(elem._field_length > 0);
+ e.encodeExpGolomb(elem._field_length - 1, K_VALUE_ZCPOSTING_FIELD_LENGTH);
+ assert(elem._num_occs > 0);
+ e.encodeExpGolomb(elem._num_occs - 1, K_VALUE_ZCPOSTING_NUM_OCCS);
}
if (featureSize != 0) {
e.writeBits(features + (featureOffset >> 6),
diff --git a/searchlib/src/vespa/searchlib/docstore/chunk.cpp b/searchlib/src/vespa/searchlib/docstore/chunk.cpp
index 35166cf8d78..60255af3521 100644
--- a/searchlib/src/vespa/searchlib/docstore/chunk.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/chunk.cpp
@@ -102,17 +102,17 @@ vespalib::ConstBufferRef
Chunk::getLid(uint32_t lid) const
{
vespalib::ConstBufferRef buf;
- for (LidList::const_iterator it(_lids.begin()), mt(_lids.end()); it != mt; it++) {
- if (it->getLid() == lid) {
+ for (const auto& elem : _lids) {
+ if (elem.getLid() == lid) {
#if 1
uint32_t bLid(0), bLen(0);
- vespalib::nbostream is(getData().data() + it->getOffset(), it->size());
+ vespalib::nbostream is(getData().data() + elem.getOffset(), elem.size());
is >> bLid >> bLen;
assert(bLid == lid);
- assert(bLen == it->netSize());
- assert((bLen + 2*sizeof(uint32_t)) == it->size());
+ assert(bLen == elem.netSize());
+ assert((bLen + 2*sizeof(uint32_t)) == elem.size());
#endif
- buf = vespalib::ConstBufferRef(getData().data() + it->getNetOffset(), it->netSize());
+ buf = vespalib::ConstBufferRef(getData().data() + elem.getNetOffset(), elem.netSize());
}
}
return buf;
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
index bde7492f485..7f63cb4c2d4 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
@@ -777,8 +777,7 @@ LogDataStore::preload()
if (!partList.empty()) {
verifyModificationTime(partList);
partList = scanDir(getBaseDir(), ".idx");
- using It = NameIdSet::const_iterator;
- for (It it(partList.begin()), mt(--partList.end()); it != mt; it++) {
+ for (auto it(partList.begin()), mt(--partList.end()); it != mt; it++) {
_fileChunks.push_back(createReadOnlyFile(FileId(_fileChunks.size()), *it));
}
_fileChunks.push_back(isReadOnly()
@@ -824,7 +823,7 @@ LogDataStore::NameIdSet
LogDataStore::findIncompleteCompactedFiles(const NameIdSet & partList) {
NameIdSet incomplete;
if ( !partList.empty()) {
- NameIdSet::const_iterator it = partList.begin();
+ auto it = partList.begin();
for (FileChunk::NameId prev = *it++; it != partList.end(); it++) {
if (prev.next() == *it) {
if (!incomplete.empty() && (*incomplete.rbegin() == prev)) {
@@ -869,15 +868,13 @@ LogDataStore::eraseIncompleteCompactedFiles(NameIdSet partList)
void
LogDataStore::eraseDanglingDatFiles(const NameIdSet &partList, const NameIdSet &datPartList)
{
- using IT = NameIdSet::const_iterator;
-
- IT iib(partList.begin());
- IT ii(iib);
- IT iie(partList.end());
- IT dib(datPartList.begin());
- IT di(dib);
- IT die(datPartList.end());
- IT dirb(die);
+ auto iib = partList.begin();
+ auto ii = iib;
+ auto iie = partList.end();
+ auto dib = datPartList.begin();
+ auto di = dib;
+ auto die = datPartList.end();
+ auto dirb = die;
NameId endMarker(NameId::last());
if (dirb != dib) {
diff --git a/searchlib/src/vespa/searchlib/engine/propertiesmap.cpp b/searchlib/src/vespa/searchlib/engine/propertiesmap.cpp
index 5989f952f17..48c95ba92b9 100644
--- a/searchlib/src/vespa/searchlib/engine/propertiesmap.cpp
+++ b/searchlib/src/vespa/searchlib/engine/propertiesmap.cpp
@@ -26,7 +26,7 @@ PropertiesMap::lookupCreate(vespalib::stringref name)
const fef::Properties &
PropertiesMap::lookup(vespalib::stringref name) const
{
- PropsMap::const_iterator pos = _propertiesMap.find(name);
+ auto pos = _propertiesMap.find(name);
if (pos == _propertiesMap.end()) {
return _emptyProperties;
}
diff --git a/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp b/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
index 16f5ee04be4..254d9d030af 100644
--- a/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
+++ b/searchlib/src/vespa/searchlib/expression/documentfieldnode.cpp
@@ -115,7 +115,7 @@ DocumentFieldNode::onPrepare(bool preserveAccurateTypes)
if ( !_fieldPath.empty() ) {
bool nestedMultiValue(false);
- for(document::FieldPath::const_iterator it(_fieldPath.begin()), mt(_fieldPath.end()); !nestedMultiValue && (it != mt); it++) {
+ for (auto it(_fieldPath.begin()), mt(_fieldPath.end()); !nestedMultiValue && (it != mt); it++) {
const FieldPathEntry & fpe = **it;
if (fpe.getType() == document::FieldPathEntry::STRUCT_FIELD) {
const FieldValue & fv = fpe.getFieldValueToSet();
diff --git a/searchlib/src/vespa/searchlib/expression/resultvector.h b/searchlib/src/vespa/searchlib/expression/resultvector.h
index 22fac0b214b..0c71f2f79e6 100644
--- a/searchlib/src/vespa/searchlib/expression/resultvector.h
+++ b/searchlib/src/vespa/searchlib/expression/resultvector.h
@@ -174,8 +174,8 @@ size_t
ResultNodeVectorT<B, C, G>::hash() const
{
size_t h(0);
- for(typename Vector::const_iterator it(_result.begin()), mt(_result.end()); it != mt; it++) {
- h ^= it->hash();
+ for (const auto& elem : _result) {
+ h ^= elem.hash();
}
return h;
}
@@ -184,8 +184,8 @@ template <typename B, typename C, typename G>
void
ResultNodeVectorT<B, C, G>::negate()
{
- for(typename Vector::iterator it(_result.begin()), mt(_result.end()); it != mt; it++) {
- it->negate();
+ for (auto& elem : _result) {
+ elem.negate();
}
}
@@ -194,7 +194,7 @@ const ResultNode *
ResultNodeVectorT<B, C, G>::find(const ResultNode & key) const
{
G getter;
- typename Vector::const_iterator found = std::lower_bound(_result.begin(), _result.end(), getter(key), typename C::less() );
+ auto found = std::lower_bound(_result.begin(), _result.end(), getter(key), typename C::less() );
if (found != _result.end()) {
typename C::equal equal;
return equal(*found, getter(key)) ? &(*found) : nullptr;
diff --git a/searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp b/searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp
index 55daf6ed5ff..91aca0a19fe 100644
--- a/searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/fieldlengthfeature.cpp
@@ -33,11 +33,8 @@ FieldLengthExecutor::execute(uint32_t docId)
{
uint32_t val = 0;
bool validVal = false;
- for (std::vector<TermFieldHandle>::const_iterator
- hi = _fieldHandles.begin(), hie = _fieldHandles.end();
- hi != hie; ++hi)
- {
- const TermFieldMatchData &tfmd = *_md->resolveTermField(*hi);
+ for (auto handle : _fieldHandles) {
+ const TermFieldMatchData &tfmd = *_md->resolveTermField(handle);
if (tfmd.getDocId() == docId) {
FieldPositionsIterator it = tfmd.getIterator();
if (it.valid()) {
diff --git a/searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp b/searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp
index 166280b289d..bd5d5ca952b 100644
--- a/searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/flow_completeness_feature.cpp
@@ -103,7 +103,7 @@ struct State {
Path nextP = firstP;
uint32_t pos = edges[j];
nextP.path.push_back(pos);
- TermIdxMap::const_iterator it = matchedTermForPos.find(pos);
+ auto it = matchedTermForPos.find(pos);
if (it == matchedTermForPos.end()) {
return nextP;
} else {
@@ -158,7 +158,7 @@ struct State {
uint32_t pos = positionsForTerm[tix][0];
assert(pos < posLimit);
- TermIdxMap::const_iterator it = matchedTermForPos.find(pos);
+ auto it = matchedTermForPos.find(pos);
if (it == matchedTermForPos.end()) {
++found;
matchedTermForPos[pos] = tix;
diff --git a/searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp b/searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp
index 5caa2bd577e..d2948ad3185 100644
--- a/searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/querycompletenessfeature.cpp
@@ -38,10 +38,8 @@ void
QueryCompletenessExecutor::execute(uint32_t docId)
{
uint32_t hit = 0, miss = 0;
- for (std::vector<search::fef::TermFieldHandle>::iterator it = _fieldHandles.begin();
- it != _fieldHandles.end(); ++it)
- {
- const fef::TermFieldMatchData &tfmd = *_md->resolveTermField(*it);
+ for (const auto& handle : _fieldHandles) {
+ const fef::TermFieldMatchData &tfmd = *_md->resolveTermField(handle);
if (tfmd.getDocId() == docId) {
search::fef::FieldPositionsIterator field = tfmd.getIterator();
while (field.valid() && field.getPosition() < _config.fieldBegin) {
diff --git a/searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp b/searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp
index af7c83cdc9a..dcb1227d6b5 100644
--- a/searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp
+++ b/searchlib/src/vespa/searchlib/fef/blueprintfactory.cpp
@@ -35,7 +35,7 @@ BlueprintFactory::visitDumpFeatures(const IIndexEnvironment &indexEnv,
Blueprint::SP
BlueprintFactory::createBlueprint(const vespalib::string &name) const
{
- BlueprintMap::const_iterator itr = _blueprintMap.find(name);
+ auto itr = _blueprintMap.find(name);
if (itr == _blueprintMap.end()) {
return {};
}
diff --git a/searchlib/src/vespa/searchlib/fef/objectstore.cpp b/searchlib/src/vespa/searchlib/fef/objectstore.cpp
index 4cf185ad55e..c7ef7aa0316 100644
--- a/searchlib/src/vespa/searchlib/fef/objectstore.cpp
+++ b/searchlib/src/vespa/searchlib/fef/objectstore.cpp
@@ -20,7 +20,7 @@ ObjectStore::~ObjectStore()
void
ObjectStore::add(const vespalib::string & key, Anything::UP value)
{
- ObjectMap::iterator found = _objectMap.find(key);
+ auto found = _objectMap.find(key);
if (found != _objectMap.end()) {
delete found->second;
found->second = NULL;
@@ -31,7 +31,7 @@ ObjectStore::add(const vespalib::string & key, Anything::UP value)
const Anything *
ObjectStore::get(const vespalib::string & key) const
{
- ObjectMap::const_iterator found = _objectMap.find(key);
+ auto found = _objectMap.find(key);
return (found != _objectMap.end()) ? found->second : NULL;
}
diff --git a/searchlib/src/vespa/searchlib/fef/properties.cpp b/searchlib/src/vespa/searchlib/fef/properties.cpp
index 70cfe802ad2..2cc4e50b593 100644
--- a/searchlib/src/vespa/searchlib/fef/properties.cpp
+++ b/searchlib/src/vespa/searchlib/fef/properties.cpp
@@ -62,7 +62,7 @@ uint32_t
Properties::count(vespalib::stringref key) const noexcept
{
if (!key.empty()) {
- Map::const_iterator node = _data.find(key);
+ auto node = _data.find(key);
if (node != _data.end()) {
return node->second.size();
}
@@ -74,7 +74,7 @@ Properties &
Properties::remove(vespalib::stringref key)
{
if (!key.empty()) {
- Map::iterator node = _data.find(key);
+ auto node = _data.find(key);
if (node != _data.end()) {
_numValues -= node->second.size();
_data.erase(node);
@@ -86,15 +86,13 @@ Properties::remove(vespalib::stringref key)
Properties &
Properties::import(const Properties &src)
{
- Map::const_iterator itr = src._data.begin();
- Map::const_iterator end = src._data.end();
- for (; itr != end; ++itr) {
- Map::insert_result res = _data.insert(Map::value_type(itr->first, itr->second));
+ for (const auto& elem : src._data) {
+ Map::insert_result res = _data.insert(Map::value_type(elem.first, elem.second));
if ( ! res.second) {
_numValues -= res.first->second.size();
- res.first->second = itr->second;
+ res.first->second = elem.second;
}
- _numValues += itr->second.size();
+ _numValues += elem.second.size();
}
return *this;
}
@@ -124,16 +122,12 @@ uint32_t
Properties::hashCode() const noexcept
{
uint32_t hash = numKeys() + numValues();
- Map::const_iterator itr = _data.begin();
- Map::const_iterator end = _data.end();
- for (; itr != end; ++itr) {
- const Key &key = itr->first;
- const Value &value = itr->second;
- Value::const_iterator v_itr = value.begin();
- Value::const_iterator v_end = value.end();
+ for (const auto& elem : _data) {
+ const Key &key = elem.first;
+ const Value &value = elem.second;
hash += rawHash(key.data(), key.size());
- for (; v_itr != v_end; ++v_itr) {
- hash += rawHash(v_itr->data(), v_itr->size());
+ for (const auto& velem : value) {
+ hash += rawHash(velem.data(), velem.size());
}
}
return hash;
@@ -142,10 +136,8 @@ Properties::hashCode() const noexcept
void
Properties::visitProperties(IPropertiesVisitor &visitor) const
{
- Map::const_iterator itr = _data.begin();
- Map::const_iterator end = _data.end();
- for (; itr != end; ++itr) {
- visitor.visitProperty(itr->first, Property(itr->second));
+ for (const auto& elem : _data) {
+ visitor.visitProperty(elem.first, Property(elem.second));
}
}
@@ -155,15 +147,13 @@ Properties::visitNamespace(vespalib::stringref ns,
{
vespalib::string tmp;
vespalib::string prefix = ns + ".";
- Map::const_iterator itr = _data.begin();
- Map::const_iterator end = _data.end();
- for (; itr != end; ++itr) {
- if ((itr->first.find(prefix) == 0) &&
- (itr->first.size() > prefix.size()))
+ for (const auto& elem : _data) {
+ if ((elem.first.find(prefix) == 0) &&
+ (elem.first.size() > prefix.size()))
{
- tmp = vespalib::stringref(itr->first.data() + prefix.size(),
- itr->first.size() - prefix.size());
- visitor.visitProperty(tmp, Property(itr->second));
+ tmp = vespalib::stringref(elem.first.data() + prefix.size(),
+ elem.first.size() - prefix.size());
+ visitor.visitProperty(tmp, Property(elem.second));
}
}
}
@@ -174,7 +164,7 @@ Properties::lookup(vespalib::stringref key) const noexcept
if (key.empty()) {
return Property();
}
- Map::const_iterator node = _data.find(key);
+ auto node = _data.find(key);
if (node == _data.end()) {
return Property();
}
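
Properties::import and the other loops in properties.cpp now iterate _data with range-based for and keep std::map's insert result in an auto. The merge-and-count pattern on a plain map, with hypothetical names (importInto and numValues are illustrative):

    #include <cstddef>
    #include <map>
    #include <string>
    #include <vector>

    using Map = std::map<std::string, std::vector<std::string>>;

    // Merges src into dst, replacing values for existing keys and keeping a running
    // count of stored values, in the same shape as the import() loop above.
    void importInto(Map& dst, std::size_t& numValues, const Map& src) {
        for (const auto& elem : src) {
            auto res = dst.insert(elem);          // std::pair<Map::iterator, bool>
            if (!res.second) {                    // key already present: overwrite
                numValues -= res.first->second.size();
                res.first->second = elem.second;
            }
            numValues += elem.second.size();
        }
    }
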
diff --git a/searchlib/src/vespa/searchlib/fef/tablemanager.cpp b/searchlib/src/vespa/searchlib/fef/tablemanager.cpp
index 6169e712c37..59bc0b5f600 100644
--- a/searchlib/src/vespa/searchlib/fef/tablemanager.cpp
+++ b/searchlib/src/vespa/searchlib/fef/tablemanager.cpp
@@ -12,7 +12,7 @@ const Table *
TableManager::getTable(const vespalib::string & name) const
{
std::lock_guard guard(_lock);
- TableCache::const_iterator itr = _cache.find(name);
+ auto itr = _cache.find(name);
if (itr != _cache.end()) {
return itr->second.get();
}
diff --git a/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp b/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
index 4709c17408a..beebc8b78a0 100644
--- a/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
+++ b/searchlib/src/vespa/searchlib/fef/test/matchdatabuilder.cpp
@@ -114,15 +114,11 @@ bool
MatchDataBuilder::apply(uint32_t docId)
{
// For each term, do
- for (TermMap::const_iterator term_iter = _match.begin();
- term_iter != _match.end(); ++term_iter)
- {
- uint32_t termId = term_iter->first;
-
- for (FieldPositions::const_iterator field_iter = term_iter->second.begin();
- field_iter != term_iter->second.end(); ++field_iter)
- {
- uint32_t fieldId = field_iter->first;
+ for (const auto& term_elem : _match) {
+ uint32_t termId = term_elem.first;
+
+ for (const auto& field_elem : term_elem.second) {
+ uint32_t fieldId = field_elem.first;
TermFieldMatchData *match = getTermFieldMatchData(termId, fieldId);
// Make sure there is a corresponding term field match data object.
@@ -134,7 +130,7 @@ MatchDataBuilder::apply(uint32_t docId)
// find field data
MyField field;
- IndexData::const_iterator idxItr = _index.find(fieldId);
+ auto idxItr = _index.find(fieldId);
if (idxItr != _index.end()) {
field = idxItr->second;
}
@@ -144,11 +140,8 @@ MatchDataBuilder::apply(uint32_t docId)
vespalib::string name = info != nullptr ? info->name() : vespalib::make_string("%d", fieldId).c_str();
// For each occurrence of that term, in that field, do
- for (Positions::const_iterator occ_iter = field_iter->second.begin();
- occ_iter != field_iter->second.end(); occ_iter++)
- {
+ for (const auto& occ : field_elem.second) {
// Append a term match position to the term match data.
- Position occ = *occ_iter;
match->appendPosition(TermFieldMatchDataPosition(
occ.eid,
occ.pos,
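
MatchDataBuilder::apply above now walks its term-to-field-to-positions structure with nested range-based for loops rather than three iterator declarations. A compact, self-contained sketch of that traversal over hypothetical container aliases:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <vector>

    struct Position { uint32_t eid; uint32_t pos; };
    using FieldPositions = std::map<uint32_t, std::vector<Position>>;  // fieldId -> occurrences
    using TermMap        = std::map<uint32_t, FieldPositions>;         // termId  -> fields

    void dump(const TermMap& match) {
        for (const auto& term_elem : match) {
            uint32_t termId = term_elem.first;
            for (const auto& field_elem : term_elem.second) {
                uint32_t fieldId = field_elem.first;
                for (const auto& occ : field_elem.second) {            // each occurrence
                    std::printf("term %u field %u -> (%u, %u)\n",
                                termId, fieldId, occ.eid, occ.pos);
                }
            }
        }
    }
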
diff --git a/searchlib/src/vespa/searchlib/fef/test/rankresult.cpp b/searchlib/src/vespa/searchlib/fef/test/rankresult.cpp
index 4de4c56e3ac..3ac7b857173 100644
--- a/searchlib/src/vespa/searchlib/fef/test/rankresult.cpp
+++ b/searchlib/src/vespa/searchlib/fef/test/rankresult.cpp
@@ -25,7 +25,7 @@ RankResult::addScore(const vespalib::string & featureName, feature_t score)
feature_t
RankResult::getScore(const vespalib::string & featureName) const
{
- RankScores::const_iterator itr = _rankScores.find(featureName);
+ auto itr = _rankScores.find(featureName);
if (itr != _rankScores.end()) {
return itr->second;
}
@@ -43,19 +43,18 @@ RankResult::includes(const RankResult & rhs) const
{
double epsilon = std::max(_epsilon, rhs._epsilon);
- RankScores::const_iterator findItr;
- for (RankScores::const_iterator itr = rhs._rankScores.begin(); itr != rhs._rankScores.end(); ++itr) {
- findItr = _rankScores.find(itr->first);
+ for (const auto& score : rhs._rankScores) {
+ auto findItr = _rankScores.find(score.first);
if (findItr == _rankScores.end()) {
- LOG(info, "Did not find expected feature '%s' in this rank result", itr->first.c_str());
+ LOG(info, "Did not find expected feature '%s' in this rank result", score.first.c_str());
return false;
}
- if (itr->second < findItr->second - epsilon ||
- itr->second > findItr->second + epsilon ||
- (std::isnan(findItr->second) && !std::isnan(itr->second)))
+ if (score.second < findItr->second - epsilon ||
+ score.second > findItr->second + epsilon ||
+ (std::isnan(findItr->second) && !std::isnan(score.second)))
{
- LOG(info, "Feature '%s' did not have expected score.", itr->first.c_str());
- LOG(info, "Expected: %f ~ %f", itr->second, epsilon);
+ LOG(info, "Feature '%s' did not have expected score.", score.first.c_str());
+ LOG(info, "Expected: %f ~ %f", score.second, epsilon);
LOG(info, "Actual : %f", findItr->second);
return false;
}
@@ -73,8 +72,8 @@ RankResult::clear()
std::vector<vespalib::string> &
RankResult::getKeys(std::vector<vespalib::string> &ret)
{
- for (RankScores::const_iterator it = _rankScores.begin(); it != _rankScores.end(); ++it) {
- ret.push_back(it->first);
+ for (const auto& score : _rankScores) {
+ ret.push_back(score.first);
}
return ret;
}
@@ -99,8 +98,8 @@ RankResult::getEpsilon() const {
std::ostream & operator<<(std::ostream & os, const RankResult & rhs) {
os << "[";
- for (RankResult::RankScores::const_iterator itr = rhs._rankScores.begin(); itr != rhs._rankScores.end(); ++itr) {
- os << "['" << itr->first << "' = " << itr->second << "]";
+ for (const auto& score : rhs._rankScores) {
+ os << "['" << score.first << "' = " << score.second << "]";
}
return os << "]";
}
diff --git a/searchlib/src/vespa/searchlib/grouping/collect.h b/searchlib/src/vespa/searchlib/grouping/collect.h
index 34906e90324..198daed2e18 100644
--- a/searchlib/src/vespa/searchlib/grouping/collect.h
+++ b/searchlib/src/vespa/searchlib/grouping/collect.h
@@ -23,7 +23,7 @@ protected:
int diff(0);
size_t aOff(getAggrBase(a));
size_t bOff(getAggrBase(b));
- for(std::vector<SortInfo>::const_iterator it(_sortInfo.begin()), mt(_sortInfo.end()); (diff == 0) && (it != mt); it++) {
+ for (auto it(_sortInfo.begin()), mt(_sortInfo.end()); (diff == 0) && (it != mt); it++) {
diff = _aggregator[it->getIndex()].cmp(&_aggrBacking[aOff], &_aggrBacking[bOff]) * it->getSign();
}
return diff;
diff --git a/searchlib/src/vespa/searchlib/grouping/groupengine.cpp b/searchlib/src/vespa/searchlib/grouping/groupengine.cpp
index 853548b47f7..5039082434b 100644
--- a/searchlib/src/vespa/searchlib/grouping/groupengine.cpp
+++ b/searchlib/src/vespa/searchlib/grouping/groupengine.cpp
@@ -50,7 +50,7 @@ GroupRef GroupEngine::group(Children & children, uint32_t docId, double rank)
throw std::runtime_error("Does not know how to handle failed select statements");
}
const ResultNode &selectResult = *selector.getResult();
- Children::iterator found = children.find(selectResult);
+ auto found = children.find(selectResult);
GroupRef gr;
if (found == children.end()) {
if (_request->allowMoreGroups(children.size())) {
@@ -158,8 +158,8 @@ Group::UP GroupEngine::getGroup(GroupRef ref) const
std::vector<GroupRef> v(ch.size());
{
size_t i(0);
- for (Children::const_iterator it(ch.begin()), mt(ch.end()); it != mt; it++) {
- v[i++] = *it;
+ for (const auto& elem : ch) {
+ v[i++] = elem;
}
}
uint64_t maxN(_nextEngine->_request->getPrecision());
diff --git a/searchlib/src/vespa/searchlib/index/postinglistparams.cpp b/searchlib/src/vespa/searchlib/index/postinglistparams.cpp
index 6275399c498..27f2d60d420 100644
--- a/searchlib/src/vespa/searchlib/index/postinglistparams.cpp
+++ b/searchlib/src/vespa/searchlib/index/postinglistparams.cpp
@@ -14,9 +14,7 @@ namespace search::index {
bool
PostingListParams::isSet(const vespalib::string &key) const
{
- Map::const_iterator it;
-
- it = _map.find(key);
+ auto it = _map.find(key);
if (it != _map.end()) {
return true;
}
@@ -33,9 +31,7 @@ PostingListParams::setStr(const vespalib::string &key,
const vespalib::string &
PostingListParams::getStr(const vespalib::string &key) const
{
- Map::const_iterator it;
-
- it = _map.find(key);
+ auto it = _map.find(key);
if (it != _map.end()) {
return it->second;
}
@@ -81,9 +77,7 @@ void
PostingListParams::get(const vespalib::string &key, TYPE &val) const
{
std::istringstream is;
- Map::const_iterator it;
-
- it = _map.find(key);
+ auto it = _map.find(key);
if (it != _map.end()) {
is.str(it->second);
is >> val;
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp
index 25aff06b5ef..206b92c85d0 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_inverter.cpp
@@ -138,12 +138,12 @@ FieldInverter::processAnnotations(const StringFieldValue &value)
}
}
std::sort(_terms.begin(), _terms.end());
- SpanTermVector::const_iterator it = _terms.begin();
- SpanTermVector::const_iterator ite = _terms.end();
+ auto it = _terms.begin();
+ auto ite = _terms.end();
uint32_t wordRef;
bool mustStep = false;
for (; it != ite; ) {
- SpanTermVector::const_iterator it_begin = it;
+ auto it_begin = it;
for (; it != ite && it->first == it_begin->first; ++it) {
if (it->second) { // it->second is a const FieldValue *.
wordRef = saveWord(*it->second);
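
FieldInverter::processAnnotations above sorts _terms and then advances an auto iterator over runs that share the same first element. The run-grouping idiom on its own, assuming a simple pair vector (forEachRun and the int pair are hypothetical):

    #include <algorithm>
    #include <utility>
    #include <vector>

    // Calls handle(elem) for each element, visiting equal keys as consecutive runs,
    // in the same shape as the it / it_begin loop above.
    template <typename Handler>
    void forEachRun(std::vector<std::pair<int, int>> terms, Handler&& handle) {
        std::sort(terms.begin(), terms.end());
        auto it  = terms.begin();
        auto ite = terms.end();
        while (it != ite) {
            auto it_begin = it;                      // first element of the current run
            for (; it != ite && it->first == it_begin->first; ++it) {
                handle(*it);
            }
        }
    }

A caller could count runs or emit one posting per run; the point is that auto keeps both cursors in the container's iterator type without naming it.
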
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp b/searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp
index 5297646d7f8..1ac715ca92d 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearsearch.cpp
@@ -21,10 +21,8 @@ void setup_fields(uint32_t window, std::vector<T> &matchers, const TermFieldMatc
for (size_t i = 0; i < in.size(); ++i) {
fields.insert(in[i]->getFieldId());
}
- std::set<uint32_t>::const_iterator pos = fields.begin();
- std::set<uint32_t>::const_iterator end = fields.end();
- for (; pos != end; ++pos) {
- matchers.push_back(T(window, *pos, in));
+ for (const auto& elem : fields) {
+ matchers.push_back(T(window, elem, in));
}
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp
index 6bba9d96d02..9521bed7827 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeegcompr64filterocc.cpp
@@ -155,12 +155,8 @@ setupT(const FakeWord &fw)
uint64_t lastL4SkipL3SkipPos = 0;
unsigned int l4SkipCnt = 0;
-
- using FW = FakeWord;
- using DWFL = FW::DocWordFeatureList;
-
- DWFL::const_iterator d(fw._postings.begin());
- DWFL::const_iterator de(fw._postings.end());
+ auto d = fw._postings.begin();
+ auto de = fw._postings.end();
if (d != de) {
// Prefix support needs counts embedded in posting list
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp
index 5bd1d2044fe..a412a779006 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakefilterocc.cpp
@@ -21,27 +21,18 @@ FakeFilterOcc::FakeFilterOcc(const FakeWord &fw)
{
std::vector<uint32_t> fake;
- using FW = FakeWord;
- using DWFL = FW::DocWordFeatureList;
-
- DWFL::const_iterator d(fw._postings.begin());
- DWFL::const_iterator de(fw._postings.end());
-
- while (d != de) {
- fake.push_back(d->_docId);
- ++d;
+ for (const auto& elem : fw._postings) {
+ fake.push_back(elem._docId);
}
std::swap(_uncompressed, fake);
_docIdLimit = fw._docIdLimit;
_hitDocs = fw._postings.size();
}
-
FakeFilterOcc::~FakeFilterOcc()
{
}
-
void
FakeFilterOcc::forceLink()
{
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
index ec476c4f6cf..99d0fb3b3f1 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp
@@ -228,10 +228,9 @@ FakeMemTreeOccMgr::remove(uint32_t wordIdx, uint32_t docId)
void
FakeMemTreeOccMgr::sortUnflushed()
{
- using I = std::vector<PendingOp>::iterator;
uint32_t seq = 0;
- for (I i(_unflushed.begin()), ie(_unflushed.end()); i != ie; ++i) {
- i->setSeq(++seq);
+ for (auto& elem : _unflushed) {
+ elem.setSeq(++seq);
}
std::sort(_unflushed.begin(), _unflushed.end());
}
@@ -241,16 +240,15 @@ void
FakeMemTreeOccMgr::flush()
{
using Aligner = FeatureStore::Aligner;
- using I = std::vector<PendingOp>::iterator;
if (_unflushed.empty())
return;
uint32_t lastWord = std::numeric_limits<uint32_t>::max();
sortUnflushed();
- for (I i(_unflushed.begin()), ie(_unflushed.end()); i != ie; ++i) {
- uint32_t wordIdx = i->getWordIdx();
- uint32_t docId = i->getDocId();
+ for (auto& elem : _unflushed) {
+ uint32_t wordIdx = elem.getWordIdx();
+ uint32_t docId = elem.getDocId();
PostingIdx &pidx(*_postingIdxs[wordIdx].get());
Tree &tree = pidx._tree;
Tree::Iterator &itr = pidx._iterator;
@@ -261,7 +259,7 @@ FakeMemTreeOccMgr::flush()
itr.linearSeek(docId);
}
lastWord = wordIdx;
- if (i->getRemove()) {
+ if (elem.getRemove()) {
if (itr.valid() && itr.getKey() == docId) {
uint64_t bits = _featureStore.bitSize(fw->getPackedIndex(), EntryRef(itr.getData().get_features_relaxed()));
_featureSizes[wordIdx] -= Aligner::align((bits + 7) / 8) * 8;
@@ -269,7 +267,7 @@ FakeMemTreeOccMgr::flush()
}
} else {
if (!itr.valid() || docId < itr.getKey()) {
- tree.insert(itr, docId, PostingListEntryType(i->getFeatureRef(), 0, 1));
+ tree.insert(itr, docId, PostingListEntryType(elem.getFeatureRef(), 0, 1));
}
}
}
@@ -320,13 +318,12 @@ FakeMemTreeOccFactory::~FakeMemTreeOccFactory()
FakePosting::SP
FakeMemTreeOccFactory::make(const FakeWord &fw)
{
- std::map<const FakeWord *, uint32_t>::const_iterator
- i(_mgr._fw2WordIdx.find(&fw));
+ auto itr = _mgr._fw2WordIdx.find(&fw);
- if (i == _mgr._fw2WordIdx.end())
+ if (itr == _mgr._fw2WordIdx.end())
LOG_ABORT("should not be reached");
- uint32_t wordIdx = i->second;
+ uint32_t wordIdx = itr->second;
assert(_mgr._postingIdxs.size() > wordIdx);
@@ -341,8 +338,8 @@ FakeMemTreeOccFactory::setup(const std::vector<const FakeWord *> &fws)
using PostingIdx = FakeMemTreeOccMgr::PostingIdx;
std::vector<FakeWord::RandomizedReader> r;
uint32_t wordIdx = 0;
- std::vector<const FakeWord *>::const_iterator fwi(fws.begin());
- std::vector<const FakeWord *>::const_iterator fwe(fws.end());
+ auto fwi = fws.begin();
+ auto fwe = fws.end();
while (fwi != fwe) {
_mgr._fakeWords.push_back(*fwi);
_mgr._featureSizes.push_back(0);
@@ -355,8 +352,8 @@ FakeMemTreeOccFactory::setup(const std::vector<const FakeWord *> &fws)
}
PostingPriorityQueueMerger<FakeWord::RandomizedReader, FakeWord::RandomizedWriter> heap;
- std::vector<FakeWord::RandomizedReader>::iterator i(r.begin());
- std::vector<FakeWord::RandomizedReader>::iterator ie(r.end());
+ auto i = r.begin();
+ auto ie = r.end();
FlushToken flush_token;
while (i != ie) {
i->read();
@@ -386,8 +383,7 @@ FakeMemTreeOcc2Factory::~FakeMemTreeOcc2Factory() = default;
FakePosting::SP
FakeMemTreeOcc2Factory::make(const FakeWord &fw)
{
- std::map<const FakeWord *, uint32_t>::const_iterator
- i(_mgr._fw2WordIdx.find(&fw));
+ auto i = _mgr._fw2WordIdx.find(&fw);
if (i == _mgr._fw2WordIdx.end())
LOG_ABORT("should not be reached");
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
index f68fb4a9037..4242f71bd60 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
@@ -246,11 +246,11 @@ FakeWord::fakeup(search::BitVector &bitmap,
}
uint32_t field_len = 0;
do {
- DocWordPosFeatureList::iterator ie(wpf.end());
- DocWordPosFeatureList::iterator i(wpf.begin());
+ auto ie = wpf.end();
+ auto i = wpf.begin();
while (i != ie) {
uint32_t lastwordpos = i->_wordPos;
- DocWordPosFeatureList::iterator pi(i);
+ auto pi = i;
++i;
while (i != ie &&
pi->_elementId == i->_elementId) {
@@ -287,11 +287,8 @@ FakeWord::fakeup(search::BitVector &bitmap,
dwf._accPositions = wordPosFeatures.size();
assert(dwf._positions == wpf.size());
postings.push_back(dwf);
- DocWordPosFeatureList::iterator ie(wpf.end());
- DocWordPosFeatureList::iterator i(wpf.begin());
- while (i != ie) {
- wordPosFeatures.push_back(*i);
- ++i;
+ for (const auto& elem : wpf) {
+ wordPosFeatures.push_back(elem);
}
++idx;
if (idx >= docIdLimit)
@@ -318,12 +315,11 @@ FakeWord::fakeupTemps(vespalib::Rand48 &rnd,
void
FakeWord::setupRandomizer(vespalib::Rand48 &rnd)
{
- using DWFL = DocWordFeatureList;
Randomizer randomAdd;
Randomizer randomRem;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
int32_t ref = 0;
while (d != de) {
@@ -338,8 +334,8 @@ FakeWord::setupRandomizer(vespalib::Rand48 &rnd)
++ref;
}
- DWFL::const_iterator ed(_extraPostings.begin());
- DWFL::const_iterator ede(_extraPostings.end());
+ auto ed = _extraPostings.begin();
+ auto ede = _extraPostings.end();
int32_t eref = -1;
uint32_t tref = 0;
@@ -378,9 +374,8 @@ FakeWord::setupRandomizer(vespalib::Rand48 &rnd)
void
FakeWord::addDocIdBias(uint32_t docIdBias)
{
- using DWFL = DocWordFeatureList;
- DWFL::iterator d(_postings.begin());
- DWFL::iterator de(_postings.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
for (; d != de; ++d) {
d->_docId += docIdBias;
}
@@ -404,14 +399,12 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator,
iterator->initFullRange();
uint32_t docId = 0;
- using DWFL = DocWordFeatureList;
- using DWPFL = DocWordPosFeatureList;
using TMDPI = TermFieldMatchData::PositionsIterator;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DWPFL::const_iterator p(_wordPosFeatures.begin());
- DWPFL::const_iterator pe(_wordPosFeatures.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto p = _wordPosFeatures.begin();
+ auto pe = _wordPosFeatures.end();
if (verbose)
printf("Start validate word '%s'\n", _name.c_str());
@@ -484,14 +477,12 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator,
iterator->initFullRange();
uint32_t docId = 1;
- using DWFL = DocWordFeatureList;
- using DWPFL = DocWordPosFeatureList;
using TMDPI = TermFieldMatchData::PositionsIterator;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DWPFL::const_iterator p(_wordPosFeatures.begin());
- DWPFL::const_iterator pe(_wordPosFeatures.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto p = _wordPosFeatures.begin();
+ auto pe = _wordPosFeatures.end();
if (verbose)
printf("Start validate word '%s'\n", _name.c_str());
@@ -556,10 +547,8 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator, bool verbose) co
iterator->initFullRange();
uint32_t docId = 1;
- using DWFL = DocWordFeatureList;
-
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
if (verbose)
printf("Start validate word '%s'\n", _name.c_str());
@@ -599,14 +588,12 @@ FakeWord::validate(FieldReader &fieldReader,
uint32_t presidue;
bool unpres;
- using DWFL = DocWordFeatureList;
- using DWPFL = DocWordPosFeatureList;
using TMDPI = TermFieldMatchData::PositionsIterator;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DWPFL::const_iterator p(_wordPosFeatures.begin());
- DWPFL::const_iterator pe(_wordPosFeatures.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto p = _wordPosFeatures.begin();
+ auto pe = _wordPosFeatures.end();
if (verbose)
printf("Start validate word '%s'\n", _name.c_str());
@@ -633,13 +620,8 @@ FakeWord::validate(FieldReader &fieldReader,
#else
(void) unpres;
- using Elements = WordDocElementFeatures;
- using Positions = WordDocElementWordPosFeatures;
-
- std::vector<Elements>::const_iterator element =
- features.elements().begin();
- std::vector<Positions>::const_iterator position =
- features.word_positions().begin();
+ auto element = features.elements().begin();
+ auto position = features.word_positions().begin();
TermFieldMatchData *tfmd = matchData[0];
assert(tfmd != 0);
@@ -701,12 +683,10 @@ FakeWord::validate(FieldReader &fieldReader,
void
FakeWord::validate(const std::vector<uint32_t> &docIds) const
{
- using DWFL = DocWordFeatureList;
- using DL = std::vector<uint32_t>;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DL::const_iterator di(docIds.begin());
- DL::const_iterator die(docIds.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto di = docIds.begin();
+ auto die = docIds.end();
while (d != de) {
assert(di != die);
@@ -721,9 +701,8 @@ FakeWord::validate(const std::vector<uint32_t> &docIds) const
void
FakeWord::validate(const search::BitVector &bv) const
{
- using DWFL = DocWordFeatureList;
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
uint32_t bitHits = bv.countTrueBits();
assert(bitHits == _postings.size());
(void) bitHits;
@@ -745,13 +724,10 @@ FakeWord::dump(FieldWriter &fieldWriter,
uint32_t residue;
DocIdAndPosOccFeatures features;
- using DWFL = DocWordFeatureList;
- using DWPFL = DocWordPosFeatureList;
-
- DWFL::const_iterator d(_postings.begin());
- DWFL::const_iterator de(_postings.end());
- DWPFL::const_iterator p(_wordPosFeatures.begin());
- DWPFL::const_iterator pe(_wordPosFeatures.end());
+ auto d = _postings.begin();
+ auto de = _postings.end();
+ auto p = _wordPosFeatures.begin();
+ auto pe = _wordPosFeatures.end();
if (verbose)
printf("Start dumping word '%s'\n", _name.c_str());
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp
index 3caca05669c..dc6f546fad0 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakezcbfilterocc.cpp
@@ -64,12 +64,8 @@ FakeZcbFilterOcc::FakeZcbFilterOcc(const FakeWord &fw)
std::vector<uint8_t> bytes;
uint32_t lastDocId = 0u;
-
- using FW = FakeWord;
- using DWFL = FW::DocWordFeatureList;
-
- DWFL::const_iterator d(fw._postings.begin());
- DWFL::const_iterator de(fw._postings.end());
+ auto d = fw._postings.begin();
+ auto de = fw._postings.end();
while (d != de) {
if (lastDocId == 0u) {
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp
index 24c2f82279e..809746a87e6 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp
@@ -149,14 +149,10 @@ FakeZcFilterOcc::setupT(const FakeWord &fw)
PostingListCounts counts;
Zc4PostingWriter<bigEndian> writer(counts);
- using FW = FakeWord;
- using DWFL = FW::DocWordFeatureList;
- using DWPFL = FW::DocWordPosFeatureList;
-
- DWFL::const_iterator d(fw._postings.begin());
- DWFL::const_iterator de(fw._postings.end());
- DWPFL::const_iterator p(fw._wordPosFeatures.begin());
- DWPFL::const_iterator pe(fw._wordPosFeatures.end());
+ auto d = fw._postings.begin();
+ auto de = fw._postings.end();
+ auto p = fw._wordPosFeatures.begin();
+ auto pe = fw._wordPosFeatures.end();
DocIdAndPosOccFeatures features;
EGPosOccEncodeContext<bigEndian> f1(&_fieldsParams);
EG2PosOccEncodeContext<bigEndian> f0(&_fieldsParams);
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp
index d705541b3fc..150d8a3af32 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fpfactory.cpp
@@ -49,7 +49,7 @@ getFPFactory(const std::string &name, const Schema &schema)
if (fpFactoryMap == nullptr)
return nullptr;
- FPFactoryMap::const_iterator i(fpFactoryMap->find(name));
+ auto i = fpFactoryMap->find(name);
if (i != fpFactoryMap->end())
return i->second(schema);
@@ -64,10 +64,9 @@ getPostingTypes()
std::vector<std::string> res;
if (fpFactoryMap != nullptr)
- for (FPFactoryMap::const_iterator i(fpFactoryMap->begin());
- i != fpFactoryMap->end();
- ++i)
- res.push_back(i->first);
+ for (const auto& elem : *fpFactoryMap) {
+ res.push_back(elem.first);
+ }
return res;
}
diff --git a/searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp b/searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp
index 933e14fe379..4197dee6cba 100644
--- a/searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp
+++ b/searchlib/src/vespa/searchlib/test/mock_attribute_context.cpp
@@ -21,17 +21,13 @@ MockAttributeContext::getAttributeStableEnum(const string &name) const {
}
void
MockAttributeContext::getAttributeList(std::vector<const IAttributeVector *> & list) const {
- Map::const_iterator pos = _vectors.begin();
- Map::const_iterator end = _vectors.end();
- for (; pos != end; ++pos) {
- list.push_back(pos->second);
+ for (const auto& elem : _vectors) {
+ list.push_back(elem.second);
}
}
MockAttributeContext::~MockAttributeContext() {
- Map::iterator pos = _vectors.begin();
- Map::iterator end = _vectors.end();
- for (; pos != end; ++pos) {
- delete pos->second;
+ for (auto& elem : _vectors) {
+ delete elem.second;
}
}
diff --git a/searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp b/searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp
index bc5e8356957..15cb3065b75 100644
--- a/searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp
+++ b/searchlib/src/vespa/searchlib/test/mock_attribute_manager.cpp
@@ -6,7 +6,7 @@ namespace search::attribute::test {
AttributeVector::SP
MockAttributeManager::findAttribute(const vespalib::string &name) const {
- AttributeMap::const_iterator itr = _attributes.find(name);
+ auto itr = _attributes.find(name);
if (itr != _attributes.end()) {
return itr->second;
}
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
index 8916a4cf0b5..9ec186f92c0 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
@@ -164,7 +164,7 @@ Session *
TransLogClient::findSession(const vespalib::string & domainName, int sessionId)
{
SessionKey key(domainName, sessionId);
- SessionMap::iterator found(_sessions.find(key));
+ auto found = _sessions.find(key);
Session * session((found != _sessions.end()) ? found->second : nullptr);
return session;
}
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
index db02f4f037e..ac9c6318fb5 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
@@ -519,8 +519,8 @@ TransLogServer::listDomains(FRT_RPCRequest *req)
vespalib::string domains;
ReadGuard domainGuard(_domainMutex);
- for(DomainList::const_iterator it(_domains.begin()), mt(_domains.end()); it != mt; it++) {
- domains += it->second->name();
+ for (const auto& elem : _domains) {
+ domains += elem.second->name();
domains += "\n";
}
ret.AddInt32(0);
diff --git a/searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp b/searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp
index 7f4733d8a70..b7fc9cfab05 100644
--- a/searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp
+++ b/searchlib/src/vespa/searchlib/util/posting_priority_queue.hpp
@@ -10,7 +10,6 @@ template <class Reader>
void
PostingPriorityQueue<Reader>::adjust()
{
- using VIT = typename Vector::iterator;
if (!_vec.front().get()->isValid()) {
_vec.erase(_vec.begin()); // Iterator no longer valid
return;
@@ -19,9 +18,9 @@ PostingPriorityQueue<Reader>::adjust()
return;
}
// Perform binary search to find first element higher than changed value
- VIT gt = std::upper_bound(_vec.begin() + 1, _vec.end(), _vec.front());
- VIT to = _vec.begin();
- VIT from = to;
+ auto gt = std::upper_bound(_vec.begin() + 1, _vec.end(), _vec.front());
+ auto to = _vec.begin();
+ auto from = to;
++from;
Ref changed = *to; // Remember changed value
while (from != gt) { // Shift elements to make space for changed value
diff --git a/socket_test/pom.xml b/socket_test/pom.xml
index f68b04d01c8..50965f05cdf 100644
--- a/socket_test/pom.xml
+++ b/socket_test/pom.xml
@@ -22,8 +22,8 @@
<artifactId>maven-compiler-plugin</artifactId>
</plugin>
<plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
- <version>2.4</version>
<configuration>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
diff --git a/streamingvisitors/src/vespa/searchvisitor/rankmanager.h b/streamingvisitors/src/vespa/searchvisitor/rankmanager.h
index 71910c65242..54414f80512 100644
--- a/streamingvisitors/src/vespa/searchvisitor/rankmanager.h
+++ b/streamingvisitors/src/vespa/searchvisitor/rankmanager.h
@@ -44,7 +44,7 @@ public:
bool initRankSetup(const search::fef::BlueprintFactory & factory);
bool setup(const RankManager & manager);
int getIndex(const vespalib::string & key) const {
- Map::const_iterator found(_rpmap.find(key));
+ auto found = _rpmap.find(key);
return (found != _rpmap.end()) ? found->second : 0;
}
@@ -60,7 +60,7 @@ public:
return _indexEnv[getIndex(rankProfile)];
}
const View *getView(const vespalib::string & index) const {
- ViewMap::const_iterator itr = _views.find(index);
+ auto itr = _views.find(index);
if (itr != _views.end()) {
return &itr->second;
}
diff --git a/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp b/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
index 92e1ff0e460..78d72102fe9 100644
--- a/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
@@ -81,10 +81,8 @@ RankProcessor::initQueryEnvironment()
vespalib::string expandedIndexName = vsm::FieldSearchSpecMap::stripNonFields(term.getTerm()->index());
const RankManager::View *view = _rankManagerSnapshot->getView(expandedIndexName);
if (view != nullptr) {
- RankManager::View::const_iterator iter = view->begin();
- RankManager::View::const_iterator endp = view->end();
- for (; iter != endp; ++iter) {
- qtd.getTermData().addField(*iter).setHandle(_mdLayout.allocTermField(*iter));
+ for (auto field_id : *view) {
+ qtd.getTermData().addField(field_id).setHandle(_mdLayout.allocTermField(field_id));
}
} else {
LOG(warning, "Could not find a view for index '%s'. Ranking no fields.",
diff --git a/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp b/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
index 75e07615bd9..2119364c2bc 100644
--- a/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
@@ -131,10 +131,10 @@ SearchEnvironment::getEnv(const vespalib::string & searchCluster)
std::lock_guard guard(_lock);
_threadLocals.emplace_back(std::move(envMap));
}
- EnvMap::iterator localFound = _localEnvMap->find(searchCluster);
+ auto localFound = _localEnvMap->find(searchCluster);
if (localFound == _localEnvMap->end()) {
std::lock_guard guard(_lock);
- EnvMap::iterator found = _envMap.find(searchCluster);
+ auto found = _envMap.find(searchCluster);
if (found == _envMap.end()) {
LOG(debug, "Init VSMAdapter with config id = '%s'", searchCluster.c_str());
Env::SP env = std::make_shared<Env>(searchClusterUri, _wordFolder, _transport, _file_distributor_connection_spec);
diff --git a/streamingvisitors/src/vespa/vsm/common/document.cpp b/streamingvisitors/src/vespa/vsm/common/document.cpp
index a345c82ce2d..167a54a75ea 100644
--- a/streamingvisitors/src/vespa/vsm/common/document.cpp
+++ b/streamingvisitors/src/vespa/vsm/common/document.cpp
@@ -23,8 +23,8 @@ vespalib::asciistream & operator << (vespalib::asciistream & os, const FieldRef
vespalib::asciistream & operator << (vespalib::asciistream & os, const StringFieldIdTMap & f)
{
- for (StringFieldIdTMapT::const_iterator it=f._map.begin(), mt=f._map.end(); it != mt; it++) {
- os << it->first << " = " << it->second << '\n';
+ for (const auto& elem : f._map) {
+ os << elem.first << " = " << elem.second << '\n';
}
return os;
}
@@ -49,7 +49,7 @@ void StringFieldIdTMap::add(const vespalib::string & s)
FieldIdT StringFieldIdTMap::fieldNo(const vespalib::string & fName) const
{
- StringFieldIdTMapT::const_iterator found = _map.find(fName);
+ auto found = _map.find(fName);
FieldIdT fNo((found != _map.end()) ? found->second : npos);
return fNo;
}
diff --git a/streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp b/streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp
index 7886c44b2e0..71b48495f5e 100644
--- a/streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp
+++ b/streamingvisitors/src/vespa/vsm/common/documenttypemapping.cpp
@@ -43,7 +43,7 @@ void DocumentTypeMapping::init(const vespalib::string & defaultDocumentType,
bool DocumentTypeMapping::prepareBaseDoc(SharedFieldPathMap & map) const
{
- FieldPathMapMapT::const_iterator found = _fieldMap.find(_defaultDocumentTypeName);
+ auto found = _fieldMap.find(_defaultDocumentTypeName);
if (found != _fieldMap.end()) {
map = std::make_shared<FieldPathMapT>(found->second);
LOG(debug, "Found FieldPathMap for default document type '%s' with %zd elements",
@@ -64,8 +64,8 @@ void DocumentTypeMapping::buildFieldMap(
docTypePtr->getName().c_str(), fieldList.size(), typeId.c_str());
const document::DocumentType & docType = *docTypePtr;
size_t highestFNo(0);
- for (StringFieldIdTMapT::const_iterator it = fieldList.begin(), mt = fieldList.end(); it != mt; it++) {
- highestFNo = std::max(highestFNo, size_t(it->second));
+ for (const auto& elem : fieldList) {
+ highestFNo = std::max(highestFNo, size_t(elem.second));
}
highestFNo++;
FieldPathMapT & fieldMap = _fieldMap[typeId];
@@ -73,20 +73,20 @@ void DocumentTypeMapping::buildFieldMap(
fieldMap.resize(highestFNo);
size_t validCount(0);
- for (StringFieldIdTMapT::const_iterator it = fieldList.begin(), mt = fieldList.end(); it != mt; it++) {
- vespalib::string fname = it->first;
- LOG(debug, "Handling %s -> %d", fname.c_str(), it->second);
+ for (const auto& elem : fieldList) {
+ vespalib::string fname = elem.first;
+ LOG(debug, "Handling %s -> %d", fname.c_str(), elem.second);
try {
- if ((it->first[0] != '[') && (it->first != "summaryfeatures") && (it->first != "rankfeatures") && (it->first != "ranklog") && (it->first != "sddocname") && (it->first != "documentid")) {
+ if ((elem.first[0] != '[') && (elem.first != "summaryfeatures") && (elem.first != "rankfeatures") && (elem.first != "ranklog") && (elem.first != "sddocname") && (elem.first != "documentid")) {
FieldPath fieldPath;
docType.buildFieldPath(fieldPath, fname);
- fieldMap[it->second] = std::move(fieldPath);
+ fieldMap[elem.second] = std::move(fieldPath);
validCount++;
- LOG(spam, "Found %s -> %d in document", fname.c_str(), it->second);
+ LOG(spam, "Found %s -> %d in document", fname.c_str(), elem.second);
}
} catch (const std::exception & e) {
LOG(debug, "Could not get field info for '%s' in documenttype '%s' (id = '%s') : %s",
- it->first.c_str(), docType.getName().c_str(), typeId.c_str(), e.what());
+ elem.first.c_str(), docType.getName().c_str(), typeId.c_str(), e.what());
}
}
_documentTypeFreq.insert(std::make_pair(validCount, docTypePtr));
diff --git a/streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp b/streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp
index b39afd83b5a..93a071deade 100644
--- a/streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp
+++ b/streamingvisitors/src/vespa/vsm/common/fieldmodifier.cpp
@@ -14,9 +14,9 @@ FieldModifierMap::~FieldModifierMap() { }
FieldModifier *
FieldModifierMap::getModifier(FieldIdT fId) const
{
- FieldModifierMapT::const_iterator itr = _map.find(fId);
+ auto itr = _map.find(fId);
if (itr == _map.end()) {
- return NULL;
+ return nullptr;
}
return itr->second.get();
}
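
fieldmodifier.cpp also swaps NULL for nullptr alongside the auto change; nullptr is the typed null pointer constant and never converts to an integer overload by accident. The lookup shape in isolation, with hypothetical types (Modifier and getModifier here are illustrative):

    #include <map>
    #include <memory>

    struct Modifier { /* ... */ };
    using ModifierMap = std::map<int, std::unique_ptr<Modifier>>;

    Modifier* getModifier(const ModifierMap& map, int fieldId) {
        auto itr = map.find(fieldId);
        if (itr == map.end()) {
            return nullptr;            // was NULL in the pre-patch code
        }
        return itr->second.get();
    }
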
diff --git a/streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp b/streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp
index 3495d46b85b..ee1b3f79aed 100644
--- a/streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp
+++ b/streamingvisitors/src/vespa/vsm/searcher/utf8suffixstringfieldsearcher.cpp
@@ -32,12 +32,11 @@ UTF8SuffixStringFieldSearcher::matchTerms(const FieldRef & f, const size_t mints
++srcbuf;
}
srcbuf = tokenize(srcbuf, _buf->capacity(), dstbuf, tokenlen);
- for (QueryTermList::iterator it = _qtl.begin(), mt = _qtl.end(); it != mt; ++it) {
- QueryTerm & qt = **it;
+ for (auto qt : _qtl) {
const cmptype_t * term;
- termsize_t tsz = qt.term(term);
+ termsize_t tsz = qt->term(term);
if (matchTermSuffix(term, tsz, dstbuf, tokenlen)) {
- addHit(qt, words);
+ addHit(*qt, words);
}
}
words++;
diff --git a/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp b/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
index f6ac3a6c88a..5f0be889621 100644
--- a/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
+++ b/streamingvisitors/src/vespa/vsm/vsm/fieldsearchspec.cpp
@@ -193,7 +193,7 @@ bool FieldSearchSpecMap::buildFieldsInQuery(const Query & query, StringFieldIdTM
const IndexFieldMapT & fim = dtm.second;
vespalib::string rawIndex(term->index());
vespalib::string index(stripNonFields(rawIndex));
- IndexFieldMapT::const_iterator fIt = fim.find(index);
+ auto fIt = fim.find(index);
if (fIt != fim.end()) {
for(FieldIdT fid : fIt->second) {
const FieldSearchSpec & spec = specMap().find(fid)->second;
@@ -286,7 +286,7 @@ FieldSearchSpecMap::reconfigFromQuery(const Query & query)
for (const auto & termA : qtl) {
for (const auto & ifm : documentTypeMap()) {
- IndexFieldMapT::const_iterator itc = ifm.second.find(termA->index());
+ auto itc = ifm.second.find(termA->index());
if (itc != ifm.second.end()) {
for (FieldIdT fid : itc->second) {
FieldSearchSpec & spec = _specMap.find(fid)->second;
diff --git a/streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp b/streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp
index 2d2d3f24bc6..5d29ca993f2 100644
--- a/streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp
+++ b/streamingvisitors/src/vespa/vsm/vsm/snippetmodifier.cpp
@@ -18,7 +18,7 @@ namespace {
void
addIfNotPresent(FieldQueryTermMap & map, vsm::FieldIdT fId, QueryTerm * qt)
{
- FieldQueryTermMap::iterator itr = map.find(fId);
+ auto itr = map.find(fId);
if (itr != map.end()) {
QueryTermList & qtl = itr->second;
if (std::find(qtl.begin(), qtl.end(), qt) == qtl.end()) {
@@ -108,16 +108,14 @@ SnippetModifierManager::setup(const QueryTermList& queryTerms,
FieldQueryTermMap fqtm;
// setup modifiers
- for (QueryTermList::const_iterator i = queryTerms.begin(); i != queryTerms.end(); ++i) {
- QueryTerm * qt = *i;
- IndexFieldMapT::const_iterator j = indexMap.find(qt->index());
- if (j != indexMap.end()) {
- for (FieldIdTList::const_iterator k = j->second.begin(); k != j->second.end(); ++k) {
- FieldIdT fId = *k;
+ for (auto qt : queryTerms) {
+ auto itr = indexMap.find(qt->index());
+ if (itr != indexMap.end()) {
+ for (auto fId : itr->second) {
const FieldSearchSpec & spec = specMap.find(fId)->second;
if (spec.searcher().substring() || qt->isSubstring()) { // we need a modifier for this field id
addIfNotPresent(fqtm, fId, qt);
- if (_modifiers.getModifier(fId) == NULL) {
+ if (_modifiers.getModifier(fId) == nullptr) {
LOG(debug, "Create snippet modifier for field id '%u'", fId);
UTF8SubstringSnippetModifier::SP searcher
(new UTF8SubstringSnippetModifier(fId, _searchModifyBuf, _searchOffsetBuf));
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java
index 33991ef1a3b..41f54255d9d 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/EntityBindingsMapper.java
@@ -20,9 +20,7 @@ import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
-import java.time.Instant;
import java.util.Base64;
-import java.util.List;
import java.util.Optional;
import static com.yahoo.vespa.athenz.identityprovider.api.VespaUniqueInstanceId.fromDottedString;
@@ -150,7 +148,7 @@ public class EntityBindingsMapper {
docEntity.unknownAttributes());
}
- public static String toIdentityDocmentData(IdentityDocument identityDocument) {
+ public static String toIdentityDocumentData(IdentityDocument identityDocument) {
IdentityDocumentEntity documentEntity = new IdentityDocumentEntity(
identityDocument.providerUniqueId().asDottedString(),
identityDocument.providerService().getFullName(),
@@ -160,7 +158,7 @@ public class EntityBindingsMapper {
identityDocument.ipAddresses(),
identityDocument.identityType().id(),
Optional.ofNullable(identityDocument.clusterType()).map(ClusterType::toConfigValue).orElse(null),
- identityDocument.ztsUrl(),
+ identityDocument.ztsUrl().toString(),
identityDocument.serviceIdentity().getFullName());
try {
byte[] bytes = mapper.writeValueAsBytes(documentEntity);
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java
index 00d82177367..4bfff58b928 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/IdentityDocument.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.athenz.identityprovider.api;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
+import java.net.URI;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
@@ -14,7 +15,7 @@ import java.util.Set;
*/
public record IdentityDocument(VespaUniqueInstanceId providerUniqueId, AthenzIdentity providerService, String configServerHostname,
String instanceHostname, Instant createdAt, Set<String> ipAddresses,
- IdentityType identityType, ClusterType clusterType, String ztsUrl,
+ IdentityType identityType, ClusterType clusterType, URI ztsUrl,
AthenzIdentity serviceIdentity, Map<String, Object> unknownAttributes) {
public IdentityDocument {
@@ -30,7 +31,7 @@ public record IdentityDocument(VespaUniqueInstanceId providerUniqueId, AthenzIde
public IdentityDocument(VespaUniqueInstanceId providerUniqueId, AthenzIdentity providerService, String configServerHostname,
String instanceHostname, Instant createdAt, Set<String> ipAddresses,
- IdentityType identityType, ClusterType clusterType, String ztsUrl,
+ IdentityType identityType, ClusterType clusterType, URI ztsUrl,
AthenzIdentity serviceIdentity) {
this(providerUniqueId, providerService, configServerHostname, instanceHostname, createdAt, ipAddresses, identityType, clusterType, ztsUrl, serviceIdentity, Map.of());
}
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java
index 194854cfc3b..8970a74934a 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/IdentityDocumentEntity.java
@@ -7,9 +7,9 @@ import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.net.URI;
import java.time.Instant;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -20,7 +20,7 @@ import java.util.Set;
@JsonInclude(JsonInclude.Include.NON_NULL)
public record IdentityDocumentEntity(String providerUniqueId, String providerService,
String configServerHostname, String instanceHostname, Instant createdAt, Set<String> ipAddresses,
- String identityType, String clusterType, String ztsUrl, String serviceIdentity, Map<String, Object> unknownAttributes) {
+ String identityType, String clusterType, URI ztsUrl, String serviceIdentity, Map<String, Object> unknownAttributes) {
@JsonCreator
public IdentityDocumentEntity(@JsonProperty("provider-unique-id") String providerUniqueId,
@@ -34,7 +34,7 @@ public record IdentityDocumentEntity(String providerUniqueId, String providerSer
@JsonProperty("zts-url") String ztsUrl,
@JsonProperty("service-identity") String serviceIdentity) {
this(providerUniqueId, providerService, configServerHostname,
- instanceHostname, createdAt, ipAddresses, identityType, clusterType, ztsUrl, serviceIdentity, new HashMap<>());
+ instanceHostname, createdAt, ipAddresses, identityType, clusterType, URI.create(ztsUrl), serviceIdentity, new HashMap<>());
}
@JsonProperty("provider-unique-id") @Override public String providerUniqueId() { return providerUniqueId; }
@@ -45,7 +45,7 @@ public record IdentityDocumentEntity(String providerUniqueId, String providerSer
@JsonProperty("ip-addresses") @Override public Set<String> ipAddresses() { return ipAddresses; }
@JsonProperty("identity-type") @Override public String identityType() { return identityType; }
@JsonProperty("cluster-type") @Override public String clusterType() { return clusterType; }
- @JsonProperty("zts-url") @Override public String ztsUrl() { return ztsUrl; }
+ @JsonProperty("zts-url") @Override public URI ztsUrl() { return ztsUrl; }
@JsonProperty("service-identity") @Override public String serviceIdentity() { return serviceIdentity; }
@JsonAnyGetter @Override public Map<String, Object> unknownAttributes() { return unknownAttributes; }
@JsonAnySetter public void set(String name, Object value) { unknownAttributes.put(name, value); }
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java
index e00ab9978f6..9bf91eff60a 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/identityprovider/api/bindings/LegacySignedIdentityDocumentEntity.java
@@ -7,6 +7,7 @@ import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.net.URI;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
@@ -19,7 +20,7 @@ import java.util.Set;
public record LegacySignedIdentityDocumentEntity (
String signature, int signingKeyVersion, String providerUniqueId, String providerService, int documentVersion,
String configServerHostname, String instanceHostname, Instant createdAt, Set<String> ipAddresses,
- String identityType, String clusterType, String ztsUrl, String serviceIdentity, Map<String, Object> unknownAttributes) implements SignedIdentityDocumentEntity {
+ String identityType, String clusterType, URI ztsUrl, String serviceIdentity, Map<String, Object> unknownAttributes) implements SignedIdentityDocumentEntity {
@JsonCreator
public LegacySignedIdentityDocumentEntity(@JsonProperty("signature") String signature,
@@ -36,7 +37,7 @@ public record LegacySignedIdentityDocumentEntity (
@JsonProperty("zts-url") String ztsUrl,
@JsonProperty("service-identity") String serviceIdentity) {
this(signature, signingKeyVersion, providerUniqueId, providerService, documentVersion, configServerHostname,
- instanceHostname, createdAt, ipAddresses, identityType, clusterType, ztsUrl, serviceIdentity, new HashMap<>());
+ instanceHostname, createdAt, ipAddresses, identityType, clusterType, URI.create(ztsUrl), serviceIdentity, new HashMap<>());
}
@JsonProperty("signature") @Override public String signature() { return signature; }
@@ -50,7 +51,7 @@ public record LegacySignedIdentityDocumentEntity (
@JsonProperty("ip-addresses") @Override public Set<String> ipAddresses() { return ipAddresses; }
@JsonProperty("identity-type") @Override public String identityType() { return identityType; }
@JsonProperty("cluster-type") @Override public String clusterType() { return clusterType; }
- @JsonProperty("zts-url") @Override public String ztsUrl() { return ztsUrl; }
+ @JsonProperty("zts-url") @Override public URI ztsUrl() { return ztsUrl; }
@JsonProperty("service-identity") @Override public String serviceIdentity() { return serviceIdentity; }
@JsonAnyGetter @Override public Map<String, Object> unknownAttributes() { return unknownAttributes; }
@JsonAnySetter public void set(String name, Object value) { unknownAttributes.put(name, value); }
diff --git a/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java b/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java
index 276815f263d..45963aaaeb3 100644
--- a/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java
+++ b/vespa-athenz/src/test/java/com/yahoo/vespa/athenz/identityprovider/client/IdentityDocumentSignerTest.java
@@ -15,15 +15,15 @@ import com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument;
import com.yahoo.vespa.athenz.identityprovider.api.VespaUniqueInstanceId;
import org.junit.jupiter.api.Test;
+import java.net.URI;
import java.security.KeyPair;
import java.time.Instant;
import java.util.Arrays;
import java.util.HashSet;
-import java.util.List;
import static com.yahoo.vespa.athenz.identityprovider.api.IdentityType.TENANT;
-import static com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument.LEGACY_DEFAULT_DOCUMENT_VERSION;
import static com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument.DEFAULT_DOCUMENT_VERSION;
+import static com.yahoo.vespa.athenz.identityprovider.api.SignedIdentityDocument.LEGACY_DEFAULT_DOCUMENT_VERSION;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -43,7 +43,7 @@ public class IdentityDocumentSignerTest {
private static final Instant createdAt = Instant.EPOCH;
private static final HashSet<String> ipAddresses = new HashSet<>(Arrays.asList("1.2.3.4", "::1"));
private static final ClusterType clusterType = ClusterType.CONTAINER;
- private static final String ztsUrl = "https://foo";
+ private static final URI ztsUrl = URI.create("https://foo");
private static final AthenzIdentity serviceIdentity = new AthenzService("vespa", "node");
@Test
@@ -67,7 +67,7 @@ public class IdentityDocumentSignerTest {
IdentityDocument identityDocument = new IdentityDocument(
id, providerService, configserverHostname,
instanceHostname, createdAt, ipAddresses, identityType, clusterType, ztsUrl, serviceIdentity);
- String data = EntityBindingsMapper.toIdentityDocmentData(identityDocument);
+ String data = EntityBindingsMapper.toIdentityDocumentData(identityDocument);
String signature =
signer.generateSignature(data, keyPair.getPrivate());
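Note: the test body above serializes the identity document and signs the resulting string with the key pair's private key. The concrete algorithm used by IdentityDocumentSigner is not visible in this hunk, so the following is only a generic sketch of string signing and verification with java.security.Signature, with SHA256withRSA assumed purely for illustration:

    import java.nio.charset.StandardCharsets;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Signature;
    import java.util.Base64;

    public class SignatureSketch {
        public static void main(String[] args) throws Exception {
            KeyPair keyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
            String data = "{\"zts-url\":\"https://foo\"}";   // stand-in for the serialized identity document

            // Sign the UTF-8 bytes of the payload with the private key.
            Signature signer = Signature.getInstance("SHA256withRSA");
            signer.initSign(keyPair.getPrivate());
            signer.update(data.getBytes(StandardCharsets.UTF_8));
            String signature = Base64.getEncoder().encodeToString(signer.sign());

            // Verify with the matching public key.
            Signature verifier = Signature.getInstance("SHA256withRSA");
            verifier.initVerify(keyPair.getPublic());
            verifier.update(data.getBytes(StandardCharsets.UTF_8));
            System.out.println(verifier.verify(Base64.getDecoder().decode(signature)));   // true
        }
    }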
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java
index 9f5523b062c..7fac977ca51 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/JettyCluster.java
@@ -5,6 +5,7 @@ package ai.vespa.feed.client.impl;
import ai.vespa.feed.client.FeedClientBuilder.Compression;
import ai.vespa.feed.client.HttpResponse;
import org.eclipse.jetty.client.HttpClient;
+import org.eclipse.jetty.client.MultiplexConnectionPool;
import org.eclipse.jetty.client.api.Request;
import org.eclipse.jetty.client.api.Response;
import org.eclipse.jetty.client.api.Result;
@@ -19,6 +20,7 @@ import org.eclipse.jetty.http2.client.http.HttpClientTransportOverHTTP2;
import org.eclipse.jetty.io.ClientConnector;
import org.eclipse.jetty.util.Callback;
import org.eclipse.jetty.util.HttpCookieStore;
+import org.eclipse.jetty.util.Pool;
import org.eclipse.jetty.util.Promise;
import org.eclipse.jetty.util.SocketAddressResolver;
import org.eclipse.jetty.util.component.AbstractLifeCycle;
@@ -67,25 +69,31 @@ class JettyCluster implements Cluster {
@Override
public void dispatch(HttpRequest req, CompletableFuture<HttpResponse> vessel) {
- Endpoint endpoint = findLeastBusyEndpoint(endpoints);
- long reqTimeoutMillis = req.timeout() != null
- ? req.timeout().toMillis() * 11 / 10 + 1000 : IDLE_TIMEOUT.toMillis();
- Request jettyReq = client.newRequest(URI.create(endpoint.uri + req.path()))
- .version(HttpVersion.HTTP_2)
- .method(HttpMethod.fromString(req.method()))
- .headers(hs -> req.headers().forEach((k, v) -> hs.add(k, v.get())))
- .idleTimeout(IDLE_TIMEOUT.toMillis(), MILLISECONDS)
- .timeout(reqTimeoutMillis, MILLISECONDS);
- if (req.body() != null) {
- FeedContent content = new FeedContent(compression, req.body());
- content.contentEncoding().ifPresent(ce -> jettyReq.headers(hs -> hs.add(ce)));
- jettyReq.body(content);
- }
- jettyReq.send(new BufferingResponseListener() {
- @Override
- public void onComplete(Result result) {
- if (result.isFailed()) vessel.completeExceptionally(result.getFailure());
- else vessel.complete(new JettyResponse(result.getResponse(), getContent()));
+ client.getExecutor().execute(() -> {
+ try {
+ Endpoint endpoint = findLeastBusyEndpoint(endpoints);
+ long reqTimeoutMillis = req.timeout() != null
+ ? req.timeout().toMillis() * 11 / 10 + 1000 : IDLE_TIMEOUT.toMillis();
+ Request jettyReq = client.newRequest(URI.create(endpoint.uri + req.path()))
+ .version(HttpVersion.HTTP_2)
+ .method(HttpMethod.fromString(req.method()))
+ .headers(hs -> req.headers().forEach((k, v) -> hs.add(k, v.get())))
+ .idleTimeout(IDLE_TIMEOUT.toMillis(), MILLISECONDS)
+ .timeout(reqTimeoutMillis, MILLISECONDS);
+ if (req.body() != null) {
+ FeedContent content = new FeedContent(compression, req.body());
+ content.contentEncoding().ifPresent(ce -> jettyReq.headers(hs -> hs.add(ce)));
+ jettyReq.body(content);
+ }
+ jettyReq.send(new BufferingResponseListener() {
+ @Override
+ public void onComplete(Result result) {
+ if (result.isFailed()) vessel.completeExceptionally(result.getFailure());
+ else vessel.complete(new JettyResponse(result.getResponse(), getContent()));
+ }
+ });
+ } catch (Exception e) {
+ vessel.completeExceptionally(e);
}
});
}
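Note: the rewritten dispatch() above moves request construction onto the client's executor and routes any exception into the caller's CompletableFuture instead of letting it escape the calling thread. A stripped-down sketch of that pattern, independent of the Jetty and feed-client APIs (the dispatch helper below is hypothetical):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class AsyncDispatchSketch {

        // Offload request setup to the executor; any exception thrown while building
        // or sending the request completes the future exceptionally, so the caller
        // always gets an answer on the same future.
        static CompletableFuture<String> dispatch(ExecutorService executor, String request) {
            CompletableFuture<String> vessel = new CompletableFuture<>();
            executor.execute(() -> {
                try {
                    if (request.isEmpty()) throw new IllegalArgumentException("empty request");
                    // ... build and send the real request here, completing `vessel` from its callback ...
                    vessel.complete("response to " + request);
                } catch (Exception e) {
                    vessel.completeExceptionally(e);
                }
            });
            return vessel;
        }

        public static void main(String[] args) {
            ExecutorService executor = Executors.newSingleThreadExecutor();
            System.out.println(dispatch(executor, "doc 1").join());
            executor.shutdown();
        }
    }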
@@ -106,17 +114,26 @@ class JettyCluster implements Cluster {
clientSslCtxFactory.setEndpointIdentificationAlgorithm(null);
}
ClientConnector connector = new ClientConnector();
- int threads = Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 16), 4);
+ int threads = Math.max(Math.min(Runtime.getRuntime().availableProcessors(), 20), 8);
connector.setExecutor(new QueuedThreadPool(threads));
connector.setSslContextFactory(clientSslCtxFactory);
HTTP2Client h2Client = new HTTP2Client(connector);
h2Client.setMaxConcurrentPushedStreams(b.maxStreamsPerConnection);
- HttpClient httpClient = new HttpClient(new HttpClientTransportOverHTTP2(h2Client));
- httpClient.setMaxConnectionsPerDestination(b.connectionsPerEndpoint);
+ // Set the HTTP/2 flow-control windows very large so that backpressure comes from TCP congestion control rather than HTTP/2 flow control.
+ int initialWindow = 128 * 1024 * 1024;
+ h2Client.setInitialSessionRecvWindow(initialWindow);
+ h2Client.setInitialStreamRecvWindow(initialWindow);
+ HttpClientTransportOverHTTP2 transport = new HttpClientTransportOverHTTP2(h2Client);
+ transport.setConnectionPoolFactory(dest -> {
+ MultiplexConnectionPool pool = new MultiplexConnectionPool(
+ dest, Pool.StrategyType.RANDOM, b.connectionsPerEndpoint, false, dest, Integer.MAX_VALUE);
+ pool.preCreateConnections(b.connectionsPerEndpoint);
+ return pool;
+ });
+ HttpClient httpClient = new HttpClient(transport);
httpClient.setFollowRedirects(false);
httpClient.setUserAgentField(
new HttpField(HttpHeader.USER_AGENT, String.format("vespa-feed-client/%s (Jetty)", Vespa.VERSION)));
- httpClient.setMaxRequestsQueuedPerDestination(Integer.MAX_VALUE);
httpClient.setConnectTimeout(Duration.ofSeconds(10).toMillis());
// Stop client from trying different IP address when TLS handshake fails
httpClient.setSocketAddressResolver(new Ipv4PreferringResolver(httpClient, Duration.ofSeconds(10)));
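Note: the second JettyCluster hunk replaces the default per-destination connection limit with an explicitly sized MultiplexConnectionPool whose connections are opened eagerly, and raises the HTTP/2 receive windows so that throttling is left to TCP. A condensed sketch of that wiring, assuming the same pre-Jetty-12 client APIs that the diff imports (the pool-factory calls and window size come from the added lines; the surrounding setup is illustrative):

    import org.eclipse.jetty.client.HttpClient;
    import org.eclipse.jetty.client.MultiplexConnectionPool;
    import org.eclipse.jetty.http2.client.HTTP2Client;
    import org.eclipse.jetty.http2.client.http.HttpClientTransportOverHTTP2;
    import org.eclipse.jetty.io.ClientConnector;
    import org.eclipse.jetty.util.Pool;

    public class JettyHttp2ClientSketch {

        static HttpClient create(int connectionsPerEndpoint) throws Exception {
            ClientConnector connector = new ClientConnector();
            HTTP2Client h2Client = new HTTP2Client(connector);

            // Very large session/stream receive windows: let TCP, not HTTP/2 flow control, throttle.
            int initialWindow = 128 * 1024 * 1024;
            h2Client.setInitialSessionRecvWindow(initialWindow);
            h2Client.setInitialStreamRecvWindow(initialWindow);

            HttpClientTransportOverHTTP2 transport = new HttpClientTransportOverHTTP2(h2Client);
            // Fixed-size multiplexed pool per destination, with connections opened up front
            // instead of lazily on first use.
            transport.setConnectionPoolFactory(dest -> {
                MultiplexConnectionPool pool = new MultiplexConnectionPool(
                        dest, Pool.StrategyType.RANDOM, connectionsPerEndpoint, false, dest, Integer.MAX_VALUE);
                pool.preCreateConnections(connectionsPerEndpoint);
                return pool;
            });

            HttpClient httpClient = new HttpClient(transport);
            httpClient.start();
            return httpClient;
        }
    }

Pre-creating the connections avoids a first-request latency spike from lazy connection establishment; the pool size caps connections per endpoint, while the Integer.MAX_VALUE argument leaves per-connection stream multiplexing effectively unbounded.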
diff --git a/vespajlib/src/main/java/com/yahoo/compress/Compressor.java b/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
index fcbc89307b8..3e9d704e11c 100644
--- a/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
+++ b/vespajlib/src/main/java/com/yahoo/compress/Compressor.java
@@ -194,14 +194,14 @@ public class Compressor {
public long warmup(double seconds) {
byte [] input = new byte[0x4000];
new Random().nextBytes(input);
- long timeDone = System.nanoTime() + (long)(seconds*1000000000);
+ long startTime = System.nanoTime();
long compressedBytes = 0;
byte [] decompressed = new byte [input.length];
LZ4FastDecompressor fastDecompressor = factory.fastDecompressor();
LZ4SafeDecompressor safeDecompressor = factory.safeDecompressor();
LZ4Compressor fastCompressor = factory.fastCompressor();
LZ4Compressor highCompressor = factory.highCompressor();
- while (System.nanoTime() < timeDone) {
+ while (System.nanoTime() - startTime < seconds * 1e9) {
byte [] compressedFast = fastCompressor.compress(input);
byte [] compressedHigh = highCompressor.compress(input);
fastDecompressor.decompress(compressedFast, decompressed);
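Note: the warmup loop now measures elapsed time as a difference of two System.nanoTime() readings instead of comparing against a precomputed absolute deadline; per the nanoTime contract, only such differences are meaningful, and they remain correct even if the counter overflows. A tiny sketch of the same loop shape with a placeholder workload:

    public class ElapsedTimeLoopSketch {

        // Run `work` repeatedly for roughly `seconds`, bounding the loop by elapsed
        // time (a nanoTime difference) rather than by an absolute end timestamp.
        static long runFor(double seconds, Runnable work) {
            long startTime = System.nanoTime();
            long iterations = 0;
            while (System.nanoTime() - startTime < seconds * 1e9) {
                work.run();
                iterations++;
            }
            return iterations;
        }

        public static void main(String[] args) {
            System.out.println(runFor(0.1, () -> Math.sqrt(42)));
        }
    }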