-rwxr-xr-x  .buildkite/build-rpms.sh | 2
-rw-r--r--  .copr/Makefile | 2
-rw-r--r--  client/go/internal/admin/jvm/mem_options.go | 4
-rw-r--r--  client/go/internal/admin/jvm/mem_options_test.go | 2
-rw-r--r--  client/go/internal/admin/jvm/standalone_container.go | 1
-rw-r--r--  client/go/internal/admin/jvm/xx_options.go | 2
-rw-r--r--  client/js/app/yarn.lock | 61
-rw-r--r--  config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java | 4
-rw-r--r--  config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTagsTest.java | 52
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/schema/Schema.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/documentmodel/DocumentSummary.java | 13
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/Host.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainer.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java | 5
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidator.java | 11
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidator.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java | 35
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/Container.java | 4
-rwxr-xr-x  config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java | 9
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java | 2
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java | 12
-rw-r--r--  config-model/src/test/java/com/yahoo/schema/SummaryTestCase.java | 15
-rw-r--r--  config-model/src/test/java/com/yahoo/schema/processing/SummaryConsistencyTestCase.java | 8
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidatorTest.java | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidatorTest.java | 10
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/SchemaChainsTest.java | 5
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java | 24
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java | 11
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java | 5
-rwxr-xr-x  container-disc/src/main/sh/vespa-start-container-daemon.sh | 4
-rw-r--r--  container-search/abi-spec.json | 12
-rw-r--r--  container-search/src/main/java/com/yahoo/search/querytransform/WeakAndReplacementSearcher.java | 5
-rw-r--r--  container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java | 3
-rw-r--r--  container-search/src/main/java/com/yahoo/search/searchers/OpportunisticWeakAndSearcher.java | 85
-rw-r--r--  container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java | 9
-rw-r--r--  container-search/src/test/java/com/yahoo/search/querytransform/WeakAndReplacementSearcherTestCase.java | 9
-rw-r--r--  container-search/src/test/java/com/yahoo/search/rendering/EventRendererTestCase.java | 2
-rw-r--r--  container-search/src/test/java/com/yahoo/search/searchers/OpportunisticWeakAndSearcherTestCase.java | 42
-rw-r--r--  dependency-versions/pom.xml | 15
-rw-r--r--  eval/src/vespa/eval/instruction/generic_cell_cast.cpp | 18
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 35
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java | 4
-rw-r--r--  jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/misc/User.java | 12
-rw-r--r--  maven-plugins/allowed-maven-dependencies.txt | 4
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/MetricsJsonResponse.java | 31
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/PrometheusResponse.java | 35
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandler.java | 11
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV1Handler.java | 5
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV2Handler.java | 5
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandler.java | 5
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericApplicationModel.java | 11
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericJsonModel.java | 11
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusModel.java | 8
-rw-r--r--  metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java | 2
-rw-r--r--  metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java | 4
-rw-r--r--  model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java | 18
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java | 1
-rw-r--r--  parent/pom.xml | 2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/match_params.h | 2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp | 4
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp | 4
-rw-r--r--  searchlib/CMakeLists.txt | 1
-rw-r--r--  searchlib/src/apps/vespa-query-analyzer/.gitignore | 3
-rw-r--r--  searchlib/src/apps/vespa-query-analyzer/CMakeLists.txt | 9
-rw-r--r--  searchlib/src/apps/vespa-query-analyzer/vespa-query-analyzer.cpp | 361
-rw-r--r--  searchlib/src/tests/hitcollector/CMakeLists.txt | 2
-rw-r--r--  searchlib/src/tests/hitcollector/hitcollector_test.cpp | 269
-rw-r--r--  searchlib/src/tests/hitcollector/sorted_hit_sequence_test.cpp | 14
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp | 197
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/hitcollector.h | 11
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.cpp | 4
-rw-r--r--  storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.h | 4
-rw-r--r--  storage/src/vespa/storage/persistence/mergehandler.cpp | 4
-rw-r--r--  storage/src/vespa/storage/storageserver/rpc/cluster_controller_api_rpc_service.cpp | 6
-rw-r--r--  storage/src/vespa/storageapi/message/state.cpp | 24
-rw-r--r--  storage/src/vespa/storageapi/message/state.h | 18
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java | 28
-rw-r--r--  vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java | 3
-rw-r--r--  vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java | 25
-rw-r--r--  vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java | 2
-rw-r--r--  vespalib/src/vespa/vespalib/data/slime/type.h | 1
86 files changed, 1327 insertions, 414 deletions
diff --git a/.buildkite/build-rpms.sh b/.buildkite/build-rpms.sh
index 9f4ba7894a5..3dc05e6e6ce 100755
--- a/.buildkite/build-rpms.sh
+++ b/.buildkite/build-rpms.sh
@@ -9,7 +9,7 @@ make -f .copr/Makefile srpm outdir="$WORKDIR"
rpmbuild --rebuild \
--define="_topdir $WORKDIR/vespa-rpmbuild" \
--define "_debugsource_template %{nil}" \
- --define "_binary_payload w10T.zstdio" \
+ --define "_binary_payload w10T4.zstdio" \
--define "installdir $WORKDIR/vespa-install" "$WORKDIR"/vespa-"$VESPA_VERSION"-*.src.rpm
mv "$WORKDIR"/vespa-rpmbuild/RPMS/*/*.rpm "$WORKDIR/artifacts/$ARCH/rpms"
diff --git a/.copr/Makefile b/.copr/Makefile
index c70f35f4450..8371f2fbd7b 100644
--- a/.copr/Makefile
+++ b/.copr/Makefile
@@ -15,7 +15,7 @@ srpm: VESPA_VERSION = $$(git tag --points-at HEAD | grep -oP "\d+\.\d+\.\d+" | s
srpm: deps
$(TOP)/../dist.sh $(VESPA_VERSION)
spectool -g -C $(SOURCEDIR) $(SPECDIR)/vespa-$(VESPA_VERSION).spec
- rpmbuild -bs --define "_topdir $(RPMTOPDIR)" $(SPECDIR)/vespa-$(VESPA_VERSION).spec
+ rpmbuild -bs --define "_topdir $(RPMTOPDIR)" --define "_source_payload w10T4.zstdio" $(SPECDIR)/vespa-$(VESPA_VERSION).spec
cp -a $(RPMTOPDIR)/SRPMS/* $(outdir)
rpms: srpm
diff --git a/client/go/internal/admin/jvm/mem_options.go b/client/go/internal/admin/jvm/mem_options.go
index d3b0d44c677..c78ee80dc80 100644
--- a/client/go/internal/admin/jvm/mem_options.go
+++ b/client/go/internal/admin/jvm/mem_options.go
@@ -55,8 +55,8 @@ func (opts *Options) MaybeAddHugepages(heapSize AmountOfMemory) {
}
func adjustAvailableMemory(measured AmountOfMemory) AmountOfMemory {
- reserved := 1024 // MB
- need_min := 64 // MB
+ reserved := 700 // MB -- keep in sync with com.yahoo.vespa.model.Host.memoryOverheadGb
+ need_min := 64 // MB
available := measured.ToMB()
if available > need_min+2*reserved {
return MegaBytesOfMemory(available - reserved)
diff --git a/client/go/internal/admin/jvm/mem_options_test.go b/client/go/internal/admin/jvm/mem_options_test.go
index 3501e44c723..3db10153086 100644
--- a/client/go/internal/admin/jvm/mem_options_test.go
+++ b/client/go/internal/admin/jvm/mem_options_test.go
@@ -14,6 +14,6 @@ func TestAdjustment(t *testing.T) {
assert.True(t, int(adj) >= lastAdj)
lastAdj = int(adj)
}
- adj := adjustAvailableMemory(MegaBytesOfMemory(31024)).ToMB()
+ adj := adjustAvailableMemory(MegaBytesOfMemory(30700)).ToMB()
assert.Equal(t, 30000, int(adj))
}
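
For context, a minimal sketch (in Java, mirroring only the branch visible in the Go hunk above; the small-host branches are omitted because they are outside this hunk) of the adjusted reservation: the reserve drops from 1024 MB to 700 MB to match Host.memoryOverheadGb, which is why the test input above moves from 31024 to 30700 while the expected result stays 30000.

// Sketch of the reservation branch shown in the mem_options.go hunk above (not the actual Go code).
static int adjustAvailableMemoryMb(int measuredMb) {
    int reservedMb = 700; // keep in sync with com.yahoo.vespa.model.Host.memoryOverheadGb (0.7 GB)
    int needMinMb = 64;
    if (measuredMb > needMinMb + 2 * reservedMb) {
        return measuredMb - reservedMb;   // e.g. 30700 MB measured -> 30000 MB left for the JVM
    }
    return measuredMb;                    // smaller hosts: handled by branches not shown in this hunk
}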
diff --git a/client/go/internal/admin/jvm/standalone_container.go b/client/go/internal/admin/jvm/standalone_container.go
index 20031bc7725..fb615dcf4c2 100644
--- a/client/go/internal/admin/jvm/standalone_container.go
+++ b/client/go/internal/admin/jvm/standalone_container.go
@@ -37,7 +37,6 @@ func (a *StandaloneContainer) configureOptions() {
opts := a.jvmOpts
opts.ConfigureCpuCount(0)
opts.AddCommonXX()
- opts.AddOption("-XX:-OmitStackTraceInFastThrow")
opts.AddCommonOpens()
opts.AddCommonJdkProperties()
a.addJdiscProperties()
diff --git a/client/go/internal/admin/jvm/xx_options.go b/client/go/internal/admin/jvm/xx_options.go
index 13b69e43dda..abc92f19bf2 100644
--- a/client/go/internal/admin/jvm/xx_options.go
+++ b/client/go/internal/admin/jvm/xx_options.go
@@ -19,5 +19,5 @@ func (opts *Options) AddCommonXX() {
// not common after all:
opts.AddOption("-XX:MaxJavaStackTraceDepth=1000000")
// Aid debugging for slight cost in performance
- opts.AddOption("-XX:+OmitStackTraceInFastThrow")
+ opts.AddOption("-XX:-OmitStackTraceInFastThrow")
}
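
The flipped default above disables the JVM's fast-throw optimization, which can replace the stack traces of frequently thrown implicit exceptions with preallocated, trace-less instances once the JIT has compiled the throwing code. A hypothetical stand-alone demo of that behavior (not part of this commit); run it with and without -XX:-OmitStackTraceInFastThrow to compare.

// Demo: with the JVM default (+OmitStackTraceInFastThrow), a hot implicit exception
// may eventually arrive with an empty stack trace; with -XX:-OmitStackTraceInFastThrow
// full traces are kept, at a small performance cost.
public class FastThrowDemo {
    public static void main(String[] args) {
        String s = null;
        for (int i = 0; i < 1_000_000; i++) {
            try {
                s.length();                                   // implicit NullPointerException in a hot loop
            } catch (NullPointerException e) {
                if (e.getStackTrace().length == 0) {
                    System.out.println("Trace omitted after ~" + i + " throws");
                    return;
                }
            }
        }
        System.out.println("All traces kept");
    }
}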
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index c0a1c8be5d3..72d9d848d27 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -830,6 +830,15 @@
resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.10.0.tgz#548f6de556857c8bb73bbee70c35dc82a2e74d63"
integrity sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==
+"@eslint/config-array@^0.15.1":
+ version "0.15.1"
+ resolved "https://registry.yarnpkg.com/@eslint/config-array/-/config-array-0.15.1.tgz#1fa78b422d98f4e7979f2211a1fde137e26c7d61"
+ integrity sha512-K4gzNq+yymn/EVsXYmf+SBcBro8MTf+aXJZUphM96CdzUEr+ClGDvAbpmaEK+cGVigVXIgs9gNmvHAlrzzY5JQ==
+ dependencies:
+ "@eslint/object-schema" "^2.1.3"
+ debug "^4.3.1"
+ minimatch "^3.0.5"
+
"@eslint/eslintrc@^3.1.0":
version "3.1.0"
resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-3.1.0.tgz#dbd3482bfd91efa663cbe7aa1f506839868207b6"
@@ -845,10 +854,15 @@
minimatch "^3.1.2"
strip-json-comments "^3.1.1"
-"@eslint/js@9.3.0":
- version "9.3.0"
- resolved "https://registry.yarnpkg.com/@eslint/js/-/js-9.3.0.tgz#2e8f65c9c55227abc4845b1513c69c32c679d8fe"
- integrity sha512-niBqk8iwv96+yuTwjM6bWg8ovzAPF9qkICsGtcoa5/dmqcEMfdwNAX7+/OHcJHc7wj7XqPxH98oAHytFYlw6Sw==
+"@eslint/js@9.4.0":
+ version "9.4.0"
+ resolved "https://registry.yarnpkg.com/@eslint/js/-/js-9.4.0.tgz#96a2edd37ec0551ce5f9540705be23951c008a0c"
+ integrity sha512-fdI7VJjP3Rvc70lC4xkFXHB0fiPeojiL1PxVG6t1ZvXQrarj893PweuBTujxDUFk0Fxj4R7PIIAZ/aiiyZPZcg==
+
+"@eslint/object-schema@^2.1.3":
+ version "2.1.3"
+ resolved "https://registry.yarnpkg.com/@eslint/object-schema/-/object-schema-2.1.3.tgz#e65ae80ee2927b4fd8c5c26b15ecacc2b2a6cc2a"
+ integrity sha512-HAbhAYKfsAC2EkTqve00ibWIZlaU74Z1EHwAjYr4PXF0YU2VEA1zSIKSSpKszRLRWwHzzRZXvK632u+uXzvsvw==
"@floating-ui/core@^1.4.2":
version "1.5.0"
@@ -919,25 +933,11 @@
dependencies:
prop-types "^15.8.1"
-"@humanwhocodes/config-array@^0.13.0":
- version "0.13.0"
- resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.13.0.tgz#fb907624df3256d04b9aa2df50d7aa97ec648748"
- integrity sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==
- dependencies:
- "@humanwhocodes/object-schema" "^2.0.3"
- debug "^4.3.1"
- minimatch "^3.0.5"
-
"@humanwhocodes/module-importer@^1.0.1":
version "1.0.1"
resolved "https://registry.yarnpkg.com/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz#af5b2691a22b44be847b0ca81641c5fb6ad0172c"
integrity sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==
-"@humanwhocodes/object-schema@^2.0.3":
- version "2.0.3"
- resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz#4a2868d75d6d6963e423bcf90b7fd1be343409d3"
- integrity sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==
-
"@humanwhocodes/retry@^0.3.0":
version "0.3.0"
resolved "https://registry.yarnpkg.com/@humanwhocodes/retry/-/retry-0.3.0.tgz#6d86b8cb322660f03d3f0aa94b99bdd8e172d570"
@@ -2324,13 +2324,20 @@ debug@^3.2.7:
dependencies:
ms "^2.1.1"
-debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2:
+debug@^4.1.0, debug@^4.1.1:
version "4.3.4"
resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865"
integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==
dependencies:
ms "2.1.2"
+debug@^4.3.1, debug@^4.3.2:
+ version "4.3.5"
+ resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.5.tgz#e83444eceb9fedd4a1da56d671ae2446a01a6e1e"
+ integrity sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==
+ dependencies:
+ ms "2.1.2"
+
decode-uri-component@^0.2.0:
version "0.2.2"
resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.2.tgz#e69dbe25d37941171dd540e024c444cd5188e1e9"
@@ -2736,15 +2743,15 @@ eslint-visitor-keys@^4.0.0:
integrity sha512-OtIRv/2GyiF6o/d8K7MYKKbXrOUBIK6SfkIRM4Z0dY3w+LiQ0vy3F57m0Z71bjbyeiWFiHJ8brqnmE6H6/jEuw==
eslint@^9.0.0:
- version "9.3.0"
- resolved "https://registry.yarnpkg.com/eslint/-/eslint-9.3.0.tgz#36a96db84592618d6ed9074d677e92f4e58c08b9"
- integrity sha512-5Iv4CsZW030lpUqHBapdPo3MJetAPtejVW8B84GIcIIv8+ohFaddXsrn1Gn8uD9ijDb+kcYKFUVmC8qG8B2ORQ==
+ version "9.4.0"
+ resolved "https://registry.yarnpkg.com/eslint/-/eslint-9.4.0.tgz#79150c3610ae606eb131f1d648d5f43b3d45f3cd"
+ integrity sha512-sjc7Y8cUD1IlwYcTS9qPSvGjAC8Ne9LctpxKKu3x/1IC9bnOg98Zy6GxEJUfr1NojMgVPlyANXYns8oE2c1TAA==
dependencies:
"@eslint-community/eslint-utils" "^4.2.0"
"@eslint-community/regexpp" "^4.6.1"
+ "@eslint/config-array" "^0.15.1"
"@eslint/eslintrc" "^3.1.0"
- "@eslint/js" "9.3.0"
- "@humanwhocodes/config-array" "^0.13.0"
+ "@eslint/js" "9.4.0"
"@humanwhocodes/module-importer" "^1.0.1"
"@humanwhocodes/retry" "^0.3.0"
"@nodelib/fs.walk" "^1.2.8"
@@ -4721,9 +4728,9 @@ prettier-linter-helpers@^1.0.0:
fast-diff "^1.1.2"
prettier@3:
- version "3.2.5"
- resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.2.5.tgz#e52bc3090586e824964a8813b09aba6233b28368"
- integrity sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A==
+ version "3.3.0"
+ resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.3.0.tgz#d173ea0524a691d4c0b1181752f2b46724328cdf"
+ integrity sha512-J9odKxERhCQ10OC2yb93583f6UnYutOeiV5i0zEDS7UGTdUt0u+y8erxl3lBKvwo/JHyyoEdXjwp4dke9oyZ/g==
pretty-format@^29.7.0:
version "29.7.0"
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
index 5f2046b1450..fe4c0af06d6 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
@@ -194,6 +194,10 @@ class OverrideProcessor implements PreProcessor {
if ( ! elementTags.isEmpty()) { // match tags
if ( ! elementTags.intersects(tags)) return false;
+ // Tags are set on instances. Having a tag match for a deployment to a non-prod environment
+ // disables the usual downscaling of the cluster, which is surprising. We therefore require
+ // the tag match to either also match an environment directive, or the implicit prod.
+ if (elementEnvironments.isEmpty() && environment != Environment.prod) return false;
}
return true;
diff --git a/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTagsTest.java b/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTagsTest.java
index 0cdbed3999c..7190b25965f 100644
--- a/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTagsTest.java
+++ b/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTagsTest.java
@@ -22,7 +22,7 @@ public class HostedOverrideProcessorTagsTest {
"<services xmlns:deploy='vespa' xmlns:preprocess='?' version='1.0'>" +
" <container id='foo' version='1.0'>" +
" <nodes count='5' deploy:tags='a' deploy:environment='perf'/>" +
- " <nodes count='10' deploy:tags='a b'/>" +
+ " <nodes count='10' deploy:tags='a b' deploy:environment='prod dev'/>" +
" <nodes count='20' deploy:tags='c'/>" +
" <search deploy:tags='b'/>" +
" <document-api deploy:tags='d'/>" +
@@ -62,6 +62,38 @@ public class HostedOverrideProcessorTagsTest {
}
@Test
+ public void testParsingTagATest() throws TransformerException {
+ String expected =
+ "<?xml version='1.0' encoding='UTF-8' standalone='no'?>" +
+ "<services xmlns:deploy='vespa' xmlns:preprocess='?' version='1.0'>" +
+ " <container id='foo' version='1.0'>" +
+ " " + // (╭ರ_•́)
+ " </container>" +
+ "</services>";
+ assertOverride(InstanceName.defaultName(),
+ Environment.test,
+ RegionName.defaultName(),
+ Tags.fromString("a"),
+ expected);
+ }
+
+ @Test
+ public void testParsingTagADev() throws TransformerException {
+ String expected =
+ "<?xml version='1.0' encoding='UTF-8' standalone='no'?>" +
+ "<services xmlns:deploy='vespa' xmlns:preprocess='?' version='1.0'>" +
+ " <container id='foo' version='1.0'>" +
+ " <nodes count='10' required='true'/>" +
+ " </container>" +
+ "</services>";
+ assertOverride(InstanceName.defaultName(),
+ Environment.dev,
+ RegionName.defaultName(),
+ Tags.fromString("a"),
+ expected);
+ }
+
+ @Test
public void testParsingTagB() throws TransformerException {
String expected =
"<?xml version='1.0' encoding='UTF-8' standalone='no'?>" +
@@ -79,7 +111,7 @@ public class HostedOverrideProcessorTagsTest {
}
@Test
- public void testParsingTagC() throws TransformerException {
+ public void testParsingTagCProd() throws TransformerException {
String expected =
"<?xml version='1.0' encoding='UTF-8' standalone='no'?>" +
"<services xmlns:deploy='vespa' xmlns:preprocess='?' version='1.0'>" +
@@ -95,6 +127,22 @@ public class HostedOverrideProcessorTagsTest {
}
@Test
+ public void testParsingTagCDev() throws TransformerException {
+ String expected =
+ "<?xml version='1.0' encoding='UTF-8' standalone='no'?>" +
+ "<services xmlns:deploy='vespa' xmlns:preprocess='?' version='1.0'>" +
+ " <container id='foo' version='1.0'>" +
+ " " + // (╭ರ_•́)
+ " </container>" +
+ "</services>";
+ assertOverride(InstanceName.defaultName(),
+ Environment.dev,
+ RegionName.defaultName(),
+ Tags.fromString("c"),
+ expected);
+ }
+
+ @Test
public void testParsingTagCAndD() throws TransformerException {
String expected =
"<?xml version='1.0' encoding='UTF-8' standalone='no'?>" +
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 0746079a626..cb03e6807e7 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -99,7 +99,7 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"baldersheim"}) default int heapSizePercentage() { return 0; }
@ModelFeatureFlag(owners = {"bjorncs", "tokle"}) default List<String> allowedAthenzProxyIdentities() { return List.of(); }
@ModelFeatureFlag(owners = {"vekterli"}) default int maxActivationInhibitedOutOfSyncGroups() { return 0; }
- @ModelFeatureFlag(owners = {"hmusum"}) default String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return ""; }
+ @ModelFeatureFlag(owners = {"hmusum"}, removeAfter = "8.350.x") default String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return "-XX:-OmitStackTraceInFastThrow"; }
@ModelFeatureFlag(owners = {"hmusum"}) default double resourceLimitDisk() { return 0.75; }
@ModelFeatureFlag(owners = {"hmusum"}) default double resourceLimitMemory() { return 0.8; }
@ModelFeatureFlag(owners = {"geirst", "vekterli"}) default double minNodeRatioPerGroup() { return 0.0; }
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index d67335522ed..970597685da 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -55,7 +55,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private double feedNiceness = 0.0;
private int maxActivationInhibitedOutOfSyncGroups = 0;
private List<TenantSecretStore> tenantSecretStores = List.of();
- private String jvmOmitStackTraceInFastThrowOption;
private boolean allowDisableMtls = true;
private List<X509Certificate> operatorCertificates = List.of();
private double resourceLimitDisk = 0.75;
@@ -112,7 +111,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public double feedNiceness() { return feedNiceness; }
@Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; }
@Override public List<TenantSecretStore> tenantSecretStores() { return tenantSecretStores; }
- @Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return jvmOmitStackTraceInFastThrowOption; }
@Override public boolean allowDisableMtls() { return allowDisableMtls; }
@Override public List<X509Certificate> operatorCertificates() { return operatorCertificates; }
@Override public double resourceLimitDisk() { return resourceLimitDisk; }
@@ -278,11 +276,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
- public TestProperties setJvmOmitStackTraceInFastThrowOption(String value) {
- this.jvmOmitStackTraceInFastThrowOption = value;
- return this;
- }
-
public TestProperties allowDisableMtls(boolean value) {
this.allowDisableMtls = value;
return this;
diff --git a/config-model/src/main/java/com/yahoo/schema/Schema.java b/config-model/src/main/java/com/yahoo/schema/Schema.java
index 3402ba31be9..127d12594b4 100644
--- a/config-model/src/main/java/com/yahoo/schema/Schema.java
+++ b/config-model/src/main/java/com/yahoo/schema/Schema.java
@@ -721,7 +721,7 @@ public class Schema implements ImmutableSchema {
"', but this schema does not exist");
// Require schema and document type inheritance to be consistent to keep things simple
- // And require it to be explicit so we have the option to support other possibilities later
+ // and require it to be explicit, so we have the option to support other possibilities later
var parentDocument = owner.schemas().get(inherited.get()).getDocument();
if ( ! getDocument().inheritedTypes().containsKey(new DataTypeName(parentDocument.getName())))
throw new IllegalArgumentException(this + " inherits '" + inherited.get() +
diff --git a/config-model/src/main/java/com/yahoo/vespa/documentmodel/DocumentSummary.java b/config-model/src/main/java/com/yahoo/vespa/documentmodel/DocumentSummary.java
index 2f61c1c631d..e48aa0eef7c 100644
--- a/config-model/src/main/java/com/yahoo/vespa/documentmodel/DocumentSummary.java
+++ b/config-model/src/main/java/com/yahoo/vespa/documentmodel/DocumentSummary.java
@@ -9,7 +9,8 @@ import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
-import java.util.logging.Level;
+
+import static java.util.logging.Level.WARNING;
/**
* A document summary definition - a list of summary fields.
@@ -121,11 +122,11 @@ public class DocumentSummary extends FieldView {
public void validate(DeployLogger logger) {
for (var inheritedName : inherited) {
var inheritedSummary = owner.getSummary(inheritedName);
- if (inheritedSummary == null) {
- // TODO Vespa 9: Throw IllegalArgumentException instead
- logger.logApplicationPackage(Level.WARNING,
- this + " inherits '" + inheritedName + "' but this is not present in " + owner);
- }
+ // TODO: Throw when no one is doing this anymore
+ if (inheritedName.equals("default"))
+ logger.logApplicationPackage(WARNING, this + " inherits '" + inheritedName + "', which makes no sense. Remove this inheritance");
+ else if (inheritedSummary == null )
+ throw new IllegalArgumentException(this + " inherits '" + inheritedName + "', but this is not present in " + owner);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/Host.java b/config-model/src/main/java/com/yahoo/vespa/model/Host.java
index a8085919a98..f87f1382ffb 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/Host.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/Host.java
@@ -16,7 +16,7 @@ import java.util.Objects;
public final class Host extends TreeConfigProducer<AnyConfigProducer> implements SentinelConfig.Producer, Comparable<Host> {
// Memory needed for auxiliary processes always running on the node (config-proxy, metrics-proxy).
- // Keep in sync with node-repository/ClusterModel.
+ // Keep in sync with node-repository/ClusterModel and startup scripts (go and bash).
public static final double memoryOverheadGb = 0.7;
private ConfigSentinel configSentinel = null;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainer.java
index e18ffea731e..a513cc673dd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/LogserverContainer.java
@@ -1,11 +1,9 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.admin;
-import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.api.container.ContainerServiceType;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.TreeConfigProducer;
-import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.model.container.Container;
/**
@@ -30,9 +28,4 @@ public class LogserverContainer extends Container {
return "";
}
- @Override
- public String jvmOmitStackTraceInFastThrowOption(ModelContext.FeatureFlags featureFlags) {
- return featureFlags.jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type.admin);
- }
-
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java
index e702a29b640..be38298279f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/clustercontroller/ClusterControllerContainer.java
@@ -97,11 +97,6 @@ public class ClusterControllerContainer extends Container implements
return ContainerServiceType.CLUSTERCONTROLLER_CONTAINER;
}
- @Override
- public String jvmOmitStackTraceInFastThrowOption(ModelContext.FeatureFlags featureFlags) {
- return featureFlags.jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type.admin);
- }
-
private void configureZooKeeperServer(boolean runStandaloneZooKeeper) {
if (runStandaloneZooKeeper)
ContainerModelBuilder.addReconfigurableZooKeeperServerComponents(this);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
index 2ff58438a07..8f262281edc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
@@ -10,7 +10,6 @@ import ai.vespa.metricsproxy.rpc.RpcConnector;
import ai.vespa.metricsproxy.rpc.RpcConnectorConfig;
import ai.vespa.metricsproxy.service.VespaServices;
import ai.vespa.metricsproxy.service.VespaServicesConfig;
-import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.api.container.ContainerServiceType;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.provision.ApplicationId;
@@ -78,11 +77,6 @@ public class MetricsProxyContainer extends Container implements
}
@Override
- public String jvmOmitStackTraceInFastThrowOption(ModelContext.FeatureFlags featureFlags) {
- return featureFlags.jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type.admin);
- }
-
- @Override
public int getWantedPort() {
return BASEPORT;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidator.java
index 9cf5fe84c21..4900b56801c 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidator.java
@@ -23,21 +23,22 @@ public class JvmHeapSizeValidator implements Validator {
context.model().getContainerClusters().forEach((clusterId, appCluster) -> {
var mp = appCluster.getMemoryPercentage().orElse(null);
if (mp == null) return;
- if (mp.availableMemoryGb().isEmpty()) {
+ if (mp.asAbsoluteGb().isEmpty()) {
context.deployState().getDeployLogger().log(Level.FINE, "Host resources unknown or percentage overridden with 'allocated-memory'");
return;
}
long jvmModelCost = appCluster.onnxModelCostCalculator().aggregatedModelCostInBytes();
if (jvmModelCost > 0) {
- double availableMemoryGb = mp.availableMemoryGb().getAsDouble();
+ double availableMemoryGb = mp.asAbsoluteGb().getAsDouble();
+ int percentageOfTotal = mp.ofContainerTotal().getAsInt();
double modelCostGb = jvmModelCost / (1024D * 1024 * 1024);
context.deployState().getDeployLogger().log(Level.FINE, () -> Text.format("JVM: %d%% (limit: %d%%), %.2fGB (limit: %.2fGB), ONNX: %.2fGB",
- mp.percentage(), percentLimit, availableMemoryGb, gbLimit, modelCostGb));
- if (mp.percentage() < percentLimit) {
+ percentageOfTotal, percentLimit, availableMemoryGb, gbLimit, modelCostGb));
+ if (percentageOfTotal < percentLimit) {
context.illegal(Text.format("Allocated percentage of memory of JVM in cluster '%s' is too low (%d%% < %d%%). " +
"Estimated cost of ONNX models is %.2fGB. Either use a node flavor with more memory or use less expensive models. " +
"You may override this validation by specifying 'allocated-memory' (https://docs.vespa.ai/en/performance/container-tuning.html#jvm-heap-size).",
- clusterId, mp.percentage(), percentLimit, modelCostGb));
+ clusterId, percentageOfTotal, percentLimit, modelCostGb));
}
if (availableMemoryGb < gbLimit) {
context.illegal(
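
To make the renamed accessors concrete, a worked example (assumptions noted in comments) reproducing the numbers in the updated JvmHeapSizeValidatorTest further down: a 2.3 GB node with a 1.00 GB ONNX model leaves 0.6 GB after the 0.7 GB host overhead, and an assumed 85% heap share of that available memory gives the 0.51 GB reported against the 0.60 GB limit.

// Worked example of the validator arithmetic; the 85% heap share of available memory
// is an assumption, inferred from the other test changes in this commit.
public class HeapSizeArithmetic {
    public static void main(String[] args) {
        double totalMemoryGb   = 2.3;
        double overheadGb      = 0.7;   // Host.memoryOverheadGb
        double onnxModelCostGb = 1.0;
        double availableGb = Math.max(0, totalMemoryGb - overheadGb - onnxModelCostGb); // 0.60 GB
        double heapGb = availableGb * 85 / 100.0;  // 0.51 GB, below the validator's 0.60 GB limit
        System.out.printf("available=%.2fGB heap=%.2fGB%n", availableGb, heapGb);
    }
}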
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidator.java
index 008a3fc5547..e57110e44e5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidator.java
@@ -115,7 +115,7 @@ public class RestartOnDeployForOnnxModelChangesValidator implements ChangeValida
double memoryUsedByModels = currentModelCostInGb + nextModelCostInGb;
double availableMemory = Math.max(0, totalMemory - Host.memoryOverheadGb - memoryUsedByModels);
- var availableMemoryPercentage = cluster.availableMemoryPercentage();
+ var availableMemoryPercentage = cluster.heapSizePercentageOfAvailable();
int memoryPercentage = (int) (availableMemory / totalMemory * availableMemoryPercentage);
var prefix = "Validating Onnx models memory usage for %s".formatted(cluster);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
index ed7646b3066..531dc8f0fcf 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
@@ -209,22 +209,27 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
if (memoryPercentage != null) return Optional.of(JvmMemoryPercentage.of(memoryPercentage));
if (isHostedVespa()) {
- int availableMemoryPercentage = availableMemoryPercentage();
- if (getContainers().isEmpty()) return Optional.of(JvmMemoryPercentage.of(availableMemoryPercentage)); // Node memory is not known
-
- // Node memory is known so convert available memory percentage to node memory percentage
- double totalMemory = getContainers().stream().mapToDouble(c -> c.getHostResource().realResources().memoryGb()).min().orElseThrow();
- double jvmHeapDeductionGb = onnxModelCostCalculator.aggregatedModelCostInBytes() / (1024D * 1024 * 1024);
- double availableMemory = Math.max(0, totalMemory - Host.memoryOverheadGb - jvmHeapDeductionGb);
- int memoryPercentage = (int) (availableMemory / totalMemory * availableMemoryPercentage);
- logger.log(FINE, () -> "cluster id '%s': memoryPercentage=%d, availableMemory=%f, totalMemory=%f, availableMemoryPercentage=%d, jvmHeapDeductionGb=%f"
- .formatted(id(), memoryPercentage, availableMemory, totalMemory, availableMemoryPercentage, jvmHeapDeductionGb));
- return Optional.of(JvmMemoryPercentage.of(memoryPercentage, availableMemory));
+ int heapSizePercentageOfAvailable = heapSizePercentageOfAvailable();
+ if (getContainers().isEmpty()) return Optional.of(JvmMemoryPercentage.of(heapSizePercentageOfAvailable)); // Node memory is not known
+
+ // Node memory is known, so compute heap size as a percentage of available memory (excluding overhead, which the startup scripts also account for)
+ double totalMemoryGb = getContainers().stream().mapToDouble(c -> c.getHostResource().realResources().memoryGb()).min().orElseThrow();
+ double totalMemoryMinusOverhead = Math.max(0, totalMemoryGb - Host.memoryOverheadGb);
+ double onnxModelCostGb = onnxModelCostCalculator.aggregatedModelCostInBytes() / (1024D * 1024 * 1024);
+ double availableMemoryGb = Math.max(0, totalMemoryMinusOverhead - onnxModelCostGb);
+ int memoryPercentageOfAvailable = (int) (heapSizePercentageOfAvailable * availableMemoryGb / totalMemoryMinusOverhead);
+ int memoryPercentageOfTotal = (int) (heapSizePercentageOfAvailable * availableMemoryGb / totalMemoryGb);
+ logger.log(FINE, () -> ("cluster id '%s': memoryPercentageOfAvailable=%d, memoryPercentageOfTotal=%d, " +
+ "availableMemoryGb=%f, totalMemoryGb=%f, heapSizePercentageOfAvailable=%d, onnxModelCostGb=%f")
+ .formatted(id(), memoryPercentageOfAvailable, memoryPercentageOfTotal,
+ availableMemoryGb, totalMemoryGb, heapSizePercentageOfAvailable, onnxModelCostGb));
+ return Optional.of(JvmMemoryPercentage.of(memoryPercentageOfAvailable, memoryPercentageOfTotal,
+ availableMemoryGb * heapSizePercentageOfAvailable * 1e-2));
}
return Optional.empty();
}
- public int availableMemoryPercentage() {
+ public int heapSizePercentageOfAvailable() {
return getHostClusterId().isPresent() ?
heapSizePercentageOfTotalAvailableMemoryWhenCombinedCluster :
heapSizePercentageOfAvailableMemory;
@@ -310,14 +315,14 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
public void getConfig(QrStartConfig.Builder builder) {
super.getConfig(builder);
var memoryPct = getMemoryPercentage().orElse(null);
- int heapsize = memoryPct != null && memoryPct.availableMemoryGb().isPresent()
- ? (int) (memoryPct.availableMemoryGb().getAsDouble() * 1024) : 1536;
+ int heapsize = memoryPct != null && memoryPct.asAbsoluteGb().isPresent()
+ ? (int) (memoryPct.asAbsoluteGb().getAsDouble() * 1024) : 1536;
builder.jvm.verbosegc(true)
.availableProcessors(0)
.compressedClassSpaceSize(0)
.minHeapsize(heapsize)
.heapsize(heapsize);
- if (memoryPct != null) builder.jvm.heapSizeAsPercentageOfPhysicalMemory(memoryPct.percentage());
+ if (memoryPct != null) builder.jvm.heapSizeAsPercentageOfPhysicalMemory(memoryPct.ofContainerAvailable());
}
@Override
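
A sketch of the reworked computation above, under assumed inputs (3 GB node, no ONNX models, 85% heap share of available memory; the share is inferred from the updated ModelProvisioningTest expectations): the heap is now reported as a percentage of available memory (85%), while the percentage of total node memory (65%) corresponds to what the old code reported.

// Sketch of the percentage-of-available vs. percentage-of-total computation, with assumed inputs.
public class HeapPercentageSketch {
    public static void main(String[] args) {
        double totalMemoryGb = 3.0;
        double totalMinusOverhead = Math.max(0, totalMemoryGb - 0.7);      // 2.3 GB
        double onnxCostGb = 0.0;
        double availableGb = Math.max(0, totalMinusOverhead - onnxCostGb); // 2.3 GB
        int heapPctOfAvailable = 85;                                       // assumed default
        int pctOfAvailable = (int) (heapPctOfAvailable * availableGb / totalMinusOverhead); // 85
        int pctOfTotal     = (int) (heapPctOfAvailable * availableGb / totalMemoryGb);      // 65
        System.out.println(pctOfAvailable + "% of available, " + pctOfTotal + "% of total");
    }
}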
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java b/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
index 864cbc8691b..c2368fd29a2 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
@@ -108,10 +108,6 @@ public abstract class Container extends AbstractService implements
addEnvironmentVariable("VESPA_MALLOC_MMAP_THRESHOLD","0x1000000"); // 16M
}
- public String jvmOmitStackTraceInFastThrowOption(ModelContext.FeatureFlags featureFlags) {
- return featureFlags.jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type.container);
- }
-
void setOwner(ContainerCluster<?> owner) { this.owner = owner; }
/** True if this container is retired (slated for removal) */
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
index aeb6c030a49..00ab47e8ddb 100755
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java
@@ -73,6 +73,7 @@ import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalDouble;
+import java.util.OptionalInt;
import java.util.Set;
import java.util.TreeSet;
@@ -721,10 +722,10 @@ public abstract class ContainerCluster<CONTAINER extends Container>
* Returns the percentage of host physical memory this application has specified for nodes in this cluster,
* or empty if this is not specified by the application.
*/
- public record JvmMemoryPercentage(int percentage, OptionalDouble availableMemoryGb) {
- static JvmMemoryPercentage of(int percentage) { return new JvmMemoryPercentage(percentage, OptionalDouble.empty()); }
- static JvmMemoryPercentage of(int percentage, double availableMemoryGb) {
- return new JvmMemoryPercentage(percentage, OptionalDouble.of(availableMemoryGb));
+ public record JvmMemoryPercentage(int ofContainerAvailable, OptionalInt ofContainerTotal, OptionalDouble asAbsoluteGb) { // optionalInt pctOfTotal < int pctOfAvailable
+ static JvmMemoryPercentage of(int percentageOfAvailable) { return new JvmMemoryPercentage(percentageOfAvailable, OptionalInt.empty(), OptionalDouble.empty()); }
+ static JvmMemoryPercentage of(int percentageOfAvailable, int percentageOfTotal, double absoluteMemoryGb) {
+ return new JvmMemoryPercentage(percentageOfAvailable, OptionalInt.of(percentageOfTotal), OptionalDouble.of(absoluteMemoryGb));
}
}
public Optional<JvmMemoryPercentage> getMemoryPercentage() { return Optional.empty(); }
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 5bcd21a5b9b..4983b36bee1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -987,8 +987,6 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
- // Must be done after setting Jvm options from services.xml (#extractJvmOptions()), otherwise those options will not be set
- cluster.getContainers().forEach(container -> container.appendJvmOptions(container.jvmOmitStackTraceInFastThrowOption(context.featureFlags())));
}
private ZoneEndpoint zoneEndpoint(ConfigModelContext context, ClusterSpec.Id cluster) {
@@ -1040,7 +1038,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
}
catch (NumberFormatException e) {
throw new IllegalArgumentException("The memory percentage given for nodes in " + cluster +
- " must be an integer percentage ending by the '%' sign", e);
+ " must be given as an integer followe by '%'", e);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index ae890408b99..d37e5d5382a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -249,7 +249,7 @@ public class ContentCluster extends TreeConfigProducer<AnyConfigProducer> implem
for (ContainerModel containerModel : containers) {
Optional<String> hostClusterId = containerModel.getCluster().getHostClusterId();
if (hostClusterId.isPresent() && hostClusterId.get().equals(clusterId) && containerModel.getCluster().getMemoryPercentage().isPresent()) {
- return containerModel.getCluster().getMemoryPercentage().get().percentage() * 0.01;
+ return containerModel.getCluster().getMemoryPercentage().get().ofContainerAvailable() * 0.01;
}
}
return 0.0;
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index 5ead9812b56..2846925b9c3 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -151,7 +151,7 @@ public class ModelProvisioningTest {
assertEquals("-Xlog:gc", mydisc2.getContainers().get(1).getJvmOptions());
assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(0).getPreLoad());
assertEquals("lib/blablamalloc.so", mydisc2.getContainers().get(1).getPreLoad());
- assertEquals(45, mydisc2.getMemoryPercentage().get().percentage());
+ assertEquals(45, mydisc2.getMemoryPercentage().get().ofContainerAvailable());
assertEquals(Optional.of("-XX:+UseParNewGC"), mydisc2.getJvmGCOptions());
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder();
mydisc2.getConfig(qrStartBuilder);
@@ -234,8 +234,8 @@ public class ModelProvisioningTest {
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(1, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
assertEquals(2, model.getContentClusters().get("content").getRootGroup().getNodes().size(), "Nodes in cluster without ID");
- assertEquals(65, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size for container1");
- assertEquals(84, physicalMemoryPercentage(model.getContainerClusters().get("container2")), "Heap size for container2");
+ assertEquals(85, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size for container1");
+ assertEquals(85, physicalMemoryPercentage(model.getContainerClusters().get("container2")), "Heap size for container2");
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Type.content, model);
assertProvisioned(1, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content"), ClusterSpec.Type.content, model);
@@ -287,8 +287,8 @@ public class ModelProvisioningTest {
VespaModel model = tester.createModel(xmlWithNodes, true, deployStateWithClusterEndpoints("container1").deployLogger(logger));
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
- assertEquals(18, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
- assertEquals(2025077080L, protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is lowered to account for the jvm heap");
+ assertEquals(24, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is lowered with combined clusters");
+ assertEquals(1876900708, protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is lowered to account for the jvm heap");
assertProvisioned(0, ClusterSpec.Id.from("container1"), ClusterSpec.Type.container, model);
assertProvisioned(2, ClusterSpec.Id.from("content1"), ClusterSpec.Id.from("container1"), ClusterSpec.Type.combined, model);
var msgs = logger.msgs().stream().filter(m -> m.level().equals(Level.WARNING)).toList();
@@ -356,7 +356,7 @@ public class ModelProvisioningTest {
VespaModel model = tester.createModel(xmlWithNodes, true, deployStateWithClusterEndpoints("container1"));
assertEquals(2, model.getContentClusters().get("content1").getRootGroup().getNodes().size(), "Nodes in content1");
assertEquals(2, model.getContainerClusters().get("container1").getContainers().size(), "Nodes in container1");
- assertEquals(65, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
+ assertEquals(85, physicalMemoryPercentage(model.getContainerClusters().get("container1")), "Heap size is normal");
assertEquals((long) ((3 - memoryOverheadGb) * (Math.pow(1024, 3))), protonMemorySize(model.getContentClusters().get("content1")), "Memory for proton is normal");
}
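
The new combined-cluster expectations above can be reconstructed under the assumption that proton is given the node memory minus host overhead, scaled by one minus the JVM heap fraction; the 24% combined-cluster heap share is inferred from the assertion value itself.

// Reconstruction (assumption, not taken from the production code) of the combined-cluster numbers.
public class CombinedClusterMemory {
    public static void main(String[] args) {
        double totalMemoryGb = 3.0;
        double overheadGb = 0.7;                  // Host.memoryOverheadGb
        int jvmHeapPct = 24;                      // new expectation for combined clusters
        double protonGb = (totalMemoryGb - overheadGb) * (1 - jvmHeapPct / 100.0);
        System.out.printf("proton memory: %d bytes%n", (long) (protonGb * Math.pow(1024, 3)));
        // prints 1876900708, matching the updated assertion
    }
}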
diff --git a/config-model/src/test/java/com/yahoo/schema/SummaryTestCase.java b/config-model/src/test/java/com/yahoo/schema/SummaryTestCase.java
index 8ffbab84fd7..539ec91ee5f 100644
--- a/config-model/src/test/java/com/yahoo/schema/SummaryTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/SummaryTestCase.java
@@ -6,16 +6,13 @@ import com.yahoo.vespa.documentmodel.DocumentSummary;
import com.yahoo.vespa.model.test.utils.DeployLoggerStub;
import com.yahoo.vespa.objects.FieldBase;
import com.yahoo.yolean.Exceptions;
-import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import static com.yahoo.config.model.test.TestUtil.joinLines;
import java.util.Collection;
import java.util.List;
-import java.util.Optional;
import java.util.logging.Level;
-import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.*;
@@ -233,16 +230,12 @@ public class SummaryTestCase {
" document-summary test_summary inherits nonesuch {" +
" }" +
"}");
- DeployLoggerStub logger = new DeployLoggerStub();
- ApplicationBuilder.createFromStrings(logger, schema);
- assertEquals("document-summary 'test_summary' inherits 'nonesuch' but this is not present in schema 'test'",
- logger.entries.get(0).message);
- // fail("Expected failure");
+ ApplicationBuilder.createFromString(schema);
+ fail("Expected failure");
}
catch (IllegalArgumentException e) {
- fail();
- // assertEquals("document-summary 'test_summary' inherits nonesuch but this is not present in schema 'test'",
- // e.getMessage());
+ assertEquals("document-summary 'test_summary' inherits 'nonesuch', but this is not present in schema 'test'",
+ e.getMessage());
}
}
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/SummaryConsistencyTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/SummaryConsistencyTestCase.java
index 1184e49b381..5ea097f13fb 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/SummaryConsistencyTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/SummaryConsistencyTestCase.java
@@ -5,11 +5,11 @@ import com.yahoo.schema.Schema;
import com.yahoo.schema.ApplicationBuilder;
import com.yahoo.schema.parser.ParseException;
import com.yahoo.vespa.documentmodel.SummaryTransform;
-import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import static com.yahoo.config.model.test.TestUtil.joinLines;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
public class SummaryConsistencyTestCase {
@@ -45,8 +45,7 @@ public class SummaryConsistencyTestCase {
}
@Test
- @Disabled
- void testDocumentTypesWithInheritanceOfNonExistingField() throws ParseException {
+ void testDocumentSummaryWithInheritanceOfNonExistingSummary() {
String schemaString = """
schema foo {
document foo {
@@ -61,8 +60,7 @@ public class SummaryConsistencyTestCase {
}
}
""";
- var schema = ApplicationBuilder.createFromString(schemaString).getSchema();
- schema.getSummaryField("foo_summary");
+ assertThrows(IllegalArgumentException.class, () -> ApplicationBuilder.createFromString(schemaString).getSchema());
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidatorTest.java
index 45125c8eb68..340968f89d1 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/JvmHeapSizeValidatorTest.java
@@ -45,10 +45,10 @@ class JvmHeapSizeValidatorTest {
@Test
void fails_on_too_low_heap_size() throws IOException, SAXException {
- var deployState = createDeployState(2.2, 1024L * 1024 * 1024);
+ var deployState = createDeployState(2.3, 1024L * 1024 * 1024);
var model = new VespaModel(new NullConfigModelRegistry(), deployState);
ValidationTester.expect(new JvmHeapSizeValidator(), model, deployState,
- "Allocated memory to JVM in cluster 'container' is too low (0.50GB < 0.60GB). Estimated cost of ONNX models is 1.00GB.");
+ "Allocated memory to JVM in cluster 'container' is too low (0.51GB < 0.60GB). Estimated cost of ONNX models is 1.00GB.");
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidatorTest.java
index 4c0786ea879..484a938476d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RestartOnDeployForOnnxModelChangesValidatorTest.java
@@ -33,7 +33,7 @@ public class RestartOnDeployForOnnxModelChangesValidatorTest {
// Must be so large that changing model set or options requires restart (due to using more memory than available),
// but not so large that deployment will not work at all with one model
- private static final long defaultCost = 723456789;
+ private static final long defaultCost = 635241309;
private static final long defaultHash = 0;
@@ -47,8 +47,8 @@ public class RestartOnDeployForOnnxModelChangesValidatorTest {
@Test
void validate_changed_estimated_cost() {
- VespaModel current = createModel(onnxModelCost(70000000, defaultHash));
- VespaModel next = createModel(onnxModelCost(723456789, defaultHash));
+ VespaModel current = createModel(onnxModelCost(defaultCost, defaultHash));
+ VespaModel next = createModel(onnxModelCost(19 * defaultCost / 20, defaultHash));
List<ConfigChangeAction> result = validateModel(current, next);
assertEquals(1, result.size());
assertTrue(result.get(0).validationId().isEmpty());
@@ -58,8 +58,8 @@ public class RestartOnDeployForOnnxModelChangesValidatorTest {
@Test
void validate_changed_estimated_cost_non_hosted() {
boolean hosted = false;
- VespaModel current = createModel(onnxModelCost(70000000, defaultHash), hosted);
- VespaModel next = createModel(onnxModelCost(723456789, defaultHash), hosted);
+ VespaModel current = createModel(onnxModelCost(defaultCost, defaultHash), hosted);
+ VespaModel next = createModel(onnxModelCost(19 * defaultCost / 20, defaultHash), hosted);
List<ConfigChangeAction> result = validateModel(current, next, hosted);
assertEquals(0, result.size());
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/SchemaChainsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/SchemaChainsTest.java
index ea43f5c8124..b782366655f 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/SchemaChainsTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/SchemaChainsTest.java
@@ -142,7 +142,7 @@ public class SchemaChainsTest extends SchemaChainsTestBase {
assertTrue(chain.phases().isEmpty());
assertEquals(1, chain.inherits().size());
assertEquals("native", chain.inherits(0));
- assertEquals(10, chain.components().size());
+ assertEquals(11, chain.components().size());
assertEquals("com.yahoo.prelude.querytransform.PhrasingSearcher@vespa", chain.components(0));
assertEquals("com.yahoo.prelude.searcher.FieldCollapsingSearcher@vespa", chain.components(1));
assertEquals("com.yahoo.search.yql.MinimalQueryInserter@vespa", chain.components(2));
@@ -153,13 +153,14 @@ public class SchemaChainsTest extends SchemaChainsTestBase {
assertEquals("com.yahoo.prelude.semantics.SemanticSearcher@vespa", chain.components(7));
assertEquals("com.yahoo.search.grouping.GroupingQueryParser@vespa", chain.components(8));
assertEquals("com.yahoo.search.querytransform.WeakAndReplacementSearcher@vespa", chain.components(9));
+ assertEquals("com.yahoo.search.searchers.OpportunisticWeakAndSearcher@vespa", chain.components(10));
assertTrue(chain.excludes().isEmpty());
assertEquals(ChainsConfig.Chains.Type.SEARCH, chain.type());
}
@Test
public void require_all_default_chains_are_correct() {
- assertEquals(63, chainsConfig.components().size());
+ assertEquals(64, chainsConfig.components().size());
assertEquals(10, chainsConfig.chains().size());
validateVespaPhasesChain(findChain("vespaPhases"));
validateNativeChain(findChain("native"));
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java
index 64afd444989..46097da434e 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/JvmOptionsTest.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.model.container.xml;
-import com.yahoo.cloud.config.SentinelConfig;
import com.yahoo.collections.Pair;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.NullConfigModelRegistry;
@@ -206,29 +205,6 @@ public class JvmOptionsTest extends ContainerModelBuilderTestBase {
assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts());
}
- @Test
- void verify_jvm_option_and_value_from_feature_flag_are_both_included() throws IOException, SAXException {
- String servicesXml = """
- <container version='1.0'>
- <search/>
- <nodes>
- <jvm options="-Xms1024m -Xmx2048m" />
- <node hostalias="node1" />
- </nodes>
- </container>
- """;
- ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build();
- // Need to create VespaModel to make deploy properties have effect
- VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder()
- .applicationPackage(applicationPackage)
- .properties(new TestProperties().setJvmOmitStackTraceInFastThrowOption("-XX:-OmitStackTraceInFastThrow"))
- .build());
- SentinelConfig.Builder builder = new SentinelConfig.Builder();
- model.getConfig(builder, "hosts/localhost");
- SentinelConfig config = builder.build();
- assertEquals("PRELOAD=/opt/vespa/lib64/vespa/malloc/libvespamalloc.so exec ${VESPA_HOME}/libexec/vespa/vespa-wrapper vespa-start-container-daemon -Xms1024m -Xmx2048m -XX:-OmitStackTraceInFastThrow ", config.service().get(0).command());
- }
-
private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException {
verifyLogMessage(isHosted, "gc-options", override, invalidOptions);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index e0891854d07..1e2081f160f 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -180,7 +180,6 @@ public class ModelContextImpl implements ModelContext {
private final double feedNiceness;
private final List<String> allowedAthenzProxyIdentities;
private final int maxActivationInhibitedOutOfSyncGroups;
- private final Predicate<ClusterSpec.Type> jvmOmitStackTraceInFastThrow;
private final double resourceLimitDisk;
private final double resourceLimitMemory;
private final double minNodeRatioPerGroup;
@@ -226,7 +225,6 @@ public class ModelContextImpl implements ModelContext {
this.mbus_network_threads = Flags.MBUS_NUM_NETWORK_THREADS.bindTo(source).with(appId).with(version).value();
this.allowedAthenzProxyIdentities = Flags.ALLOWED_ATHENZ_PROXY_IDENTITIES.bindTo(source).with(appId).with(version).value();
this.maxActivationInhibitedOutOfSyncGroups = Flags.MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS.bindTo(source).with(appId).with(version).value();
- this.jvmOmitStackTraceInFastThrow = type -> PermanentFlags.JVM_OMIT_STACK_TRACE_IN_FAST_THROW.bindTo(source).with(appId).with(version).with(type).value();
this.resourceLimitDisk = PermanentFlags.RESOURCE_LIMIT_DISK.bindTo(source).with(appId).with(version).value();
this.resourceLimitMemory = PermanentFlags.RESOURCE_LIMIT_MEMORY.bindTo(source).with(appId).with(version).value();
this.minNodeRatioPerGroup = Flags.MIN_NODE_RATIO_PER_GROUP.bindTo(source).with(appId).with(version).value();
@@ -277,9 +275,6 @@ public class ModelContextImpl implements ModelContext {
@Override public int mbusNetworkThreads() { return mbus_network_threads; }
@Override public List<String> allowedAthenzProxyIdentities() { return allowedAthenzProxyIdentities; }
@Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; }
- @Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) {
- return translateJvmOmitStackTraceInFastThrowToString(jvmOmitStackTraceInFastThrow, type);
- }
@Override public double resourceLimitDisk() { return resourceLimitDisk; }
@Override public double resourceLimitMemory() { return resourceLimitMemory; }
@Override public double minNodeRatioPerGroup() { return minNodeRatioPerGroup; }
@@ -316,12 +311,6 @@ public class ModelContextImpl implements ModelContext {
@Override public SharedHosts sharedHosts() { return sharedHosts; }
@Override public Architecture adminClusterArchitecture() { return adminClusterArchitecture; }
@Override public boolean symmetricPutAndActivateReplicaSelection() { return symmetricPutAndActivateReplicaSelection; }
-
- private String translateJvmOmitStackTraceInFastThrowToString(Predicate<ClusterSpec.Type> function,
- ClusterSpec.Type clusterType) {
- return function.test(clusterType) ? "" : "-XX:-OmitStackTraceInFastThrow";
- }
-
}
public static class Properties implements ModelContext.Properties {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
index 76879ccf8ae..c2221d21015 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
@@ -94,7 +94,10 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
return;
}
}
- catch (Exception ignored) { }
+ catch (Exception e) {
+ log.warning("Exception when downloading application package (" + appFileReference + ")" +
+ " for " + applicationId + " (session " + sessionId + "): " + e.getMessage());
+ }
failures[0]++;
log.info("Downloading application package (" + appFileReference + ")" +
" for " + applicationId + " (session " + sessionId + ") unsuccessful. " +
diff --git a/container-disc/src/main/sh/vespa-start-container-daemon.sh b/container-disc/src/main/sh/vespa-start-container-daemon.sh
index c4807bb134f..a13485fb6bb 100755
--- a/container-disc/src/main/sh/vespa-start-container-daemon.sh
+++ b/container-disc/src/main/sh/vespa-start-container-daemon.sh
@@ -151,8 +151,8 @@ configure_memory() {
available_cgroup=$((available_cgroup_bytes >> 20))
available=$((available > available_cgroup ? available_cgroup : available))
fi
- #Subtract 1G as fixed overhead for an application container.
- reserved_mem=1024
+ # Subtract 700MB as fixed overhead for an application container -- keep in sync with com.yahoo.vespa.model.Host.memoryOverheadGb
+ reserved_mem=700
available=$((available > reserved_mem ? available - reserved_mem : available))
jvm_heapsize=$((available * jvm_heapSizeAsPercentageOfPhysicalMemory / 100))
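For illustration (the numbers are hypothetical): with 8192 MB available and jvm_heapSizeAsPercentageOfPhysicalMemory=70, the heap becomes (8192 - 700) * 70 / 100 = 5244 MB, versus (8192 - 1024) * 70 / 100 = 5017 MB under the previous 1 GB overhead.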
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index 1c6c773afd9..1bed32c6cdf 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -8917,6 +8917,18 @@
],
"fields" : [ ]
},
+ "com.yahoo.search.searchers.OpportunisticWeakAndSearcher" : {
+ "superClass" : "com.yahoo.search.Searcher",
+ "interfaces" : [ ],
+ "attributes" : [
+ "public"
+ ],
+ "methods" : [
+ "public void <init>()",
+ "public com.yahoo.search.Result search(com.yahoo.search.Query, com.yahoo.search.searchchain.Execution)"
+ ],
+ "fields" : [ ]
+ },
"com.yahoo.search.searchers.QueryValidator" : {
"superClass" : "com.yahoo.search.Searcher",
"interfaces" : [ ],
diff --git a/container-search/src/main/java/com/yahoo/search/querytransform/WeakAndReplacementSearcher.java b/container-search/src/main/java/com/yahoo/search/querytransform/WeakAndReplacementSearcher.java
index 7536e74042c..72a1a7d3430 100644
--- a/container-search/src/main/java/com/yahoo/search/querytransform/WeakAndReplacementSearcher.java
+++ b/container-search/src/main/java/com/yahoo/search/querytransform/WeakAndReplacementSearcher.java
@@ -12,6 +12,7 @@ import com.yahoo.search.Searcher;
import com.yahoo.search.searchchain.Execution;
import com.yahoo.search.yql.MinimalQueryInserter;
import com.yahoo.yolean.chain.After;
+import com.yahoo.yolean.chain.Provides;
/**
* Recursively replaces all instances of OrItems with WeakAndItems if the query property weakand.replace is true.
@@ -19,10 +20,12 @@ import com.yahoo.yolean.chain.After;
*
* @author karowan
*/
+@Provides(WeakAndReplacementSearcher.REPLACE_OR_WITH_WEAKAND)
@After(MinimalQueryInserter.EXTERNAL_YQL)
public class WeakAndReplacementSearcher extends Searcher {
+ public static final String REPLACE_OR_WITH_WEAKAND = "replace-or-with-weakand";
static final CompoundName WEAKAND_REPLACE = CompoundName.from("weakAnd.replace");
- static final CompoundName WAND_HITS = CompoundName.from("wand.hits");
+ public static final CompoundName WAND_HITS = CompoundName.from("wand.hits");
@Override public Result search(Query query, Execution execution) {
if (!query.properties().getBoolean(WEAKAND_REPLACE)) {
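A minimal usage sketch of the rewrite this searcher performs, assuming a standard chain Execution; the property names come from the WEAKAND_REPLACE and WAND_HITS constants above, and the query URL is illustrative:

    Query query = new Query("/search/?query=a+b&type=any");   // parsed into an OR of the two terms
    query.properties().set("weakAnd.replace", true);          // WEAKAND_REPLACE: replace OrItems with WeakAndItems
    query.properties().set("wand.hits", 100);                 // WAND_HITS: target hits (N) for the resulting WeakAND
    Result result = execution.search(query);                  // 'execution' is an assumed Execution instance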
diff --git a/container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java b/container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java
index 69a1f8ec6cb..c03a74ea2c5 100644
--- a/container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java
+++ b/container-search/src/main/java/com/yahoo/search/searchchain/model/VespaSearchers.java
@@ -34,7 +34,8 @@ public class VespaSearchers {
com.yahoo.prelude.searcher.PosSearcher.class,
com.yahoo.prelude.semantics.SemanticSearcher.class,
com.yahoo.search.grouping.GroupingQueryParser.class,
- com.yahoo.search.querytransform.WeakAndReplacementSearcher.class);
+ com.yahoo.search.querytransform.WeakAndReplacementSearcher.class,
+ com.yahoo.search.searchers.OpportunisticWeakAndSearcher.class);
public static final Collection<ChainedComponentModel> nativeSearcherModels;
diff --git a/container-search/src/main/java/com/yahoo/search/searchers/OpportunisticWeakAndSearcher.java b/container-search/src/main/java/com/yahoo/search/searchers/OpportunisticWeakAndSearcher.java
new file mode 100644
index 00000000000..c512e821128
--- /dev/null
+++ b/container-search/src/main/java/com/yahoo/search/searchers/OpportunisticWeakAndSearcher.java
@@ -0,0 +1,85 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.search.searchers;
+
+import com.yahoo.api.annotations.Beta;
+import com.yahoo.component.chain.dependencies.After;
+import com.yahoo.prelude.query.AndItem;
+import com.yahoo.prelude.query.CompositeItem;
+import com.yahoo.prelude.query.Item;
+import com.yahoo.prelude.query.WeakAndItem;
+import com.yahoo.processing.request.CompoundName;
+import com.yahoo.search.Query;
+import com.yahoo.search.Result;
+import com.yahoo.search.Searcher;
+import com.yahoo.search.querytransform.WeakAndReplacementSearcher;
+import com.yahoo.search.searchchain.Execution;
+
+import static com.yahoo.search.querytransform.WeakAndReplacementSearcher.WAND_HITS;
+
+/**
+ * Opportunistically replaces the WeakAND with an AND, since AND is faster to evaluate.
+ * If the AND query returns enough hits, that result is returned; if not, we fall back to the original query.
+ *
+ * @author baldersheim
+ */
+@Beta
+@After(WeakAndReplacementSearcher.REPLACE_OR_WITH_WEAKAND)
+public class OpportunisticWeakAndSearcher extends Searcher {
+ static final CompoundName OPPORTUNISTIC_AND = CompoundName.from("weakAnd.opportunistic.and");
+
+ @Override
+ public Result search(Query query, Execution execution) {
+ if (!query.properties().getBoolean(OPPORTUNISTIC_AND)) {
+ return execution.search(query);
+ }
+
+ Item originalRoot = query.getModel().getQueryTree().getRoot();
+ int targetHits = targetHits(originalRoot);
+ if (targetHits >= 0) {
+ query.getModel().getQueryTree().setRoot(weakAnd2AndRecurse(originalRoot.clone()));
+ query.trace("WeakAND => AND", true, 2);
+ Result result = execution.search(query);
+ if (result.getHitCount() >= query.properties().getInteger(WAND_HITS)) {
+ return result;
+ }
+ query.getModel().getQueryTree().setRoot(originalRoot);
+ return execution.search(query);
+ }
+ return execution.search(query);
+ }
+
+ // returns targetHits for the first WeakAndItem found, -1 if none found.
+ static int targetHits(Item item) {
+ if (!(item instanceof CompositeItem compositeItem)) return -1;
+ if (item instanceof WeakAndItem weakAndItem) return weakAndItem.getN();
+ for (int i = 0; i < compositeItem.getItemCount(); i++) {
+ int targetHits = targetHits(compositeItem.getItem(i));
+ if (targetHits >= 0) return targetHits;
+ }
+ return -1;
+ }
+
+ static Item weakAnd2AndRecurse(Item item) {
+ if (!(item instanceof CompositeItem compositeItem)) return item;
+ compositeItem = weakAnd2And(compositeItem);
+ for (int i = 0; i < compositeItem.getItemCount(); i++) {
+ Item subItem = compositeItem.getItem(i);
+ Item replacedItem = weakAnd2AndRecurse(subItem);
+ if (replacedItem != subItem) {
+ compositeItem.setItem(i, replacedItem);
+ }
+ }
+ return compositeItem;
+ }
+
+ private static CompositeItem weakAnd2And(CompositeItem item) {
+ if (item instanceof WeakAndItem weakAndItem) {
+ AndItem andItem = new AndItem();
+ andItem.setWeight(weakAndItem.getWeight());
+ item.items().forEach(andItem::addItem);
+ return andItem;
+ }
+ return item;
+ }
+}
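A minimal sketch of how the new searcher is meant to be triggered, with property names taken from OPPORTUNISTIC_AND and WAND_HITS above; the Execution setup is assumed:

    Query query = new Query("/search/?query=foo+bar");
    query.properties().set("weakAnd.opportunistic.and", true); // OPPORTUNISTIC_AND: try the faster AND first
    query.properties().set("wand.hits", 100);                  // the AND result is kept only if it has at least this many hits
    Result result = execution.search(query);                   // otherwise the original WeakAND query is re-run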
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java
index 0516a8a227a..a662c445bc7 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/test/QueryCanonicalizerTestCase.java
@@ -22,6 +22,15 @@ public class QueryCanonicalizerTestCase {
}
@Test
+ void testNoCanonicalizationWithWhereTrue() {
+ CompositeItem root = new AndItem();
+
+ root.addItem(new TrueItem());
+ root.addItem(new WordItem("word"));
+ assertCanonicalized("AND TRUE word", null, root);
+ }
+
+ @Test
void testSingleLevelSingleItemNonReducibleComposite() {
CompositeItem root = new WeakAndItem();
diff --git a/container-search/src/test/java/com/yahoo/search/querytransform/WeakAndReplacementSearcherTestCase.java b/container-search/src/test/java/com/yahoo/search/querytransform/WeakAndReplacementSearcherTestCase.java
index 52f5fd0cafb..7b91a5d3c25 100644
--- a/container-search/src/test/java/com/yahoo/search/querytransform/WeakAndReplacementSearcherTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/querytransform/WeakAndReplacementSearcherTestCase.java
@@ -23,6 +23,7 @@ import static com.yahoo.search.querytransform.WeakAndReplacementSearcher.WAND_HI
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class WeakAndReplacementSearcherTestCase {
@@ -57,7 +58,7 @@ public class WeakAndReplacementSearcherTestCase {
Result result = buildExec().search(query);
Item root = TestUtils.getQueryTreeRoot(result);
assertFalse(orItemsExist(root));
- assertTrue(root instanceof WeakAndItem);
+ assertInstanceOf(WeakAndItem.class, root);
assertEquals(N, ((WeakAndItem) root).getN());
}
@@ -103,24 +104,22 @@ public class WeakAndReplacementSearcherTestCase {
if (item1 != item2) {
return false;
}
- if (!(item1 instanceof CompositeItem)) {
+ if (!(item1 instanceof CompositeItem compositeItem1)) {
return true;
}
- CompositeItem compositeItem1 = (CompositeItem) item1;
CompositeItem compositeItem2 = (CompositeItem) item2;
return IntStream.range(0, compositeItem1.getItemCount())
.allMatch(i -> deepEquals(compositeItem1.getItem(i), compositeItem2.getItem(i)));
}
private boolean orItemsExist(Item item) {
- if (!(item instanceof CompositeItem)) {
+ if (!(item instanceof CompositeItem compositeItem)) {
return false;
}
if (item instanceof OrItem) {
return true;
}
- CompositeItem compositeItem = (CompositeItem) item;
return compositeItem.items().stream().anyMatch(this::orItemsExist);
}
diff --git a/container-search/src/test/java/com/yahoo/search/rendering/EventRendererTestCase.java b/container-search/src/test/java/com/yahoo/search/rendering/EventRendererTestCase.java
index c0a677b2094..2cfb6552379 100644
--- a/container-search/src/test/java/com/yahoo/search/rendering/EventRendererTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/rendering/EventRendererTestCase.java
@@ -141,7 +141,7 @@ public class EventRendererTestCase {
});
assertFalse(future.isDone());
result = render(new Result(new Query(), newHitGroup(tokenStream, "token_stream")));
- assertTrue(future.isDone()); // Renderer waits for async completion
+ future.join(); // Renderer waits for async completion
} finally {
executor.shutdownNow();
diff --git a/container-search/src/test/java/com/yahoo/search/searchers/OpportunisticWeakAndSearcherTestCase.java b/container-search/src/test/java/com/yahoo/search/searchers/OpportunisticWeakAndSearcherTestCase.java
new file mode 100644
index 00000000000..c099cf437f8
--- /dev/null
+++ b/container-search/src/test/java/com/yahoo/search/searchers/OpportunisticWeakAndSearcherTestCase.java
@@ -0,0 +1,42 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.search.searchers;
+
+import com.yahoo.prelude.query.AndItem;
+import com.yahoo.prelude.query.CompositeItem;
+import com.yahoo.prelude.query.Item;
+import com.yahoo.prelude.query.OrItem;
+import com.yahoo.prelude.query.WeakAndItem;
+import com.yahoo.prelude.query.WordItem;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+
+public class OpportunisticWeakAndSearcherTestCase {
+ private static Item buildQueryItem(CompositeItem root, CompositeItem injectAtLevel2) {
+ root.addItem(new WordItem("text"));
+ injectAtLevel2.addItem(new WordItem("a"));
+ injectAtLevel2.addItem(new WordItem("b"));
+ root.addItem(injectAtLevel2);
+ return root;
+ }
+
+ @Test
+ public void requireThatWeakAndIsDetected() {
+ assertEquals(-1, OpportunisticWeakAndSearcher.targetHits(new OrItem()));
+ assertEquals(33, OpportunisticWeakAndSearcher.targetHits(new WeakAndItem(33)));
+ assertEquals(77, OpportunisticWeakAndSearcher.targetHits(buildQueryItem(new OrItem(), new WeakAndItem(77))));
+ assertEquals(77, OpportunisticWeakAndSearcher.targetHits(buildQueryItem(new AndItem(), new WeakAndItem(77))));
+ assertEquals(-1, OpportunisticWeakAndSearcher.targetHits(buildQueryItem(new OrItem(), new AndItem())));
+ }
+
+ @Test
+ public void requireThatWeakAndIsReplacedWithAnd() {
+ assertEquals(buildQueryItem(new OrItem(), new AndItem()),
+ OpportunisticWeakAndSearcher.weakAnd2AndRecurse(buildQueryItem(new OrItem(), new WeakAndItem())));
+ assertEquals(buildQueryItem(new AndItem(), new AndItem()),
+ OpportunisticWeakAndSearcher.weakAnd2AndRecurse(buildQueryItem(new AndItem(), new WeakAndItem())));
+ }
+
+}
diff --git a/dependency-versions/pom.xml b/dependency-versions/pom.xml
index 0e7921f050a..c5f5d8b832f 100644
--- a/dependency-versions/pom.xml
+++ b/dependency-versions/pom.xml
@@ -33,7 +33,7 @@
<!-- DO NOT UPGRADE THESE TO A NEW MAJOR VERSION WITHOUT CHECKING FOR BINARY COMPATIBILITY -->
<aopalliance.vespa.version>1.0</aopalliance.vespa.version>
- <error-prone-annotations.vespa.version>2.27.1</error-prone-annotations.vespa.version>
+ <error-prone-annotations.vespa.version>2.28.0</error-prone-annotations.vespa.version>
<guava.vespa.version>33.2.0-jre</guava.vespa.version>
<guice.vespa.version>6.0.0</guice.vespa.version>
<j2objc-annotations.vespa.version>3.0.0</j2objc-annotations.vespa.version>
@@ -68,7 +68,7 @@
<assertj.vespa.version>3.26.0</assertj.vespa.version>
<!-- Athenz dependencies. Make sure these dependencies match those in Vespa's internal repositories -->
- <aws-sdk.vespa.version>1.12.731</aws-sdk.vespa.version>
+ <aws-sdk.vespa.version>1.12.734</aws-sdk.vespa.version>
<athenz.vespa.version>1.11.59</athenz.vespa.version>
<!-- Athenz END -->
@@ -79,7 +79,7 @@
xargs perl -pi -e 's/major = [0-9]+, minor = [0-9]+, micro = [0-9]+/major = 5, minor = 3, micro = 0/g'
-->
<bouncycastle.vespa.version>1.78.1</bouncycastle.vespa.version>
- <byte-buddy.vespa.version>1.14.16</byte-buddy.vespa.version>
+ <byte-buddy.vespa.version>1.14.17</byte-buddy.vespa.version>
<checker-qual.vespa.version>3.38.0</checker-qual.vespa.version>
<commons-beanutils.vespa.version>1.9.4</commons-beanutils.vespa.version>
<commons-codec.vespa.version>1.17.0</commons-codec.vespa.version>
@@ -102,7 +102,7 @@
<felix.log.vespa.version>1.3.0</felix.log.vespa.version>
<findbugs.vespa.version>3.0.2</findbugs.vespa.version> <!-- Should be kept in sync with guava -->
<hamcrest.vespa.version>2.2</hamcrest.vespa.version>
- <hdrhistogram.vespa.version>2.2.1</hdrhistogram.vespa.version>
+ <hdrhistogram.vespa.version>2.2.2</hdrhistogram.vespa.version>
<huggingface.vespa.version>0.28.0</huggingface.vespa.version>
<icu4j.vespa.version>75.1</icu4j.vespa.version>
<java-jjwt.vespa.version>0.11.5</java-jjwt.vespa.version>
@@ -173,18 +173,19 @@
<maven-core.vespa.version>3.9.7</maven-core.vespa.version>
<maven-dependency-plugin.vespa.version>3.6.1</maven-dependency-plugin.vespa.version>
<maven-deploy-plugin.vespa.version>3.1.2</maven-deploy-plugin.vespa.version>
- <maven-enforcer-plugin.vespa.version>3.4.1</maven-enforcer-plugin.vespa.version>
+ <maven-enforcer-plugin.vespa.version>3.5.0</maven-enforcer-plugin.vespa.version>
<maven-failsafe-plugin.vespa.version>3.2.5</maven-failsafe-plugin.vespa.version>
<maven-gpg-plugin.vespa.version>3.2.4</maven-gpg-plugin.vespa.version>
<maven-install-plugin.vespa.version>3.1.2</maven-install-plugin.vespa.version>
<maven-jar-plugin.vespa.version>3.4.1</maven-jar-plugin.vespa.version>
- <maven-javadoc-plugin.vespa.version>3.6.3</maven-javadoc-plugin.vespa.version>
+ <maven-javadoc-plugin.vespa.version>3.7.0</maven-javadoc-plugin.vespa.version>
<maven-plugin-api.vespa.version>${maven-core.vespa.version}</maven-plugin-api.vespa.version>
- <maven-plugin-tools.vespa.version>3.13.0</maven-plugin-tools.vespa.version>
+ <maven-plugin-tools.vespa.version>3.13.1</maven-plugin-tools.vespa.version>
<maven-resources-plugin.vespa.version>3.3.1</maven-resources-plugin.vespa.version>
<maven-resolver.vespa.version>1.9.20</maven-resolver.vespa.version>
<maven-shade-plugin.vespa.version>3.5.3</maven-shade-plugin.vespa.version>
<maven-shared-utils.vespa.version>3.4.2</maven-shared-utils.vespa.version>
+ <maven-dependency-tree.vespa.version>3.3.0</maven-dependency-tree.vespa.version>
<maven-site-plugin.vespa.version>3.12.1</maven-site-plugin.vespa.version>
<maven-source-plugin.vespa.version>3.3.1</maven-source-plugin.vespa.version>
<properties-maven-plugin.vespa.version>1.2.1</properties-maven-plugin.vespa.version>
diff --git a/eval/src/vespa/eval/instruction/generic_cell_cast.cpp b/eval/src/vespa/eval/instruction/generic_cell_cast.cpp
index 16f47a56222..2ee0245a978 100644
--- a/eval/src/vespa/eval/instruction/generic_cell_cast.cpp
+++ b/eval/src/vespa/eval/instruction/generic_cell_cast.cpp
@@ -5,6 +5,7 @@
#include <vespa/eval/eval/wrap_param.h>
#include <vespa/vespalib/util/stash.h>
#include <vespa/vespalib/util/typify.h>
+#include <vespa/vespalib/hwaccelrated/iaccelrated.h>
#include <cassert>
using namespace vespalib::eval::tensor_function;
@@ -16,6 +17,8 @@ using Instruction = InterpretedFunction::Instruction;
namespace {
+using hwaccelrated::IAccelrated;
+
template <typename ICT, typename OCT>
void my_generic_cell_cast_op(State &state, uint64_t param_in) {
const auto &res_type = unwrap_param<ValueType>(param_in);
@@ -31,6 +34,19 @@ void my_generic_cell_cast_op(State &state, uint64_t param_in) {
state.pop_push(result_ref);
}
+template <>
+void my_generic_cell_cast_op<BFloat16, float>(State &state, uint64_t param_in) {
+ const auto &res_type = unwrap_param<ValueType>(param_in);
+ const Value &a = state.peek(0);
+ auto input_cells = a.cells().typify<BFloat16>();
+ auto output_cells = state.stash.create_uninitialized_array<float>(input_cells.size());
+ static const IAccelrated & accelerator = IAccelrated::getAccelerator();
+ accelerator.convert_bfloat16_to_float(reinterpret_cast<const uint16_t *>(input_cells.begin()),
+ output_cells.data(), output_cells.size());
+ Value &result_ref = state.stash.create<ValueView>(res_type, a.index(), TypedCells(output_cells));
+ state.pop_push(result_ref);
+}
+
struct SelectGenericCellCastOp {
template <typename ICT, typename OCT>
static InterpretedFunction::op_function invoke() {
@@ -60,7 +76,7 @@ GenericCellCast::make_instruction(const ValueType &result_type,
assert(!input_type.is_double());
auto &param = stash.create<ValueType>(result_type);
auto op = typify_invoke<2,TypifyCellType,SelectGenericCellCastOp>(from, to);
- return Instruction(op, wrap_param<ValueType>(param));
+ return {op, wrap_param<ValueType>(param)};
}
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index e420f12c985..a403adeb756 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -48,14 +48,6 @@ public class Flags {
private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
- public static final UnboundBooleanFlag ATHENZ_SERVICE_ACCOUNTS = defineFeatureFlag(
- "athenz-service-accounts", false,
- List.of("hakonhall"), "2024-05-06", "2024-07-06",
- "Whether to provision new GCP VM instances with a service account that are independent " +
- "of the zone, and aligned with the Athenz service names (configserver and tenant-host).",
- "Takes effect when provisioning new VM instances",
- APPLICATION, INSTANCE_ID);
-
public static final UnboundDoubleFlag DEFAULT_TERM_WISE_LIMIT = defineDoubleFlag(
"default-term-wise-limit", 1.0,
List.of("baldersheim"), "2020-12-02", "2024-12-31",
@@ -208,7 +200,7 @@ public class Flags {
public static final UnboundIntFlag MAX_ACTIVATION_INHIBITED_OUT_OF_SYNC_GROUPS = defineIntFlag(
"max-activation-inhibited-out-of-sync-groups", 0,
- List.of("vekterli"), "2021-02-19", "2024-06-01",
+ List.of("vekterli"), "2021-02-19", "2025-02-01",
"Allows replicas in up to N content groups to not be activated " +
"for query visibility if they are out of sync with a majority of other replicas",
"Takes effect at redeployment",
@@ -216,7 +208,7 @@ public class Flags {
public static final UnboundDoubleFlag MIN_NODE_RATIO_PER_GROUP = defineDoubleFlag(
"min-node-ratio-per-group", 0.0,
- List.of("geirst", "vekterli"), "2021-07-16", "2024-06-01",
+ List.of("geirst", "vekterli"), "2021-07-16", "2025-02-01",
"Minimum ratio of nodes that have to be available (i.e. not Down) in any hierarchic content cluster group for the group to be Up",
"Takes effect at redeployment",
INSTANCE_ID);
@@ -239,13 +231,6 @@ public class Flags {
"Takes effect on next tick.",
NODE_TYPE);
- public static final UnboundStringFlag DIST_HOST = defineStringFlag(
- "dist-host", "",
- List.of("freva"), "2024-04-15", "2024-05-31",
- "Sets dist_host YUM variable, empty means old behavior. Only effective in Public.",
- "Provisioning of instance or next host-admin tick",
- HOSTNAME, NODE_TYPE, CLOUD_ACCOUNT);
-
public static final UnboundBooleanFlag ENABLED_HORIZON_DASHBOARD = defineFeatureFlag(
"enabled-horizon-dashboard", false,
List.of("olaa"), "2021-09-13", "2024-09-01",
@@ -314,7 +299,7 @@ public class Flags {
public static final UnboundStringFlag CORE_ENCRYPTION_PUBLIC_KEY_ID = defineStringFlag(
"core-encryption-public-key-id", "",
- List.of("vekterli"), "2022-11-03", "2024-06-01",
+ List.of("vekterli"), "2022-11-03", "2025-02-01",
"Specifies which public key to use for core dump encryption.",
"Takes effect on the next tick.",
NODE_TYPE, HOSTNAME);
@@ -332,14 +317,14 @@ public class Flags {
public static final UnboundBooleanFlag SORT_BLUEPRINTS_BY_COST = defineFeatureFlag(
"sort-blueprints-by-cost", false,
- List.of("baldersheim"), "2023-12-19", "2024-05-31",
+ List.of("baldersheim"), "2023-12-19", "2024-10-31",
"If true blueprints are sorted based on cost estimate, rather that absolute estimated hits",
"Takes effect at redeployment",
INSTANCE_ID);
public static final UnboundBooleanFlag ALWAYS_MARK_PHRASE_EXPENSIVE = defineFeatureFlag(
"always-mark-phrase-expensive", false,
- List.of("baldersheim"), "2023-11-20", "2024-05-31",
+ List.of("baldersheim"), "2023-11-20", "2024-10-31",
"If true all phrases will be marked expensive, independent of parents",
"Takes effect at redeployment",
INSTANCE_ID);
@@ -379,7 +364,7 @@ public class Flags {
public static final UnboundIntFlag CONTENT_LAYER_METADATA_FEATURE_LEVEL = defineIntFlag(
"content-layer-metadata-feature-level", 0,
- List.of("vekterli"), "2022-09-12", "2024-06-01",
+ List.of("vekterli"), "2022-09-12", "2024-12-01",
"Value semantics: 0) legacy behavior, 1) operation cancellation, 2) operation " +
"cancellation and ephemeral content node sequence numbers for bucket replicas",
"Takes effect at redeployment",
@@ -401,7 +386,7 @@ public class Flags {
public static final UnboundStringFlag ENDPOINT_CONFIG = defineStringFlag(
"endpoint-config", "legacy",
- List.of("mpolden", "tokle"), "2023-10-06", "2024-06-01",
+ List.of("mpolden", "tokle"), "2023-10-06", "2024-09-01",
"Set the endpoint config to use for an application. Must be 'legacy', 'combined' or 'generated'. See EndpointConfig for further details",
"Takes effect on next deployment through controller",
TENANT_ID, APPLICATION, INSTANCE_ID);
@@ -445,12 +430,6 @@ public class Flags {
"Whether logserver container should run otel agent",
"Takes effect at redeployment", INSTANCE_ID);
- public static UnboundBooleanFlag ENCRYPT_DISK = defineFeatureFlag(
- "encrypt-disk", true,
- List.of("hmusum"), "2024-04-29", "2024-07-01",
- "Whether to encrypt disk when provisioning new hosts",
- "Will be read only on boot.");
-
public static UnboundBooleanFlag HUBSPOT_SYNC_TENANTS = defineFeatureFlag(
"hubspot-sync-tenants", false,
List.of("bjorncs"), "2024-05-07", "2025-01-01",
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index f200940e52d..1b1e74c0b1e 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -196,8 +196,8 @@ public class PermanentFlags {
);
public static final UnboundBooleanFlag JVM_OMIT_STACK_TRACE_IN_FAST_THROW = defineFeatureFlag(
- "jvm-omit-stack-trace-in-fast-throw", true,
- "Controls JVM option OmitStackTraceInFastThrow (default feature flag value is true, which is the default JVM option value as well)",
+ "jvm-omit-stack-trace-in-fast-throw", false,
+ "Controls JVM option OmitStackTraceInFastThrow (the default JVM option corresponds to feature flag being false )",
"takes effect on JVM restart",
CLUSTER_TYPE, INSTANCE_ID);
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/misc/User.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/misc/User.java
index 607e3482c3d..616b9df4bd8 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/misc/User.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/misc/User.java
@@ -35,6 +35,7 @@ public record User(String email, String name, String nickname, String picture, b
}
public static Builder builder() { return new Builder(); }
+ public static Builder builder(User u) { return new Builder(u); }
public static class Builder {
private String email;
@@ -48,6 +49,17 @@ public record User(String email, String name, String nickname, String picture, b
private Builder() {}
+ private Builder(User u) {
+ email = u.email;
+ name = u.name;
+ nickname = u.nickname;
+ picture = u.picture;
+ isVerified = u.isVerified;
+ loginCount = u.loginCount;
+ lastLogin = u.lastLogin;
+ extraAttributes.putAll(u.extraAttributes);
+ }
+
public Builder email(String email) { this.email = email; return this; }
public Builder name(String name) { this.name = name; return this; }
public Builder nickname(String nickname) { this.nickname = nickname; return this; }
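A brief sketch of what the new copying builder enables (the build() terminal method is assumed; it is outside this hunk):

    User original = User.builder().email("user@example.com").name("Original Name").build();
    User renamed  = User.builder(original).name("New Name").build();   // every other field is copied from 'original'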
diff --git a/maven-plugins/allowed-maven-dependencies.txt b/maven-plugins/allowed-maven-dependencies.txt
index 6245caf1d92..80cd3c47a58 100644
--- a/maven-plugins/allowed-maven-dependencies.txt
+++ b/maven-plugins/allowed-maven-dependencies.txt
@@ -32,7 +32,7 @@ org.apache.maven.resolver:maven-resolver-impl:${maven-resolver.vespa.version}
org.apache.maven.resolver:maven-resolver-named-locks:${maven-resolver.vespa.version}
org.apache.maven.resolver:maven-resolver-spi:${maven-resolver.vespa.version}
org.apache.maven.resolver:maven-resolver-util:${maven-resolver.vespa.version}
-org.apache.maven.shared:maven-dependency-tree:3.2.1
+org.apache.maven.shared:maven-dependency-tree:${maven-dependency-tree.vespa.version}
org.apache.maven.shared:maven-shared-utils:${maven-shared-utils.vespa.version}
org.apache.maven:maven-api-meta:${maven-xml-impl.vespa.version}
org.apache.maven:maven-api-xml:${maven-xml-impl.vespa.version}
@@ -59,8 +59,6 @@ org.codehaus.plexus:plexus-sec-dispatcher:2.0
org.codehaus.plexus:plexus-utils:${plexus-utils.vespa.version}
org.codehaus.plexus:plexus-xml:${plexus-xml.vespa.version}
org.codehaus.woodstox:stax2-api:${stax2-api.vespa.version}
-org.eclipse.aether:aether-api:1.0.0.v20140518
-org.eclipse.aether:aether-util:1.0.0.v20140518
org.eclipse.sisu:org.eclipse.sisu.inject:${eclipse-sisu.vespa.version}
org.eclipse.sisu:org.eclipse.sisu.plexus:${eclipse-sisu.vespa.version}
org.hamcrest:hamcrest-core:${hamcrest.vespa.version}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/MetricsJsonResponse.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/MetricsJsonResponse.java
new file mode 100644
index 00000000000..b927db790b2
--- /dev/null
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/MetricsJsonResponse.java
@@ -0,0 +1,31 @@
+package ai.vespa.metricsproxy.http;
+
+import com.yahoo.container.jdisc.HttpResponse;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.function.Consumer;
+
+/**
+ * @author jonmv
+ */
+public class MetricsJsonResponse extends HttpResponse {
+
+ private final Consumer<OutputStream> modelWriter;
+
+ public MetricsJsonResponse(int status, Consumer<OutputStream> modelWriter) {
+ super(status);
+ this.modelWriter = modelWriter;
+ }
+
+ @Override
+ public void render(OutputStream outputStream) throws IOException {
+ modelWriter.accept(outputStream);
+ }
+
+ @Override
+ public long maxPendingBytes() {
+ return 1 << 20;
+ }
+
+}
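A minimal sketch of the streaming pattern this response type enables, mirroring the handler changes further down; OK and toGenericJsonModel(..) are the handlers' own identifiers and 'metrics' is assumed:

    // Serialize straight to the response output stream instead of building an intermediate String
    HttpResponse response = new MetricsJsonResponse(OK, toGenericJsonModel(metrics)::serialize);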
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/PrometheusResponse.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/PrometheusResponse.java
new file mode 100644
index 00000000000..e0c74671c9c
--- /dev/null
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/PrometheusResponse.java
@@ -0,0 +1,35 @@
+package ai.vespa.metricsproxy.http;
+
+import ai.vespa.metricsproxy.metric.model.prometheus.PrometheusModel;
+import com.yahoo.container.jdisc.HttpResponse;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+
+/**
+ * @author jonmv
+ */
+public class PrometheusResponse extends HttpResponse {
+
+ private final PrometheusModel model;
+
+ public PrometheusResponse(int status, PrometheusModel model) {
+ super(status);
+ this.model = model;
+ }
+
+ @Override
+ public void render(OutputStream outputStream) throws IOException {
+ Writer writer = new OutputStreamWriter(outputStream);
+ model.serialize(writer);
+ writer.flush();
+ }
+
+ @Override
+ public long maxPendingBytes() {
+ return 1 << 20;
+ }
+
+}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandler.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandler.java
index 58b51020bb9..ace0d0abc65 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandler.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandler.java
@@ -3,7 +3,8 @@
package ai.vespa.metricsproxy.http.application;
import ai.vespa.metricsproxy.core.MetricsConsumers;
-import ai.vespa.metricsproxy.http.TextResponse;
+import ai.vespa.metricsproxy.http.MetricsJsonResponse;
+import ai.vespa.metricsproxy.http.PrometheusResponse;
import ai.vespa.metricsproxy.metric.model.ConsumerId;
import ai.vespa.metricsproxy.metric.model.DimensionId;
import ai.vespa.metricsproxy.metric.model.MetricsPacket;
@@ -62,12 +63,12 @@ public class ApplicationMetricsHandler extends HttpHandlerBase {
return Optional.empty();
}
- private JsonResponse applicationMetricsResponse(String requestedConsumer) {
+ private HttpResponse applicationMetricsResponse(String requestedConsumer) {
try {
ConsumerId consumer = getConsumerOrDefault(requestedConsumer, metricsConsumers);
var metricsByNode = metricsRetriever.getMetrics(consumer);
- return new JsonResponse(OK, toGenericApplicationModel(metricsByNode).serialize());
+ return new MetricsJsonResponse(OK, toGenericApplicationModel(metricsByNode)::serialize);
} catch (Exception e) {
log.log(Level.WARNING, "Got exception when retrieving metrics:", e);
@@ -75,7 +76,7 @@ public class ApplicationMetricsHandler extends HttpHandlerBase {
}
}
- private TextResponse applicationPrometheusResponse(String requestedConsumer) {
+ private HttpResponse applicationPrometheusResponse(String requestedConsumer) {
ConsumerId consumer = getConsumerOrDefault(requestedConsumer, metricsConsumers);
var metricsByNode = metricsRetriever.getMetrics(consumer);
@@ -87,7 +88,7 @@ public class ApplicationMetricsHandler extends HttpHandlerBase {
.map(builder -> builder.putDimension(DimensionId.toDimensionId("hostname"), element.hostname))
.map(MetricsPacket.Builder::build))
.toList();
- return new TextResponse(200, toPrometheusModel(metricsForAllNodes).serialize());
+ return new PrometheusResponse(200, toPrometheusModel(metricsForAllNodes));
}
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV1Handler.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV1Handler.java
index 3e4565c780b..50c1420edef 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV1Handler.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV1Handler.java
@@ -3,6 +3,7 @@ package ai.vespa.metricsproxy.http.metrics;
import ai.vespa.metricsproxy.core.MetricsConsumers;
import ai.vespa.metricsproxy.core.MetricsManager;
+import ai.vespa.metricsproxy.http.MetricsJsonResponse;
import ai.vespa.metricsproxy.http.ValuesFetcher;
import ai.vespa.metricsproxy.metric.model.MetricsPacket;
import ai.vespa.metricsproxy.service.VespaServices;
@@ -51,10 +52,10 @@ public class MetricsV1Handler extends HttpHandlerBase {
return Optional.empty();
}
- private JsonResponse valuesResponse(String consumer) {
+ private HttpResponse valuesResponse(String consumer) {
try {
List<MetricsPacket> metrics = valuesFetcher.fetch(consumer);
- return new JsonResponse(OK, toGenericJsonModel(metrics).serialize());
+ return new MetricsJsonResponse(OK, toGenericJsonModel(metrics)::serialize);
} catch (Exception e) {
log.log(Level.WARNING, "Got exception when rendering metrics:", e);
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getMessage());
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV2Handler.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV2Handler.java
index 1f6cc17b2e1..7e9bba466df 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV2Handler.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/metrics/MetricsV2Handler.java
@@ -3,6 +3,7 @@ package ai.vespa.metricsproxy.http.metrics;
import ai.vespa.metricsproxy.core.MetricsConsumers;
import ai.vespa.metricsproxy.core.MetricsManager;
+import ai.vespa.metricsproxy.http.MetricsJsonResponse;
import ai.vespa.metricsproxy.http.ValuesFetcher;
import ai.vespa.metricsproxy.http.application.ClusterIdDimensionProcessor;
import ai.vespa.metricsproxy.http.application.Node;
@@ -62,7 +63,7 @@ public class MetricsV2Handler extends HttpHandlerBase {
return Optional.empty();
}
- private JsonResponse valuesResponse(String consumer) {
+ private HttpResponse valuesResponse(String consumer) {
try {
List<MetricsPacket> metrics = processAndBuild(valuesFetcher.fetchMetricsAsBuilders(consumer),
new ServiceIdDimensionProcessor(),
@@ -71,7 +72,7 @@ public class MetricsV2Handler extends HttpHandlerBase {
Node localNode = new Node(nodeInfoConfig.role(), nodeInfoConfig.hostname(), 0, "");
Map<Node, List<MetricsPacket>> metricsByNode = Map.of(localNode, metrics);
- return new JsonResponse(OK, toGenericApplicationModel(metricsByNode).serialize());
+ return new MetricsJsonResponse(OK, toGenericApplicationModel(metricsByNode)::serialize);
} catch (Exception e) {
log.log(Level.WARNING, "Got exception when rendering metrics:", e);
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getMessage());
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandler.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandler.java
index d73561b5eff..e609b54b916 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandler.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/prometheus/PrometheusHandler.java
@@ -3,6 +3,7 @@ package ai.vespa.metricsproxy.http.prometheus;
import ai.vespa.metricsproxy.core.MetricsConsumers;
import ai.vespa.metricsproxy.core.MetricsManager;
+import ai.vespa.metricsproxy.http.PrometheusResponse;
import ai.vespa.metricsproxy.http.TextResponse;
import ai.vespa.metricsproxy.http.ValuesFetcher;
import ai.vespa.metricsproxy.metric.dimensions.ApplicationDimensions;
@@ -56,11 +57,11 @@ public class PrometheusHandler extends HttpHandlerBase {
return Optional.empty();
}
- private TextResponse valuesResponse(String consumer) {
+ private HttpResponse valuesResponse(String consumer) {
try {
List<MetricsPacket> metrics = new ArrayList<>(valuesFetcher.fetch(consumer));
metrics.addAll(nodeMetricGatherer.gatherMetrics());
- return new TextResponse(OK, toPrometheusModel(metrics).serialize());
+ return new PrometheusResponse(OK, toPrometheusModel(metrics));
} catch (Exception e) {
log.log(Level.WARNING, "Got exception when rendering metrics:", e);
return new TextResponse(INTERNAL_SERVER_ERROR, e.getMessage());
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericApplicationModel.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericApplicationModel.java
index 59d1f9e5c83..105724d17f8 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericApplicationModel.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericApplicationModel.java
@@ -5,10 +5,13 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.OutputStream;
import java.util.List;
import static com.fasterxml.jackson.annotation.JsonInclude.Include.NON_ABSENT;
+import static java.nio.charset.StandardCharsets.UTF_8;
/**
* @author gjoranv
@@ -21,8 +24,14 @@ public class GenericApplicationModel {
public List<GenericJsonModel> nodes;
public String serialize() {
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ serialize(out);
+ return out.toString(UTF_8);
+ }
+
+ public void serialize(OutputStream out) {
try {
- return JacksonUtil.objectMapper().writeValueAsString(this);
+ JacksonUtil.objectMapper().writeValue(out, this);
} catch (IOException e) {
throw new JsonRenderingException("Could not render application nodes. Check the log for details.", e);
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericJsonModel.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericJsonModel.java
index c4ba0f39a18..54616dff759 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericJsonModel.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/GenericJsonModel.java
@@ -6,10 +6,13 @@ import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.OutputStream;
import java.util.List;
import static com.fasterxml.jackson.annotation.JsonInclude.Include.NON_ABSENT;
+import static java.nio.charset.StandardCharsets.UTF_8;
/**
* @author gjoranv
@@ -32,8 +35,14 @@ public class GenericJsonModel {
public List<GenericService> services;
public String serialize() {
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ serialize(out);
+ return out.toString(UTF_8);
+ }
+
+ public void serialize(OutputStream out) {
try {
- return JacksonUtil.objectMapper().writeValueAsString(this);
+ JacksonUtil.objectMapper().writeValue(out, this);
} catch (IOException e) {
throw new JsonRenderingException("Could not render metrics. Check the log for details.", e);
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusModel.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusModel.java
index fd00ad67ec0..0f3878821f3 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusModel.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusModel.java
@@ -11,6 +11,7 @@ import io.prometheus.client.Collector.MetricFamilySamples.Sample;
import io.prometheus.client.exporter.common.TextFormat;
import java.io.StringWriter;
+import java.io.Writer;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
@@ -50,12 +51,16 @@ public class PrometheusModel implements Enumeration<MetricFamilySamples> {
public String serialize() {
var writer = new StringWriter();
+ serialize(writer);
+ return writer.toString();
+ }
+
+ public void serialize(Writer writer) {
try {
TextFormat.write004(writer, this);
} catch (Exception e) {
throw new PrometheusRenderingException("Could not render metrics. Check the log for details.", e);
}
- return writer.toString();
}
private MetricFamilySamples createMetricFamily(MetricId metricId) {
@@ -70,6 +75,7 @@ public class PrometheusModel implements Enumeration<MetricFamilySamples> {
}));
return new MetricFamilySamples(metricId.getIdForPrometheus(), Collector.Type.UNKNOWN, "", sampleList);
}
+
private static Sample createSample(ServiceId serviceId, MetricId metricId, Number metric,
Long timeStamp, Map<DimensionId, String> dimensions)
{
diff --git a/metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java
index da66d453125..801740c9cf5 100644
--- a/metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java
@@ -13,7 +13,7 @@ public enum ClusterControllerMetrics implements VespaMetrics {
STOPPING_COUNT("cluster-controller.stopping.count", Unit.NODE, "Number of content nodes currently stopping"),
UP_COUNT("cluster-controller.up.count", Unit.NODE, "Number of content nodes up"),
CLUSTER_STATE_CHANGE_COUNT("cluster-controller.cluster-state-change.count", Unit.NODE, "Number of nodes changing state"),
- CLUSTER_BUCKETS_OUT_OF_SYNC_RATIO("cluster-buckets-out-of-sync-ratio", Unit.FRACTION, "Ratio of buckets in the cluster currently in need of syncing"),
+ CLUSTER_BUCKETS_OUT_OF_SYNC_RATIO("cluster-controller.cluster-buckets-out-of-sync-ratio", Unit.FRACTION, "Ratio of buckets in the cluster currently in need of syncing"),
BUSY_TICK_TIME_MS("cluster-controller.busy-tick-time-ms", Unit.MILLISECOND, "Time busy"),
IDLE_TICK_TIME_MS("cluster-controller.idle-tick-time-ms", Unit.MILLISECOND, "Time idle"),
WORK_MS("cluster-controller.work-ms", Unit.MILLISECOND, "Time used for actual work"),
diff --git a/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java b/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
index 4d91cc1d989..8718dc0076a 100644
--- a/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
@@ -39,8 +39,8 @@ public enum StorageMetrics implements VespaMetrics {
VDS_FILESTOR_ALLTHREADS_MERGEBUCKETS_FAILED("vds.filestor.allthreads.mergebuckets.failed", Unit.REQUEST, "Number of failed requests."),
VDS_FILESTOR_ALLTHREADS_MERGEBUCKETS_LATENCY("vds.filestor.allthreads.mergebuckets.latency", Unit.MILLISECOND, "Latency of successful requests."),
VDS_FILESTOR_ALLTHREADS_MERGELATENCYTOTAL("vds.filestor.allthreads.mergelatencytotal", Unit.MILLISECOND, "Latency of total merge operation, from master node receives it, until merge is complete and master node replies."),
- VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY("vds.filestor.allthreads.put_latency", Unit.MILLISECOND, "Latency of individual puts that are part of merge operations"), // TODO Vespa 9: Update metric name to include 'merge'
- VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY("vds.filestor.allthreads.remove_latency", Unit.MILLISECOND, "Latency of individual removes that are part of merge operations"), // TODO Vespa 9: Update metric name to include 'merge'
+ VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY("vds.filestor.allthreads.merge_put_latency", Unit.MILLISECOND, "Latency of individual puts that are part of merge operations"),
+ VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY("vds.filestor.allthreads.merge_remove_latency", Unit.MILLISECOND, "Latency of individual removes that are part of merge operations"),
VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES("vds.filestor.allstripes.throttled_rpc_direct_dispatches", Unit.INSTANCE, "Number of times an RPC thread could not directly dispatch an async operation directly to Proton because it was disallowed by the throttle policy"),
VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS("vds.filestor.allstripes.throttled_persistence_thread_polls", Unit.INSTANCE, "Number of times a persistence thread could not immediately dispatch a queued async operation because it was disallowed by the throttle policy"),
VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN("vds.filestor.allstripes.timeouts_waiting_for_throttle_token", Unit.INSTANCE, "Number of times a persistence thread timed out waiting for an available throttle policy token"),
diff --git a/model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java b/model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java
index fd02756e2ea..bbb82db7139 100644
--- a/model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java
+++ b/model-integration/src/main/java/ai/vespa/llm/clients/LocalLLM.java
@@ -114,6 +114,8 @@ public class LocalLLM extends AbstractComponent implements LanguageModel {
options.ifPresent("repeatpenalty", (v) -> inferParams.setRepeatPenalty(Float.parseFloat(v)));
// Todo: more options?
+ inferParams.setUseChatTemplate(true);
+
var completionFuture = new CompletableFuture<Completion.FinishReason>();
var hasStarted = new AtomicBoolean(false);
try {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
index 79e709e7bae..27216695584 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
@@ -48,8 +48,11 @@ public class AllocatableResources {
}
public AllocatableResources(NodeList nodes, NodeRepository nodeRepository) {
+ if (nodes.isEmpty()) {
+ throw new IllegalArgumentException("Expected a non-empty node list");
+ }
this.nodes = nodes.size();
- this.groups = (int)nodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
+ this.groups = (int) nodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
this.realResources = averageRealResourcesOf(nodes.asList(), nodeRepository); // Average since we average metrics over nodes
this.advertisedResources = nodes.requestedResources();
this.clusterSpec = nodes.clusterSpec();
@@ -82,19 +85,6 @@ public class AllocatableResources {
this.fulfilment = fulfilment;
}
- /** Returns this with the redundant node or group removed from counts. */
- public AllocatableResources withoutRedundancy() {
- int groupSize = nodes / groups;
- int nodesAdjustedForRedundancy = nodes > 1 ? (groups == 1 ? nodes - 1 : nodes - groupSize) : nodes;
- int groupsAdjustedForRedundancy = nodes > 1 ? (groups == 1 ? 1 : groups - 1) : groups;
- return new AllocatableResources(nodesAdjustedForRedundancy,
- groupsAdjustedForRedundancy,
- realResources,
- advertisedResources,
- clusterSpec,
- fulfilment);
- }
-
/**
* Returns the resources which will actually be available per node in this cluster with this allocation.
* These should be used for reasoning about allocation to meet measured demand.
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
index 5c9c5fe30d7..471b9d0122f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
@@ -91,6 +91,7 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
Cluster unchangedCluster = cluster;
NodeList clusterNodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId);
+ if (clusterNodes.isEmpty()) return true; // Cluster was removed since we started
cluster = updateCompletion(cluster, clusterNodes);
var current = new AllocatableResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources();
diff --git a/parent/pom.xml b/parent/pom.xml
index eec97060a88..2f7eabab3e4 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -915,7 +915,7 @@
<dependency>
<groupId>org.apache.maven.shared</groupId>
<artifactId>maven-dependency-tree</artifactId>
- <version>3.2.1</version>
+ <version>${maven-dependency-tree.vespa.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven.shared</groupId>
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_params.h b/searchcore/src/vespa/searchcore/proton/matching/match_params.h
index 5b58c11b7e1..f9dd55e7bb1 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_params.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_params.h
@@ -27,7 +27,7 @@ struct MatchParams {
uint32_t hits_in,
bool hasFinalRank,
bool needRanking=true);
- bool save_rank_scores() const { return ((heapSize + arraySize) != 0); }
+ bool save_rank_scores() const noexcept { return (arraySize != 0); }
bool has_rank_drop_limit() const;
};
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp
index 211e67f1e2b..6b443231c0a 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_thread.cpp
@@ -367,7 +367,7 @@ MatchThread::findMatches(MatchTools &tools)
tools.give_back_search(ProfiledIterator::profile(*match_profiler, tools.borrow_search()));
tools.tag_search_as_changed();
}
- HitCollector hits(matchParams.numDocs, matchParams.arraySize);
+ HitCollector hits(matchParams.numDocs, match_with_ranking ? matchParams.arraySize : 0);
trace->addEvent(4, "Start match and first phase rank");
/**
* All, or none of the threads in the bundle must execute the match loop.
@@ -380,7 +380,7 @@ MatchThread::findMatches(MatchTools &tools)
secondPhase(tools, hits);
}
trace->addEvent(4, "Create result set");
- return hits.getResultSet(fallback_rank_value());
+ return hits.getResultSet();
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
index ff64ece4494..66648dda8c6 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
@@ -275,11 +275,13 @@ MatchToolsFactory::createTask(vespalib::stringref attribute, vespalib::stringref
? std::make_unique<AttributeOperationTask>(_requestContext, attribute, operation)
: std::unique_ptr<AttributeOperationTask>();
}
+
std::unique_ptr<AttributeOperationTask>
MatchToolsFactory::createOnMatchTask() const {
const auto & op = _rankSetup.getMutateOnMatch();
return createTask(op._attribute, op._operation);
}
+
std::unique_ptr<AttributeOperationTask>
MatchToolsFactory::createOnFirstPhaseTask() const {
const auto & op = _rankSetup.getMutateOnFirstPhase();
@@ -292,6 +294,7 @@ MatchToolsFactory::createOnFirstPhaseTask() const {
return createTask(op._attribute, op._operation);
}
}
+
std::unique_ptr<AttributeOperationTask>
MatchToolsFactory::createOnSecondPhaseTask() const {
const auto & op = _rankSetup.getMutateOnSecondPhase();
@@ -302,6 +305,7 @@ MatchToolsFactory::createOnSecondPhaseTask() const {
return createTask(op._attribute, op._operation);
}
}
+
std::unique_ptr<AttributeOperationTask>
MatchToolsFactory::createOnSummaryTask() const {
const auto & op = _rankSetup.getMutateOnSummary();
diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index 570bffa59c2..43e17417c51 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -60,6 +60,7 @@ vespa_define_module(
src/apps/vespa-attribute-inspect
src/apps/vespa-fileheader-inspect
src/apps/vespa-index-inspect
+ src/apps/vespa-query-analyzer
src/apps/vespa-ranking-expression-analyzer
TESTS
diff --git a/searchlib/src/apps/vespa-query-analyzer/.gitignore b/searchlib/src/apps/vespa-query-analyzer/.gitignore
new file mode 100644
index 00000000000..e5a31caab09
--- /dev/null
+++ b/searchlib/src/apps/vespa-query-analyzer/.gitignore
@@ -0,0 +1,3 @@
+/.depend
+/Makefile
+/vespa-query-analyzer
diff --git a/searchlib/src/apps/vespa-query-analyzer/CMakeLists.txt b/searchlib/src/apps/vespa-query-analyzer/CMakeLists.txt
new file mode 100644
index 00000000000..f84a413ee70
--- /dev/null
+++ b/searchlib/src/apps/vespa-query-analyzer/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchlib_vespa-query-analyzer_app
+ SOURCES
+ vespa-query-analyzer.cpp
+ OUTPUT_NAME vespa-query-analyzer
+ INSTALL bin
+ DEPENDS
+ searchlib
+)
diff --git a/searchlib/src/apps/vespa-query-analyzer/vespa-query-analyzer.cpp b/searchlib/src/apps/vespa-query-analyzer/vespa-query-analyzer.cpp
new file mode 100644
index 00000000000..178c09c02ac
--- /dev/null
+++ b/searchlib/src/apps/vespa-query-analyzer/vespa-query-analyzer.cpp
@@ -0,0 +1,361 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/data/simple_buffer.h>
+#include <vespa/vespalib/data/slime/json_format.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/io/mapped_file_input.h>
+#include <vespa/vespalib/util/overload.h>
+#include <vespa/vespalib/util/signalhandler.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/searchlib/queryeval/flow.h>
+#include <variant>
+#include <vector>
+#include <map>
+
+using namespace vespalib::slime::convenience;
+using vespalib::make_string_short::fmt;
+using vespalib::slime::JsonFormat;
+using vespalib::slime::ARRAY;
+using vespalib::slime::OBJECT;
+using vespalib::slime::STRING;
+using vespalib::slime::DOUBLE;
+using vespalib::slime::BOOL;
+using search::queryeval::FlowStats;
+using search::queryeval::InFlow;
+
+//-----------------------------------------------------------------------------
+
+using Path = std::vector<std::variant<size_t,vespalib::stringref>>;
+using Paths = std::vector<Path>;
+
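+// Matcher: traverses a Slime structure recursively and records the path to
+// every value for which the supplied predicate returns true.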
+template <typename F>
+struct Matcher : vespalib::slime::ObjectTraverser {
+ Path path;
+ Paths result;
+ F match;
+ ~Matcher();
+ Matcher(F match_in) noexcept : path(), result(), match(match_in) {}
+ void search(const Inspector &node) {
+ if (path.empty() && match(path, node)) {
+ result.push_back(path);
+ }
+ if (node.type() == OBJECT()) {
+ node.traverse(*this);
+ }
+ if (node.type() == ARRAY()) {
+ size_t size = node.entries();
+ for (size_t i = 0; i < size; ++i) {
+ path.emplace_back(i);
+ if (match(path, node[i])) {
+ result.push_back(path);
+ }
+ search(node[i]);
+ path.pop_back();
+ }
+ }
+ }
+ void field(const Memory &symbol, const Inspector &inspector) final {
+ path.emplace_back(symbol.make_stringref());
+ if (match(path, inspector)) {
+ result.push_back(path);
+ }
+ search(inspector);
+ path.pop_back();
+ }
+};
+template <typename F> Matcher<F>::~Matcher() = default;
+
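+// find_field collects the paths of all fields with the given name;
+// find_tag collects the paths of all "tag" fields whose string value equals the given name.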
+std::vector<Path> find_field(const Inspector &root, const vespalib::string &name) {
+ auto matcher = Matcher([&](const Path &path, const Inspector &){
+ return ((path.size() > 0) &&
+ (std::holds_alternative<vespalib::stringref>(path.back())) &&
+ (std::get<vespalib::stringref>(path.back()) == name));
+ });
+ matcher.search(root);
+ return matcher.result;
+}
+
+std::vector<Path> find_tag(const Inspector &root, const vespalib::string &name) {
+ auto matcher = Matcher([&](const Path &path, const Inspector &value){
+ return ((path.size() > 0) &&
+ (std::holds_alternative<vespalib::stringref>(path.back())) &&
+ (std::get<vespalib::stringref>(path.back()) == "tag") &&
+ (value.asString().make_stringref() == name));
+ });
+ matcher.search(root);
+ return matcher.result;
+}
+
+vespalib::string path_to_str(const Path &path) {
+ size_t cnt = 0;
+ vespalib::string str("[");
+ for (const auto &item: path) {
+ if (cnt++ > 0) {
+ str.append(",");
+ }
+ std::visit(vespalib::overload{
+ [&str](size_t value)noexcept{ str.append(fmt("%zu", value)); },
+ [&str](vespalib::stringref value)noexcept{ str.append(value); }}, item);
+ }
+ str.append("]");
+ return str;
+}
+
+vespalib::string strip_name(vespalib::stringref name) {
+ auto end = name.find("<");
+ auto ns = name.rfind("::", end);
+ size_t begin = (ns > name.size()) ? 0 : ns + 2;
+ return name.substr(begin, end - begin);
+}
+
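+// Resolves a previously recorded path starting from 'node', following at most
+// 'max' path elements; a smaller 'max' addresses an ancestor of the matched value.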
+const Inspector &apply_path(const Inspector &node, const Path &path, size_t max = -1) {
+ size_t cnt = 0;
+ const Inspector *ptr = &node;
+ for (const auto &elem: path) {
+ if (cnt++ >= max) {
+ return *ptr;
+ }
+ if (std::holds_alternative<size_t>(elem)) {
+ ptr = &((*ptr)[std::get<size_t>(elem)]);
+ }
+ if (std::holds_alternative<vespalib::stringref>(elem)) {
+ auto ref = std::get<vespalib::stringref>(elem);
+ ptr = &((*ptr)[Memory(ref.data(), ref.size())]);
+ }
+ }
+ return *ptr;
+}
+
+void extract(vespalib::string &value, const Inspector &data) {
+ if (data.valid() && data.type() == STRING()) {
+ value = data.asString().make_stringref();
+ }
+}
+
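+// A single profiler sample. The sample name encodes the sample type as a
+// suffix (/init, /seek, /unpack or /termwise) and, when the name starts with
+// '/', the leading '/'-separated numbers give the path of child indexes into
+// the blueprint tree.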
+struct Sample {
+ enum class Type { INVALID, INIT, SEEK, UNPACK, TERMWISE };
+ Type type = Type::INVALID;
+ std::vector<size_t> path;
+ double self_time_ms = 0.0;
+ double total_time_ms = 0.0;
+ size_t count = 0;
+ Sample(const Inspector &sample) {
+ auto name = sample["name"].asString().make_stringref();
+ if (ends_with(name, "/init")) {
+ type = Type::INIT;
+ }
+ if (ends_with(name, "/seek")) {
+ type = Type::SEEK;
+ }
+ if (ends_with(name, "/unpack")) {
+ type = Type::UNPACK;
+ }
+ if (ends_with(name, "/termwise")) {
+ type = Type::TERMWISE;
+ }
+ if (starts_with(name, "/")) {
+ size_t child = 0;
+ for (size_t pos = 1; pos < name.size(); ++pos) {
+ char c = name[pos];
+ if (c == '/') {
+ path.push_back(child);
+ child = 0;
+ } else {
+ if (c < '0' || c > '9') {
+ break;
+ }
+ child = child * 10 + (c - '0');
+ }
+ }
+ }
+ self_time_ms = sample["self_time_ms"].asDouble();
+ total_time_ms = sample["total_time_ms"].asDouble();
+ count = sample["count"].asLong();
+ }
+ static vespalib::string type_to_str(Type type) {
+ switch(type) {
+ case Type::INVALID: return "<invalid>";
+ case Type::INIT: return "init";
+ case Type::SEEK: return "seek";
+ case Type::UNPACK: return "unpack";
+ case Type::TERMWISE: return "termwise";
+ }
+ abort();
+ }
+ static vespalib::string path_to_str(const std::vector<size_t> &path) {
+ vespalib::string result("/");
+ for (size_t elem: path) {
+ result += fmt("%zu/", elem);
+ }
+ return result;
+ }
+ vespalib::string to_string() const {
+ return fmt("type: %s, path: %s, count: %zu, total_time_ms: %g\n",
+ type_to_str(type).c_str(), path_to_str(path).c_str(), count, total_time_ms);
+ }
+};
+
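+// A node in the optimized blueprint tree; holds flow stats from query
+// planning and accumulates count/time from the profiler samples added to it.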
+struct Node {
+ vespalib::string type = "unknown";
+ bool strict = false;
+ FlowStats flow_stats = FlowStats(0.0, 0.0, 0.0);
+ InFlow in_flow = InFlow(0.0);
+ size_t count = 0;
+ double self_time_ms = 0.0;
+ double total_time_ms = 0.0;
+ std::vector<Node> children;
+ Node(const Inspector &obj) {
+ extract(type, obj["[type]"]);
+ type = strip_name(type);
+ strict = obj["strict"].asBool();
+ flow_stats.estimate = obj["relative_estimate"].asDouble();
+ flow_stats.cost = obj["cost"].asDouble();
+ flow_stats.strict_cost = obj["strict_cost"].asDouble();
+ const Inspector &list = obj["children"];
+ for (size_t i = 0; true; ++i) {
+ const Inspector &child = list[fmt("[%zu]", i)];
+ if (child.valid()) {
+ children.emplace_back(child);
+ } else {
+ break;
+ }
+ }
+ }
+ ~Node();
+ void add_sample(const Sample &sample) {
+ Node *node = this;
+ for (size_t child: sample.path) {
+ if (child < node->children.size()) {
+ node = &node->children[child];
+ } else {
+ fprintf(stderr, "... ignoring bad sample: %s\n", sample.to_string().c_str());
+ return;
+ }
+ }
+ node->count += sample.count;
+ node->self_time_ms += sample.self_time_ms;
+ node->total_time_ms += sample.total_time_ms;
+ }
+ void dump_line(size_t indent) const {
+ fprintf(stderr, "|%10zu ", count);
+ fprintf(stderr, "|%11.3f ", total_time_ms);
+ fprintf(stderr, "|%10.3f | ", self_time_ms);
+ for (size_t i = 0; i < indent; ++i) {
+ fprintf(stderr, " ");
+ }
+ fprintf(stderr, "%s\n", type.c_str());
+ for (const Node &child: children) {
+ child.dump_line(indent + 1);
+ }
+ }
+ void dump() const {
+ fprintf(stderr, "| count | total_time | self_time | structure\n");
+ fprintf(stderr, "+-----------+------------+-----------+-------------------------------\n");
+ dump_line(0);
+ fprintf(stderr, "+-----------+------------+-----------+-------------------------------\n");
+ }
+};
+Node::~Node() = default;
+
+void each_sample_list(const Inspector &list, auto f) {
+ for (size_t i = 0; i < list.entries(); ++i) {
+ f(Sample(list[i]));
+ each_sample_list(list[i]["children"], f);
+ }
+}
+
+void each_sample(const Inspector &prof, auto f) {
+ each_sample_list(prof["roots"], f);
+}
+
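+// For each search node in the result (identified by its distribution-key),
+// builds the blueprint tree from the optimized blueprint dump, attaches the
+// 'seek' samples from match phase tree profiling to the corresponding
+// blueprint nodes, and prints a per-node count/time table together with
+// per-sample-type time totals.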
+struct State {
+ void analyze(const Inspector &root) {
+ auto bp_list = find_field(root, "optimized");
+ for (const Path &path: bp_list) {
+ const Inspector &node = apply_path(root, path, path.size()-3);
+ const Inspector &key_field = node["distribution-key"];
+ if (key_field.valid()) {
+ int key = key_field.asLong();
+ Node data(apply_path(root, path));
+ auto prof_list = find_tag(node, "match_profiling");
+ double total_ms = 0.0;
+ std::map<Sample::Type,double> time_map;
+ for (const Path &prof_path: prof_list) {
+ const Inspector &prof = apply_path(node, prof_path, prof_path.size()-1);
+ if (prof["profiler"].asString().make_stringref() == "tree") {
+ total_ms += prof["total_time_ms"].asDouble();
+ each_sample(prof, [&](const Sample &sample) {
+ if (sample.type == Sample::Type::SEEK) {
+ data.add_sample(sample);
+ }
+ if (sample.path.empty()) {
+ time_map[sample.type] += sample.total_time_ms;
+ }
+ });
+ }
+ }
+ data.dump();
+ fprintf(stderr, "distribution key: %d, total_time_ms: %g\n", key, total_ms);
+ for (auto [type, time]: time_map) {
+ fprintf(stderr, "sample type %s used %g ms total\n", Sample::type_to_str(type).c_str(), time);
+ }
+ }
+ }
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+void usage(const char *self) {
+ fprintf(stderr, "usage: %s <json query result file>\n", self);
+ fprintf(stderr, " analyze query cost (planning vs profiling)\n");
+ fprintf(stderr, " query result must contain optimized blueprint dump\n");
+ fprintf(stderr, " query result must contain match phase tree profiling\n\n");
+}
+
+struct MyApp {
+ vespalib::string file_name;
+ bool parse_params(int argc, char **argv);
+ int main();
+};
+
+bool
+MyApp::parse_params(int argc, char **argv) {
+ if (argc != 2) {
+ return false;
+ }
+ file_name = argv[1];
+ return true;
+}
+
+int
+MyApp::main()
+{
+ vespalib::MappedFileInput file(file_name);
+ if (!file.valid()) {
+ fprintf(stderr, "could not read input file: '%s'\n",
+ file_name.c_str());
+ return 1;
+ }
+ Slime slime;
+ if(JsonFormat::decode(file, slime) == 0) {
+ fprintf(stderr, "file contains invalid json: '%s'\n",
+ file_name.c_str());
+ return 1;
+ }
+ State state;
+ state.analyze(slime.get());
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ MyApp my_app;
+ vespalib::SignalHandler::PIPE.ignore();
+ if (!my_app.parse_params(argc, argv)) {
+ usage(argv[0]);
+ return 1;
+ }
+ return my_app.main();
+}
+
+//-----------------------------------------------------------------------------
diff --git a/searchlib/src/tests/hitcollector/CMakeLists.txt b/searchlib/src/tests/hitcollector/CMakeLists.txt
index 5cedbcbd7e6..cc62dd82af4 100644
--- a/searchlib/src/tests/hitcollector/CMakeLists.txt
+++ b/searchlib/src/tests/hitcollector/CMakeLists.txt
@@ -4,6 +4,7 @@ vespa_add_executable(searchlib_hitcollector_test_app TEST
hitcollector_test.cpp
DEPENDS
searchlib
+ GTest::gtest
)
vespa_add_test(NAME searchlib_hitcollector_test_app COMMAND searchlib_hitcollector_test_app)
vespa_add_executable(searchlib_sorted_hit_sequence_test_app TEST
@@ -11,5 +12,6 @@ vespa_add_executable(searchlib_sorted_hit_sequence_test_app TEST
sorted_hit_sequence_test.cpp
DEPENDS
searchlib
+ GTest::gtest
)
vespa_add_test(NAME searchlib_sorted_hit_sequence_test_app COMMAND searchlib_sorted_hit_sequence_test_app)
diff --git a/searchlib/src/tests/hitcollector/hitcollector_test.cpp b/searchlib/src/tests/hitcollector/hitcollector_test.cpp
index e6e38181412..60daa571f1d 100644
--- a/searchlib/src/tests/hitcollector/hitcollector_test.cpp
+++ b/searchlib/src/tests/hitcollector/hitcollector_test.cpp
@@ -1,9 +1,9 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/searchlib/common/bitvector.h>
#include <vespa/searchlib/fef/fef.h>
#include <vespa/searchlib/queryeval/hitcollector.h>
+#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/log/log.h>
LOG_SETUP("hitcollector_test");
@@ -13,6 +13,8 @@ using namespace search::fef;
using namespace search::queryeval;
using ScoreMap = std::map<uint32_t, feature_t>;
+using DocidVector = std::vector<uint32_t>;
+using RankedHitVector = std::vector<RankedHit>;
using Ranges = std::pair<Scores, Scores>;
@@ -67,11 +69,11 @@ void checkResult(const ResultSet & rs, const std::vector<RankedHit> & exp)
if ( ! exp.empty()) {
const RankedHit * rh = rs.getArray();
ASSERT_TRUE(rh != nullptr);
- ASSERT_EQUAL(rs.getArrayUsed(), exp.size());
+ ASSERT_EQ(rs.getArrayUsed(), exp.size());
for (uint32_t i = 0; i < exp.size(); ++i) {
- EXPECT_EQUAL(rh[i].getDocId(), exp[i].getDocId());
- EXPECT_EQUAL(rh[i].getRank() + 1.0, exp[i].getRank() + 1.0);
+ EXPECT_EQ(rh[i].getDocId(), exp[i].getDocId());
+ EXPECT_DOUBLE_EQ(rh[i].getRank() + 64.0, exp[i].getRank() + 64.0);
}
} else {
ASSERT_TRUE(rs.getArray() == nullptr);
@@ -93,21 +95,24 @@ void checkResult(ResultSet & rs, BitVector * exp)
}
}
-void testAddHit(uint32_t numDocs, uint32_t maxHitsSize)
+void testAddHit(uint32_t numDocs, uint32_t maxHitsSize, const vespalib::string& label)
{
+ SCOPED_TRACE(label);
LOG(info, "testAddHit: no hits");
- { // no hits
+ {
+ SCOPED_TRACE("no hits");
HitCollector hc(numDocs, maxHitsSize);
std::vector<RankedHit> expRh;
std::unique_ptr<ResultSet> rs = hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, nullptr));
+ checkResult(*rs, expRh);
+ checkResult(*rs, nullptr);
}
LOG(info, "testAddHit: only ranked hits");
- { // only ranked hits
+ {
+ SCOPED_TRACE("only ranked hits");
HitCollector hc(numDocs, maxHitsSize);
std::vector<RankedHit> expRh;
@@ -121,12 +126,13 @@ void testAddHit(uint32_t numDocs, uint32_t maxHitsSize)
}
std::unique_ptr<ResultSet> rs = hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, nullptr));
+ checkResult(*rs, expRh);
+ checkResult(*rs, nullptr);
}
LOG(info, "testAddHit: both ranked hits and bit vector hits");
- { // both ranked hits and bit vector hits
+ {
+ SCOPED_TRACE("both ranked hits and bitvector hits");
HitCollector hc(numDocs, maxHitsSize);
std::vector<RankedHit> expRh;
BitVector::UP expBv(BitVector::create(numDocs));
@@ -144,14 +150,15 @@ void testAddHit(uint32_t numDocs, uint32_t maxHitsSize)
}
std::unique_ptr<ResultSet> rs = hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, expBv.get()));
+ checkResult(*rs, expRh);
+ checkResult(*rs, expBv.get());
}
}
-TEST("testAddHit") {
- TEST_DO(testAddHit(30, 10));
- TEST_DO(testAddHit(400, 10)); // 400/32 = 12 which is bigger than 10.
+TEST(HitCollectorTest, testAddHit)
+{
+ testAddHit(30, 10, "numDocs==30");
+ testAddHit(400, 10, "numDocs==400"); // 400/32 = 12 which is bigger than 10.
}
struct Fixture {
@@ -197,14 +204,17 @@ struct DescendingScoreFixture : Fixture {
DescendingScoreFixture::~DescendingScoreFixture() = default;
-TEST_F("testReRank - empty", Fixture) {
- EXPECT_EQUAL(0u, f.reRank());
+TEST(HitCollectorTest, rerank_empty)
+{
+ Fixture f;
+ EXPECT_EQ(0u, f.reRank());
}
-TEST_F("testReRank - ascending", AscendingScoreFixture)
+TEST(HitCollectorTest, rerank_ascending)
{
+ AscendingScoreFixture f;
f.addHits();
- EXPECT_EQUAL(5u, f.reRank());
+ EXPECT_EQ(5u, f.reRank());
std::vector<RankedHit> expRh;
for (uint32_t i = 10; i < 20; ++i) { // 10 last are the best
@@ -213,17 +223,18 @@ TEST_F("testReRank - ascending", AscendingScoreFixture)
expRh.back()._rankValue = i + 200; // after reranking
}
}
- EXPECT_EQUAL(expRh.size(), 10u);
+ EXPECT_EQ(expRh.size(), 10u);
std::unique_ptr<ResultSet> rs = f.hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, f.expBv.get()));
+ checkResult(*rs, expRh);
+ checkResult(*rs, f.expBv.get());
}
-TEST_F("testReRank - descending", DescendingScoreFixture)
+TEST(HitCollectorTest, rerank_descending)
{
+ DescendingScoreFixture f;
f.addHits();
- EXPECT_EQUAL(5u, f.reRank());
+ EXPECT_EQ(5u, f.reRank());
std::vector<RankedHit> expRh;
for (uint32_t i = 0; i < 10; ++i) { // 10 first are the best
@@ -232,17 +243,18 @@ TEST_F("testReRank - descending", DescendingScoreFixture)
expRh.back()._rankValue = i + 200; // after reranking
}
}
- EXPECT_EQUAL(expRh.size(), 10u);
+ EXPECT_EQ(expRh.size(), 10u);
std::unique_ptr<ResultSet> rs = f.hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, f.expBv.get()));
+ checkResult(*rs, expRh);
+ checkResult(*rs, f.expBv.get());
}
-TEST_F("testReRank - partial", AscendingScoreFixture)
+TEST(HitCollectorTest, rerank_partial)
{
+ AscendingScoreFixture f;
f.addHits();
- EXPECT_EQUAL(3u, f.reRank(3));
+ EXPECT_EQ(3u, f.reRank(3));
std::vector<RankedHit> expRh;
for (uint32_t i = 10; i < 20; ++i) { // 10 last are the best
@@ -251,36 +263,39 @@ TEST_F("testReRank - partial", AscendingScoreFixture)
expRh.back()._rankValue = i + 200; // after reranking
}
}
- EXPECT_EQUAL(expRh.size(), 10u);
+ EXPECT_EQ(expRh.size(), 10u);
std::unique_ptr<ResultSet> rs = f.hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, f.expBv.get()));
+ checkResult(*rs, expRh);
+ checkResult(*rs, f.expBv.get());
}
-TEST_F("require that hits for 2nd phase candidates can be retrieved", DescendingScoreFixture)
+TEST(HitCollectorTest, require_that_hits_for_2nd_phase_candidates_can_be_retrieved)
{
+ DescendingScoreFixture f;
f.addHits();
std::vector<HitCollector::Hit> scores = extract(f.hc.getSortedHitSequence(5));
- ASSERT_EQUAL(5u, scores.size());
- EXPECT_EQUAL(100, scores[0].second);
- EXPECT_EQUAL(99, scores[1].second);
- EXPECT_EQUAL(98, scores[2].second);
- EXPECT_EQUAL(97, scores[3].second);
- EXPECT_EQUAL(96, scores[4].second);
+ ASSERT_EQ(5u, scores.size());
+ EXPECT_EQ(100, scores[0].second);
+ EXPECT_EQ(99, scores[1].second);
+ EXPECT_EQ(98, scores[2].second);
+ EXPECT_EQ(97, scores[3].second);
+ EXPECT_EQ(96, scores[4].second);
}
-TEST("require that score ranges can be read and set.") {
+TEST(HitCollectorTest, require_that_score_ranges_can_be_read_and_set)
+{
std::pair<Scores, Scores> ranges = std::make_pair(Scores(1.0, 2.0), Scores(3.0, 4.0));
HitCollector hc(20, 10);
hc.setRanges(ranges);
- EXPECT_EQUAL(ranges.first.low, hc.getRanges().first.low);
- EXPECT_EQUAL(ranges.first.high, hc.getRanges().first.high);
- EXPECT_EQUAL(ranges.second.low, hc.getRanges().second.low);
- EXPECT_EQUAL(ranges.second.high, hc.getRanges().second.high);
+ EXPECT_EQ(ranges.first.low, hc.getRanges().first.low);
+ EXPECT_EQ(ranges.first.high, hc.getRanges().first.high);
+ EXPECT_EQ(ranges.second.low, hc.getRanges().second.low);
+ EXPECT_EQ(ranges.second.high, hc.getRanges().second.high);
}
-TEST("testNoHitsToReRank") {
+TEST(HitCollectorTest, no_hits_to_rerank)
+{
uint32_t numDocs = 20;
uint32_t maxHitsSize = 10;
@@ -299,8 +314,8 @@ TEST("testNoHitsToReRank") {
}
std::unique_ptr<ResultSet> rs = hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, nullptr));
+ checkResult(*rs, expRh);
+ checkResult(*rs, nullptr);
}
}
@@ -317,14 +332,15 @@ void testScaling(const std::vector<feature_t> &initScores,
PredefinedScorer scorer(std::move(finalScores));
// perform second phase ranking
- EXPECT_EQUAL(2u, do_reRank(scorer, hc, 2));
+ EXPECT_EQ(2u, do_reRank(scorer, hc, 2));
// check results
std::unique_ptr<ResultSet> rs = hc.getResultSet();
- TEST_DO(checkResult(*rs, expected));
+ checkResult(*rs, expected);
}
-TEST("testScaling") {
+TEST(HitCollectorTest, scaling)
+{
std::vector<feature_t> initScores(5);
initScores[0] = 1000;
initScores[1] = 2000;
@@ -338,7 +354,8 @@ TEST("testScaling") {
exp[i]._docId = i;
}
- { // scale down and adjust down
+ {
+ SCOPED_TRACE("scale down and adjust down");
exp[0]._rankValue = 0; // scaled
exp[1]._rankValue = 100; // scaled
exp[2]._rankValue = 200; // scaled
@@ -350,9 +367,10 @@ TEST("testScaling") {
finalScores[3] = 300;
finalScores[4] = 400;
- TEST_DO(testScaling(initScores, std::move(finalScores), exp));
+ testScaling(initScores, std::move(finalScores), exp);
}
- { // scale down and adjust up
+ {
+ SCOPED_TRACE("scale down and adjust up");
exp[0]._rankValue = 200; // scaled
exp[1]._rankValue = 300; // scaled
exp[2]._rankValue = 400; // scaled
@@ -364,10 +382,10 @@ TEST("testScaling") {
finalScores[3] = 500;
finalScores[4] = 600;
- TEST_DO(testScaling(initScores, std::move(finalScores), exp));
+ testScaling(initScores, std::move(finalScores), exp);
}
- { // scale up and adjust down
-
+ {
+ SCOPED_TRACE("scale up and adjust down");
exp[0]._rankValue = -500; // scaled (-500)
exp[1]._rankValue = 750; // scaled
exp[2]._rankValue = 2000; // scaled
@@ -379,9 +397,10 @@ TEST("testScaling") {
finalScores[3] = 3250;
finalScores[4] = 4500;
- TEST_DO(testScaling(initScores, std::move(finalScores), exp));
+ testScaling(initScores, std::move(finalScores), exp);
}
- { // minimal scale (second phase range = 0 (4 - 4) -> 1)
+ {
+ SCOPED_TRACE("minimal scale (second phase range = 0 (4 - 4) -> 1)");
exp[0]._rankValue = 1; // scaled
exp[1]._rankValue = 2; // scaled
exp[2]._rankValue = 3; // scaled
@@ -393,9 +412,10 @@ TEST("testScaling") {
finalScores[3] = 4;
finalScores[4] = 4;
- TEST_DO(testScaling(initScores, std::move(finalScores), exp));
+ testScaling(initScores, std::move(finalScores), exp);
}
- { // minimal scale (first phase range = 0 (4000 - 4000) -> 1)
+ {
+ SCOPED_TRACE("minimal scale (first phase range = 0 (4000 - 4000) -> 1)");
std::vector<feature_t> is(initScores);
is[4] = 4000;
exp[0]._rankValue = -299600; // scaled
@@ -409,11 +429,12 @@ TEST("testScaling") {
finalScores[3] = 400;
finalScores[4] = 500;
- TEST_DO(testScaling(is, std::move(finalScores), exp));
+ testScaling(is, std::move(finalScores), exp);
}
}
-TEST("testOnlyBitVector") {
+TEST(HitCollectorTest, only_bitvector)
+{
uint32_t numDocs = 20;
LOG(info, "testOnlyBitVector: test it");
{
@@ -428,8 +449,8 @@ TEST("testOnlyBitVector") {
std::unique_ptr<ResultSet> rs = hc.getResultSet();
std::vector<RankedHit> expRh;
- TEST_DO(checkResult(*rs, expRh)); // no ranked hits
- TEST_DO(checkResult(*rs, expBv.get())); // only bit vector
+ checkResult(*rs, expRh); // no ranked hits
+ checkResult(*rs, expBv.get()); // only bit vector
}
}
@@ -443,9 +464,9 @@ struct MergeResultSetFixture {
{}
};
-TEST_F("require that result set is merged correctly with first phase ranking",
- MergeResultSetFixture)
+TEST(HitCollectorTest, require_that_result_set_is_merged_correctly_with_first_phase_ranking)
{
+ MergeResultSetFixture f;
std::vector<RankedHit> expRh;
for (uint32_t i = 0; i < f.numDocs; ++i) {
f.hc.addHit(i, i + 1000);
@@ -457,7 +478,7 @@ TEST_F("require that result set is merged correctly with first phase ranking",
expRh.back()._rankValue = (i < f.numDocs - f.maxHitsSize) ? default_rank_value : i + 1000;
}
std::unique_ptr<ResultSet> rs = f.hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
+ checkResult(*rs, expRh);
}
void
@@ -474,9 +495,9 @@ addExpectedHitForMergeTest(const MergeResultSetFixture &f, std::vector<RankedHit
}
}
-TEST_F("require that result set is merged correctly with second phase ranking (document scorer)",
- MergeResultSetFixture)
+TEST(HitCollectorTest, require_that_result_set_is_merged_correctly_with_second_phase_ranking_using_document_scorer)
{
+ MergeResultSetFixture f;
// with second phase ranking that triggers rescoring / scaling
BasicScorer scorer(500); // second phase ranking setting score to docId + 500
std::vector<RankedHit> expRh;
@@ -484,12 +505,13 @@ TEST_F("require that result set is merged correctly with second phase ranking (d
f.hc.addHit(i, i + 1000);
addExpectedHitForMergeTest(f, expRh, i);
}
- EXPECT_EQUAL(f.maxHeapSize, do_reRank(scorer, f.hc, f.maxHeapSize));
+ EXPECT_EQ(f.maxHeapSize, do_reRank(scorer, f.hc, f.maxHeapSize));
std::unique_ptr<ResultSet> rs = f.hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
+ checkResult(*rs, expRh);
}
-TEST("require that hits can be added out of order") {
+TEST(HitCollectorTest, require_that_hits_can_be_added_out_of_order)
+{
HitCollector hc(1000, 100);
std::vector<RankedHit> expRh;
// produce expected result in normal order
@@ -503,11 +525,12 @@ TEST("require that hits can be added out of order") {
hc.addHit(i, i + 100);
}
std::unique_ptr<ResultSet> rs = hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, nullptr));
+ checkResult(*rs, expRh);
+ checkResult(*rs, nullptr);
}
-TEST("require that hits can be added out of order when passing array limit") {
+TEST(HitCollectorTest, require_that_hits_can_be_added_out_of_order_when_passing_array_limit)
+{
HitCollector hc(10000, 100);
std::vector<RankedHit> expRh;
// produce expected result in normal order
@@ -525,11 +548,12 @@ TEST("require that hits can be added out of order when passing array limit") {
hc.addHit(i, i + 100);
}
std::unique_ptr<ResultSet> rs = hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, nullptr));
+ checkResult(*rs, expRh);
+ checkResult(*rs, nullptr);
}
-TEST("require that hits can be added out of order only after passing array limit") {
+TEST(HitCollectorTest, require_that_hits_can_be_added_out_of_order_only_after_passing_array_limit)
+{
HitCollector hc(10000, 100);
std::vector<RankedHit> expRh;
// produce expected result in normal order
@@ -548,8 +572,87 @@ TEST("require that hits can be added out of order only after passing array limit
hc.addHit(i, i + 100);
}
std::unique_ptr<ResultSet> rs = hc.getResultSet();
- TEST_DO(checkResult(*rs, expRh));
- TEST_DO(checkResult(*rs, nullptr));
+ checkResult(*rs, expRh);
+ checkResult(*rs, nullptr);
+}
+
+struct RankDropFixture {
+ uint32_t _docid_limit;
+ HitCollector _hc;
+ std::vector<uint32_t> _dropped;
+ RankDropFixture(uint32_t docid_limit, uint32_t max_hits_size)
+ : _docid_limit(docid_limit),
+ _hc(docid_limit, max_hits_size)
+ {
+ }
+ void add(std::vector<RankedHit> hits) {
+ for (const auto& hit : hits) {
+ _hc.addHit(hit.getDocId(), hit.getRank());
+ }
+ }
+ void rerank(ScoreMap score_map, size_t count) {
+ PredefinedScorer scorer(score_map);
+ EXPECT_EQ(count, do_reRank(scorer, _hc, count));
+ }
+ std::unique_ptr<BitVector> make_bv(DocidVector docids) {
+ auto bv = BitVector::create(_docid_limit);
+ for (auto& docid : docids) {
+ bv->setBit(docid);
+ }
+ return bv;
+ }
+
+ void setup() {
+ // Initial 7 hits from first phase
+ add({{5, 1100},{10, 1200},{11, 1300},{12, 1400},{14, 500},{15, 900},{16,1000}});
+ // Rerank two best hits, calculate old and new ranges for reranked
+ // hits that will cause hits not reranked to later be rescored by
+ // dividing by 100.
+ rerank({{11,14},{12,13}}, 2);
+ }
+ void check_result(std::optional<double> rank_drop_limit, RankedHitVector exp_array,
+ std::unique_ptr<BitVector> exp_bv, DocidVector exp_dropped) {
+ auto rs = _hc.get_result_set(rank_drop_limit, &_dropped);
+ checkResult(*rs, exp_array);
+ checkResult(*rs, exp_bv.get());
+ EXPECT_EQ(exp_dropped, _dropped);
+ }
+};
+
+TEST(HitCollectorTest, require_that_second_phase_rank_drop_limit_is_enforced)
+{
+ // Track rank score for all 7 hits from first phase
+ RankDropFixture f(10000, 10);
+ f.setup();
+ f.check_result(9.0, {{5,11},{10,12},{11,14},{12,13},{16,10}},
+ {}, {14, 15});
+}
+
+TEST(HitCollectorTest, require_that_second_phase_rank_drop_limit_is_enforced_when_docid_vector_is_used)
+{
+ // Track rank score for 4 best hits from first phase, overflow to docid vector
+ RankDropFixture f(10000, 4);
+ f.setup();
+ f.check_result(13.0, {{11,14}},
+ {}, {5,10,12,14,15,16});
+}
+
+TEST(HitCollectorTest, require_that_bitvector_is_not_dropped_without_second_phase_rank_drop_limit)
+{
+ // Track rank score for 4 best hits from first phase, overflow to bitvector
+ RankDropFixture f(20, 4);
+ f.setup();
+ f.check_result(std::nullopt, {{5,11},{10,12},{11,14},{12,13}},
+ f.make_bv({5,10,11,12,14,15,16}), {});
+}
+
+TEST(HitCollectorTest, require_that_bitvector_is_dropped_with_second_phase_rank_drop_limit)
+{
+ // Track rank score for 4 best hits from first phase, overflow to bitvector
+ RankDropFixture f(20, 4);
+ f.setup();
+ f.check_result(9.0, {{5,11},{10,12},{11,14},{12,13}},
+ {}, {14,15,16});
}
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/hitcollector/sorted_hit_sequence_test.cpp b/searchlib/src/tests/hitcollector/sorted_hit_sequence_test.cpp
index c1c3a550d9b..4eefa5b5dfa 100644
--- a/searchlib/src/tests/hitcollector/sorted_hit_sequence_test.cpp
+++ b/searchlib/src/tests/hitcollector/sorted_hit_sequence_test.cpp
@@ -1,7 +1,7 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/searchlib/queryeval/sorted_hit_sequence.h>
+#include <vespa/vespalib/gtest/gtest.h>
using search::queryeval::SortedHitSequence;
using Hits = std::vector<SortedHitSequence::Hit>;
@@ -10,20 +10,22 @@ using Refs = std::vector<SortedHitSequence::Ref>;
Hits hits({{1,10.0},{2,30.0},{3,20.0}});
Refs refs({1,2,0});
-TEST("require that empty hit sequence is empty") {
+TEST(SortedHitSequenceTest, require_that_empty_hit_sequence_is_empty)
+{
EXPECT_TRUE(!SortedHitSequence(nullptr, nullptr, 0).valid());
EXPECT_TRUE(!SortedHitSequence(&hits[0], &refs[0], 0).valid());
}
-TEST("require that sorted hit sequence can be iterated") {
+TEST(SortedHitSequenceTest, require_that_sorted_hit_sequence_can_be_iterated)
+{
SortedHitSequence seq(&hits[0], &refs[0], refs.size());
for (const auto &expect: Hits({{2,30.0},{3,20.0},{1,10.0}})) {
ASSERT_TRUE(seq.valid());
- EXPECT_EQUAL(expect.first, seq.get().first);
- EXPECT_EQUAL(expect.second, seq.get().second);
+ EXPECT_EQ(expect.first, seq.get().first);
+ EXPECT_EQ(expect.second, seq.get().second);
seq.next();
}
EXPECT_TRUE(!seq.valid());
}
-TEST_MAIN() { TEST_RUN_ALL(); }
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp b/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
index 3154f95bbe1..01587ef485a 100644
--- a/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
@@ -193,63 +193,206 @@ struct NoRescorer
};
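+// RerankRescorer: wraps another rescorer; hits that appear in the reranked
+// hit list keep their second phase score, all other hits are rescored by
+// the wrapped rescorer.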
template <typename Rescorer>
+class RerankRescorer {
+ Rescorer _rescorer;
+ using HitVector = std::vector<HitCollector::Hit>;
+ using Iterator = typename HitVector::const_iterator;
+ Iterator _reranked_cur;
+ Iterator _reranked_end;
+public:
+ RerankRescorer(const Rescorer& rescorer,
+ const HitVector& reranked_hits)
+ : _rescorer(rescorer),
+ _reranked_cur(reranked_hits.begin()),
+ _reranked_end(reranked_hits.end())
+ {
+ }
+
+ double rescore(uint32_t docid, double score) noexcept {
+ if (_reranked_cur != _reranked_end && _reranked_cur->first == docid) {
+ double result = _reranked_cur->second;
+ ++_reranked_cur;
+ return result;
+ } else {
+ return _rescorer.rescore(docid, score);
+ }
+ }
+};
+
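+// Hit adder policies used when building the result set: SimpleHitAdder adds
+// every hit, ConditionalHitAdder drops hits at or below the second phase
+// rank drop limit, and TrackingConditionalHitAdder additionally records the
+// docids of the dropped hits.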
+class SimpleHitAdder {
+protected:
+ ResultSet& _rs;
+public:
+ SimpleHitAdder(ResultSet& rs)
+ : _rs(rs)
+ {
+ }
+ void add(uint32_t docid, double rank_value) {
+ _rs.push_back({docid, rank_value});
+ }
+};
+
+class ConditionalHitAdder : public SimpleHitAdder {
+protected:
+ double _second_phase_rank_drop_limit;
+public:
+ ConditionalHitAdder(ResultSet& rs, double second_phase_rank_drop_limit)
+ : SimpleHitAdder(rs),
+ _second_phase_rank_drop_limit(second_phase_rank_drop_limit)
+ {
+ }
+ void add(uint32_t docid, double rank_value) {
+ if (rank_value > _second_phase_rank_drop_limit) {
+ _rs.push_back({docid, rank_value});
+ }
+ }
+};
+
+class TrackingConditionalHitAdder : public ConditionalHitAdder {
+ std::vector<uint32_t>& _dropped;
+public:
+ TrackingConditionalHitAdder(ResultSet& rs, double second_phase_rank_drop_limit, std::vector<uint32_t>& dropped)
+ : ConditionalHitAdder(rs, second_phase_rank_drop_limit),
+ _dropped(dropped)
+ {
+ }
+ void add(uint32_t docid, double rank_value) {
+ if (rank_value > _second_phase_rank_drop_limit) {
+ _rs.push_back({docid, rank_value});
+ } else {
+ _dropped.emplace_back(docid);
+ }
+ }
+};
+
+template <typename HitAdder, typename Rescorer>
void
-add_rescored_hits(ResultSet& rs, const std::vector<HitCollector::Hit>& hits, Rescorer rescorer)
+add_rescored_hits(HitAdder hit_adder, const std::vector<HitCollector::Hit>& hits, Rescorer rescorer)
{
for (auto& hit : hits) {
- rs.push_back({hit.first, rescorer.rescore(hit.first, hit.second)});
+ hit_adder.add(hit.first, rescorer.rescore(hit.first, hit.second));
+ }
+}
+
+template <typename HitAdder, typename Rescorer>
+void
+add_rescored_hits(HitAdder hit_adder, const std::vector<HitCollector::Hit>& hits, const std::vector<HitCollector::Hit>& reranked_hits, Rescorer rescorer)
+{
+ if (reranked_hits.empty()) {
+ add_rescored_hits(hit_adder, hits, rescorer);
+ } else {
+ add_rescored_hits(hit_adder, hits, RerankRescorer(rescorer, reranked_hits));
}
}
template <typename Rescorer>
void
-mixin_rescored_hits(ResultSet& rs, const std::vector<HitCollector::Hit>& hits, const std::vector<uint32_t>& docids, double default_value, Rescorer rescorer)
+add_rescored_hits(ResultSet& rs, const std::vector<HitCollector::Hit>& hits, const std::vector<HitCollector::Hit>& reranked_hits, std::optional<double> second_phase_rank_drop_limit, std::vector<uint32_t>* dropped, Rescorer rescorer)
+{
+ if (second_phase_rank_drop_limit.has_value()) {
+ if (dropped != nullptr) {
+ add_rescored_hits(TrackingConditionalHitAdder(rs, second_phase_rank_drop_limit.value(), *dropped), hits, reranked_hits, rescorer);
+ } else {
+ add_rescored_hits(ConditionalHitAdder(rs, second_phase_rank_drop_limit.value()), hits, reranked_hits, rescorer);
+ }
+ } else {
+ add_rescored_hits(SimpleHitAdder(rs), hits, reranked_hits, rescorer);
+ }
+}
+
+template <typename HitAdder, typename Rescorer>
+void
+mixin_rescored_hits(HitAdder hit_adder, const std::vector<HitCollector::Hit>& hits, const std::vector<uint32_t>& docids, double default_value, Rescorer rescorer)
{
auto hits_cur = hits.begin();
auto hits_end = hits.end();
for (auto docid : docids) {
if (hits_cur != hits_end && docid == hits_cur->first) {
- rs.push_back({docid, rescorer.rescore(docid, hits_cur->second)});
+ hit_adder.add(docid, rescorer.rescore(docid, hits_cur->second));
++hits_cur;
} else {
- rs.push_back({docid, default_value});
+ hit_adder.add(docid, default_value);
+ }
+ }
+}
+
+template <typename HitAdder, typename Rescorer>
+void
+mixin_rescored_hits(HitAdder hit_adder, const std::vector<HitCollector::Hit>& hits, const std::vector<uint32_t>& docids, double default_value, const std::vector<HitCollector::Hit>& reranked_hits, Rescorer rescorer)
+{
+ if (reranked_hits.empty()) {
+ mixin_rescored_hits(hit_adder, hits, docids, default_value, rescorer);
+ } else {
+ mixin_rescored_hits(hit_adder, hits, docids, default_value, RerankRescorer(rescorer, reranked_hits));
+ }
+}
+
+template <typename Rescorer>
+void
+mixin_rescored_hits(ResultSet& rs, const std::vector<HitCollector::Hit>& hits, const std::vector<uint32_t>& docids, double default_value, const std::vector<HitCollector::Hit>& reranked_hits, std::optional<double> second_phase_rank_drop_limit, std::vector<uint32_t>* dropped, Rescorer rescorer)
+{
+ if (second_phase_rank_drop_limit.has_value()) {
+ if (dropped != nullptr) {
+ mixin_rescored_hits(TrackingConditionalHitAdder(rs, second_phase_rank_drop_limit.value(), *dropped), hits, docids, default_value, reranked_hits, rescorer);
+ } else {
+ mixin_rescored_hits(ConditionalHitAdder(rs, second_phase_rank_drop_limit.value()), hits, docids, default_value, reranked_hits, rescorer);
}
+ } else {
+ mixin_rescored_hits(SimpleHitAdder(rs), hits, docids, default_value, reranked_hits, rescorer);
}
}
void
-mergeHitsIntoResultSet(const std::vector<HitCollector::Hit> &hits, ResultSet &result)
+add_bitvector_to_dropped(std::vector<uint32_t>& dropped, vespalib::ConstArrayRef<RankedHit> hits, const BitVector& bv)
{
- uint32_t rhCur(0);
- uint32_t rhEnd(result.getArrayUsed());
- for (const auto &hit : hits) {
- while (rhCur != rhEnd && result[rhCur].getDocId() != hit.first) {
- // just set the iterators right
- ++rhCur;
+ auto hits_cur = hits.begin();
+ auto hits_end = hits.end();
+ auto docid = bv.getFirstTrueBit();
+ auto docid_limit = bv.size();
+ while (docid < docid_limit) {
+ if (hits_cur != hits_end && hits_cur->getDocId() == docid) {
+ ++hits_cur;
+ } else {
+ dropped.emplace_back(docid);
}
- assert(rhCur != rhEnd); // the hits should be a subset of the hits in ranked hit array.
- result[rhCur]._rankValue = hit.second;
+ docid = bv.getNextTrueBit(docid + 1);
}
}
}
std::unique_ptr<ResultSet>
-HitCollector::getResultSet(HitRank default_value)
+HitCollector::get_result_set(std::optional<double> second_phase_rank_drop_limit, std::vector<uint32_t>* dropped)
{
+ /*
+ * Use default_rank_value (i.e. -HUGE_VAL) when hit collector saves
+ * rank scores, otherwise use zero_rank_value (i.e. 0.0).
+ */
+ auto default_value = save_rank_scores() ? search::default_rank_value : search::zero_rank_value;
+
bool needReScore = FirstPhaseRescorer::need_rescore(_ranges);
FirstPhaseRescorer rescorer(_ranges);
+ if (dropped != nullptr) {
+ dropped->clear();
+ }
+
// destroys the heap property or score sort order
sortHitsByDocId();
auto rs = std::make_unique<ResultSet>();
- if ( ! _collector->isDocIdCollector() ) {
+ if ( ! _collector->isDocIdCollector() ||
+ (second_phase_rank_drop_limit.has_value() &&
+ (_bitVector || dropped == nullptr))) {
rs->allocArray(_hits.size());
+ auto* dropped_or_null = dropped;
+ if (second_phase_rank_drop_limit.has_value() && _bitVector) {
+ dropped_or_null = nullptr;
+ }
if (needReScore) {
- add_rescored_hits(*rs, _hits, rescorer);
+ add_rescored_hits(*rs, _hits, _reRankedHits, second_phase_rank_drop_limit, dropped_or_null, rescorer);
} else {
- add_rescored_hits(*rs, _hits, NoRescorer());
+ add_rescored_hits(*rs, _hits, _reRankedHits, second_phase_rank_drop_limit, dropped_or_null, NoRescorer());
}
} else {
if (_unordered) {
@@ -257,14 +400,18 @@ HitCollector::getResultSet(HitRank default_value)
}
rs->allocArray(_docIdVector.size());
if (needReScore) {
- mixin_rescored_hits(*rs, _hits, _docIdVector, default_value, rescorer);
+ mixin_rescored_hits(*rs, _hits, _docIdVector, default_value, _reRankedHits, second_phase_rank_drop_limit, dropped, rescorer);
} else {
- mixin_rescored_hits(*rs, _hits, _docIdVector, default_value, NoRescorer());
+ mixin_rescored_hits(*rs, _hits, _docIdVector, default_value, _reRankedHits, second_phase_rank_drop_limit, dropped, NoRescorer());
}
}
- if (!_reRankedHits.empty()) {
- mergeHitsIntoResultSet(_reRankedHits, *rs);
+ if (second_phase_rank_drop_limit.has_value() && _bitVector) {
+ if (dropped != nullptr) {
+ assert(dropped->empty());
+ add_bitvector_to_dropped(*dropped, {rs->getArray(), rs->getArrayUsed()}, *_bitVector);
+ }
+ _bitVector.reset();
}
if (_bitVector) {
@@ -274,4 +421,10 @@ HitCollector::getResultSet(HitRank default_value)
return rs;
}
+std::unique_ptr<ResultSet>
+HitCollector::getResultSet()
+{
+ return get_result_set(std::nullopt, nullptr);
+}
+
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/hitcollector.h b/searchlib/src/vespa/searchlib/queryeval/hitcollector.h
index 903c2ab5b13..c23fb0a6ef6 100644
--- a/searchlib/src/vespa/searchlib/queryeval/hitcollector.h
+++ b/searchlib/src/vespa/searchlib/queryeval/hitcollector.h
@@ -8,6 +8,7 @@
#include <vespa/searchlib/common/resultset.h>
#include <vespa/vespalib/util/sort.h>
#include <algorithm>
+#include <optional>
#include <vector>
namespace search::queryeval {
@@ -121,6 +122,8 @@ private:
VESPA_DLL_LOCAL void sortHitsByScore(size_t topn);
VESPA_DLL_LOCAL void sortHitsByDocId();
+ bool save_rank_scores() const noexcept { return _maxHitsSize != 0; }
+
public:
HitCollector(const HitCollector &) = delete;
HitCollector &operator=(const HitCollector &) = delete;
@@ -164,15 +167,17 @@ public:
const std::pair<Scores, Scores> &getRanges() const { return _ranges; }
void setRanges(const std::pair<Scores, Scores> &ranges);
+ std::unique_ptr<ResultSet>
+ get_result_set(std::optional<double> second_phase_rank_drop_limit, std::vector<uint32_t>* dropped);
+
/**
* Returns a result set based on the content of this collector.
* Invoking this method will destroy the heap property of the
* ranked hits and the match data heap.
*
- * @param auto pointer to the result set
- * @param default_value rank value to be used for results without rank value
+ * @return unique pointer to the result set
**/
- std::unique_ptr<ResultSet> getResultSet(HitRank default_value = default_rank_value);
+ std::unique_ptr<ResultSet> getResultSet();
};
}
diff --git a/storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.cpp b/storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.cpp
index c3cb38bd7ac..582c69e943f 100644
--- a/storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.cpp
@@ -20,8 +20,8 @@ MergeHandlerMetrics::MergeHandlerMetrics(metrics::MetricSet* owner)
"current node.", owner),
mergeAverageDataReceivedNeeded("mergeavgdatareceivedneeded", {}, "Amount of data transferred from previous node "
"in chain that we needed to apply locally.", owner),
- put_latency("put_latency", {}, "Latency of individual puts that are part of merge operations", owner),
- remove_latency("remove_latency", {}, "Latency of individual removes that are part of merge operations", owner)
+ merge_put_latency("merge_put_latency", {}, "Latency of individual puts that are part of merge operations", owner),
+ merge_remove_latency("merge_remove_latency", {}, "Latency of individual removes that are part of merge operations", owner)
{}
MergeHandlerMetrics::~MergeHandlerMetrics() = default;
diff --git a/storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.h b/storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.h
index 44b85570357..a2d68011695 100644
--- a/storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.h
+++ b/storage/src/vespa/storage/persistence/filestorage/merge_handler_metrics.h
@@ -21,8 +21,8 @@ struct MergeHandlerMetrics {
metrics::DoubleAverageMetric mergeAverageDataReceivedNeeded;
// Individual operation metrics. These capture both count and latency sum, so
// no need for explicit count metric on the side.
- metrics::DoubleAverageMetric put_latency;
- metrics::DoubleAverageMetric remove_latency;
+ metrics::DoubleAverageMetric merge_put_latency;
+ metrics::DoubleAverageMetric merge_remove_latency;
// Iteration over metadata and document payload data is already covered by
// the merge[Meta]Data(Read|Write)Latency metrics, so not repeated here. Can be
// explicitly added if deemed required.
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index f6cb75ae8bb..b3207428f5f 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -527,14 +527,14 @@ MergeHandler::applyDiffEntry(std::shared_ptr<ApplyBucketDiffState> async_results
document::DocumentId docId = doc->getId();
auto complete = std::make_unique<ApplyBucketDiffEntryComplete>(std::move(async_results), std::move(docId),
std::move(throttle_token), "put",
- _clock, _env._metrics.merge_handler_metrics.put_latency);
+ _clock, _env._metrics.merge_handler_metrics.merge_put_latency);
_spi.putAsync(bucket, timestamp, std::move(doc), std::move(complete));
} else {
std::vector<spi::IdAndTimestamp> ids;
ids.emplace_back(document::DocumentId(e._docName), timestamp);
auto complete = std::make_unique<ApplyBucketDiffEntryComplete>(std::move(async_results), ids[0].id,
std::move(throttle_token), "remove",
- _clock, _env._metrics.merge_handler_metrics.remove_latency);
+ _clock, _env._metrics.merge_handler_metrics.merge_remove_latency);
_spi.removeAsync(bucket, std::move(ids), std::move(complete));
}
}
diff --git a/storage/src/vespa/storage/storageserver/rpc/cluster_controller_api_rpc_service.cpp b/storage/src/vespa/storage/storageserver/rpc/cluster_controller_api_rpc_service.cpp
index 4d74eb1974b..3b53c0c9584 100644
--- a/storage/src/vespa/storage/storageserver/rpc/cluster_controller_api_rpc_service.cpp
+++ b/storage/src/vespa/storage/storageserver/rpc/cluster_controller_api_rpc_service.cpp
@@ -124,7 +124,8 @@ void ClusterControllerApiRpcService::RPC_setSystemState2(FRT_RPCRequest* req) {
req->GetParams()->GetValue(0)._string._len);
lib::ClusterState systemState(systemStateStr);
- auto cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterStateBundle(systemState));
+ auto bundle = std::make_shared<const lib::ClusterStateBundle>(systemState);
+ auto cmd = std::make_shared<api::SetSystemStateCommand>(std::move(bundle));
cmd->setPriority(api::StorageMessage::VERYHIGH);
detach_and_forward_to_enqueuer(std::move(cmd), req);
@@ -167,8 +168,7 @@ void ClusterControllerApiRpcService::RPC_setDistributionStates(FRT_RPCRequest* r
}
LOG(debug, "Got state bundle %s", state_bundle->toString().c_str());
- // TODO add constructor taking in shared_ptr directly instead?
- auto cmd = std::make_shared<api::SetSystemStateCommand>(*state_bundle);
+ auto cmd = std::make_shared<api::SetSystemStateCommand>(std::move(state_bundle));
cmd->setPriority(api::StorageMessage::VERYHIGH);
detach_and_forward_to_enqueuer(std::move(cmd), req);
diff --git a/storage/src/vespa/storageapi/message/state.cpp b/storage/src/vespa/storageapi/message/state.cpp
index 5a50167f584..b4e8655d783 100644
--- a/storage/src/vespa/storageapi/message/state.cpp
+++ b/storage/src/vespa/storageapi/message/state.cpp
@@ -5,8 +5,7 @@
#include <vespa/vdslib/state/clusterstate.h>
#include <ostream>
-namespace storage {
-namespace api {
+namespace storage::api {
IMPLEMENT_COMMAND(GetNodeStateCommand, GetNodeStateReply)
IMPLEMENT_REPLY(GetNodeStateReply)
@@ -45,7 +44,7 @@ GetNodeStateReply::GetNodeStateReply(const GetNodeStateCommand& cmd)
GetNodeStateReply::GetNodeStateReply(const GetNodeStateCommand& cmd,
const lib::NodeState& state)
: StorageReply(cmd),
- _state(new lib::NodeState(state))
+ _state(std::make_unique<lib::NodeState>(state))
{
}
@@ -64,23 +63,31 @@ GetNodeStateReply::print(std::ostream& out, bool verbose,
}
}
+SetSystemStateCommand::SetSystemStateCommand(std::shared_ptr<const lib::ClusterStateBundle> state)
+ : StorageCommand(MessageType::SETSYSTEMSTATE),
+ _state(std::move(state))
+{
+}
+
SetSystemStateCommand::SetSystemStateCommand(const lib::ClusterStateBundle& state)
: StorageCommand(MessageType::SETSYSTEMSTATE),
- _state(state)
+ _state(std::make_shared<const lib::ClusterStateBundle>(state))
{
}
SetSystemStateCommand::SetSystemStateCommand(const lib::ClusterState& state)
: StorageCommand(MessageType::SETSYSTEMSTATE),
- _state(state)
+ _state(std::make_shared<const lib::ClusterStateBundle>(state))
{
}
+SetSystemStateCommand::~SetSystemStateCommand() = default;
+
void
SetSystemStateCommand::print(std::ostream& out, bool verbose,
const std::string& indent) const
{
- out << "SetSystemStateCommand(" << *_state.getBaselineClusterState() << ")";
+ out << "SetSystemStateCommand(" << *_state->getBaselineClusterState() << ")";
if (verbose) {
out << " : ";
StorageCommand::print(out, verbose, indent);
@@ -89,7 +96,7 @@ SetSystemStateCommand::print(std::ostream& out, bool verbose,
SetSystemStateReply::SetSystemStateReply(const SetSystemStateCommand& cmd)
: StorageReply(cmd),
- _state(cmd.getClusterStateBundle())
+ _state(cmd.cluster_state_bundle_ptr())
{
}
@@ -138,5 +145,4 @@ void ActivateClusterStateVersionReply::print(std::ostream& out, bool verbose,
}
}
-} // api
-} // storage
+} // storage::api
diff --git a/storage/src/vespa/storageapi/message/state.h b/storage/src/vespa/storageapi/message/state.h
index 900355b12a2..4aa4c8a8f31 100644
--- a/storage/src/vespa/storageapi/message/state.h
+++ b/storage/src/vespa/storageapi/message/state.h
@@ -61,13 +61,19 @@ public:
* put/get/remove etx)
*/
class SetSystemStateCommand : public StorageCommand {
- lib::ClusterStateBundle _state;
+ std::shared_ptr<const lib::ClusterStateBundle> _state;
public:
+ explicit SetSystemStateCommand(std::shared_ptr<const lib::ClusterStateBundle> state);
explicit SetSystemStateCommand(const lib::ClusterStateBundle &state);
explicit SetSystemStateCommand(const lib::ClusterState &state);
- const lib::ClusterState& getSystemState() const { return *_state.getBaselineClusterState(); }
- const lib::ClusterStateBundle& getClusterStateBundle() const { return _state; }
+ ~SetSystemStateCommand() override;
+
+ [[nodiscard]] const lib::ClusterState& getSystemState() const { return *_state->getBaselineClusterState(); }
+ [[nodiscard]] const lib::ClusterStateBundle& getClusterStateBundle() const { return *_state; }
+ [[nodiscard]] std::shared_ptr<const lib::ClusterStateBundle> cluster_state_bundle_ptr() const noexcept {
+ return _state;
+ }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
DECLARE_STORAGECOMMAND(SetSystemStateCommand, onSetSystemState)
@@ -80,14 +86,14 @@ public:
* @brief Reply received after a SetSystemStateCommand.
*/
class SetSystemStateReply : public StorageReply {
- lib::ClusterStateBundle _state;
+ std::shared_ptr<const lib::ClusterStateBundle> _state;
public:
explicit SetSystemStateReply(const SetSystemStateCommand& cmd);
// Not serialized. Available locally
- const lib::ClusterState& getSystemState() const { return *_state.getBaselineClusterState(); }
- const lib::ClusterStateBundle& getClusterStateBundle() const { return _state; }
+ const lib::ClusterState& getSystemState() const { return *_state->getBaselineClusterState(); }
+ const lib::ClusterStateBundle& getClusterStateBundle() const { return *_state; }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
DECLARE_STORAGEREPLY(SetSystemStateReply, onSetSystemStateReply)
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java
index 42079718115..dc902297d6d 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java
@@ -19,6 +19,7 @@ import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
@@ -335,6 +336,7 @@ class HttpRequestStrategy implements RequestStrategy {
private final Object monitor = new Object();
private final ClusterFactory clusterFactory;
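+ // Runs completion callbacks which may close a retired cluster; shut down in close()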
+ private final ExecutorService executor = Executors.newSingleThreadExecutor();
private AtomicLong inflight = new AtomicLong(0);
private Cluster delegate;
@@ -350,14 +352,15 @@ class HttpRequestStrategy implements RequestStrategy {
usedCounter.incrementAndGet();
Cluster usedCluster = delegate;
usedCluster.dispatch(request, vessel);
- vessel.whenComplete((__, ___) -> {
- synchronized (monitor) {
- if (usedCounter.decrementAndGet() == 0 && usedCluster != delegate) {
- log.log(INFO, "Closing old HTTP client");
- usedCluster.close();
- }
- }
- });
+ vessel.whenCompleteAsync((__, ___) -> {
+ synchronized (monitor) {
+ if (usedCounter.decrementAndGet() == 0 && usedCluster != delegate) {
+ log.log(INFO, "Closing old HTTP client");
+ usedCluster.close();
+ }
+ }
+ },
+ executor);
}
}
@@ -365,6 +368,15 @@ class HttpRequestStrategy implements RequestStrategy {
public void close() {
synchronized (monitor) {
delegate.close();
+ executor.shutdown();
+ try {
+ if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
+ log.log(WARNING, "Failed shutting down HTTP client within 1 minute");
+ }
+ catch (InterruptedException e) {
+ log.log(WARNING, "Interrupted waiting for HTTP client to shut down");
+ Thread.currentThread().interrupt();
+ }
}
}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java
index a95f859df4d..35d6433ffc5 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/impl/HttpRequestStrategyTest.java
@@ -251,10 +251,9 @@ class HttpRequestStrategyTest {
assertFalse(clusters.get(1).closed.get());
secondVessel.complete(HttpResponse.of(504, null));
assertEquals(504, secondResponse.get().code());
- assertTrue(clusters.get(0).closed.get());
- assertFalse(clusters.get(1).closed.get());
strategy.await();
strategy.destroy();
+ assertTrue(clusters.get(0).closed.get());
assertTrue(clusters.get(1).closed.get());
}
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java b/vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java
index f96fd65e15c..aef3b90af56 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/TensorParser.java
@@ -104,10 +104,12 @@ class TensorParser {
if (type.isEmpty())
throw new IllegalArgumentException("The mixed tensor form requires an explicit tensor type " +
"on the form 'tensor(dimensions):...");
- if (type.get().dimensions().stream().filter(d -> ! d.isIndexed()).count() > 1)
+ if (type.get().dimensions().stream().filter(d -> d.isMapped()).count() > 1)
throw new IllegalArgumentException("The mixed tensor form requires a type with a single mapped dimension, " +
"but got " + type.get());
-
+ if (! MixedValueParser.findMappedDimension(type.get()).isPresent())
+ throw new IllegalArgumentException("No suitable dimension in " + type.get() + " for parsing a tensor on " +
+ "the mixed form: Should have one mapped dimension");
try {
valueString = valueString.trim();
@@ -426,7 +428,7 @@ class TensorParser {
}
private void parse() {
- TensorType.Dimension mappedDimension = findMappedDimension();
+ TensorType.Dimension mappedDimension = findMappedDimension().get();
TensorType mappedSubtype = MixedTensor.createPartialType(builder.type().valueType(), List.of(mappedDimension));
if (dimensionOrder != null)
dimensionOrder.remove(mappedDimension.name());
@@ -448,13 +450,16 @@ class TensorParser {
}
}
- private TensorType.Dimension findMappedDimension() {
- Optional<TensorType.Dimension> mappedDimension = builder.type().dimensions().stream().filter(TensorType.Dimension::isMapped).findAny();
- if (mappedDimension.isPresent()) return mappedDimension.get();
- if (builder.type().rank() == 1 && builder.type().dimensions().get(0).size().isEmpty())
- return builder.type().dimensions().get(0);
- throw new IllegalStateException("No suitable dimension in " + builder.type() +
- " for parsing as a mixed tensor. This is a bug.");
+ private Optional<TensorType.Dimension> findMappedDimension() {
+ return findMappedDimension(builder.type());
+ }
+
+ static Optional<TensorType.Dimension> findMappedDimension(TensorType type) {
+ Optional<TensorType.Dimension> mappedDimension = type.dimensions().stream().filter(TensorType.Dimension::isMapped).findAny();
+ if (mappedDimension.isPresent()) return Optional.of(mappedDimension.get());
+ if (type.rank() == 1 && type.dimensions().get(0).size().isEmpty())
+ return Optional.of(type.dimensions().get(0));
+ return Optional.empty();
}
private void parseDenseSubspace(TensorAddress mappedAddress, List<String> denseDimensionOrder) {
diff --git a/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java
index 5a049eeca04..7bc0556987b 100644
--- a/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java
+++ b/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java
@@ -261,6 +261,8 @@ public class TensorParserTestCase {
"tensor(x[3]):[1, 2]");
assertIllegal("At value position 8: Expected a ']' but got ','",
"tensor(x[3]):[1, 2, 3, 4]");
+ assertIllegal("No suitable dimension in tensor(x[3]) for parsing a tensor on the mixed form: Should have one mapped dimension",
+ "tensor(x[3]):{1:[1,2,3], 2:[2,3,4], 3:[3,4,5]}");
}
private void assertIllegal(String message, String tensor) {
diff --git a/vespalib/src/vespa/vespalib/data/slime/type.h b/vespalib/src/vespa/vespalib/data/slime/type.h
index d3d15526f12..90e79acccd0 100644
--- a/vespalib/src/vespa/vespalib/data/slime/type.h
+++ b/vespalib/src/vespa/vespalib/data/slime/type.h
@@ -19,6 +19,7 @@ protected:
public:
uint32_t getId() const noexcept { return _id; }
+ bool operator==(const Type &rhs) const noexcept { return _id == rhs._id; }
};
/**