-rw-r--r--  _config.yml | 2
-rw-r--r--  client/js/app/yarn.lock | 165
-rw-r--r--  config-model-api/abi-spec.json | 3
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java | 1
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java | 13
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/schema/processing/AddExtraFieldsToDocument.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/schema/processing/IndexingOutputs.java | 9
-rw-r--r--  config-model/src/main/java/com/yahoo/schema/processing/IndexingValidation.java | 14
-rw-r--r--  config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java | 18
-rw-r--r--  config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java | 14
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java | 14
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java | 2
-rw-r--r--  config-model/src/test/derived/bolding_dynamic_summary/documenttypes.cfg | 16
-rw-r--r--  config-model/src/test/derived/complex/ilscripts.cfg | 2
-rw-r--r--  config-model/src/test/derived/multiplesummaries/ilscripts.cfg | 10
-rw-r--r--  config-model/src/test/derived/music/ilscripts.cfg | 4
-rw-r--r--  config-model/src/test/derived/newrank/ilscripts.cfg | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/schema/AbstractSchemaTestCase.java | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/schema/processing/AddExtraFieldsToDocumentTest.java | 23
-rw-r--r--  config-model/src/test/java/com/yahoo/schema/processing/IndexingOutputsTestCase.java | 19
-rw-r--r--  config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java | 6
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java | 30
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java | 3
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java | 3
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java | 3
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ConsoleUrls.java | 5
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillStatus.java | 23
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingReporterMock.java | 44
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/FailedInvoiceUpdate.java | 28
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/InvoiceUpdate.java | 53
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/ModifiableInvoiceUpdate.java | 28
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistory.java | 2
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java | 4
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java | 4
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java | 3
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TenantBilling.java | 39
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TermsOfServiceApproval.java | 30
-rw-r--r--  controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistoryTest.java | 20
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainer.java | 33
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java | 20
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 37
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainerTest.java | 138
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java | 58
-rw-r--r--  dependency-versions/pom.xml | 4
-rw-r--r--  dist/vespa.spec | 4
-rw-r--r--  eval/CMakeLists.txt | 1
-rw-r--r--  eval/src/tests/eval/compiled_function/compiled_function_test.cpp | 1
-rw-r--r--  eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp | 30
-rw-r--r--  eval/src/tests/eval/map_subspaces/CMakeLists.txt | 8
-rw-r--r--  eval/src/tests/eval/map_subspaces/map_subspaces_test.cpp | 103
-rw-r--r--  eval/src/tests/eval/node_types/node_types_test.cpp | 36
-rw-r--r--  eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp | 6
-rw-r--r--  eval/src/tests/eval/reference_operations/reference_operations_test.cpp | 62
-rw-r--r--  eval/src/tests/eval/value_type/value_type_test.cpp | 30
-rw-r--r--  eval/src/vespa/eval/eval/cell_type.h | 3
-rw-r--r--  eval/src/vespa/eval/eval/function.cpp | 17
-rw-r--r--  eval/src/vespa/eval/eval/function.h | 6
-rw-r--r--  eval/src/vespa/eval/eval/interpreted_function.cpp | 35
-rw-r--r--  eval/src/vespa/eval/eval/key_gen.cpp | 117
-rw-r--r--  eval/src/vespa/eval/eval/llvm/compiled_function.cpp | 3
-rw-r--r--  eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp | 3
-rw-r--r--  eval/src/vespa/eval/eval/make_tensor_function.cpp | 9
-rw-r--r--  eval/src/vespa/eval/eval/node_tools.cpp | 109
-rw-r--r--  eval/src/vespa/eval/eval/node_types.cpp | 26
-rw-r--r--  eval/src/vespa/eval/eval/node_visitor.h | 250
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.cpp | 20
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.h | 24
-rw-r--r--  eval/src/vespa/eval/eval/tensor_nodes.cpp | 21
-rw-r--r--  eval/src/vespa/eval/eval/tensor_nodes.h | 30
-rw-r--r--  eval/src/vespa/eval/eval/tensor_spec.cpp | 22
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_spec.cpp | 2
-rw-r--r--  eval/src/vespa/eval/eval/test/reference_evaluation.cpp | 10
-rw-r--r--  eval/src/vespa/eval/eval/test/reference_operations.cpp | 46
-rw-r--r--  eval/src/vespa/eval/eval/test/reference_operations.h | 2
-rw-r--r--  eval/src/vespa/eval/eval/value_type.cpp | 68
-rw-r--r--  eval/src/vespa/eval/eval/value_type.h | 3
-rw-r--r--  eval/src/vespa/eval/instruction/CMakeLists.txt | 1
-rw-r--r--  eval/src/vespa/eval/instruction/generic_map_subspaces.cpp | 118
-rw-r--r--  eval/src/vespa/eval/instruction/generic_map_subspaces.h | 17
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 9
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java | 11
-rw-r--r--  metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java | 1
-rw-r--r--  metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java | 1
-rw-r--r--  metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java | 1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java | 7
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java | 1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java | 7
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java | 1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java | 6
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java | 10
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java | 151
-rw-r--r--  screwdriver.yaml | 21
-rwxr-xr-x  screwdriver/publish-unpublished-rpms-to-archive.sh | 82
-rwxr-xr-x  screwdriver/upload-rpm-to-cloudsmith.sh | 36
-rw-r--r--  searchcore/src/apps/proton/proton.cpp | 12
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp | 2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoresaver.cpp | 1
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp | 15
-rw-r--r--  searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp | 1
-rw-r--r--  searchlib/src/tests/query/querybuilder_test.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/common/bitvector.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/common/bitvector.h | 76
-rw-r--r--  searchlib/src/vespa/searchlib/parsequery/parse.h | 2
-rw-r--r--  storage/src/tests/distributor/mergeoperationtest.cpp | 71
-rw-r--r--  storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp | 26
-rw-r--r--  storage/src/tests/storageserver/mergethrottlertest.cpp | 247
-rw-r--r--  storage/src/tests/storageserver/service_layer_error_listener_test.cpp | 3
-rw-r--r--  storage/src/vespa/storage/config/stor-server.def | 32
-rw-r--r--  storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp | 40
-rw-r--r--  storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h | 5
-rw-r--r--  storage/src/vespa/storage/storageserver/mergethrottler.cpp | 94
-rw-r--r--  storage/src/vespa/storage/storageserver/mergethrottler.h | 85
-rw-r--r--  storage/src/vespa/storage/storageserver/servicelayernode.cpp | 6
-rw-r--r--  storage/src/vespa/storage/storageserver/servicelayernode.h | 5
-rw-r--r--  storage/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto | 13
-rw-r--r--  storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp | 2
-rw-r--r--  storage/src/vespa/storageapi/message/bucket.cpp | 4
-rw-r--r--  storage/src/vespa/storageapi/message/bucket.h | 7
-rw-r--r--  storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp | 2
-rw-r--r--  storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp | 6
-rw-r--r--  storageserver/src/vespa/storageserver/app/servicelayerprocess.h | 6
-rw-r--r--  vespalib/src/tests/net/tls/openssl_impl/openssl_impl_test.cpp | 47
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenode.h | 82
-rw-r--r--  vespalib/src/vespa/vespalib/btree/btreenode.hpp | 101
130 files changed, 2658 insertions(+), 906 deletions(-)
diff --git a/_config.yml b/_config.yml
new file mode 100644
index 00000000000..e0eed149f70
--- /dev/null
+++ b/_config.yml
@@ -0,0 +1,2 @@
+# Exclude from build for linkcheck job
+exclude: ["airlift-zstd"]
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index b812a8b8e37..7341cdc6132 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -24,9 +24,9 @@
chalk "^2.4.2"
"@babel/compat-data@^7.22.9":
- version "7.22.20"
- resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.20.tgz#8df6e96661209623f1975d66c35ffca66f3306d0"
- integrity sha512-BQYjKbpXjoXwFW5jGqiizJQQT/aC7pFm9Ok1OWssonuguICi264lbgMzRp2ZMmRSlfkX6DsWDDcsrctK8Rwfiw==
+ version "7.23.2"
+ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.2.tgz#6a12ced93455827037bfb5ed8492820d60fc32cc"
+ integrity sha512-0S9TQMmDHlqAZ2ITT95irXKfxN9bncq8ZCoJhun3nHL/lLUxd2NKBJYoNGWH7S0hz6fRQwWlAWn/ILM0C70KZQ==
"@babel/core@^7.1.0", "@babel/core@^7.12.17":
version "7.22.9"
@@ -70,22 +70,22 @@
json5 "^2.2.3"
semver "^6.3.1"
-"@babel/core@^7.22.20":
- version "7.22.20"
- resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.20.tgz#e3d0eed84c049e2a2ae0a64d27b6a37edec385b7"
- integrity sha512-Y6jd1ahLubuYweD/zJH+vvOY141v4f9igNQAQ+MBgq9JlHS2iTsZKn1aMsb3vGccZsXI16VzTBw52Xx0DWmtnA==
+"@babel/core@^7.23.2":
+ version "7.23.2"
+ resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.23.2.tgz#ed10df0d580fff67c5f3ee70fd22e2e4c90a9f94"
+ integrity sha512-n7s51eWdaWZ3vGT2tD4T7J6eJs3QoBXydv7vkUM06Bf1cbVD2Kc2UrkzhiQwobfV7NwOnQXYL7UBJ5VPU+RGoQ==
dependencies:
"@ampproject/remapping" "^2.2.0"
"@babel/code-frame" "^7.22.13"
- "@babel/generator" "^7.22.15"
+ "@babel/generator" "^7.23.0"
"@babel/helper-compilation-targets" "^7.22.15"
- "@babel/helper-module-transforms" "^7.22.20"
- "@babel/helpers" "^7.22.15"
- "@babel/parser" "^7.22.16"
+ "@babel/helper-module-transforms" "^7.23.0"
+ "@babel/helpers" "^7.23.2"
+ "@babel/parser" "^7.23.0"
"@babel/template" "^7.22.15"
- "@babel/traverse" "^7.22.20"
- "@babel/types" "^7.22.19"
- convert-source-map "^1.7.0"
+ "@babel/traverse" "^7.23.2"
+ "@babel/types" "^7.23.0"
+ convert-source-map "^2.0.0"
debug "^4.1.0"
gensync "^1.0.0-beta.2"
json5 "^2.2.3"
@@ -188,17 +188,6 @@
"@babel/helper-split-export-declaration" "^7.22.6"
"@babel/helper-validator-identifier" "^7.22.15"
-"@babel/helper-module-transforms@^7.22.20":
- version "7.22.20"
- resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.22.20.tgz#da9edc14794babbe7386df438f3768067132f59e"
- integrity sha512-dLT7JVWIUUxKOs1UnJUBR3S70YK+pKX6AbJgB2vMIvEkZkrfJDbYDJesnPshtKV4LhDOR3Oc5YULeDizRek+5A==
- dependencies:
- "@babel/helper-environment-visitor" "^7.22.20"
- "@babel/helper-module-imports" "^7.22.15"
- "@babel/helper-simple-access" "^7.22.5"
- "@babel/helper-split-export-declaration" "^7.22.6"
- "@babel/helper-validator-identifier" "^7.22.20"
-
"@babel/helper-module-transforms@^7.22.5":
version "7.22.9"
resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.22.9.tgz#92dfcb1fbbb2bc62529024f72d942a8c97142129"
@@ -210,6 +199,17 @@
"@babel/helper-split-export-declaration" "^7.22.6"
"@babel/helper-validator-identifier" "^7.22.5"
+"@babel/helper-module-transforms@^7.23.0":
+ version "7.23.0"
+ resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz#3ec246457f6c842c0aee62a01f60739906f7047e"
+ integrity sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==
+ dependencies:
+ "@babel/helper-environment-visitor" "^7.22.20"
+ "@babel/helper-module-imports" "^7.22.15"
+ "@babel/helper-simple-access" "^7.22.5"
+ "@babel/helper-split-export-declaration" "^7.22.6"
+ "@babel/helper-validator-identifier" "^7.22.20"
+
"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.8.0":
version "7.22.5"
resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz#dd7ee3735e8a313b9f7b05a773d892e88e6d7295"
@@ -262,6 +262,15 @@
"@babel/traverse" "^7.22.11"
"@babel/types" "^7.22.11"
+"@babel/helpers@^7.23.2":
+ version "7.23.2"
+ resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.23.2.tgz#2832549a6e37d484286e15ba36a5330483cac767"
+ integrity sha512-lzchcp8SjTSVe/fPmLwtWVBFC7+Tbn8LGHDVfDp9JGxpAY5opSaEFgt8UQvrnECWOTdji2mOWMz1rOhkHscmGQ==
+ dependencies:
+ "@babel/template" "^7.22.15"
+ "@babel/traverse" "^7.23.2"
+ "@babel/types" "^7.23.0"
+
"@babel/highlight@^7.22.13":
version "7.22.20"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.20.tgz#4ca92b71d80554b01427815e06f2df965b9c1f54"
@@ -271,7 +280,12 @@
chalk "^2.4.2"
js-tokens "^4.0.0"
-"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.22.15", "@babel/parser@^7.22.16":
+"@babel/parser@^7.1.0", "@babel/parser@^7.20.7", "@babel/parser@^7.22.15", "@babel/parser@^7.23.0":
+ version "7.23.0"
+ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.0.tgz#da950e622420bf96ca0d0f2909cdddac3acd8719"
+ integrity sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==
+
+"@babel/parser@^7.14.7", "@babel/parser@^7.22.16":
version "7.22.16"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.16.tgz#180aead7f247305cce6551bea2720934e2fa2c95"
integrity sha512-+gPfKv8UWeKKeJTUxe59+OobVcrYHETCsORl61EmSkmgymguYk/X5bp7GuUIXaFsc6y++v8ZxPsLSSuujqDphA==
@@ -281,11 +295,6 @@
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.13.tgz#23fb17892b2be7afef94f573031c2f4b42839a2b"
integrity sha512-3l6+4YOvc9wx7VlCSw4yQfcBo01ECA8TicQfbnCPuCEpRQrf+gTUyGdxNw+pyTUyywp6JRD1w0YQs9TpBXYlkw==
-"@babel/parser@^7.23.0":
- version "7.23.0"
- resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.0.tgz#da950e622420bf96ca0d0f2909cdddac3acd8719"
- integrity sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==
-
"@babel/plugin-syntax-async-generators@^7.8.4":
version "7.8.4"
resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d"
@@ -430,7 +439,7 @@
"@babel/parser" "^7.22.15"
"@babel/types" "^7.22.15"
-"@babel/traverse@^7.22.11", "@babel/traverse@^7.22.15", "@babel/traverse@^7.22.17", "@babel/traverse@^7.22.20", "@babel/traverse@^7.22.8":
+"@babel/traverse@^7.22.11", "@babel/traverse@^7.22.15", "@babel/traverse@^7.22.17", "@babel/traverse@^7.22.8", "@babel/traverse@^7.23.2":
version "7.23.2"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.23.2.tgz#329c7a06735e144a506bdb2cad0268b7f46f4ad8"
integrity sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==
@@ -446,13 +455,13 @@
debug "^4.1.0"
globals "^11.1.0"
-"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.15", "@babel/types@^7.22.17", "@babel/types@^7.22.19", "@babel/types@^7.22.5":
- version "7.22.19"
- resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.19.tgz#7425343253556916e440e662bb221a93ddb75684"
- integrity sha512-P7LAw/LbojPzkgp5oznjE6tQEIWbp4PkkfrZDINTro9zgBRtI324/EYsiSI7lhPbpIQ+DCeR2NNmMWANGGfZsg==
+"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0":
+ version "7.23.0"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.23.0.tgz#8c1f020c9df0e737e4e247c0619f58c68458aaeb"
+ integrity sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==
dependencies:
"@babel/helper-string-parser" "^7.22.5"
- "@babel/helper-validator-identifier" "^7.22.19"
+ "@babel/helper-validator-identifier" "^7.22.20"
to-fast-properties "^2.0.0"
"@babel/types@^7.22.10", "@babel/types@^7.22.11", "@babel/types@^7.3.3":
@@ -464,13 +473,13 @@
"@babel/helper-validator-identifier" "^7.22.15"
to-fast-properties "^2.0.0"
-"@babel/types@^7.23.0":
- version "7.23.0"
- resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.23.0.tgz#8c1f020c9df0e737e4e247c0619f58c68458aaeb"
- integrity sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==
+"@babel/types@^7.22.17":
+ version "7.22.19"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.19.tgz#7425343253556916e440e662bb221a93ddb75684"
+ integrity sha512-P7LAw/LbojPzkgp5oznjE6tQEIWbp4PkkfrZDINTro9zgBRtI324/EYsiSI7lhPbpIQ+DCeR2NNmMWANGGfZsg==
dependencies:
"@babel/helper-string-parser" "^7.22.5"
- "@babel/helper-validator-identifier" "^7.22.20"
+ "@babel/helper-validator-identifier" "^7.22.19"
to-fast-properties "^2.0.0"
"@bcoe/v8-coverage@^0.2.3":
@@ -1068,7 +1077,7 @@
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32"
integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==
-"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.9":
+"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.18":
version "0.3.19"
resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz#f8a3249862f91be48d3127c3cfe992f79b4b8811"
integrity sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==
@@ -1076,6 +1085,14 @@
"@jridgewell/resolve-uri" "^3.1.0"
"@jridgewell/sourcemap-codec" "^1.4.14"
+"@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.9":
+ version "0.3.20"
+ resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.20.tgz#72e45707cf240fa6b081d0366f8265b0cd10197f"
+ integrity sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==
+ dependencies:
+ "@jridgewell/resolve-uri" "^3.1.0"
+ "@jridgewell/sourcemap-codec" "^1.4.14"
+
"@mantine/core@^6.0.0":
version "6.0.21"
resolved "https://registry.yarnpkg.com/@mantine/core/-/core-6.0.21.tgz#6e3a1b8d0f6869518a644d5f5e3d55a5db7e1e51"
@@ -1272,10 +1289,10 @@
"@types/babel__template" "*"
"@types/babel__traverse" "*"
-"@types/babel__core@^7.20.2":
- version "7.20.2"
- resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.2.tgz#215db4f4a35d710256579784a548907237728756"
- integrity sha512-pNpr1T1xLUc2l3xJKuPtsEky3ybxN3m4fJkknfIpTCTfIZCDW57oAg+EfCgIIp2rvCe0Wn++/FfodDS4YXxBwA==
+"@types/babel__core@^7.20.3":
+ version "7.20.3"
+ resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.3.tgz#d5625a50b6f18244425a1359a858c73d70340778"
+ integrity sha512-54fjTSeSHwfan8AyHWrKbfBWiEUrNTZsUwPTDSNaaP1QDQIZbeNUg3a59E9D+375MzUw/x1vx2/0F5LBz+AeYA==
dependencies:
"@babel/parser" "^7.20.7"
"@babel/types" "^7.20.7"
@@ -1284,24 +1301,24 @@
"@types/babel__traverse" "*"
"@types/babel__generator@*":
- version "7.6.5"
- resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.5.tgz#281f4764bcbbbc51fdded0f25aa587b4ce14da95"
- integrity sha512-h9yIuWbJKdOPLJTbmSpPzkF67e659PbQDba7ifWm5BJ8xTv+sDmS7rFmywkWOvXedGTivCdeGSIIX8WLcRTz8w==
+ version "7.6.6"
+ resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.6.tgz#676f89f67dc8ddaae923f70ebc5f1fa800c031a8"
+ integrity sha512-66BXMKb/sUWbMdBNdMvajU7i/44RkrA3z/Yt1c7R5xejt8qh84iU54yUWCtm0QwGJlDcf/gg4zd/x4mpLAlb/w==
dependencies:
"@babel/types" "^7.0.0"
"@types/babel__template@*":
- version "7.4.2"
- resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.4.2.tgz#843e9f1f47c957553b0c374481dc4772921d6a6b"
- integrity sha512-/AVzPICMhMOMYoSx9MoKpGDKdBRsIXMNByh1PXSZoa+v6ZoLa8xxtsT/uLQ/NJm0XVAWl/BvId4MlDeXJaeIZQ==
+ version "7.4.3"
+ resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.4.3.tgz#db9ac539a2fe05cfe9e168b24f360701bde41f5f"
+ integrity sha512-ciwyCLeuRfxboZ4isgdNZi/tkt06m8Tw6uGbBSBgWrnnZGNXiEyM27xc/PjXGQLqlZ6ylbgHMnm7ccF9tCkOeQ==
dependencies:
"@babel/parser" "^7.1.0"
"@babel/types" "^7.0.0"
"@types/babel__traverse@*":
- version "7.20.2"
- resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.2.tgz#4ddf99d95cfdd946ff35d2b65c978d9c9bf2645d"
- integrity sha512-ojlGK1Hsfce93J0+kn3H5R73elidKUaZonirN33GSmgTUMpzI/MIFfSpF3haANe3G1bEBS9/9/QEqwTzwqFsKw==
+ version "7.20.3"
+ resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.3.tgz#a971aa47441b28ef17884ff945d0551265a2d058"
+ integrity sha512-Lsh766rGEFbaxMIDH7Qa+Yha8cMVI3qAK6CHt3OR0YfxOIn5Z54iHiyDRycHrBqeIiqGa20Kpsv1cavfBKkRSw==
dependencies:
"@babel/types" "^7.20.7"
@@ -1388,14 +1405,14 @@
integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==
"@vitejs/plugin-react@^4":
- version "4.1.0"
- resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.1.0.tgz#e4f56f46fd737c5d386bb1f1ade86ba275fe09bd"
- integrity sha512-rM0SqazU9iqPUraQ2JlIvReeaxOoRj6n+PzB1C0cBzIbd8qP336nC39/R9yPi3wVcah7E7j/kdU1uCUqMEU4OQ==
+ version "4.1.1"
+ resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.1.1.tgz#a10254dc76778027407d01b6ddbca53b23852a72"
+ integrity sha512-Jie2HERK+uh27e+ORXXwEP5h0Y2lS9T2PRGbfebiHGlwzDO0dEnd2aNtOR/qjBlPb1YgxwAONeblL1xqLikLag==
dependencies:
- "@babel/core" "^7.22.20"
+ "@babel/core" "^7.23.2"
"@babel/plugin-transform-react-jsx-self" "^7.22.5"
"@babel/plugin-transform-react-jsx-source" "^7.22.5"
- "@types/babel__core" "^7.20.2"
+ "@types/babel__core" "^7.20.3"
react-refresh "^0.14.0"
acorn-jsx@^5.3.2:
@@ -1809,12 +1826,12 @@ braces@^3.0.2:
fill-range "^7.0.1"
browserslist@^4.21.9:
- version "4.21.11"
- resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.11.tgz#35f74a3e51adc4d193dcd76ea13858de7b8fecb8"
- integrity sha512-xn1UXOKUz7DjdGlg9RrUr0GGiWzI97UQJnugHtH0OLDfJB7jMgoIkYvRIEO1l9EeEERVqeqLYOcFBW9ldjypbQ==
+ version "4.22.1"
+ resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.22.1.tgz#ba91958d1a59b87dab6fed8dfbcb3da5e2e9c619"
+ integrity sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==
dependencies:
- caniuse-lite "^1.0.30001538"
- electron-to-chromium "^1.4.526"
+ caniuse-lite "^1.0.30001541"
+ electron-to-chromium "^1.4.535"
node-releases "^2.0.13"
update-browserslist-db "^1.0.13"
@@ -1876,10 +1893,10 @@ camelcase@^6.2.0:
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a"
integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==
-caniuse-lite@^1.0.30001538:
- version "1.0.30001539"
- resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001539.tgz#325a387ab1ed236df2c12dc6cd43a4fff9903a44"
- integrity sha512-hfS5tE8bnNiNvEOEkm8HElUHroYwlqMMENEzELymy77+tJ6m+gA2krtHl5hxJaj71OlpC2cHZbdSMX1/YEqEkA==
+caniuse-lite@^1.0.30001541:
+ version "1.0.30001559"
+ resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001559.tgz#95a982440d3d314c471db68d02664fb7536c5a30"
+ integrity sha512-cPiMKZgqgkg5LY3/ntGeLFUpi6tzddBNS58A4tnTgQw1zON7u2sZMU7SzOeVH4tj20++9ggL+V6FDOFMTaFFYA==
capture-exit@^2.0.0:
version "2.0.0"
@@ -2219,10 +2236,10 @@ dom-helpers@^5.0.1:
"@babel/runtime" "^7.8.7"
csstype "^3.0.2"
-electron-to-chromium@^1.4.526:
- version "1.4.528"
- resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.528.tgz#7c900fd73d9d2e8bb0dab0e301f25f0f4776ef2c"
- integrity sha512-UdREXMXzLkREF4jA8t89FQjA8WHI6ssP38PMY4/4KhXFQbtImnghh4GkCgrtiZwLKUKVD2iTVXvDVQjfomEQuA==
+electron-to-chromium@^1.4.535:
+ version "1.4.574"
+ resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.574.tgz#6de04d7c6e244e5ffcae76d2e2a33b02cab66781"
+ integrity sha512-bg1m8L0n02xRzx4LsTTMbBPiUd9yIR+74iPtS/Ao65CuXvhVZHP0ym1kSdDG3yHFDXqHQQBKujlN1AQ8qZnyFg==
emittery@^0.13.1:
version "0.13.1"
diff --git a/config-model-api/abi-spec.json b/config-model-api/abi-spec.json
index 38d9da5b9da..fab30efd00d 100644
--- a/config-model-api/abi-spec.json
+++ b/config-model-api/abi-spec.json
@@ -1286,7 +1286,8 @@
"public int contentLayerMetadataFeatureLevel()",
"public boolean dynamicHeapSize()",
"public java.lang.String unknownConfigDefinition()",
- "public int searchHandlerThreadpool()"
+ "public int searchHandlerThreadpool()",
+ "public long mergingMaxMemoryUsagePerNode()"
],
"fields" : [ ]
},
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 3fd5af98f04..833e2f020bc 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -117,6 +117,7 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"bjorncs"}) default boolean dynamicHeapSize() { return false; }
@ModelFeatureFlag(owners = {"hmusum"}) default String unknownConfigDefinition() { return "warn"; }
@ModelFeatureFlag(owners = {"hmusum"}) default int searchHandlerThreadpool() { return 2; }
+ @ModelFeatureFlag(owners = {"vekterli"}) default long mergingMaxMemoryUsagePerNode() { return -1; }
}
/** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! */
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java b/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
index b652f9e8a68..f49bb224394 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
@@ -25,12 +25,7 @@ public class Quota {
/** The max budget in dollars per hour */
private final Optional<BigDecimal> budget;
- public Quota(Optional<Integer> maxClusterSize, Optional<Integer> budget) {
- this(maxClusterSize, budget.map(BigDecimal::new), true);
- }
-
- // TODO: Remove unused argument
- private Quota(Optional<Integer> maxClusterSize, Optional<BigDecimal> budget, boolean isDecimal) {
+ public Quota(Optional<Integer> maxClusterSize, Optional<BigDecimal> budget) {
this.maxClusterSize = Objects.requireNonNull(maxClusterSize);
this.budget = Objects.requireNonNull(budget);
}
@@ -38,15 +33,15 @@ public class Quota {
public static Quota fromSlime(Inspector inspector) {
var clusterSize = SlimeUtils.optionalInteger(inspector.field("clusterSize"));
var budget = budgetFromSlime(inspector.field("budget"));
- return new Quota(clusterSize.stream().boxed().findFirst(), budget, true);
+ return new Quota(clusterSize.stream().boxed().findFirst(), budget);
}
public Quota withBudget(BigDecimal budget) {
- return new Quota(this.maxClusterSize, Optional.of(budget), true);
+ return new Quota(this.maxClusterSize, Optional.of(budget));
}
public Quota withClusterSize(int clusterSize) {
- return new Quota(Optional.of(clusterSize), this.budget, true);
+ return new Quota(Optional.of(clusterSize), this.budget);
}
public Slime toSlime() {
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 1b35460523e..1bda8a509f1 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -87,6 +87,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private List<DataplaneToken> dataplaneTokens;
private int contentLayerMetadataFeatureLevel = 0;
private boolean dynamicHeapSize = false;
+ private long mergingMaxMemoryUsagePerNode = -1;
@Override public ModelContext.FeatureFlags featureFlags() { return this; }
@Override public boolean multitenant() { return multitenant; }
@@ -146,6 +147,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public List<DataplaneToken> dataplaneTokens() { return dataplaneTokens; }
@Override public int contentLayerMetadataFeatureLevel() { return contentLayerMetadataFeatureLevel; }
@Override public boolean dynamicHeapSize() { return dynamicHeapSize; }
+ @Override public long mergingMaxMemoryUsagePerNode() { return mergingMaxMemoryUsagePerNode; }
public TestProperties sharedStringRepoNoReclaim(boolean sharedStringRepoNoReclaim) {
this.sharedStringRepoNoReclaim = sharedStringRepoNoReclaim;
@@ -383,6 +385,11 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
public TestProperties setDynamicHeapSize(boolean b) { this.dynamicHeapSize = b; return this; }
+ public TestProperties setMergingMaxMemoryUsagePerNode(long maxUsage) {
+ this.mergingMaxMemoryUsagePerNode = maxUsage;
+ return this;
+ }
+
public static class Spec implements ConfigServerSpec {
private final String hostName;
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/AddExtraFieldsToDocument.java b/config-model/src/main/java/com/yahoo/schema/processing/AddExtraFieldsToDocument.java
index 52652de81d4..67297245ff1 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/AddExtraFieldsToDocument.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/AddExtraFieldsToDocument.java
@@ -70,7 +70,7 @@ public class AddExtraFieldsToDocument extends Processor {
}
private void addSdField(Schema schema, SDDocumentType document, SDField field, boolean validate) {
- if (! field.hasIndex() && field.getAttributes().isEmpty()) {
+ if (! field.hasIndex() && field.getAttributes().isEmpty() && !field.doesSummarying()) {
return;
}
for (Attribute atr : field.getAttributes().values()) {
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/IndexingOutputs.java b/config-model/src/main/java/com/yahoo/schema/processing/IndexingOutputs.java
index 3dd2ffdb02e..071c2878ae8 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/IndexingOutputs.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/IndexingOutputs.java
@@ -137,6 +137,15 @@ public class IndexingOutputs extends Processor {
for (String fieldName : summaryFields) {
ret.add(new SummaryExpression(fieldName));
}
+ /*
+ * Write to summary field source. AddExtraFieldsToDocument processor adds the "copy"
+ * summary transform to summary fields without a corresponding explicitly declared
+ * document field (2023-11-01). Future vespa versions will stop adding document
+ * fields for those summary fields.
+ */
+ if (!summaryFields.contains(field.getName())) {
+ ret.add(new SummaryExpression(field.getName()));
+ }
} else {
throw new UnsupportedOperationException(exp.getClass().getName());
}
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/IndexingValidation.java b/config-model/src/main/java/com/yahoo/schema/processing/IndexingValidation.java
index 9a98958fca9..3f336544a99 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/IndexingValidation.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/IndexingValidation.java
@@ -131,10 +131,18 @@ public class IndexingValidation extends Processor {
} else if (exp instanceof SummaryExpression) {
SummaryField field = schema.getSummaryField(fieldName);
if (field == null) {
- throw new VerificationException(exp, "Summary field '" + fieldName + "' not found.");
+ // Use document field if summary field is not found
+ SDField sdField = schema.getConcreteField(fieldName);
+ if (sdField != null && sdField.doesSummarying()) {
+ fieldDesc = "document field";
+ fieldType = sdField.getDataType();
+ } else {
+ throw new VerificationException(exp, "Summary field '" + fieldName + "' not found.");
+ }
+ } else {
+ fieldDesc = "summary field";
+ fieldType = field.getDataType();
}
- fieldDesc = "summary field";
- fieldType = field.getDataType();
} else {
throw new UnsupportedOperationException();
}
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java b/config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java
index 7dd968c5454..8ae3ec7a3fa 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/TextMatch.java
@@ -47,13 +47,7 @@ public class TextMatch extends Processor {
}
if (fieldType != DataType.STRING) continue;
- Set<String> dynamicSummary = new TreeSet<>();
- Set<String> staticSummary = new TreeSet<>();
- new IndexingOutputs(schema, deployLogger, rankProfileRegistry, queryProfiles).findSummaryTo(schema,
- field,
- dynamicSummary,
- staticSummary);
- MyVisitor visitor = new MyVisitor(dynamicSummary);
+ MyVisitor visitor = new MyVisitor();
visitor.visit(script);
if ( ! visitor.requiresTokenize) continue;
@@ -78,23 +72,15 @@ public class TextMatch extends Processor {
private static class MyVisitor extends ExpressionVisitor {
- final Set<String> dynamicSummaryFields;
boolean requiresTokenize = false;
- MyVisitor(Set<String> dynamicSummaryFields) {
- this.dynamicSummaryFields = dynamicSummaryFields;
- }
+ MyVisitor() { }
@Override
protected void doVisit(Expression exp) {
if (exp instanceof IndexExpression) {
requiresTokenize = true;
}
- if (exp instanceof SummaryExpression &&
- dynamicSummaryFields.contains(((SummaryExpression)exp).getFieldName()))
- {
- requiresTokenize = true;
- }
}
}
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java b/config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java
index 63eee474095..34dcc9139b3 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/TypedTransformProvider.java
@@ -39,9 +39,17 @@ public abstract class TypedTransformProvider extends ValueTransformProvider {
}
else if (exp instanceof SummaryExpression) {
Field field = schema.getSummaryField(fieldName);
- if (field == null)
- throw new IllegalArgumentException("Summary field '" + fieldName + "' not found.");
- fieldType = field.getDataType();
+ if (field == null) {
+ // Use document field if summary field is not found
+ var sdField = schema.getConcreteField(fieldName);
+ if (sdField != null && sdField.doesSummarying()) {
+ fieldType = sdField.getDataType();
+ } else {
+ throw new IllegalArgumentException("Summary field '" + fieldName + "' not found.");
+ }
+ } else {
+ fieldType = field.getDataType();
+ }
}
else {
throw new UnsupportedOperationException();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
index b88ab0c5a45..1865db0ec1c 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
@@ -1,6 +1,7 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.content.storagecluster;
+import com.yahoo.config.model.api.ModelContext;
import com.yahoo.vespa.config.content.core.StorServerConfig;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
@@ -10,10 +11,10 @@ import com.yahoo.vespa.model.content.cluster.ContentCluster;
*/
public class StorServerProducer implements StorServerConfig.Producer {
public static class Builder {
- StorServerProducer build(ModelElement element) {
+ StorServerProducer build(ModelContext.Properties properties, ModelElement element) {
ModelElement tuning = element.child("tuning");
- StorServerProducer producer = new StorServerProducer(ContentCluster.getClusterId(element));
+ StorServerProducer producer = new StorServerProducer(ContentCluster.getClusterId(element), properties.featureFlags());
if (tuning == null) return producer;
ModelElement merges = tuning.child("merges");
@@ -28,6 +29,7 @@ public class StorServerProducer implements StorServerConfig.Producer {
private final String clusterName;
private Integer maxMergesPerNode;
private Integer queueSize;
+ private Long mergingMaxMemoryUsagePerNode;
private StorServerProducer setMaxMergesPerNode(Integer value) {
if (value != null) {
@@ -42,8 +44,9 @@ public class StorServerProducer implements StorServerConfig.Producer {
return this;
}
- StorServerProducer(String clusterName) {
+ StorServerProducer(String clusterName, ModelContext.FeatureFlags featureFlags) {
this.clusterName = clusterName;
+ this.mergingMaxMemoryUsagePerNode = featureFlags.mergingMaxMemoryUsagePerNode();
}
@Override
@@ -60,5 +63,10 @@ public class StorServerProducer implements StorServerConfig.Producer {
if (queueSize != null) {
builder.max_merge_queue_size(queueSize);
}
+ if (mergingMaxMemoryUsagePerNode != null) {
+ builder.merge_throttling_memory_limit(
+ new StorServerConfig.Merge_throttling_memory_limit.Builder()
+ .max_usage_bytes(mergingMaxMemoryUsagePerNode));
+ }
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
index ce2899877a7..701da93a329 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
@@ -35,7 +35,7 @@ public class StorageCluster extends TreeConfigProducer<StorageNode>
return new StorageCluster(ancestor,
ContentCluster.getClusterId(clusterElem),
new FileStorProducer.Builder().build(deployState.getProperties(), cluster, clusterElem),
- new StorServerProducer.Builder().build(clusterElem),
+ new StorServerProducer.Builder().build(deployState.getProperties(), clusterElem),
new StorVisitorProducer.Builder().build(clusterElem),
new PersistenceProducer.Builder().build(clusterElem));
}
diff --git a/config-model/src/test/derived/bolding_dynamic_summary/documenttypes.cfg b/config-model/src/test/derived/bolding_dynamic_summary/documenttypes.cfg
index f88a22d8979..3e043d3dad8 100644
--- a/config-model/src/test/derived/bolding_dynamic_summary/documenttypes.cfg
+++ b/config-model/src/test/derived/bolding_dynamic_summary/documenttypes.cfg
@@ -48,14 +48,14 @@ doctype[].idx 10015
doctype[].internalid -877171244
doctype[].inherits[].idx 10000
doctype[].contentstruct 10016
-doctype[].fieldsets{[]}.fields[] "arr_1"
-doctype[].fieldsets{[]}.fields[] "arr_2"
-doctype[].fieldsets{[]}.fields[] "arr_3"
-doctype[].fieldsets{[]}.fields[] "arr_4"
-doctype[].fieldsets{[]}.fields[] "str_1"
-doctype[].fieldsets{[]}.fields[] "str_2"
-doctype[].fieldsets{[]}.fields[] "str_3"
-doctype[].fieldsets{[]}.fields[] "str_4"
+doctype[].fieldsets{[document]}.fields[] "arr_1"
+doctype[].fieldsets{[document]}.fields[] "arr_2"
+doctype[].fieldsets{[document]}.fields[] "arr_3"
+doctype[].fieldsets{[document]}.fields[] "arr_4"
+doctype[].fieldsets{[document]}.fields[] "str_1"
+doctype[].fieldsets{[document]}.fields[] "str_2"
+doctype[].fieldsets{[document]}.fields[] "str_3"
+doctype[].fieldsets{[document]}.fields[] "str_4"
doctype[].arraytype[].idx 10017
doctype[].arraytype[].elementtype 10012
doctype[].arraytype[].internalid -1486737430
diff --git a/config-model/src/test/derived/complex/ilscripts.cfg b/config-model/src/test/derived/complex/ilscripts.cfg
index f7f6c9dd720..6074333bd24 100644
--- a/config-model/src/test/derived/complex/ilscripts.cfg
+++ b/config-model/src/test/derived/complex/ilscripts.cfg
@@ -27,7 +27,7 @@ ilscript[].content[] "clear_state | guard { input location | tokenize normalize
ilscript[].content[] "clear_state | guard { input yEaR | to_array | attribute year_arr; }"
ilscript[].content[] "clear_state | guard { input yEaR - 1900 | attribute year_sub; }"
ilscript[].content[] "clear_state | guard { input title | tokenize normalize stem:\"BEST\" | index title | summary title; }"
-ilscript[].content[] "clear_state | guard { input dyntitle | tokenize normalize stem:\"BEST\" | summary dyntitle; }"
+ilscript[].content[] "clear_state | guard { input dyntitle | summary dyntitle; }"
ilscript[].content[] "clear_state | guard { input special1 | tokenize normalize | index special1; }"
ilscript[].content[] "clear_state | guard { input special2 | tokenize normalize | index special2; }"
ilscript[].content[] "clear_state | guard { input special3 | tokenize normalize | index special3; }"
diff --git a/config-model/src/test/derived/multiplesummaries/ilscripts.cfg b/config-model/src/test/derived/multiplesummaries/ilscripts.cfg
index 6429932ab0e..514dfffac9d 100644
--- a/config-model/src/test/derived/multiplesummaries/ilscripts.cfg
+++ b/config-model/src/test/derived/multiplesummaries/ilscripts.cfg
@@ -14,13 +14,13 @@ ilscript[].docfield[] "h"
ilscript[].docfield[] "loc"
ilscript[].docfield[] "mytags"
ilscript[].content[] "clear_state | guard { input loc | to_pos | zcurve | attribute loc_pos_zcurve; }"
-ilscript[].content[] "clear_state | guard { input a | tokenize normalize stem:\"BEST\" | summary abolded2 | summary aboldeddynamic | summary adynamic2 | attribute a; }"
-ilscript[].content[] "clear_state | guard { input adynamic | tokenize normalize stem:\"BEST\" | summary adynamic | attribute adynamic; }"
-ilscript[].content[] "clear_state | guard { input abolded | tokenize normalize stem:\"BEST\" | summary abolded | attribute abolded; }"
+ilscript[].content[] "clear_state | guard { input a | summary abolded2 | summary aboldeddynamic | summary adynamic2 | summary a | attribute a; }"
+ilscript[].content[] "clear_state | guard { input adynamic | summary adynamic | attribute adynamic; }"
+ilscript[].content[] "clear_state | guard { input abolded | summary abolded | attribute abolded; }"
ilscript[].content[] "clear_state | guard { input b | summary anotherb | summary b; }"
ilscript[].content[] "clear_state | guard { input c | summary c | attribute c; }"
-ilscript[].content[] "clear_state | guard { input d | tokenize normalize stem:\"BEST\" | summary d; }"
-ilscript[].content[] "clear_state | guard { input e | tokenize normalize stem:\"BEST\" | summary dynamice | summary e; }"
+ilscript[].content[] "clear_state | guard { input d | summary d; }"
+ilscript[].content[] "clear_state | guard { input e | summary dynamice | summary e; }"
ilscript[].content[] "clear_state | guard { input f | summary f; }"
ilscript[].content[] "clear_state | guard { input g | summary g; }"
ilscript[].content[] "clear_state | guard { input h | summary h; }"
diff --git a/config-model/src/test/derived/music/ilscripts.cfg b/config-model/src/test/derived/music/ilscripts.cfg
index 7a02d836db5..ba292c4013a 100644
--- a/config-model/src/test/derived/music/ilscripts.cfg
+++ b/config-model/src/test/derived/music/ilscripts.cfg
@@ -39,7 +39,7 @@ ilscript[].docfield[] "powermetalvalue"
ilscript[].docfield[] "progvalue"
ilscript[].content[] "clear_state | guard { input hiphopvalue | split \";\" | attribute hiphopvalue_arr; }"
ilscript[].content[] "clear_state | guard { input metalvalue | split \";\" | attribute metalvalue_arr; }"
-ilscript[].content[] "clear_state | guard { input bgndata | tokenize normalize stem:\"BEST\" | summary bgndata; }"
+ilscript[].content[] "clear_state | guard { input bgndata | summary bgndata; }"
ilscript[].content[] "clear_state | guard { input sales | summary sales | attribute sales; }"
ilscript[].content[] "clear_state | guard { input pto | summary pto | attribute pto; }"
ilscript[].content[] "clear_state | guard { input keys | tokenize normalize stem:\"BEST\" | index keys; }"
@@ -66,7 +66,7 @@ ilscript[].content[] "clear_state | guard { input artist | tokenize normalize st
ilscript[].content[] "clear_state | guard { input artistspid | summary artistspid; }"
ilscript[].content[] "clear_state | guard { input title | tokenize normalize stem:\"BEST\" | summary title | index title; }"
ilscript[].content[] "clear_state | guard { input newestedition | summary newestedition | attribute newestedition; }"
-ilscript[].content[] "clear_state | guard { input bgnpto | tokenize normalize stem:\"BEST\" | summary bgnpto; }"
+ilscript[].content[] "clear_state | guard { input bgnpto | summary bgnpto; }"
ilscript[].content[] "clear_state | guard { input year | summary year | attribute year; }"
ilscript[].content[] "clear_state | guard { input did | summary did | attribute did; }"
ilscript[].content[] "clear_state | guard { input scorekey | summary scorekey; }"
diff --git a/config-model/src/test/derived/newrank/ilscripts.cfg b/config-model/src/test/derived/newrank/ilscripts.cfg
index 6986f12f62a..ec46d9acc68 100644
--- a/config-model/src/test/derived/newrank/ilscripts.cfg
+++ b/config-model/src/test/derived/newrank/ilscripts.cfg
@@ -33,7 +33,7 @@ ilscript[].docfield[] "year"
ilscript[].docfield[] "did"
ilscript[].docfield[] "scorekey"
ilscript[].docfield[] "cbid"
-ilscript[].content[] "clear_state | guard { input bgndata | tokenize normalize stem:\"BEST\" | summary bgndata; }"
+ilscript[].content[] "clear_state | guard { input bgndata | summary bgndata; }"
ilscript[].content[] "clear_state | guard { input sales | summary sales | attribute sales; }"
ilscript[].content[] "clear_state | guard { input pto | summary pto | attribute pto; }"
ilscript[].content[] "clear_state | guard { input keys | tokenize normalize stem:\"BEST\" | index keys; }"
@@ -60,7 +60,7 @@ ilscript[].content[] "clear_state | guard { input artist | tokenize normalize st
ilscript[].content[] "clear_state | guard { input artistspid | summary artistspid; }"
ilscript[].content[] "clear_state | guard { input title | tokenize normalize stem:\"BEST\" | summary title | index title; }"
ilscript[].content[] "clear_state | guard { input newestedition | summary newestedition | attribute newestedition; }"
-ilscript[].content[] "clear_state | guard { input bgnpto | tokenize normalize stem:\"BEST\" | summary bgnpto; }"
+ilscript[].content[] "clear_state | guard { input bgnpto | summary bgnpto; }"
ilscript[].content[] "clear_state | guard { input year | summary year | attribute year; }"
ilscript[].content[] "clear_state | guard { input did | summary did | attribute did; }"
ilscript[].content[] "clear_state | guard { input scorekey | summary scorekey | attribute scorekey; }"
diff --git a/config-model/src/test/java/com/yahoo/schema/AbstractSchemaTestCase.java b/config-model/src/test/java/com/yahoo/schema/AbstractSchemaTestCase.java
index 55aa437dcb7..42005efaa8d 100644
--- a/config-model/src/test/java/com/yahoo/schema/AbstractSchemaTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/AbstractSchemaTestCase.java
@@ -47,6 +47,7 @@ public abstract class AbstractSchemaTestCase {
StringBuilder b = new StringBuilder();
try (BufferedReader r = IOUtils.createReader(file)) {
int character;
+ int lastChar = -1;
boolean lastWasNewline = false;
boolean inBrackets = false;
while (-1 != (character = r.read())) {
@@ -72,8 +73,9 @@ public abstract class AbstractSchemaTestCase {
inBrackets = false;
if (! inBrackets)
b.appendCodePoint(character);
- if (character == '[')
+ if (character == '[' && lastChar != '{')
inBrackets = true;
+ lastChar = character;
}
}
return b.toString();
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/AddExtraFieldsToDocumentTest.java b/config-model/src/test/java/com/yahoo/schema/processing/AddExtraFieldsToDocumentTest.java
index 43b403c42c6..aad6df62993 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/AddExtraFieldsToDocumentTest.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/AddExtraFieldsToDocumentTest.java
@@ -49,6 +49,29 @@ public class AddExtraFieldsToDocumentTest {
assertNull(schema.getDocument().getField("my_c"));
}
+ @Test
+ public void testExtraFieldIsAddedWhenBeingASummarySource() throws ParseException {
+ var sd = """
+ search renamed {
+ document renamed {
+ field foo type string { }
+ }
+ field bar type string {
+ indexing: input foo | summary
+ summary baz { }
+ }
+ field bar2 type string {
+ indexing: input foo
+ summary baz2 { }
+ }
+ }
+ """;
+ var builder = ApplicationBuilder.createFromString(sd);
+ var schema = builder.getSchema();
+ assertNotNull(schema.getDocument().getDocumentType().getField("bar"));
+ assertNull(schema.getDocument().getDocumentType().getField("bar2"));
+ }
+
private void assertSummary(Schema schema, String dsName, String name, SummaryTransform transform, String source) {
var docsum = schema.getSummary(dsName);
var field = docsum.getSummaryField(name);
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/IndexingOutputsTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/IndexingOutputsTestCase.java
index 58956b89a07..76680f4ce9a 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/IndexingOutputsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/IndexingOutputsTestCase.java
@@ -62,4 +62,23 @@ public class IndexingOutputsTestCase {
}
}
+ @Test
+ void requireThatSummaryFieldSourceIsPopulated() throws ParseException {
+ var sd = """
+ search renamed {
+ document renamed {
+ field foo type string { }
+ }
+ field bar type string {
+ indexing: input foo | summary
+ summary baz { }
+ summary dyn_baz { dynamic }
+ }
+ }
+ """;
+ var builder = ApplicationBuilder.createFromString(sd);
+ var schema = builder.getSchema();
+ assertEquals("{ input foo | summary baz | summary dyn_baz | summary bar; }",
+ schema.getConcreteField("bar").getIndexingScript().toString());
+ }
}
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java
index 62f36a37d87..0b561563421 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/IndexingScriptRewriterTestCase.java
@@ -48,7 +48,7 @@ public class IndexingScriptRewriterTestCase extends AbstractSchemaTestCase {
void testDynamicSummaryRewriting() {
SDField field = createField("test", DataType.STRING, "{ summary }");
field.addSummaryField(createDynamicSummaryField(field, "dyn"));
- assertIndexingScript("{ input test | tokenize normalize stem:\"BEST\" | summary dyn; }", field);
+ assertIndexingScript("{ input test | summary dyn | summary test; }", field);
}
@Test
@@ -113,7 +113,7 @@ public class IndexingScriptRewriterTestCase extends AbstractSchemaTestCase {
"clear_state | guard { input chatter | tokenize normalize stem:\"BEST\" | index chatter; }",
"clear_state | guard { input description | tokenize normalize stem:\"BEST\" | summary description | summary dyndesc | index description; }",
"clear_state | guard { input exactemento_src | lowercase | tokenize normalize stem:\"BEST\" | index exactemento | summary exactemento; }",
- "clear_state | guard { input longdesc | tokenize normalize stem:\"BEST\" | summary dyndesc2 | summary dynlong | summary longdesc | summary longstat; }",
+ "clear_state | guard { input longdesc | summary dyndesc2 | summary dynlong | summary longdesc | summary longstat; }",
"clear_state | guard { input measurement | attribute measurement | summary measurement; }",
"clear_state | guard { input measurement | to_array | attribute measurement_arr; }",
"clear_state | guard { input popularity | attribute popularity; }",
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
index 0c2a26d9c1d..6b58cac3f6c 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/search/IndexingScriptChangeValidatorTest.java
@@ -116,12 +116,10 @@ public class IndexingScriptChangeValidatorTest {
}
@Test
- void requireThatSettingDynamicSummaryRequireReindexing() throws Exception {
+ void requireThatSettingDynamicSummaryIsOk() throws Exception {
new Fixture(FIELD + " { indexing: summary }",
FIELD + " { indexing: summary \n summary: dynamic }").
- assertValidation(expectedReindexingAction("summary field 'f1' transform: 'none' -> 'dynamicteaser'",
- "{ input f1 | summary f1; }",
- "{ input f1 | tokenize normalize stem:\"BEST\" | summary f1; }"));
+ assertValidation();
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
index e1c4620e9b7..e7b2c549fa5 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
@@ -185,6 +185,36 @@ public class StorageClusterTest {
}
@Test
+ void merge_throttler_memory_limit_config_has_expected_defaults() {
+ var config = configFromProperties(new TestProperties());
+ var limit = config.merge_throttling_memory_limit();
+
+ assertEquals(-1L, limit.max_usage_bytes()); // TODO change default
+ assertMergeAutoScaleConfigHasExpectedValues(limit);
+ }
+
+ void assertMergeAutoScaleConfigHasExpectedValues(StorServerConfig.Merge_throttling_memory_limit limit) {
+ assertEquals(128L*1024*1024, limit.auto_lower_bound_bytes());
+ assertEquals(2L*1024*1024*1024, limit.auto_upper_bound_bytes());
+ assertEquals(0.03, limit.auto_phys_mem_scale_factor(), 0.000001);
+ }
+
+ @Test
+ void merge_throttler_memory_limit_is_controlled_by_feature_flag() {
+ var config = configFromProperties(new TestProperties().setMergingMaxMemoryUsagePerNode(-1));
+ assertEquals(-1L, config.merge_throttling_memory_limit().max_usage_bytes());
+
+ config = configFromProperties(new TestProperties().setMergingMaxMemoryUsagePerNode(0));
+ assertEquals(0L, config.merge_throttling_memory_limit().max_usage_bytes());
+
+ config = configFromProperties(new TestProperties().setMergingMaxMemoryUsagePerNode(1_234_456_789));
+ assertEquals(1_234_456_789L, config.merge_throttling_memory_limit().max_usage_bytes());
+
+ // Feature flag should not affect the other config values
+ assertMergeAutoScaleConfigHasExpectedValues(config.merge_throttling_memory_limit());
+ }
+
+ @Test
void testVisitors() {
StorVisitorConfig.Builder builder = new StorVisitorConfig.Builder();
parse(cluster("bees",
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index 5ad8eee90e8..2564584a7df 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -209,6 +209,7 @@ public class ModelContextImpl implements ModelContext {
private final boolean dynamicHeapSize;
private final String unknownConfigDefinition;
private final int searchHandlerThreadpool;
+ private final long mergingMaxMemoryUsagePerNode;
public FeatureFlags(FlagSource source, ApplicationId appId, Version version) {
this.defaultTermwiseLimit = flagValue(source, appId, version, Flags.DEFAULT_TERM_WISE_LIMIT);
@@ -253,6 +254,7 @@ public class ModelContextImpl implements ModelContext {
this.dynamicHeapSize = flagValue(source, appId, version, Flags.DYNAMIC_HEAP_SIZE);
this.unknownConfigDefinition = flagValue(source, appId, version, Flags.UNKNOWN_CONFIG_DEFINITION);
this.searchHandlerThreadpool = flagValue(source, appId, version, Flags.SEARCH_HANDLER_THREADPOOL);
+ this.mergingMaxMemoryUsagePerNode = flagValue(source, appId, version, Flags.MERGING_MAX_MEMORY_USAGE_PER_NODE);
}
@Override public int heapSizePercentage() { return heapPercentage; }
@@ -305,6 +307,7 @@ public class ModelContextImpl implements ModelContext {
@Override public boolean dynamicHeapSize() { return dynamicHeapSize; }
@Override public String unknownConfigDefinition() { return unknownConfigDefinition; }
@Override public int searchHandlerThreadpool() { return searchHandlerThreadpool; }
+ @Override public long mergingMaxMemoryUsagePerNode() { return mergingMaxMemoryUsagePerNode; }
private static <V> V flagValue(FlagSource source, ApplicationId appId, Version vespaVersion, UnboundFlag<? extends V, ?, ?> flag) {
return flag.bindTo(source)
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java
index 44344b6d394..e45af84f6f0 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/HostedDeployNodeAllocationTest.java
@@ -26,6 +26,7 @@ import java.util.Set;
import java.util.stream.Collectors;
import static com.yahoo.vespa.config.server.deploy.DeployTester.createHostedModelFactory;
+import static java.math.BigDecimal.valueOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@ -79,7 +80,7 @@ public class HostedDeployNodeAllocationTest {
tester.deployApp("src/test/apps/hosted/", new PrepareParams.Builder()
.vespaVersion("7.3")
.containerEndpoints(endpoints)
- .quota(new Quota(Optional.of(4), Optional.of(0))));
+ .quota(new Quota(Optional.of(4), Optional.of(valueOf(0)))));
fail("Expected to get a QuotaExceededException");
} catch (QuotaExceededException e) {
assertEquals("main: The resources used cost $1.02 but your quota is $0.00: Contact support to upgrade your plan.", e.getMessage());
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
index 867a958cbe3..7fcaae05224 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
@@ -25,6 +25,7 @@ import java.util.Optional;
import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_ID_PATH;
import static com.yahoo.vespa.config.server.session.SessionData.SESSION_DATA_PATH;
import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.SESSIONSTATE_ZK_SUBPATH;
+import static java.math.BigDecimal.valueOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -144,7 +145,7 @@ public class SessionZooKeeperClientTest {
@Test
public void require_quota_written_and_parsed() {
- var quota = Optional.of(new Quota(Optional.of(23), Optional.of(32)));
+ var quota = Optional.of(new Quota(Optional.of(23), Optional.of(valueOf(32))));
var zkc = createSessionZKClient(4);
zkc.writeQuota(quota);
assertEquals(quota, zkc.readQuota());
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ConsoleUrls.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ConsoleUrls.java
index 82cddb46d9a..e741fb8d203 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ConsoleUrls.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ConsoleUrls.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.Bill;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import java.net.URI;
@@ -25,6 +26,8 @@ public class ConsoleUrls {
this.root = root.toString().replaceFirst("/$", ""); // Remove trailing slash
}
+ public ConsoleUrls(String hostname) { this(URI.create("https://" + hostname)); }
+
public String root() {
return root;
}
@@ -40,6 +43,8 @@ public class ConsoleUrls {
public String tenantBilling(TenantName t) { return "%s/tenant/%s/account/billing".formatted(root, t.value()); }
+ public String tenantBilling(TenantName t, Bill.Id id) { return "%s/bill/%s".formatted(tenantBilling(t), id.value()); }
+
public String prodApplicationOverview(TenantName tenantName, ApplicationName applicationName) {
return "%s/tenant/%s/application/%s/prod/instance".formatted(root, tenantName.value(), applicationName.value());
}
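[Note] The additions above give ConsoleUrls a hostname-only constructor and a deep link to a single bill on the tenant billing page. A usage sketch, restricted to methods visible in this diff; the Bill.Id is assumed to come from an existing bill.

    import com.yahoo.config.provision.TenantName;
    import com.yahoo.vespa.hosted.controller.api.integration.ConsoleUrls;
    import com.yahoo.vespa.hosted.controller.api.integration.billing.Bill;

    class ConsoleUrlSketch {
        // Produces e.g. https://<hostname>/tenant/<tenant>/account/billing/bill/<bill-id>
        static String billUrl(String consoleHostname, TenantName tenant, Bill.Id billId) {
            var urls = new ConsoleUrls(consoleHostname); // new convenience constructor, prepends https://
            return urls.tenantBilling(tenant, billId);   // new per-bill overload
        }
    }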
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillStatus.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillStatus.java
index 4f35b47219a..17698aff6f4 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillStatus.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillStatus.java
@@ -1,13 +1,15 @@
package com.yahoo.vespa.hosted.controller.api.integration.billing;
+import java.util.Arrays;
+
/**
* @author gjoranv
*/
public enum BillStatus {
- OPEN, // All bills start in this state. The bill can be modified and exported/synced to external systems.
- FROZEN, // Syncing to external systems is switched off. No changes can be made.
- CLOSED, // End state for a valid bill.
- VOID; // End state, indicating that the bill is not valid.
+ OPEN, // All bills start in this state. The bill can be modified and exported/synced to external systems.
+ FROZEN, // Syncing to external systems is switched off. No changes can be made.
+ SUCCESSFUL, // Final state for a valid bill.
+ VOID; // Final state, indicating that the bill is not valid.
// Legacy states, used by historical bills
private static final String LEGACY_ISSUED = "ISSUED";
@@ -24,10 +26,21 @@ public enum BillStatus {
return value;
}
+ /**
+ * Returns true if the bill is in a final state.
+ */
+ public boolean isFinal() {
+ return this == SUCCESSFUL || this == VOID;
+ }
+
public static BillStatus from(String status) {
if (LEGACY_ISSUED.equals(status) || LEGACY_EXPORTED.equals(status)) return OPEN;
if (LEGACY_CANCELED.equals(status)) return VOID;
- return Enum.valueOf(BillStatus.class, status.toUpperCase());
+
+ return Arrays.stream(values())
+ .filter(s -> s.value.equals(status))
+ .findFirst()
+ .orElseThrow(() -> new IllegalArgumentException("Unknown bill status: " + status));
}
}
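[Note] With CLOSED renamed to SUCCESSFUL and the new isFinal() helper, callers can now ask whether a bill needs further maintenance, and from() still maps the legacy string states. A small sketch using only behaviour shown in this diff.

    import com.yahoo.vespa.hosted.controller.api.integration.billing.BillStatus;

    class BillStatusSketch {
        static void examples() {
            // Legacy states used by historical bills still parse:
            assert BillStatus.from("ISSUED") == BillStatus.OPEN;

            // Only SUCCESSFUL and VOID are final; OPEN and FROZEN bills keep being maintained.
            assert !BillStatus.OPEN.isFinal();
            assert !BillStatus.FROZEN.isFinal();
            assert BillStatus.SUCCESSFUL.isFinal();
            assert BillStatus.VOID.isFinal();
        }
    }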
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingReporterMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingReporterMock.java
index 899b31da361..21efa954cb0 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingReporterMock.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/BillingReporterMock.java
@@ -16,7 +16,7 @@ public class BillingReporterMock implements BillingReporter {
private final Clock clock;
private final BillingDatabaseClient dbClient;
- private final Map<Bill.Id, String> exportedBills = new HashMap<>();
+ private final Map<Bill.Id, InvoiceUpdate> exportedBills = new HashMap<>();
public BillingReporterMock(Clock clock, BillingDatabaseClient dbClient) {
this.clock = clock;
@@ -30,28 +30,58 @@ public class BillingReporterMock implements BillingReporter {
@Override
public InvoiceUpdate maintainInvoice(CloudTenant tenant, Bill bill) {
- if (exportedBills.containsKey(bill.id())) {
+ if (! exportedBills.containsKey(bill.id())) {
+ // The bill has been exported earlier (caller's responsibility), so if it is missing here it must have been removed.
+ return InvoiceUpdate.removed(bill.id());
+ }
+ if (exportedBills.get(bill.id()).type() == InvoiceUpdate.Type.MODIFIED) {
+ // modifyInvoice() has been called -> add a marker line item
+ if (bill.status() != BillStatus.OPEN) throw new IllegalArgumentException("Bill should be OPEN");
dbClient.addLineItem(bill.tenant(), maintainedMarkerItem(), Optional.of(bill.id()));
- return ModifiableInvoiceUpdate.of(bill.id(), 1, 0, 0);
- } else {
- return FailedInvoiceUpdate.removed(bill.id());
}
+ return exportedBills.get(bill.id());
}
@Override
public String exportBill(Bill bill, String exportMethod, CloudTenant tenant) {
// Replace bill with a copy with exportedId set
var exportedId = "EXPORTED-" + bill.id().value();
- exportedBills.put(bill.id(), exportedId);
+ exportedBills.put(bill.id(), InvoiceUpdate.modifiable(bill.id(), null));
dbClient.setExportedInvoiceId(bill.id(), exportedId);
return exportedId;
}
+ public void modifyInvoice(Bill.Id billId) {
+ ensureExported(billId);
+ var itemsUpdate = new InvoiceUpdate.ItemsUpdate(1, 0, 0);
+ exportedBills.put(billId, InvoiceUpdate.modifiable(billId, itemsUpdate));
+ }
+
+ public void freezeInvoice(Bill.Id billId) {
+ ensureExported(billId);
+ exportedBills.put(billId, InvoiceUpdate.unmodifiable(billId));
+ }
+
+ public void payInvoice(Bill.Id billId) {
+ ensureExported(billId);
+ exportedBills.put(billId, InvoiceUpdate.paid(billId));
+ }
+
+ public void voidInvoice(Bill.Id billId) {
+ ensureExported(billId);
+ exportedBills.put(billId, InvoiceUpdate.voided(billId));
+ }
+
// Emulates deleting a bill in the external system.
- public void deleteExportedBill(Bill.Id billId) {
+ public void deleteInvoice(Bill.Id billId) {
+ ensureExported(billId);
exportedBills.remove(billId);
}
+ private void ensureExported(Bill.Id billId) {
+ if (! exportedBills.containsKey(billId)) throw new IllegalArgumentException("Bill not exported");
+ }
+
private static Bill.LineItem maintainedMarkerItem() {
return new Bill.LineItem("maintained", "", BigDecimal.valueOf(0.0), "", "", ZonedDateTime.now());
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/FailedInvoiceUpdate.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/FailedInvoiceUpdate.java
deleted file mode 100644
index 9a93c7b05ec..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/FailedInvoiceUpdate.java
+++ /dev/null
@@ -1,28 +0,0 @@
-package com.yahoo.vespa.hosted.controller.api.integration.billing;
-
-/**
- * @author gjoranv
- */
-public class FailedInvoiceUpdate extends InvoiceUpdate {
-
- public enum Reason {
- UNMODIFIABLE,
- REMOVED
- }
-
- public final Reason reason;
-
- public FailedInvoiceUpdate(Bill.Id billId, Reason reason) {
- super(billId, ItemsUpdate.empty());
- this.reason = reason;
- }
-
- public static FailedInvoiceUpdate unmodifiable(Bill.Id billId) {
- return new FailedInvoiceUpdate(billId, Reason.UNMODIFIABLE);
- }
-
- public static FailedInvoiceUpdate removed(Bill.Id billId) {
- return new FailedInvoiceUpdate(billId, Reason.REMOVED);
- }
-
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/InvoiceUpdate.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/InvoiceUpdate.java
index bb76834a483..4dfd9ef1fee 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/InvoiceUpdate.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/InvoiceUpdate.java
@@ -1,39 +1,54 @@
package com.yahoo.vespa.hosted.controller.api.integration.billing;
-import java.util.Objects;
+import java.util.Optional;
/**
- * Helper to track changes to an invoice.
+ * Helper to track changes to an invoice made by the controller. This should be independent
+ * of which external system is being used.
*
* @author gjoranv
*/
-public abstract class InvoiceUpdate {
+public record InvoiceUpdate(Bill.Id billId, Type type, Optional<ItemsUpdate> itemsUpdate) {
+
+ public enum Type {
+ UNMODIFIED, // The invoice was modifiable, but not modified by us
+ MODIFIED, // The invoice was modified by us
+ UNMODIFIABLE, // The invoice was unmodifiable in the external system
+ REMOVED, // Removed from the external system, presumably for a valid reason
+ PAID, // Reported paid from the external system
+ VOIDED // Voided in the external system
+ }
+
+ public InvoiceUpdate {
+ if (type != Type.MODIFIED && itemsUpdate.isPresent())
+ throw new IllegalArgumentException("Items update is only allowed for modified invoices. Update type was " + type);
+ }
- final Bill.Id billId;
- final ItemsUpdate itemsUpdate;
+ public static InvoiceUpdate modifiable(Bill.Id billId, ItemsUpdate itemsUpdate) {
+ if (itemsUpdate == null || itemsUpdate.isEmpty()) {
+ return new InvoiceUpdate(billId, Type.UNMODIFIED, Optional.empty());
+ } else {
+ return new InvoiceUpdate(billId, Type.MODIFIED, Optional.of(itemsUpdate));
+ }
+ }
- InvoiceUpdate(Bill.Id billId, ItemsUpdate itemsUpdate) {
- this.billId = billId;
- this.itemsUpdate = itemsUpdate;
+ public static InvoiceUpdate unmodifiable(Bill.Id billId) {
+ return new InvoiceUpdate(billId, Type.UNMODIFIABLE, Optional.empty());
}
- public Bill.Id billId() {
- return billId;
+ public static InvoiceUpdate removed(Bill.Id billId) {
+ return new InvoiceUpdate(billId, Type.REMOVED, Optional.empty());
}
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- InvoiceUpdate that = (InvoiceUpdate) o;
- return Objects.equals(billId, that.billId) && Objects.equals(itemsUpdate, that.itemsUpdate);
+ public static InvoiceUpdate paid(Bill.Id billId) {
+ return new InvoiceUpdate(billId, Type.PAID, Optional.empty());
}
- @Override
- public int hashCode() {
- return Objects.hash(billId, itemsUpdate);
+ public static InvoiceUpdate voided(Bill.Id billId) {
+ return new InvoiceUpdate(billId, Type.VOIDED, Optional.empty());
}
+
public record ItemsUpdate(int itemsAdded, int itemsRemoved, int itemsModified) {
public boolean isEmpty() {
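[Note] InvoiceUpdate is now one record with a Type enum instead of a class hierarchy: modifiable() picks UNMODIFIED or MODIFIED depending on whether the items update is empty, and the compact constructor rejects an items update on any other type. A brief sketch of the factory methods, with the bill id assumed to come from an existing bill.

    import com.yahoo.vespa.hosted.controller.api.integration.billing.Bill;
    import com.yahoo.vespa.hosted.controller.api.integration.billing.InvoiceUpdate;

    class InvoiceUpdateSketch {
        static void examples(Bill.Id billId) {
            var unchanged = InvoiceUpdate.modifiable(billId, null);                                   // Type.UNMODIFIED
            var modified  = InvoiceUpdate.modifiable(billId, new InvoiceUpdate.ItemsUpdate(1, 0, 0)); // Type.MODIFIED
            var frozen    = InvoiceUpdate.unmodifiable(billId);                                       // Type.UNMODIFIABLE
            var paid      = InvoiceUpdate.paid(billId);                                               // Type.PAID

            assert modified.itemsUpdate().isPresent();
            assert unchanged.itemsUpdate().isEmpty();
            assert frozen.type() == InvoiceUpdate.Type.UNMODIFIABLE && paid.type() == InvoiceUpdate.Type.PAID;
        }
    }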
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/ModifiableInvoiceUpdate.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/ModifiableInvoiceUpdate.java
deleted file mode 100644
index 75cce564fc7..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/ModifiableInvoiceUpdate.java
+++ /dev/null
@@ -1,28 +0,0 @@
-package com.yahoo.vespa.hosted.controller.api.integration.billing;
-
-/**
- * @author gjoranv
- */
-public class ModifiableInvoiceUpdate extends InvoiceUpdate {
-
- public ModifiableInvoiceUpdate(Bill.Id billId, ItemsUpdate itemsUpdate) {
- super(billId, itemsUpdate);
- }
-
- public ItemsUpdate itemsUpdate() {
- return itemsUpdate;
- }
-
- public boolean isEmpty() {
- return itemsUpdate.isEmpty();
- }
-
- public static ModifiableInvoiceUpdate of(Bill.Id billId, int itemsAdded, int itemsRemoved, int itemsModified) {
- return new ModifiableInvoiceUpdate(billId, new ItemsUpdate(itemsAdded, itemsRemoved, itemsModified));
- }
-
- @Override
- public boolean equals(Object o) {
- return super.equals(o);
- }
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistory.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistory.java
index f0c7f806c8c..788995555a8 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistory.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistory.java
@@ -53,7 +53,7 @@ public class StatusHistory {
return switch(current) {
case OPEN -> true;
case FROZEN -> newStatus != BillStatus.OPEN; // This could be subject to change.
- case CLOSED -> newStatus == BillStatus.CLOSED;
+ case SUCCESSFUL -> newStatus == BillStatus.SUCCESSFUL;
case VOID -> newStatus == BillStatus.VOID;
};
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
index 54f53d64f76..9a3ea71660b 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
@@ -253,7 +253,9 @@ enum PathGroup {
emailVerification("/user/v1/email/verify"),
/** Path used for dataplane token */
- dataplaneToken(Matcher.tenant,"/application/v4/tenant/{tenant}/token", "/application/v4/tenant/{tenant}/token/{ignored}");
+ dataplaneToken(Matcher.tenant,"/application/v4/tenant/{tenant}/token", "/application/v4/tenant/{tenant}/token/{ignored}"),
+
+ termsOfService(Matcher.tenant, "/application/v4/tenant/{tenant}/terms-of-service");
final List<String> pathSpecs;
final List<Matcher> matchers;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
index d1a8b2ef0c3..0468be5f30c 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
@@ -191,6 +191,10 @@ enum Policy {
dataplaneToken(Privilege.grant(Action.all())
.on(PathGroup.dataplaneToken)
+ .in(SystemName.PublicCd, SystemName.Public)),
+
+ termsOfService(Privilege.grant(Action.create, Action.delete)
+ .on(PathGroup.termsOfService)
.in(SystemName.PublicCd, SystemName.Public));
private final Set<Privilege> privileges;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
index 31c8560c908..0b5359ac826 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
@@ -69,7 +69,8 @@ public enum RoleDefinition {
Policy.applicationManager,
Policy.keyRevokal,
Policy.billingInformationRead,
- Policy.accessRequests
+ Policy.accessRequests,
+ Policy.termsOfService
),
/** Headless — the application specific role identified by deployment keys for production */
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TenantBilling.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TenantBilling.java
index 6e3b26661e5..1db84240fe2 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TenantBilling.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TenantBilling.java
@@ -13,17 +13,21 @@ public class TenantBilling {
private final TaxId taxId;
private final PurchaseOrder purchaseOrder;
private final Email invoiceEmail;
+ private final TermsOfServiceApproval tosApproval;
- public TenantBilling(TenantContact contact, TenantAddress address, TaxId taxId, PurchaseOrder purchaseOrder, Email invoiceEmail) {
+ public TenantBilling(TenantContact contact, TenantAddress address, TaxId taxId, PurchaseOrder purchaseOrder,
+ Email invoiceEmail, TermsOfServiceApproval tosApproval) {
this.contact = Objects.requireNonNull(contact);
this.address = Objects.requireNonNull(address);
this.taxId = Objects.requireNonNull(taxId);
this.purchaseOrder = Objects.requireNonNull(purchaseOrder);
this.invoiceEmail = Objects.requireNonNull(invoiceEmail);
+ this.tosApproval = Objects.requireNonNull(tosApproval);
}
public static TenantBilling empty() {
- return new TenantBilling(TenantContact.empty(), TenantAddress.empty(), TaxId.empty(), PurchaseOrder.empty(), Email.empty());
+ return new TenantBilling(TenantContact.empty(), TenantAddress.empty(), TaxId.empty(), PurchaseOrder.empty(),
+ Email.empty(), TermsOfServiceApproval.empty());
}
public TenantContact contact() {
@@ -46,24 +50,30 @@ public class TenantBilling {
return invoiceEmail;
}
+ public TermsOfServiceApproval getToSApproval() { return tosApproval; }
+
public TenantBilling withContact(TenantContact updatedContact) {
- return new TenantBilling(updatedContact, this.address, this.taxId, this.purchaseOrder, this.invoiceEmail);
+ return new TenantBilling(updatedContact, this.address, this.taxId, this.purchaseOrder, this.invoiceEmail, tosApproval);
}
public TenantBilling withAddress(TenantAddress updatedAddress) {
- return new TenantBilling(this.contact, updatedAddress, this.taxId, this.purchaseOrder, this.invoiceEmail);
+ return new TenantBilling(this.contact, updatedAddress, this.taxId, this.purchaseOrder, this.invoiceEmail, tosApproval);
}
public TenantBilling withTaxId(TaxId updatedTaxId) {
- return new TenantBilling(this.contact, this.address, updatedTaxId, this.purchaseOrder, this.invoiceEmail);
+ return new TenantBilling(this.contact, this.address, updatedTaxId, this.purchaseOrder, this.invoiceEmail, tosApproval);
}
public TenantBilling withPurchaseOrder(PurchaseOrder updatedPurchaseOrder) {
- return new TenantBilling(this.contact, this.address, this.taxId, updatedPurchaseOrder, this.invoiceEmail);
+ return new TenantBilling(this.contact, this.address, this.taxId, updatedPurchaseOrder, this.invoiceEmail, tosApproval);
}
public TenantBilling withInvoiceEmail(Email updatedInvoiceEmail) {
- return new TenantBilling(this.contact, this.address, this.taxId, this.purchaseOrder, updatedInvoiceEmail);
+ return new TenantBilling(this.contact, this.address, this.taxId, this.purchaseOrder, updatedInvoiceEmail, tosApproval);
+ }
+
+ public TenantBilling withToSApproval(TermsOfServiceApproval approval) {
+ return new TenantBilling(contact, address, taxId, purchaseOrder, invoiceEmail, approval);
}
public boolean isEmpty() {
@@ -75,16 +85,14 @@ public class TenantBilling {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TenantBilling that = (TenantBilling) o;
- return Objects.equals(contact, that.contact) &&
- Objects.equals(address, that.address) &&
- Objects.equals(taxId, that.taxId) &&
- Objects.equals(purchaseOrder, that.purchaseOrder) &&
- Objects.equals(invoiceEmail, that.invoiceEmail);
+ return Objects.equals(contact, that.contact) && Objects.equals(address, that.address)
+ && Objects.equals(taxId, that.taxId) && Objects.equals(purchaseOrder, that.purchaseOrder)
+ && Objects.equals(invoiceEmail, that.invoiceEmail) && Objects.equals(tosApproval, that.tosApproval);
}
@Override
public int hashCode() {
- return Objects.hash(contact, address, taxId, purchaseOrder, invoiceEmail);
+ return Objects.hash(contact, address, taxId, purchaseOrder, invoiceEmail, tosApproval);
}
@Override
@@ -92,9 +100,10 @@ public class TenantBilling {
return "TenantBilling{" +
"contact=" + contact +
", address=" + address +
- ", taxId='" + taxId + '\'' +
- ", purchaseOrder='" + purchaseOrder + '\'' +
+ ", taxId=" + taxId +
+ ", purchaseOrder=" + purchaseOrder +
", invoiceEmail=" + invoiceEmail +
+ ", tosApproval=" + tosApproval +
'}';
}
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TermsOfServiceApproval.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TermsOfServiceApproval.java
new file mode 100644
index 00000000000..61fba17c473
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/tenant/TermsOfServiceApproval.java
@@ -0,0 +1,30 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.tenant;
+
+import com.yahoo.vespa.hosted.controller.api.role.SimplePrincipal;
+
+import java.time.Instant;
+import java.util.Optional;
+
+/**
+ * @author bjorncs
+ */
+public record TermsOfServiceApproval(Instant approvedAt, Optional<SimplePrincipal> approvedBy) {
+
+ public TermsOfServiceApproval(Instant at, SimplePrincipal by) { this(at, Optional.of(by)); }
+
+ public TermsOfServiceApproval(String at, String by) {
+ this(at.isBlank() ? Instant.EPOCH : Instant.parse(at), by.isBlank() ? Optional.empty() : Optional.of(new SimplePrincipal(by)));
+ }
+
+ public TermsOfServiceApproval {
+ if (approvedBy.isEmpty() && !Instant.EPOCH.equals(approvedAt))
+ throw new IllegalArgumentException("Missing approver");
+ }
+
+ public static TermsOfServiceApproval empty() { return new TermsOfServiceApproval(Instant.EPOCH, Optional.empty()); }
+
+ public boolean hasApproved() { return approvedBy.isPresent(); }
+ public boolean isEmpty() { return approvedBy.isEmpty() && Instant.EPOCH.equals(approvedAt); }
+}
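[Note] The record rejects an approval timestamp without an approver, and its (String, String) constructor matches the "at"/"by" fields written by the tenant serializer below. A short sketch combining it with the new TenantBilling.withToSApproval(), using only constructors and accessors visible in this diff.

    import com.yahoo.vespa.hosted.controller.api.role.SimplePrincipal;
    import com.yahoo.vespa.hosted.controller.tenant.TenantBilling;
    import com.yahoo.vespa.hosted.controller.tenant.TermsOfServiceApproval;

    import java.time.Instant;

    class ToSApprovalSketch {
        static TenantBilling examples() {
            var approved = new TermsOfServiceApproval(Instant.parse("2020-09-13T12:26:40Z"),
                                                      new SimplePrincipal("user@test"));
            assert approved.hasApproved() && !approved.isEmpty();

            assert new TermsOfServiceApproval("", "").isEmpty();      // blank fields behave like empty()
            // new TermsOfServiceApproval("2020-09-13T12:26:40Z", "") // would throw: an approver is required
            //                                                        // whenever the timestamp is set

            return TenantBilling.empty().withToSApproval(approved);   // attach it to the billing info
        }
    }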
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistoryTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistoryTest.java
index 46a4c7e199c..8318a0449ea 100644
--- a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistoryTest.java
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/billing/StatusHistoryTest.java
@@ -21,7 +21,7 @@ public class StatusHistoryTest {
void open_can_change_to_any_status() {
var history = StatusHistory.open(clock);
history.checkValidTransition(BillStatus.FROZEN);
- history.checkValidTransition(BillStatus.CLOSED);
+ history.checkValidTransition(BillStatus.SUCCESSFUL);
history.checkValidTransition(BillStatus.VOID);
}
@@ -29,7 +29,7 @@ public class StatusHistoryTest {
void frozen_cannot_change_to_open() {
var history = new StatusHistory(historyWith(BillStatus.FROZEN));
- history.checkValidTransition(BillStatus.CLOSED);
+ history.checkValidTransition(BillStatus.SUCCESSFUL);
history.checkValidTransition(BillStatus.VOID);
assertThrows(IllegalArgumentException.class, () -> history.checkValidTransition(BillStatus.OPEN));
@@ -37,7 +37,7 @@ public class StatusHistoryTest {
@Test
void closed_cannot_change() {
- var history = new StatusHistory(historyWith(BillStatus.CLOSED));
+ var history = new StatusHistory(historyWith(BillStatus.SUCCESSFUL));
assertThrows(IllegalArgumentException.class, () -> history.checkValidTransition(BillStatus.OPEN));
assertThrows(IllegalArgumentException.class, () -> history.checkValidTransition(BillStatus.FROZEN));
@@ -50,7 +50,7 @@ public class StatusHistoryTest {
assertThrows(IllegalArgumentException.class, () -> history.checkValidTransition(BillStatus.OPEN));
assertThrows(IllegalArgumentException.class, () -> history.checkValidTransition(BillStatus.FROZEN));
- assertThrows(IllegalArgumentException.class, () -> history.checkValidTransition(BillStatus.CLOSED));
+ assertThrows(IllegalArgumentException.class, () -> history.checkValidTransition(BillStatus.SUCCESSFUL));
}
@Test
@@ -61,8 +61,8 @@ public class StatusHistoryTest {
history = new StatusHistory(historyWith(BillStatus.FROZEN));
history.checkValidTransition(BillStatus.FROZEN);
- history = new StatusHistory(historyWith(BillStatus.CLOSED));
- history.checkValidTransition(BillStatus.CLOSED);
+ history = new StatusHistory(historyWith(BillStatus.SUCCESSFUL));
+ history.checkValidTransition(BillStatus.SUCCESSFUL);
history = new StatusHistory(historyWith(BillStatus.VOID));
history.checkValidTransition(BillStatus.VOID);
@@ -71,12 +71,12 @@ public class StatusHistoryTest {
@Test
void it_validates_status_history_in_constructor() {
assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.FROZEN, BillStatus.OPEN)));
- assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.CLOSED, BillStatus.OPEN)));
- assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.CLOSED, BillStatus.FROZEN)));
- assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.CLOSED, BillStatus.VOID)));
+ assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.SUCCESSFUL, BillStatus.OPEN)));
+ assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.SUCCESSFUL, BillStatus.FROZEN)));
+ assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.SUCCESSFUL, BillStatus.VOID)));
assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.VOID, BillStatus.OPEN)));
assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.VOID, BillStatus.FROZEN)));
- assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.VOID, BillStatus.CLOSED)));
+ assertThrows(IllegalArgumentException.class, () -> new StatusHistory(historyWith(BillStatus.VOID, BillStatus.SUCCESSFUL)));
}
private SortedMap<ZonedDateTime, BillStatus> historyWith(BillStatus... statuses) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainer.java
index 5c37e0e4d0b..7434fce31bf 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainer.java
@@ -10,9 +10,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.billing.BillStatus;
import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingController;
import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingDatabaseClient;
import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingReporter;
-import com.yahoo.vespa.hosted.controller.api.integration.billing.FailedInvoiceUpdate;
import com.yahoo.vespa.hosted.controller.api.integration.billing.InvoiceUpdate;
-import com.yahoo.vespa.hosted.controller.api.integration.billing.ModifiableInvoiceUpdate;
import com.yahoo.vespa.hosted.controller.api.integration.billing.Plan;
import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanRegistry;
import com.yahoo.vespa.hosted.controller.tenant.CloudTenant;
@@ -73,18 +71,35 @@ public class BillingReportMaintainer extends ControllerMaintainer {
var tenants = cloudTenants();
var billsNeedingMaintenance = databaseClient.readBills().stream()
.filter(bill -> bill.getExportedId().isPresent())
- .filter(exported -> exported.status() == BillStatus.OPEN)
+ .filter(exported -> ! exported.status().isFinal())
.toList();
for (var bill : billsNeedingMaintenance) {
var exportedId = bill.getExportedId().orElseThrow();
var update = reporter.maintainInvoice(tenants.get(bill.tenant()), bill);
- if (update instanceof ModifiableInvoiceUpdate modifiable && ! modifiable.isEmpty()) {
- log.fine(invoiceMessage(bill.id(), exportedId) + " was updated with " + modifiable.itemsUpdate());
- } else if (update instanceof FailedInvoiceUpdate failed && failed.reason == FailedInvoiceUpdate.Reason.REMOVED) {
- log.fine(invoiceMessage(bill.id(), exportedId) + " has been deleted in the external system");
- // Reset the exportedId to null, so that we don't maintain it again
- databaseClient.setExportedInvoiceId(bill.id(), null);
+ switch (update.type()) {
+ case UNMODIFIED -> log.finer(() -> invoiceMessage(bill.id(), exportedId) + " was not modified");
+ case MODIFIED -> log.fine(invoiceMessage(bill.id(), exportedId) + " was updated with " + update.itemsUpdate().get());
+ case UNMODIFIABLE -> {
+ // This check is needed to avoid setting the status multiple times
+ if (bill.status() != BillStatus.FROZEN) {
+ log.fine(() -> invoiceMessage(bill.id(), exportedId) + " is now unmodifiable");
+ databaseClient.setStatus(bill.id(), "system", BillStatus.FROZEN);
+ }
+ }
+ case REMOVED -> {
+ log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been deleted in the external system");
+ // Reset the exportedId to null, so that we don't maintain it again
+ databaseClient.setExportedInvoiceId(bill.id(), null);
+ }
+ case PAID -> {
+ log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been paid in the external system");
+ databaseClient.setStatus(bill.id(), "system", BillStatus.SUCCESSFUL);
+ }
+ case VOIDED -> {
+ log.fine(() -> invoiceMessage(bill.id(), exportedId) + " has been voided in the external system");
+ databaseClient.setStatus(bill.id(), "system", BillStatus.VOID);
+ }
}
updates.add(update);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java
index 7801efe504b..961925cf620 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java
@@ -36,6 +36,7 @@ import com.yahoo.vespa.hosted.controller.tenant.TenantBilling;
import com.yahoo.vespa.hosted.controller.tenant.TenantContact;
import com.yahoo.vespa.hosted.controller.tenant.TenantContacts;
import com.yahoo.vespa.hosted.controller.tenant.TenantInfo;
+import com.yahoo.vespa.hosted.controller.tenant.TermsOfServiceApproval;
import java.net.URI;
import java.security.Principal;
@@ -101,6 +102,9 @@ public class TenantSerializer {
private static final String taxIdCodeField = "code";
private static final String purchaseOrderField = "purchaseOrder";
private static final String invoiceEmailField = "invoiceEmail";
+ private static final String tosApprovalField = "tosApproval";
+ private static final String tosApprovalAtField = "at";
+ private static final String tosApprovalByField = "by";
private static final String awsIdField = "awsId";
private static final String roleField = "role";
@@ -292,6 +296,7 @@ public class TenantSerializer {
private TenantBilling tenantInfoBillingContactFromSlime(Inspector billingObject) {
var taxIdInspector = billingObject.field(taxIdField);
var taxId = switch (taxIdInspector.type()) {
+ // TODO(bjorncs, 2023-11-02): Remove legacy tax id format
case STRING -> TaxId.legacy(taxIdInspector.asString());
case OBJECT -> {
var taxIdCountry = taxIdInspector.field(taxIdCountryField).asString();
@@ -304,6 +309,13 @@ public class TenantSerializer {
};
var purchaseOrder = new PurchaseOrder(billingObject.field(purchaseOrderField).asString());
var invoiceEmail = new Email(billingObject.field(invoiceEmailField).asString(), false);
+ var tosApprovalInspector = billingObject.field(tosApprovalField);
+ var tosApproval = switch (tosApprovalInspector.type()) {
+ case OBJECT -> new TermsOfServiceApproval(tosApprovalInspector.field(tosApprovalAtField).asString(),
+ tosApprovalInspector.field(tosApprovalByField).asString());
+ case NIX -> TermsOfServiceApproval.empty();
+ default -> throw new IllegalArgumentException(tosApprovalInspector.type().name());
+ };
return TenantBilling.empty()
.withContact(TenantContact.from(
@@ -313,7 +325,8 @@ public class TenantSerializer {
.withAddress(tenantInfoAddressFromSlime(billingObject.field("address")))
.withTaxId(taxId)
.withPurchaseOrder(purchaseOrder)
- .withInvoiceEmail(invoiceEmail);
+ .withInvoiceEmail(invoiceEmail)
+ .withToSApproval(tosApproval);
}
private List<TenantSecretStore> secretStoresFromSlime(Inspector secretStoresObject) {
@@ -382,6 +395,11 @@ public class TenantSerializer {
billingCursor.setString(purchaseOrderField, billingContact.getPurchaseOrder().value());
billingCursor.setString(invoiceEmailField, billingContact.getInvoiceEmail().getEmailAddress());
toSlime(billingContact.address(), billingCursor);
+ if (!billingContact.getToSApproval().isEmpty()) {
+ var tosApprovalCursor = billingCursor.setObject(tosApprovalField);
+ tosApprovalCursor.setString(tosApprovalAtField, billingContact.getToSApproval().approvedAt().toString());
+ tosApprovalCursor.setString(tosApprovalByField, billingContact.getToSApproval().approvedBy().get().getName());
+ }
}
private void toSlime(List<TenantSecretStore> tenantSecretStores, Cursor parentCursor) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 1fd8e7c8f3b..ebcc81ab756 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -87,6 +87,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretSto
import com.yahoo.vespa.hosted.controller.api.role.Role;
import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition;
import com.yahoo.vespa.hosted.controller.api.role.SecurityContext;
+import com.yahoo.vespa.hosted.controller.api.role.SimplePrincipal;
import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
@@ -135,6 +136,7 @@ import com.yahoo.vespa.hosted.controller.tenant.TenantBilling;
import com.yahoo.vespa.hosted.controller.tenant.TenantContact;
import com.yahoo.vespa.hosted.controller.tenant.TenantContacts;
import com.yahoo.vespa.hosted.controller.tenant.TenantInfo;
+import com.yahoo.vespa.hosted.controller.tenant.TermsOfServiceApproval;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.yolean.Exceptions;
@@ -386,6 +388,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
private HttpResponse handlePOST(Path path, HttpRequest request) {
if (path.matches("/application/v4/tenant/{tenant}")) return createTenant(path.get("tenant"), request);
+ if (path.matches("/application/v4/tenant/{tenant}/terms-of-service")) return approveTermsOfService(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/key")) return addDeveloperKey(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/token/{tokenid}")) return generateToken(path.get("tenant"), path.get("tokenid"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return createApplication(path.get("tenant"), path.get("application"), request);
@@ -434,6 +437,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
if (path.matches("/application/v4/tenant/{tenant}/archive-access/aws")) return removeAwsArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/archive-access/gcp")) return removeGcpArchiveAccess(path.get("tenant"));
if (path.matches("/application/v4/tenant/{tenant}/secret-store/{name}")) return deleteSecretStore(path.get("tenant"), path.get("name"), request);
+ if (path.matches("/application/v4/tenant/{tenant}/terms-of-service")) return unapproveTermsOfService(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/token/{tokenid}")) return deleteToken(path.get("tenant"), path.get("tokenid"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return removeAllProdDeployments(path.get("tenant"), path.get("application"));
@@ -702,6 +706,10 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
taxIdCursor.setString("code", billingContact.getTaxId().code().value());
root.setString("purchaseOrder", billingContact.getPurchaseOrder().value());
root.setString("invoiceEmail", billingContact.getInvoiceEmail().getEmailAddress());
+ var tosApprovalCursor = root.setObject("tosApproval");
+ var tosApproval = billingContact.getToSApproval();
+ tosApprovalCursor.setString("at", !tosApproval.isEmpty() ? tosApproval.approvedAt().toString() : "");
+ tosApprovalCursor.setString("by", !tosApproval.isEmpty() ? tosApproval.approvedBy().get().getName() : "");
toSlime(billingContact.address(), root); // will create "address" on the parent
}
@@ -812,6 +820,10 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
billingCursor.setString("purchaseOrder", billingContact.getPurchaseOrder().value());
billingCursor.setString("invoiceEmail", billingContact.getInvoiceEmail().getEmailAddress());
toSlime(billingContact.address(), billingCursor);
+ var tosApprovalCursor = billingCursor.setObject("tosApproval");
+ var tosApproval = billingContact.getToSApproval();
+ tosApprovalCursor.setString("at", !tosApproval.isEmpty() ? tosApproval.approvedAt().toString() : "");
+ tosApprovalCursor.setString("by", !tosApproval.isEmpty() ? tosApproval.approvedBy().get().getName() : "");
}
private void toSlime(TenantContacts contacts, Cursor parentCursor) {
@@ -1239,6 +1251,31 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
return new SlimeJsonResponse(slime);
}
+ private HttpResponse approveTermsOfService(String tenant, HttpRequest req) {
+ if (controller.tenants().require(TenantName.from(tenant)).type() != Tenant.Type.cloud)
+ throw new IllegalArgumentException("Tenant '" + tenant + "' is not a cloud tenant");
+ var approvedBy = SimplePrincipal.of(req.getJDiscRequest().getUserPrincipal());
+ var approvedAt = controller.clock().instant();
+
+ controller.tenants().lockOrThrow(TenantName.from(tenant), LockedTenant.Cloud.class, t -> {
+ var updatedTenant = t.withInfo(t.get().info().withBilling(t.get().info().billingContact().withToSApproval(
+ new TermsOfServiceApproval(approvedAt, approvedBy))));
+ controller.tenants().store(updatedTenant);
+ });
+ return new MessageResponse("Terms of service approved by %s".formatted(approvedBy.getName()));
+ }
+
+ private HttpResponse unapproveTermsOfService(String tenant, HttpRequest req) {
+ if (controller.tenants().require(TenantName.from(tenant)).type() != Tenant.Type.cloud)
+ throw new IllegalArgumentException("Tenant '" + tenant + "' is not a cloud tenant");
+ controller.tenants().lockOrThrow(TenantName.from(tenant), LockedTenant.Cloud.class, t -> {
+ var updatedTenant = t.withInfo(t.get().info().withBilling(t.get().info().billingContact().withToSApproval(
+ TermsOfServiceApproval.empty())));
+ controller.tenants().store(updatedTenant);
+ });
+ return new MessageResponse("Terms of service approval removed");
+ }
+
private HttpResponse addDeveloperKey(String tenantName, HttpRequest request) {
if (controller.tenants().require(TenantName.from(tenantName)).type() != Tenant.Type.cloud)
throw new IllegalArgumentException("Tenant '" + tenantName + "' is not a cloud tenant");
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainerTest.java
index 8d1848539f0..6adabad557d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/BillingReportMaintainerTest.java
@@ -4,10 +4,11 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.Bill;
import com.yahoo.vespa.hosted.controller.api.integration.billing.BillStatus;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingDatabaseClient;
import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingReporterMock;
-import com.yahoo.vespa.hosted.controller.api.integration.billing.FailedInvoiceUpdate;
-import com.yahoo.vespa.hosted.controller.api.integration.billing.ModifiableInvoiceUpdate;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.InvoiceUpdate;
import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanRegistryMock;
import com.yahoo.vespa.hosted.controller.tenant.BillingReference;
import com.yahoo.vespa.hosted.controller.tenant.CloudTenant;
@@ -16,7 +17,10 @@ import org.junit.jupiter.api.Test;
import java.time.Duration;
import java.time.LocalDate;
import java.time.ZoneOffset;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -26,6 +30,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
public class BillingReportMaintainerTest {
private final ControllerTester tester = new ControllerTester(SystemName.PublicCd);
private final BillingReportMaintainer maintainer = new BillingReportMaintainer(tester.controller(), Duration.ofMinutes(10));
+ private final BillingDatabaseClient billingDb = tester.controller().serviceRegistry().billingDatabase();
+ private final BillingReporterMock reporter = (BillingReporterMock) tester.controller().serviceRegistry().billingReporter();
@Test
void only_billable_tenants_are_maintained() {
@@ -46,29 +52,30 @@ public class BillingReportMaintainerTest {
}
@Test
- void only_open_bills_with_exported_id_are_maintained() {
+ void only_non_final_bills_with_exported_id_are_maintained() {
var t1 = tester.createTenant("t1");
- var billingDb = tester.controller().serviceRegistry().billingDatabase();
- var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
- var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
-
- var bill1 = billingDb.createBill(t1, start, end, "non-exported");
- var bill2 = billingDb.createBill(t1, start, end, "exported");
- var bill3 = billingDb.createBill(t1, start, end, "exported-and-frozen");
+ var bill1 = createBill(t1, "non-exported", billingDb);
+ var bill2 = createBill(t1, "exported-and-modified", billingDb);
+ var bill3 = createBill(t1, "exported-and-frozen", billingDb);
+ var bill4 = createBill(t1, "exported-and-successful", billingDb);
+ var bill5 = createBill(t1, "exported-and-void", billingDb);
billingDb.setStatus(bill3, "foo", BillStatus.FROZEN);
+ billingDb.setStatus(bill4, "foo", BillStatus.SUCCESSFUL);
+ billingDb.setStatus(bill5, "foo", BillStatus.VOID);
- var reporter = tester.controller().serviceRegistry().billingReporter();
- reporter.exportBill(billingDb.readBill(bill2).get(), "FOO", cloudTenant(t1));
- reporter.exportBill(billingDb.readBill(bill3).get(), "FOO", cloudTenant(t1));
- var updates = maintainer.maintainInvoices();
+ exportBills(t1, bill2, bill3);
+ reporter.modifyInvoice(bill2);
+ var updates = toMap(maintainer.maintainInvoices());
assertTrue(billingDb.readBill(bill1).get().getExportedId().isEmpty());
- // Only the exported open bill is maintained
- assertEquals(1, updates.size());
- assertEquals(bill2, updates.get(0).billId());
- assertEquals(ModifiableInvoiceUpdate.class, updates.get(0).getClass());
+ // Only the exported non-final bills are maintained
+ assertEquals(2, updates.size());
+ assertEquals(Set.of(bill2, bill3), updates.keySet());
+
+ var bill2Update = updates.get(bill2);
+ assertEquals(InvoiceUpdate.Type.MODIFIED, bill2Update.type());
var exportedBill = billingDb.readBill(bill2).get();
assertEquals("EXPORTED-" + exportedBill.id().value(), exportedBill.getExportedId().get());
// Verify that the bill has been updated with a marker line item by the mock
@@ -76,37 +83,30 @@ public class BillingReportMaintainerTest {
assertEquals(1, lineItems.size());
assertEquals("maintained", lineItems.get(0).id());
- // The frozen bill is untouched by the maintainer
+ // Verify that the frozen bill is unmodified and has not changed state.
+ var bill3Update = updates.get(bill3);
+ assertEquals(InvoiceUpdate.Type.UNMODIFIED, bill3Update.type());
var frozenBill = billingDb.readBill(bill3).get();
- assertEquals("EXPORTED-" + frozenBill.id().value(), frozenBill.getExportedId().get());
- assertEquals(0, frozenBill.lineItems().size());
+ assertEquals(BillStatus.FROZEN, frozenBill.status());
}
@Test
void bills_whose_invoice_has_been_deleted_in_the_external_system_are_no_longer_maintained() {
var t1 = tester.createTenant("t1");
- var billingDb = tester.controller().serviceRegistry().billingDatabase();
-
- var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
- var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
-
- var bill1 = billingDb.createBill(t1, start, end, "exported-then-deleted");
-
- var reporter = (BillingReporterMock)tester.controller().serviceRegistry().billingReporter();
- reporter.exportBill(billingDb.readBill(bill1).get(), "FOO", cloudTenant(t1));
+ var bill1 = createBill(t1, "exported-then-deleted", billingDb);
+ exportBills(t1, bill1);
var updates = maintainer.maintainInvoices();
assertEquals(1, updates.size());
- assertEquals(ModifiableInvoiceUpdate.class, updates.get(0).getClass());
+ assertEquals(InvoiceUpdate.Type.UNMODIFIED, updates.get(0).type());
// Delete invoice from the external system
- reporter.deleteExportedBill(bill1);
+ reporter.deleteInvoice(bill1);
// Maintainer should report that the invoice has been removed
updates = maintainer.maintainInvoices();
assertEquals(1, updates.size());
- assertEquals(FailedInvoiceUpdate.class, updates.get(0).getClass());
- assertEquals(FailedInvoiceUpdate.Reason.REMOVED, ((FailedInvoiceUpdate)updates.get(0)).reason);
+ assertEquals(InvoiceUpdate.Type.REMOVED, updates.get(0).type());
// The bill should no longer be maintained
updates = maintainer.maintainInvoices();
@@ -116,19 +116,12 @@ public class BillingReportMaintainerTest {
@Test
void it_is_allowed_to_re_export_bills_whose_invoice_has_been_deleted_in_the_external_system() {
var t1 = tester.createTenant("t1");
- var billingDb = tester.controller().serviceRegistry().billingDatabase();
-
- var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
- var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
-
- var bill1 = billingDb.createBill(t1, start, end, "exported-then-deleted");
-
- var reporter = (BillingReporterMock)tester.controller().serviceRegistry().billingReporter();
+ var bill1 = createBill(t1, "exported-then-deleted", billingDb);
// Export the bill, then delete it in the external system
- reporter.exportBill(billingDb.readBill(bill1).get(), "FOO", cloudTenant(t1));
+ exportBills(t1, bill1);
maintainer.maintainInvoices();
- reporter.deleteExportedBill(bill1);
+ reporter.deleteInvoice(bill1);
maintainer.maintainInvoices();
// Ensure it is currently ignored by the maintainer
@@ -136,10 +129,63 @@ public class BillingReportMaintainerTest {
assertEquals(0, updates.size());
// Re-export the bill and verify that it is maintained again
- reporter.exportBill(billingDb.readBill(bill1).get(), "FOO", cloudTenant(t1));
+ exportBills(t1, bill1);
updates = maintainer.maintainInvoices();
assertEquals(1, updates.size());
- assertEquals(ModifiableInvoiceUpdate.class, updates.get(0).getClass());
+ assertEquals(InvoiceUpdate.Type.UNMODIFIED, updates.get(0).type());
+ }
+
+ @Test
+ void bill_state_is_updated_upon_changes_in_the_external_system() {
+ var t1 = tester.createTenant("t1");
+ var frozen = createBill(t1, "foo", billingDb);
+ var paid = createBill(t1, "foo", billingDb);
+ var voided = createBill(t1, "foo", billingDb);
+ exportBills(t1, frozen, paid, voided);
+
+ var updates = toMap(maintainer.maintainInvoices());
+ assertEquals(3, updates.size());
+ updates.forEach((id, update) -> {
+ assertEquals(InvoiceUpdate.Type.UNMODIFIED, update.type());
+ assertEquals(BillStatus.OPEN, billingDb.readBill(id).get().status());
+ });
+
+ reporter.freezeInvoice(frozen);
+ reporter.payInvoice(paid);
+ reporter.voidInvoice(voided);
+ updates = toMap(maintainer.maintainInvoices());
+
+ assertEquals(3, updates.size());
+
+ assertEquals(InvoiceUpdate.Type.UNMODIFIABLE, updates.get(frozen).type());
+ assertEquals(BillStatus.FROZEN, billingDb.readBill(frozen).get().status());
+
+ assertEquals(InvoiceUpdate.Type.PAID, updates.get(paid).type());
+ assertEquals(BillStatus.SUCCESSFUL, billingDb.readBill(paid).get().status());
+
+ assertEquals(InvoiceUpdate.Type.VOIDED, updates.get(voided).type());
+ assertEquals(BillStatus.VOID, billingDb.readBill(voided).get().status());
+ }
+
+ private static Map<Bill.Id, InvoiceUpdate> toMap(Iterable<InvoiceUpdate> updates) {
+ var map = new HashMap<Bill.Id, InvoiceUpdate>();
+ for (var update : updates) {
+ map.put(update.billId(), update);
+ }
+ return map;
+ }
+
+ private static Bill.Id createBill(TenantName tenantName, String agent, BillingDatabaseClient billingDb) {
+ var start = LocalDate.of(2020, 5, 23).atStartOfDay(ZoneOffset.UTC);
+ var end = start.toLocalDate().plusDays(6).atStartOfDay(ZoneOffset.UTC);
+ return billingDb.createBill(tenantName, start, end, agent);
+ }
+
+ private void exportBills(TenantName tenantName, Bill.Id... billIds) {
+ for (var billId : billIds) {
+ var bill = billingDb.readBill(billId).get();
+ reporter.exportBill(bill, "FOO", cloudTenant(tenantName));
+ }
}
private CloudTenant cloudTenant(TenantName tenantName) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializerTest.java
index f2fc43933df..493d4df90a9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializerTest.java
@@ -31,6 +31,7 @@ import com.yahoo.vespa.hosted.controller.tenant.TenantBilling;
import com.yahoo.vespa.hosted.controller.tenant.TenantContact;
import com.yahoo.vespa.hosted.controller.tenant.TenantContacts;
import com.yahoo.vespa.hosted.controller.tenant.TenantInfo;
+import com.yahoo.vespa.hosted.controller.tenant.TermsOfServiceApproval;
import org.junit.jupiter.api.Test;
import java.net.URI;
@@ -240,6 +241,7 @@ public class TenantSerializerTest {
.withPurchaseOrder(new PurchaseOrder("PO42"))
.withTaxId(new TaxId("NO", "no_vat", "123456789MVA"))
.withInvoiceEmail(new Email("billing@mycomp.any", false))
+ .withToSApproval(new TermsOfServiceApproval(Instant.ofEpochMilli(1234L), new SimplePrincipal("ceo@mycomp.any")))
);
Slime slime = new Slime();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
index f8ae7c8ea50..eb1885423b1 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
@@ -112,7 +112,11 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
"code": ""
},
"purchaseOrder":"",
- "invoiceEmail":""
+ "invoiceEmail":"",
+ "tosApproval": {
+ "at": "",
+ "by": ""
+ }
}
""";
var request = request("/application/v4/tenant/scoober/info/billing", GET)
@@ -143,6 +147,44 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
.roles(Set.of(Role.administrator(tenantName)));
tester.assertResponse(updateRequest, "{\"message\":\"Tenant info updated\"}", 200);
+ var approveToSRequest = request("/application/v4/tenant/scoober/terms-of-service", POST)
+ .data("{}").roles(Set.of(Role.administrator(tenantName)));
+ tester.assertResponse(approveToSRequest, "{\"message\":\"Terms of service approved by user@test\"}", 200);
+
+ expectedResponse = """
+ {
+ "contact": {
+ "name":"name",
+ "email":"foo@example",
+ "emailVerified": false,
+ "phone":"phone"
+ },
+ "taxId": {
+ "country": "NO",
+ "type": "no_vat",
+ "code": "123456789MVA"
+ },
+ "purchaseOrder":"PO9001",
+ "invoiceEmail":"billing@mycomp.any",
+ "tosApproval": {
+ "at": "2020-09-13T12:26:40Z",
+ "by": "user@test"
+ },
+ "address": {
+ "addressLines":"addressLines",
+ "postalCodeOrZip":"postalCodeOrZip",
+ "city":"city",
+ "stateRegionProvince":"stateRegionProvince",
+ "country":"country"
+ }
+ }
+ """;
+ tester.assertJsonResponse(request, expectedResponse, 200);
+
+ var unapproveToSRequest = request("/application/v4/tenant/scoober/terms-of-service", DELETE)
+ .data("{}").roles(Set.of(Role.administrator(tenantName)));
+ tester.assertResponse(unapproveToSRequest, "{\"message\":\"Terms of service approval removed\"}", 200);
+
expectedResponse = """
{
"contact": {
@@ -158,6 +200,10 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
},
"purchaseOrder":"PO9001",
"invoiceEmail":"billing@mycomp.any",
+ "tosApproval": {
+ "at": "",
+ "by": ""
+ },
"address": {
"addressLines":"addressLines",
"postalCodeOrZip":"postalCodeOrZip",
@@ -245,7 +291,11 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
"code": ""
},
"purchaseOrder":"",
- "invoiceEmail":""
+ "invoiceEmail":"",
+ "tosApproval": {
+ "at": "",
+ "by": ""
+ }
},
"contacts": [
{"audiences":["tenant"],"email":"contact1@example.com","emailVerified":false}
@@ -287,6 +337,10 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
"city":"city",
"stateRegionProvince":"stateRegionProvince",
"country":"country"
+ },
+ "tosApproval": {
+ "at": "",
+ "by": ""
}
},
"contacts": [
diff --git a/dependency-versions/pom.xml b/dependency-versions/pom.xml
index 4a2f6d48086..45f00977381 100644
--- a/dependency-versions/pom.xml
+++ b/dependency-versions/pom.xml
@@ -67,7 +67,7 @@
<!-- Athenz dependencies. Make sure these dependencies match those in Vespa's internal repositories -->
<athenz.vespa.version>1.11.45</athenz.vespa.version>
- <aws-sdk.vespa.version>1.12.565</aws-sdk.vespa.version>
+ <aws-sdk.vespa.version>1.12.580</aws-sdk.vespa.version>
<!-- Athenz END -->
<!-- WARNING: If you change curator version, you also need to update
@@ -126,7 +126,7 @@
<org.json.vespa.version>20231013</org.json.vespa.version>
<org.lz4.vespa.version>1.8.0</org.lz4.vespa.version>
<prometheus.client.vespa.version>0.16.0</prometheus.client.vespa.version>
- <protobuf.vespa.version>3.24.4</protobuf.vespa.version>
+ <protobuf.vespa.version>3.25.0</protobuf.vespa.version>
<questdb.vespa.version>7.3.3</questdb.vespa.version>
<spifly.vespa.version>1.3.6</spifly.vespa.version>
<snappy.vespa.version>1.1.10.5</snappy.vespa.version>
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 7cef6b4d045..03f789b5852 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -149,7 +149,7 @@ Requires: vespa-xxhash >= 0.8.1
Requires: xxhash-libs >= 0.8.1
%endif
%if 0%{?el8}
-Requires: vespa-openssl >= 3.1.2
+Requires: vespa-openssl >= 3.1.4
%else
Requires: openssl-libs
%endif
@@ -180,7 +180,7 @@ Summary: Vespa - The open big data serving engine - C++ libraries
Requires: %{name}-base-libs = %{version}-%{release}
Requires: libicu
%if 0%{?el8}
-Requires: vespa-openssl >= 3.1.2
+Requires: vespa-openssl >= 3.1.4
%else
Requires: openssl-libs
%endif
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index fac6691bbff..11d5ecddaf5 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -29,6 +29,7 @@ vespa_define_module(
src/tests/eval/int8float
src/tests/eval/interpreted_function
src/tests/eval/llvm_stress
+ src/tests/eval/map_subspaces
src/tests/eval/multiply_add
src/tests/eval/nested_loop
src/tests/eval/node_tools
diff --git a/eval/src/tests/eval/compiled_function/compiled_function_test.cpp b/eval/src/tests/eval/compiled_function/compiled_function_test.cpp
index 071e4766629..7b1f9a84b6d 100644
--- a/eval/src/tests/eval/compiled_function/compiled_function_test.cpp
+++ b/eval/src/tests/eval/compiled_function/compiled_function_test.cpp
@@ -50,6 +50,7 @@ TEST("require that lazy parameter passing works") {
std::vector<vespalib::string> unsupported = {
"map(",
+ "map_subspaces(",
"join(",
"merge(",
"reduce(",
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index ca35b8db66d..4ba715ea192 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -150,18 +150,20 @@ TEST("require that basic addition works") {
//-----------------------------------------------------------------------------
-TEST("require that functions with non-compilable lambdas cannot be interpreted") {
+TEST("require that functions with non-compilable simple lambdas cannot be interpreted") {
auto good_map = Function::parse("map(a,f(x)(x+1))");
auto good_join = Function::parse("join(a,b,f(x,y)(x+y))");
+ auto good_merge = Function::parse("merge(a,b,f(x,y)(x+y))");
auto bad_map = Function::parse("map(a,f(x)(map(x,f(i)(i+1))))");
auto bad_join = Function::parse("join(a,b,f(x,y)(join(x,y,f(i,j)(i+j))))");
- for (const Function *good: {good_map.get(), good_join.get()}) {
+ auto bad_merge = Function::parse("merge(a,b,f(x,y)(join(x,y,f(i,j)(i+j))))");
+ for (const Function *good: {good_map.get(), good_join.get(), good_merge.get()}) {
if (!EXPECT_TRUE(!good->has_error())) {
fprintf(stderr, "parse error: %s\n", good->get_error().c_str());
}
EXPECT_TRUE(!InterpretedFunction::detect_issues(*good));
}
- for (const Function *bad: {bad_map.get(), bad_join.get()}) {
+ for (const Function *bad: {bad_map.get(), bad_join.get(), bad_merge.get()}) {
if (!EXPECT_TRUE(!bad->has_error())) {
fprintf(stderr, "parse error: %s\n", bad->get_error().c_str());
}
@@ -172,6 +174,28 @@ TEST("require that functions with non-compilable lambdas cannot be interpreted")
<< std::endl;
}
+TEST("require that functions with non-interpretable complex lambdas cannot be interpreted") {
+ auto good_tensor_lambda = Function::parse("tensor(x[5])(map(x,f(y)(y)))");
+ auto good_map_subspaces = Function::parse("map_subspaces(a,f(x)(concat(x,x,y)))");
+ auto bad_tensor_lambda = Function::parse("tensor(x[5])(map(x,f(y)(map(y,f(i)(i+1)))))");
+ auto bad_map_subspaces = Function::parse("map_subspaces(a,f(x)(map(x,f(y)(map(y,f(i)(i+1))))))");
+ for (const Function *good: {good_tensor_lambda.get(), good_map_subspaces.get()}) {
+ if (!EXPECT_TRUE(!good->has_error())) {
+ fprintf(stderr, "parse error: %s\n", good->get_error().c_str());
+ }
+ EXPECT_TRUE(!InterpretedFunction::detect_issues(*good));
+ }
+ for (const Function *bad: {bad_tensor_lambda.get(), bad_map_subspaces.get()}) {
+ if (!EXPECT_TRUE(!bad->has_error())) {
+ fprintf(stderr, "parse error: %s\n", bad->get_error().c_str());
+ }
+ EXPECT_TRUE(InterpretedFunction::detect_issues(*bad));
+ }
+ std::cerr << "Example function issues:" << std::endl
+ << InterpretedFunction::detect_issues(*bad_map_subspaces).list
+ << std::endl;
+}
+
//-----------------------------------------------------------------------------
TEST("require that compilation meta-data can be collected") {
diff --git a/eval/src/tests/eval/map_subspaces/CMakeLists.txt b/eval/src/tests/eval/map_subspaces/CMakeLists.txt
new file mode 100644
index 00000000000..90b2ce07791
--- /dev/null
+++ b/eval/src/tests/eval/map_subspaces/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_map_subspaces_test_app TEST
+ SOURCES
+ map_subspaces_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_map_subspaces_test_app COMMAND eval_map_subspaces_test_app)
diff --git a/eval/src/tests/eval/map_subspaces/map_subspaces_test.cpp b/eval/src/tests/eval/map_subspaces/map_subspaces_test.cpp
new file mode 100644
index 00000000000..278d49992be
--- /dev/null
+++ b/eval/src/tests/eval/map_subspaces/map_subspaces_test.cpp
@@ -0,0 +1,103 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/simple_value.h>
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/test/gen_spec.h>
+#include <vespa/eval/eval/test/eval_fixture.h>
+#include <vespa/eval/eval/tensor_nodes.h>
+
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/stash.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::eval::tensor_function;
+
+void verify(const vespalib::string &a, const vespalib::string &expr, const vespalib::string &result) {
+ EvalFixture::ParamRepo param_repo;
+ param_repo.add("a", TensorSpec::from_expr(a));
+ auto expect = TensorSpec::from_expr(result);
+ EXPECT_FALSE(ValueType::from_spec(expect.type()).is_error());
+ EXPECT_EQUAL(EvalFixture::ref(expr, param_repo), expect);
+ EXPECT_EQUAL(EvalFixture::prod(expr, param_repo), expect);
+}
+
+//-----------------------------------------------------------------------------
+
+TEST("require that simple map_subspaces work") {
+ TEST_DO(verify("tensor(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}",
+ "map_subspaces(a,f(t)(tensor(y[2])(t{y:(y)}+t{y:(y+1)})))",
+ "tensor(x{},y[2]):{foo:[3,5],bar:[9,11]}"));
+}
+
+TEST("require that scalars can be used with map_subspaces") {
+ TEST_DO(verify("3.0",
+ "map_subspaces(a,f(n)(n+5.0))",
+ "8.0"));
+}
+
+TEST("require that outer cell type is decayed when inner type is double") {
+ TEST_DO(verify("tensor<int8>(x{}):{foo:3,bar:7}",
+ "map_subspaces(a,f(n)(n+2))",
+ "tensor<float>(x{}):{foo:5,bar:9}"));
+}
+
+TEST("require that inner cell type is used directly without decay") {
+ TEST_DO(verify("tensor(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}",
+ "map_subspaces(a,f(t)(cell_cast(t,int8)))",
+ "tensor<int8>(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}"));
+ TEST_DO(verify("tensor(y[3]):[1,2,3]",
+ "map_subspaces(a,f(t)(cell_cast(t,int8)))",
+ "tensor<int8>(y[3]):[1,2,3]"));
+}
+
+TEST("require that map_subspaces can be nested") {
+ TEST_DO(verify("tensor(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}",
+ "map_subspaces(a,f(a)(5+map_subspaces(a,f(t)(tensor(y[2])(t{y:(y)}+t{y:(y+1)})))))",
+ "tensor(x{},y[2]):{foo:[8,10],bar:[14,16]}"));
+}
+
+size_t count_nodes(const NodeTypes &types) {
+ size_t cnt = 0;
+ types.each([&](const auto &, const auto &){++cnt;});
+ return cnt;
+}
+
+void check_errors(const NodeTypes &types) {
+ for (const auto &err: types.errors()) {
+ fprintf(stderr, "%s\n", err.c_str());
+ }
+ ASSERT_EQUAL(types.errors().size(), 0u);
+}
+
+TEST("require that type resolving also include nodes from the mapping lambda function") {
+ auto fun = Function::parse("map_subspaces(a,f(a)(map_subspaces(a,f(t)(tensor(y[2])(t{y:(y)}+t{y:(y+1)})))))");
+ NodeTypes types(*fun, {ValueType::from_spec("tensor(x{},y[3])")});
+ check_errors(types);
+ auto map_subspaces = nodes::as<nodes::TensorMapSubspaces>(fun->root());
+ ASSERT_TRUE(map_subspaces != nullptr);
+ EXPECT_EQUAL(types.get_type(*map_subspaces).to_spec(), "tensor(x{},y[2])");
+ EXPECT_EQUAL(types.get_type(map_subspaces->lambda().root()).to_spec(), "tensor(y[2])");
+
+ NodeTypes copy = types.export_types(fun->root());
+ check_errors(copy);
+ EXPECT_EQUAL(count_nodes(types), count_nodes(copy));
+
+ NodeTypes map_types = copy.export_types(map_subspaces->lambda().root());
+ check_errors(map_types);
+ EXPECT_LESS(count_nodes(map_types), count_nodes(copy));
+
+ auto inner_map = nodes::as<nodes::TensorMapSubspaces>(map_subspaces->lambda().root());
+ ASSERT_TRUE(inner_map != nullptr);
+ NodeTypes inner_types = map_types.export_types(inner_map->lambda().root());
+ check_errors(inner_types);
+ EXPECT_LESS(count_nodes(inner_types), count_nodes(map_types));
+
+ // [lambda, peek, t, y, +, peek, t, y, +, 1] are the 10 nodes:
+ EXPECT_EQUAL(count_nodes(inner_types), 10u);
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
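
For reference, a minimal hand-built sketch (reusing only the TensorSpec API exercised in this test file; not part of the patch) of what the "simple map_subspaces" case above computes: each dense y[3] subspace is mapped independently to a y[2] subspace of pairwise sums, while the sparse x{} addresses are carried over unchanged.

// Sketch only: the expected output of the "simple map_subspaces" test, built by hand.
// input:  tensor(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}
// lambda: f(t)(tensor(y[2])(t{y:(y)}+t{y:(y+1)}))  -- pairwise sums per dense subspace
TensorSpec expect("tensor(x{},y[2])");
expect.add({{"x", "foo"}, {"y", 0}}, 1 + 2);  // 3
expect.add({{"x", "foo"}, {"y", 1}}, 2 + 3);  // 5
expect.add({{"x", "bar"}, {"y", 0}}, 4 + 5);  // 9
expect.add({{"x", "bar"}, {"y", 1}}, 5 + 6);  // 11
// equals TensorSpec::from_expr("tensor(x{},y[2]):{foo:[3,5],bar:[9,11]}")
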
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index d58f9fda943..ad5426e2a99 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -354,6 +354,42 @@ TEST("require that tensor cell_cast resolves correct type") {
TEST_DO(verify("cell_cast(tensor<float>(x{},y[5]),int8)", "tensor<int8>(x{},y[5])"));
}
+TEST("require that tensor map_subspace resolves correct type") {
+ // double input
+ TEST_DO(verify("map_subspaces(double, f(a)(a))", "double"));
+ TEST_DO(verify("map_subspaces(double, f(a)(tensor<int8>(y[2]):[a,a]))", "tensor<int8>(y[2])"));
+
+ // sparse input
+ TEST_DO(verify("map_subspaces(tensor<float>(x{}), f(a)(a))", "tensor<float>(x{})"));
+ TEST_DO(verify("map_subspaces(tensor<int8>(x{}), f(a)(a))", "tensor<float>(x{})")); // NB: decay
+ TEST_DO(verify("map_subspaces(tensor<float>(x{}), f(a)(tensor<int8>(y[2]):[a,a]))", "tensor<int8>(x{},y[2])"));
+
+ // dense input
+ TEST_DO(verify("map_subspaces(tensor<float>(y[10]), f(a)(a))", "tensor<float>(y[10])"));
+ TEST_DO(verify("map_subspaces(tensor<int8>(y[10]), f(a)(a))", "tensor<int8>(y[10])")); // NB: no decay
+ TEST_DO(verify("map_subspaces(tensor<float>(y[10]), f(a)(reduce(a,sum)))", "double"));
+ TEST_DO(verify("map_subspaces(tensor<float>(y[10]), f(a)(cell_cast(a,int8)))", "tensor<int8>(y[10])"));
+ TEST_DO(verify("map_subspaces(tensor<int8>(y[10]), f(a)(a*tensor<int8>(z[2]):[a{y:0},a{y:1}]))", "tensor<float>(y[10],z[2])"));
+
+ // mixed input
+ TEST_DO(verify("map_subspaces(tensor<float>(x{},y[10]), f(a)(a))", "tensor<float>(x{},y[10])"));
+ TEST_DO(verify("map_subspaces(tensor<int8>(x{},y[10]), f(a)(a))", "tensor<int8>(x{},y[10])"));
+ TEST_DO(verify("map_subspaces(tensor<int8>(x{},y[10]), f(a)(map_subspaces(a, f(b)(b))))", "tensor<int8>(x{},y[10])"));
+ TEST_DO(verify("map_subspaces(tensor<int8>(x{},y[10]), f(a)(map(a, f(b)(b))))", "tensor<float>(x{},y[10])"));
+ TEST_DO(verify("map_subspaces(tensor<float>(x{},y[10]), f(y)(cell_cast(y,int8)))", "tensor<int8>(x{},y[10])"));
+ TEST_DO(verify("map_subspaces(tensor<float>(x{},y[10]), f(y)(reduce(y,sum)))", "tensor<float>(x{})"));
+ TEST_DO(verify("map_subspaces(tensor<int8>(x{},y[10]), f(y)(reduce(y,sum)))", "tensor<float>(x{})"));
+ TEST_DO(verify("map_subspaces(tensor<float>(x{},y[10]), f(y)(concat(concat(y,y,y),y,y)))", "tensor<float>(x{},y[30])"));
+ TEST_DO(verify("map_subspaces(tensor<float>(x{},y[10]), f(y)(y*tensor<float>(z[5])(z+3)))", "tensor<float>(x{},y[10],z[5])"));
+
+ // error cases
+ TEST_DO(verify("map_subspaces(error, f(a)(a))", "error"));
+ TEST_DO(verify("map_subspaces(double, f(a)(tensor(x[5])(x)+tensor(x[7])(x)))", "error"));
+ TEST_DO(verify("map_subspaces(tensor<float>(x{}), f(a)(tensor(y{}):{a:3}))", "error"));
+ TEST_DO(verify("map_subspaces(tensor<float>(y[10]), f(a)(a+tensor(y[7])(y)))", "error"));
+ TEST_DO(verify("map_subspaces(tensor<float>(x{},y[10]), f(y)(y*tensor<float>(x[5])(x+3)))", "error"));
+}
+
TEST("require that double only expressions can be detected") {
auto plain_fun = Function::parse("1+2");
auto complex_fun = Function::parse("reduce(a,sum)");
diff --git a/eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp b/eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp
index bcb738781ad..6df1a7fdb34 100644
--- a/eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp
+++ b/eval/src/tests/eval/reference_evaluation/reference_evaluation_test.cpp
@@ -130,6 +130,12 @@ TEST(ReferenceEvaluationTest, map_expression_works) {
EXPECT_EQ(ref_eval("map(a,f(x)(x*2+3))", {a}), expect);
}
+TEST(ReferenceEvaluationTest, map_subspaces_expression_works) {
+ auto a = make_val("tensor(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}");
+ auto expect = make_val("tensor(x{},y[2]):{foo:[3,5],bar:[9,11]}");
+ EXPECT_EQ(ref_eval("map_subspaces(a,f(x)(tensor(y[2])(x{y:(y)}+x{y:(y+1)})))", {a}), expect);
+}
+
TEST(ReferenceEvaluationTest, join_expression_works) {
auto a = make_val("tensor(x[2]):[1,2]");
auto b = make_val("tensor(y[2]):[3,4]");
diff --git a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
index ee876f67f34..e125a29d75f 100644
--- a/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
+++ b/eval/src/tests/eval/reference_operations/reference_operations_test.cpp
@@ -49,6 +49,12 @@ TensorSpec sparse_1d_all_two() {
.add({{"c", "qux"}}, 2.0);
}
+TensorSpec spec(const vespalib::string &expr) {
+ auto result = TensorSpec::from_expr(expr);
+ EXPECT_FALSE(ValueType::from_spec(result.type()).is_error());
+ return result;
+}
+
//-----------------------------------------------------------------------------
TEST(ReferenceConcatTest, concat_numbers) {
@@ -234,6 +240,62 @@ TEST(ReferenceMapTest, map_mixed_tensor) {
//-----------------------------------------------------------------------------
+TEST(ReferenceMapSubspacesTest, map_vectors) {
+ auto input = spec("tensor(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}");
+ auto fun = [&](const TensorSpec &space) {
+ EXPECT_EQ(space.type(), "tensor(y[3])");
+ size_t i = 0;
+ double a = 0.0;
+ double b = 0.0;
+ for (const auto &[addr, value]: space.cells()) {
+ if (i < 2) {
+ a += value;
+ }
+ if (i > 0) {
+ b += value;
+ }
+ ++i;
+ }
+ TensorSpec result("tensor(y[2])");
+ result.add({{"y", 0}}, a);
+ result.add({{"y", 1}}, b);
+ return result;
+ };
+ auto output = ReferenceOperations::map_subspaces(input, fun);
+ auto expect = spec("tensor(x{},y[2]):{foo:[3,5],bar:[9,11]}");
+ EXPECT_EQ(output, expect);
+}
+
+TEST(ReferenceMapSubspacesTest, map_numbers_with_external_decay) {
+ auto input = spec("tensor<bfloat16>(x{}):{foo:3,bar:5}");
+ auto fun = [&](const TensorSpec &space) {
+ EXPECT_EQ(space.type(), "double");
+ TensorSpec result("double");
+ result.add({}, space.cells().begin()->second + 4.0);
+ return result;
+ };
+ auto output = ReferenceOperations::map_subspaces(input, fun);
+ auto expect = spec("tensor<float>(x{}):{foo:7,bar:9}");
+ EXPECT_EQ(output, expect);
+}
+
+TEST(ReferenceMapSubspacesTest, cast_cells_without_internal_decay) {
+ auto input = spec("tensor<float>(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}");
+ auto fun = [&](const TensorSpec &space) {
+ EXPECT_EQ(space.type(), "tensor<float>(y[3])");
+ TensorSpec result("tensor<bfloat16>(y[3])");
+ for (const auto &[addr, value]: space.cells()) {
+ result.add(addr, value);
+ }
+ return result;
+ };
+ auto output = ReferenceOperations::map_subspaces(input, fun);
+ auto expect = spec("tensor<bfloat16>(x{},y[3]):{foo:[1,2,3],bar:[4,5,6]}");
+ EXPECT_EQ(output, expect);
+}
+
+//-----------------------------------------------------------------------------
+
TEST(ReferenceMergeTest, simple_mixed_merge) {
auto a = mixed_5d_input(false);
auto b = TensorSpec("tensor(a[3],b[1],c{},d[5],e{})")
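
As a usage note, here is a sketch under the same assumptions as this test file (the spec() helper and ReferenceOperations/TensorSpec includes above): the new reference operation applies the callback to each dense subspace and reassembles the results under the original sparse addresses.

// Sketch: doubling every cell of each dense subspace via the reference operation.
auto input = spec("tensor(x{},y[2]):{foo:[1,2],bar:[3,4]}");
auto doubled = ReferenceOperations::map_subspaces(input, [](const TensorSpec &space) {
    TensorSpec result(space.type());  // inner type kept as-is (no internal decay)
    for (const auto &[addr, value]: space.cells()) {
        result.add(addr, value * 2.0);
    }
    return result;
});
// doubled == spec("tensor(x{},y[2]):{foo:[2,4],bar:[6,8]}")
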
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index 84b9d685ee8..f472a5fb6a6 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -351,6 +351,36 @@ TEST("require that mapped dimensions can be obtained") {
TEST_DO(my_check(type("tensor(a[1],b[1],x{},y[10],z[1])").mapped_dimensions()));
}
+TEST("require that mapped dimensions can be stripped") {
+ EXPECT_EQUAL(type("error").strip_mapped_dimensions(), type("error"));
+ EXPECT_EQUAL(type("double").strip_mapped_dimensions(), type("double"));
+ EXPECT_EQUAL(type("tensor<float>(x{})").strip_mapped_dimensions(), type("double"));
+ EXPECT_EQUAL(type("tensor<float>(x[10])").strip_mapped_dimensions(), type("tensor<float>(x[10])"));
+ EXPECT_EQUAL(type("tensor<float>(a[1],b{},c[2],d{},e[3],f{})").strip_mapped_dimensions(), type("tensor<float>(a[1],c[2],e[3])"));
+}
+
+TEST("require that indexed dimensions can be stripped") {
+ EXPECT_EQUAL(type("error").strip_indexed_dimensions(), type("error"));
+ EXPECT_EQUAL(type("double").strip_indexed_dimensions(), type("double"));
+ EXPECT_EQUAL(type("tensor<float>(x{})").strip_indexed_dimensions(), type("tensor<float>(x{})"));
+ EXPECT_EQUAL(type("tensor<float>(x[10])").strip_indexed_dimensions(), type("double"));
+ EXPECT_EQUAL(type("tensor<float>(a[1],b{},c[2],d{},e[3],f{})").strip_indexed_dimensions(), type("tensor<float>(b{},d{},f{})"));
+}
+
+TEST("require that value types can be wrapped inside each other") {
+ EXPECT_EQUAL(type("error").wrap(type("error")), type("error"));
+ EXPECT_EQUAL(type("double").wrap(type("error")), type("error"));
+ EXPECT_EQUAL(type("error").wrap(type("double")), type("error"));
+ EXPECT_EQUAL(type("double").wrap(type("double")), type("double"));
+ EXPECT_EQUAL(type("tensor<int8>(x{})").wrap(type("tensor<int8>(y[10])")), type("tensor<int8>(x{},y[10])"));
+ EXPECT_EQUAL(type("tensor<int8>(a{},c{})").wrap(type("tensor<int8>(b[10],d[5])")), type("tensor<int8>(a{},b[10],c{},d[5])"));
+ EXPECT_EQUAL(type("tensor<int8>(x{})").wrap(type("tensor<int8>(x[10])")), type("error")); // dimension name conflict
+ EXPECT_EQUAL(type("tensor<int8>(x{},z[2])").wrap(type("tensor<int8>(y[10])")), type("error")); // outer cannot have indexed dimensions
+ EXPECT_EQUAL(type("tensor<int8>(x{})").wrap(type("tensor<int8>(y[10],z{})")), type("error")); // inner cannot have mapped dimensions
+ EXPECT_EQUAL(type("double").wrap(type("tensor<int8>(y[10])")), type("tensor<int8>(y[10])")); // NB: no decay
+ EXPECT_EQUAL(type("tensor<int8>(x{})").wrap(type("double")), type("tensor<float>(x{})")); // NB: decay
+}
+
TEST("require that dimension index can be obtained") {
EXPECT_EQUAL(type("error").dimension_index("x"), ValueType::Dimension::npos);
EXPECT_EQUAL(type("double").dimension_index("x"), ValueType::Dimension::npos);
diff --git a/eval/src/vespa/eval/eval/cell_type.h b/eval/src/vespa/eval/eval/cell_type.h
index b1fa29a75a5..c15a5b68dba 100644
--- a/eval/src/vespa/eval/eval/cell_type.h
+++ b/eval/src/vespa/eval/eval/cell_type.h
@@ -129,6 +129,9 @@ struct CellMeta {
// convenience functions to be used for specific operations
constexpr CellMeta map() const { return decay(); }
+ constexpr CellMeta wrap(CellMeta inner) const {
+ return (inner.is_scalar) ? decay() : inner;
+ }
constexpr CellMeta reduce(bool output_is_scalar) const {
return normalize(cell_type, output_is_scalar).decay();
}
diff --git a/eval/src/vespa/eval/eval/function.cpp b/eval/src/vespa/eval/eval/function.cpp
index edcd241b6bf..a39d8dda228 100644
--- a/eval/src/vespa/eval/eval/function.cpp
+++ b/eval/src/vespa/eval/eval/function.cpp
@@ -573,6 +573,13 @@ void parse_tensor_map(ParseContext &ctx) {
ctx.push_expression(std::make_unique<nodes::TensorMap>(std::move(child), std::move(lambda)));
}
+void parse_tensor_map_subspaces(ParseContext &ctx) {
+ Node_UP child = get_expression(ctx);
+ ctx.eat(',');
+ auto lambda = parse_lambda(ctx, 1);
+ ctx.push_expression(std::make_unique<nodes::TensorMapSubspaces>(std::move(child), std::move(lambda)));
+}
+
void parse_tensor_join(ParseContext &ctx) {
Node_UP lhs = get_expression(ctx);
ctx.eat(',');
@@ -856,6 +863,8 @@ bool maybe_parse_call(ParseContext &ctx, const vespalib::string &name) {
parse_call(ctx, std::move(call));
} else if (name == "map") {
parse_tensor_map(ctx);
+ } else if (name == "map_subspaces") {
+ parse_tensor_map_subspaces(ctx);
} else if (name == "join") {
parse_tensor_join(ctx);
} else if (name == "merge") {
@@ -1118,4 +1127,12 @@ Function::unwrap(vespalib::stringref input,
//-----------------------------------------------------------------------------
+void
+Function::Issues::add_nested_issues(const vespalib::string &context, const Issues &issues)
+{
+ for (const auto &issue: issues.list) {
+ list.push_back(context + ": " + issue);
+ }
+}
+
}
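
For illustration, a sketch using only the Function API already exercised by the tests in this patch: the new call form parses like map/join but with a one-parameter lambda, and the new Issues helpers allow problems found inside such a lambda to be prefixed with their context.

// Sketch: parsing the new call form and composing nested issue messages.
auto fun = Function::parse("map_subspaces(a,f(t)(reduce(t,sum)))");
if (fun->has_error()) {
    fprintf(stderr, "parse error: %s\n", fun->get_error().c_str());
}
Function::Issues inner({"lambda function that cannot be compiled"});
Function::Issues outer;  // now default-constructible
outer.add_nested_issues("within TensorMapSubspaces complex lambda", inner);
// outer.list holds "within TensorMapSubspaces complex lambda: lambda function that cannot be compiled"
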
diff --git a/eval/src/vespa/eval/eval/function.h b/eval/src/vespa/eval/eval/function.h
index 0f79d66ead6..ae23d0093fb 100644
--- a/eval/src/vespa/eval/eval/function.h
+++ b/eval/src/vespa/eval/eval/function.h
@@ -74,8 +74,10 @@ public:
**/
struct Issues {
std::vector<vespalib::string> list;
- operator bool() const { return !list.empty(); }
- Issues(std::vector<vespalib::string> &&list_in) : list(std::move(list_in)) {}
+ operator bool() const noexcept { return !list.empty(); }
+ Issues() noexcept : list() {}
+ Issues(std::vector<vespalib::string> &&list_in) noexcept : list(std::move(list_in)) {}
+ void add_nested_issues(const vespalib::string &context, const Issues &issues);
};
};
diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp
index e4304049b8e..c0aa7d1703b 100644
--- a/eval/src/vespa/eval/eval/interpreted_function.cpp
+++ b/eval/src/vespa/eval/eval/interpreted_function.cpp
@@ -18,7 +18,7 @@ namespace vespalib::eval {
namespace {
-const Function *get_lambda(const nodes::Node &node) {
+const Function *get_simple_lambda(const nodes::Node &node) {
if (auto ptr = nodes::as<nodes::TensorMap>(node)) {
return &ptr->lambda();
}
@@ -31,6 +31,16 @@ const Function *get_lambda(const nodes::Node &node) {
return nullptr;
}
+const Function *get_complex_lambda(const nodes::Node &node) {
+ if (auto ptr = nodes::as<nodes::TensorLambda>(node)) {
+ return &ptr->lambda();
+ }
+ if (auto ptr = nodes::as<nodes::TensorMapSubspaces>(node)) {
+ return &ptr->lambda();
+ }
+ return nullptr;
+}
+
void my_nop(InterpretedFunction::State &, uint64_t) {}
} // namespace vespalib::<unnamed>
@@ -148,18 +158,29 @@ Function::Issues
InterpretedFunction::detect_issues(const Function &function)
{
struct NotSupported : NodeTraverser {
- std::vector<vespalib::string> issues;
+ Function::Issues issues;
bool open(const nodes::Node &) override { return true; }
void close(const nodes::Node &node) override {
- auto lambda = get_lambda(node);
- if (lambda && CompiledFunction::detect_issues(*lambda)) {
- issues.push_back(make_string("lambda function that cannot be compiled within %s",
- getClassName(node).c_str()));
+ // map/join/merge: simple scalar lambdas must be compilable with llvm
+ if (auto lambda = get_simple_lambda(node)) {
+ auto inner_issues = CompiledFunction::detect_issues(*lambda);
+ if (inner_issues) {
+ auto ctx = make_string("within %s simple lambda", getClassName(node).c_str());
+ issues.add_nested_issues(ctx, inner_issues);
+ }
+ }
+ // tensor lambda/map_subspaces: complex lambdas that may be interpreted and use tensor math
+ if (auto lambda = get_complex_lambda(node)) {
+ auto inner_issues = InterpretedFunction::detect_issues(*lambda);
+ if (inner_issues) {
+ auto ctx = make_string("within %s complex lambda", getClassName(node).c_str());
+ issues.add_nested_issues(ctx, inner_issues);
+ }
}
}
} checker;
function.root().traverse(checker);
- return Function::Issues(std::move(checker.issues));
+ return std::move(checker.issues);
}
InterpretedFunction::EvalSingle::EvalSingle(const ValueBuilderFactory &factory, Instruction op, const LazyParams &params)
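
A usage sketch (same Function/InterpretedFunction calls as the interpreted_function test above) of what the nested reporting now produces for a complex lambda that wraps a non-compilable simple lambda:

// Sketch: detect_issues now descends into map_subspaces/tensor-lambda bodies.
auto bad = Function::parse("map_subspaces(a,f(x)(map(x,f(y)(map(y,f(i)(i+1))))))");
auto issues = InterpretedFunction::detect_issues(*bad);
for (const auto &msg: issues.list) {
    // e.g. "within <map_subspaces node> complex lambda: within <map node> simple lambda: unsupported node type: ..."
    fprintf(stderr, "issue: %s\n", msg.c_str());
}
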
diff --git a/eval/src/vespa/eval/eval/key_gen.cpp b/eval/src/vespa/eval/eval/key_gen.cpp
index 2df20ac0d63..6d45aeafc26 100644
--- a/eval/src/vespa/eval/eval/key_gen.cpp
+++ b/eval/src/vespa/eval/eval/key_gen.cpp
@@ -39,64 +39,65 @@ struct KeyGen : public NodeVisitor, public NodeTraverser {
add_double(node.get_entry(i).get_const_double_value());
}
}
- void visit(const Neg &) override { add_byte( 5); }
- void visit(const Not &) override { add_byte( 6); }
- void visit(const If &node) override { add_byte( 7); add_double(node.p_true()); }
- void visit(const Error &) override { add_byte( 9); }
- void visit(const TensorMap &) override { add_byte(10); } // lambda should be part of key
- void visit(const TensorJoin &) override { add_byte(11); } // lambda should be part of key
- void visit(const TensorMerge &) override { add_byte(12); } // lambda should be part of key
- void visit(const TensorReduce &) override { add_byte(13); } // aggr/dimensions should be part of key
- void visit(const TensorRename &) override { add_byte(14); } // dimensions should be part of key
- void visit(const TensorConcat &) override { add_byte(15); } // dimension should be part of key
- void visit(const TensorCellCast &) override { add_byte(16); } // cell type should be part of key
- void visit(const TensorCreate &) override { add_byte(17); } // type/addr should be part of key
- void visit(const TensorLambda &) override { add_byte(18); } // type/lambda should be part of key
- void visit(const TensorPeek &) override { add_byte(19); } // addr should be part of key
- void visit(const Add &) override { add_byte(20); }
- void visit(const Sub &) override { add_byte(21); }
- void visit(const Mul &) override { add_byte(22); }
- void visit(const Div &) override { add_byte(23); }
- void visit(const Mod &) override { add_byte(24); }
- void visit(const Pow &) override { add_byte(25); }
- void visit(const Equal &) override { add_byte(26); }
- void visit(const NotEqual &) override { add_byte(27); }
- void visit(const Approx &) override { add_byte(28); }
- void visit(const Less &) override { add_byte(29); }
- void visit(const LessEqual &) override { add_byte(30); }
- void visit(const Greater &) override { add_byte(31); }
- void visit(const GreaterEqual &) override { add_byte(32); }
- void visit(const And &) override { add_byte(34); }
- void visit(const Or &) override { add_byte(35); }
- void visit(const Cos &) override { add_byte(36); }
- void visit(const Sin &) override { add_byte(37); }
- void visit(const Tan &) override { add_byte(38); }
- void visit(const Cosh &) override { add_byte(39); }
- void visit(const Sinh &) override { add_byte(40); }
- void visit(const Tanh &) override { add_byte(41); }
- void visit(const Acos &) override { add_byte(42); }
- void visit(const Asin &) override { add_byte(43); }
- void visit(const Atan &) override { add_byte(44); }
- void visit(const Exp &) override { add_byte(45); }
- void visit(const Log10 &) override { add_byte(46); }
- void visit(const Log &) override { add_byte(47); }
- void visit(const Sqrt &) override { add_byte(48); }
- void visit(const Ceil &) override { add_byte(49); }
- void visit(const Fabs &) override { add_byte(50); }
- void visit(const Floor &) override { add_byte(51); }
- void visit(const Atan2 &) override { add_byte(52); }
- void visit(const Ldexp &) override { add_byte(53); }
- void visit(const Pow2 &) override { add_byte(54); }
- void visit(const Fmod &) override { add_byte(55); }
- void visit(const Min &) override { add_byte(56); }
- void visit(const Max &) override { add_byte(57); }
- void visit(const IsNan &) override { add_byte(58); }
- void visit(const Relu &) override { add_byte(59); }
- void visit(const Sigmoid &) override { add_byte(60); }
- void visit(const Elu &) override { add_byte(61); }
- void visit(const Erf &) override { add_byte(62); }
- void visit(const Bit &) override { add_byte(63); }
- void visit(const Hamming &) override { add_byte(64); }
+ void visit(const Neg &) override { add_byte( 5); }
+ void visit(const Not &) override { add_byte( 6); }
+ void visit(const If &node) override { add_byte( 7); add_double(node.p_true()); }
+ void visit(const Error &) override { add_byte( 8); }
+ void visit(const TensorMap &) override { add_byte( 9); } // lambda should be part of key
+ void visit(const TensorMapSubspaces &) override { add_byte(10); } // lambda should be part of key
+ void visit(const TensorJoin &) override { add_byte(11); } // lambda should be part of key
+ void visit(const TensorMerge &) override { add_byte(12); } // lambda should be part of key
+ void visit(const TensorReduce &) override { add_byte(13); } // aggr/dimensions should be part of key
+ void visit(const TensorRename &) override { add_byte(14); } // dimensions should be part of key
+ void visit(const TensorConcat &) override { add_byte(15); } // dimension should be part of key
+ void visit(const TensorCellCast &) override { add_byte(16); } // cell type should be part of key
+ void visit(const TensorCreate &) override { add_byte(17); } // type/addr should be part of key
+ void visit(const TensorLambda &) override { add_byte(18); } // type/lambda should be part of key
+ void visit(const TensorPeek &) override { add_byte(19); } // addr should be part of key
+ void visit(const Add &) override { add_byte(20); }
+ void visit(const Sub &) override { add_byte(21); }
+ void visit(const Mul &) override { add_byte(22); }
+ void visit(const Div &) override { add_byte(23); }
+ void visit(const Mod &) override { add_byte(24); }
+ void visit(const Pow &) override { add_byte(25); }
+ void visit(const Equal &) override { add_byte(26); }
+ void visit(const NotEqual &) override { add_byte(27); }
+ void visit(const Approx &) override { add_byte(28); }
+ void visit(const Less &) override { add_byte(29); }
+ void visit(const LessEqual &) override { add_byte(30); }
+ void visit(const Greater &) override { add_byte(31); }
+ void visit(const GreaterEqual &) override { add_byte(32); }
+ void visit(const And &) override { add_byte(34); }
+ void visit(const Or &) override { add_byte(35); }
+ void visit(const Cos &) override { add_byte(36); }
+ void visit(const Sin &) override { add_byte(37); }
+ void visit(const Tan &) override { add_byte(38); }
+ void visit(const Cosh &) override { add_byte(39); }
+ void visit(const Sinh &) override { add_byte(40); }
+ void visit(const Tanh &) override { add_byte(41); }
+ void visit(const Acos &) override { add_byte(42); }
+ void visit(const Asin &) override { add_byte(43); }
+ void visit(const Atan &) override { add_byte(44); }
+ void visit(const Exp &) override { add_byte(45); }
+ void visit(const Log10 &) override { add_byte(46); }
+ void visit(const Log &) override { add_byte(47); }
+ void visit(const Sqrt &) override { add_byte(48); }
+ void visit(const Ceil &) override { add_byte(49); }
+ void visit(const Fabs &) override { add_byte(50); }
+ void visit(const Floor &) override { add_byte(51); }
+ void visit(const Atan2 &) override { add_byte(52); }
+ void visit(const Ldexp &) override { add_byte(53); }
+ void visit(const Pow2 &) override { add_byte(54); }
+ void visit(const Fmod &) override { add_byte(55); }
+ void visit(const Min &) override { add_byte(56); }
+ void visit(const Max &) override { add_byte(57); }
+ void visit(const IsNan &) override { add_byte(58); }
+ void visit(const Relu &) override { add_byte(59); }
+ void visit(const Sigmoid &) override { add_byte(60); }
+ void visit(const Elu &) override { add_byte(61); }
+ void visit(const Erf &) override { add_byte(62); }
+ void visit(const Bit &) override { add_byte(63); }
+ void visit(const Hamming &) override { add_byte(64); }
// traverse
bool open(const Node &node) override { node.accept(*this); return true; }
diff --git a/eval/src/vespa/eval/eval/llvm/compiled_function.cpp b/eval/src/vespa/eval/eval/llvm/compiled_function.cpp
index 50a8f731942..bd52b30b708 100644
--- a/eval/src/vespa/eval/eval/llvm/compiled_function.cpp
+++ b/eval/src/vespa/eval/eval/llvm/compiled_function.cpp
@@ -128,6 +128,7 @@ CompiledFunction::detect_issues(const nodes::Node &node)
bool open(const nodes::Node &) override { return true; }
void close(const nodes::Node &node) override {
if (nodes::check_type<nodes::TensorMap,
+ nodes::TensorMapSubspaces,
nodes::TensorJoin,
nodes::TensorMerge,
nodes::TensorReduce,
@@ -139,7 +140,7 @@ CompiledFunction::detect_issues(const nodes::Node &node)
nodes::TensorPeek>(node))
{
issues.push_back(make_string("unsupported node type: %s",
- getClassName(node).c_str()));
+ getClassName(node).c_str()));
}
}
} checker;
diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
index 5266aa64b8c..ca95d822be7 100644
--- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
+++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
@@ -456,6 +456,9 @@ struct FunctionBuilder : public NodeVisitor, public NodeTraverser {
void visit(const TensorMap &node) override {
make_error(node.num_children());
}
+ void visit(const TensorMapSubspaces &node) override {
+ make_error(node.num_children());
+ }
void visit(const TensorJoin &node) override {
make_error(node.num_children());
}
diff --git a/eval/src/vespa/eval/eval/make_tensor_function.cpp b/eval/src/vespa/eval/eval/make_tensor_function.cpp
index fe9c9704f6c..0b671a7725e 100644
--- a/eval/src/vespa/eval/eval/make_tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/make_tensor_function.cpp
@@ -51,6 +51,12 @@ struct TensorFunctionBuilder : public NodeVisitor, public NodeTraverser {
stack.back() = tensor_function::map(a, function, stash);
}
+ void make_map_subspaces(const TensorMapSubspaces &node) {
+ assert(stack.size() >= 1);
+ const auto &a = stack.back().get();
+ stack.back() = tensor_function::map_subspaces(a, node.lambda(), types.export_types(node.lambda().root()), stash);
+ }
+
void make_join(const Node &, operation::op2_t function) {
assert(stack.size() >= 2);
const auto &b = stack.back().get();
@@ -198,6 +204,9 @@ struct TensorFunctionBuilder : public NodeVisitor, public NodeTraverser {
make_map(node, token.get()->get().get_function<1>());
}
}
+ void visit(const TensorMapSubspaces &node) override {
+ make_map_subspaces(node);
+ }
void visit(const TensorJoin &node) override {
if (auto op2 = operation::lookup_op2(node.lambda())) {
make_join(node, op2.value());
diff --git a/eval/src/vespa/eval/eval/node_tools.cpp b/eval/src/vespa/eval/eval/node_tools.cpp
index 482111aba8d..20712b833ee 100644
--- a/eval/src/vespa/eval/eval/node_tools.cpp
+++ b/eval/src/vespa/eval/eval/node_tools.cpp
@@ -125,64 +125,65 @@ struct CopyNode : NodeTraverser, NodeVisitor {
}
// tensor nodes
- void visit(const TensorMap &node) override { not_implemented(node); }
- void visit(const TensorJoin &node) override { not_implemented(node); }
- void visit(const TensorMerge &node) override { not_implemented(node); }
- void visit(const TensorReduce &node) override { not_implemented(node); }
- void visit(const TensorRename &node) override { not_implemented(node); }
- void visit(const TensorConcat &node) override { not_implemented(node); }
- void visit(const TensorCellCast &node) override { not_implemented(node); }
- void visit(const TensorCreate &node) override { not_implemented(node); }
- void visit(const TensorLambda &node) override { not_implemented(node); }
- void visit(const TensorPeek &node) override { not_implemented(node); }
+ void visit(const TensorMap &node) override { not_implemented(node); }
+ void visit(const TensorMapSubspaces &node) override { not_implemented(node); }
+ void visit(const TensorJoin &node) override { not_implemented(node); }
+ void visit(const TensorMerge &node) override { not_implemented(node); }
+ void visit(const TensorReduce &node) override { not_implemented(node); }
+ void visit(const TensorRename &node) override { not_implemented(node); }
+ void visit(const TensorConcat &node) override { not_implemented(node); }
+ void visit(const TensorCellCast &node) override { not_implemented(node); }
+ void visit(const TensorCreate &node) override { not_implemented(node); }
+ void visit(const TensorLambda &node) override { not_implemented(node); }
+ void visit(const TensorPeek &node) override { not_implemented(node); }
// operator nodes
- void visit(const Add &node) override { copy_operator(node); }
- void visit(const Sub &node) override { copy_operator(node); }
- void visit(const Mul &node) override { copy_operator(node); }
- void visit(const Div &node) override { copy_operator(node); }
- void visit(const Mod &node) override { copy_operator(node); }
- void visit(const Pow &node) override { copy_operator(node); }
- void visit(const Equal &node) override { copy_operator(node); }
- void visit(const NotEqual &node) override { copy_operator(node); }
- void visit(const Approx &node) override { copy_operator(node); }
- void visit(const Less &node) override { copy_operator(node); }
- void visit(const LessEqual &node) override { copy_operator(node); }
- void visit(const Greater &node) override { copy_operator(node); }
- void visit(const GreaterEqual &node) override { copy_operator(node); }
- void visit(const And &node) override { copy_operator(node); }
- void visit(const Or &node) override { copy_operator(node); }
+ void visit(const Add &node) override { copy_operator(node); }
+ void visit(const Sub &node) override { copy_operator(node); }
+ void visit(const Mul &node) override { copy_operator(node); }
+ void visit(const Div &node) override { copy_operator(node); }
+ void visit(const Mod &node) override { copy_operator(node); }
+ void visit(const Pow &node) override { copy_operator(node); }
+ void visit(const Equal &node) override { copy_operator(node); }
+ void visit(const NotEqual &node) override { copy_operator(node); }
+ void visit(const Approx &node) override { copy_operator(node); }
+ void visit(const Less &node) override { copy_operator(node); }
+ void visit(const LessEqual &node) override { copy_operator(node); }
+ void visit(const Greater &node) override { copy_operator(node); }
+ void visit(const GreaterEqual &node) override { copy_operator(node); }
+ void visit(const And &node) override { copy_operator(node); }
+ void visit(const Or &node) override { copy_operator(node); }
// call nodes
- void visit(const Cos &node) override { copy_call(node); }
- void visit(const Sin &node) override { copy_call(node); }
- void visit(const Tan &node) override { copy_call(node); }
- void visit(const Cosh &node) override { copy_call(node); }
- void visit(const Sinh &node) override { copy_call(node); }
- void visit(const Tanh &node) override { copy_call(node); }
- void visit(const Acos &node) override { copy_call(node); }
- void visit(const Asin &node) override { copy_call(node); }
- void visit(const Atan &node) override { copy_call(node); }
- void visit(const Exp &node) override { copy_call(node); }
- void visit(const Log10 &node) override { copy_call(node); }
- void visit(const Log &node) override { copy_call(node); }
- void visit(const Sqrt &node) override { copy_call(node); }
- void visit(const Ceil &node) override { copy_call(node); }
- void visit(const Fabs &node) override { copy_call(node); }
- void visit(const Floor &node) override { copy_call(node); }
- void visit(const Atan2 &node) override { copy_call(node); }
- void visit(const Ldexp &node) override { copy_call(node); }
- void visit(const Pow2 &node) override { copy_call(node); }
- void visit(const Fmod &node) override { copy_call(node); }
- void visit(const Min &node) override { copy_call(node); }
- void visit(const Max &node) override { copy_call(node); }
- void visit(const IsNan &node) override { copy_call(node); }
- void visit(const Relu &node) override { copy_call(node); }
- void visit(const Sigmoid &node) override { copy_call(node); }
- void visit(const Elu &node) override { copy_call(node); }
- void visit(const Erf &node) override { copy_call(node); }
- void visit(const Bit &node) override { copy_call(node); }
- void visit(const Hamming &node) override { copy_call(node); }
+ void visit(const Cos &node) override { copy_call(node); }
+ void visit(const Sin &node) override { copy_call(node); }
+ void visit(const Tan &node) override { copy_call(node); }
+ void visit(const Cosh &node) override { copy_call(node); }
+ void visit(const Sinh &node) override { copy_call(node); }
+ void visit(const Tanh &node) override { copy_call(node); }
+ void visit(const Acos &node) override { copy_call(node); }
+ void visit(const Asin &node) override { copy_call(node); }
+ void visit(const Atan &node) override { copy_call(node); }
+ void visit(const Exp &node) override { copy_call(node); }
+ void visit(const Log10 &node) override { copy_call(node); }
+ void visit(const Log &node) override { copy_call(node); }
+ void visit(const Sqrt &node) override { copy_call(node); }
+ void visit(const Ceil &node) override { copy_call(node); }
+ void visit(const Fabs &node) override { copy_call(node); }
+ void visit(const Floor &node) override { copy_call(node); }
+ void visit(const Atan2 &node) override { copy_call(node); }
+ void visit(const Ldexp &node) override { copy_call(node); }
+ void visit(const Pow2 &node) override { copy_call(node); }
+ void visit(const Fmod &node) override { copy_call(node); }
+ void visit(const Min &node) override { copy_call(node); }
+ void visit(const Max &node) override { copy_call(node); }
+ void visit(const IsNan &node) override { copy_call(node); }
+ void visit(const Relu &node) override { copy_call(node); }
+ void visit(const Sigmoid &node) override { copy_call(node); }
+ void visit(const Elu &node) override { copy_call(node); }
+ void visit(const Erf &node) override { copy_call(node); }
+ void visit(const Bit &node) override { copy_call(node); }
+ void visit(const Hamming &node) override { copy_call(node); }
// traverse nodes
bool open(const Node &) override { return !error; }
diff --git a/eval/src/vespa/eval/eval/node_types.cpp b/eval/src/vespa/eval/eval/node_types.cpp
index c234631984f..767d0f8b28a 100644
--- a/eval/src/vespa/eval/eval/node_types.cpp
+++ b/eval/src/vespa/eval/eval/node_types.cpp
@@ -139,6 +139,29 @@ struct TypeResolver : public NodeVisitor, public NodeTraverser {
bind(ValueType::error_type(), node, false);
}
void visit(const TensorMap &node) override { resolve_op1(node); }
+ void visit(const TensorMapSubspaces &node) override {
+ const ValueType &in_type = type(node.child());
+ auto outer_type = in_type.strip_indexed_dimensions();
+ auto inner_type = in_type.strip_mapped_dimensions();
+ std::vector<ValueType> arg_type({inner_type});
+ NodeTypes lambda_types(node.lambda(), arg_type);
+ const ValueType &lambda_res = lambda_types.get_type(node.lambda().root());
+ if (lambda_res.is_error()) {
+ import_errors(lambda_types);
+ return fail(node, "lambda function has type errors", false);
+ }
+ if (lambda_res.count_mapped_dimensions() > 0) {
+ return fail(node, fmt("lambda function result contains mapped dimensions: %s",
+ lambda_res.to_spec().c_str()), false);
+ }
+ auto res_type = outer_type.wrap(lambda_res);
+ if (res_type.is_error()) {
+ return fail(node, fmt("lambda result contains dimensions that conflict with input type: %s <-> %s",
+ lambda_res.to_spec().c_str(), in_type.to_spec().c_str()), false);
+ }
+ import_types(lambda_types);
+ bind(res_type, node);
+ }
void visit(const TensorJoin &node) override { resolve_op2(node); }
void visit(const TensorMerge &node) override {
bind(ValueType::merge(type(node.get_child(0)),
@@ -316,6 +339,9 @@ struct TypeExporter : public NodeTraverser {
if (auto lambda = as<TensorLambda>(node)) {
lambda->lambda().root().traverse(*this);
}
+ if (auto map_subspaces = as<TensorMapSubspaces>(node)) {
+ map_subspaces->lambda().root().traverse(*this);
+ }
return true;
}
void close(const Node &node) override {
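
A resolution sketch (assuming the NodeTypes constructor and accessors used by the map_subspaces test earlier in this patch) showing the effect of the new visit: the lambda sees only the indexed part of the input type, and its result type is wrapped back under the input's mapped dimensions.

// Sketch: resolving the type of a map_subspaces expression.
auto fun = Function::parse("map_subspaces(a,f(t)(cell_cast(t,int8)))");
NodeTypes types(*fun, {ValueType::from_spec("tensor<float>(x{},y[10])")});
// lambda argument: tensor<float>(y[10])    (mapped dimension x{} stripped)
// lambda result:   tensor<int8>(y[10])
// node result:     tensor<int8>(x{},y[10]) (x{} wrapped back around the result)
auto res_spec = types.get_type(fun->root()).to_spec();  // "tensor<int8>(x{},y[10])"
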
diff --git a/eval/src/vespa/eval/eval/node_visitor.h b/eval/src/vespa/eval/eval/node_visitor.h
index dcd1486824a..c57253d138c 100644
--- a/eval/src/vespa/eval/eval/node_visitor.h
+++ b/eval/src/vespa/eval/eval/node_visitor.h
@@ -18,74 +18,75 @@ namespace vespalib::eval {
struct NodeVisitor {
// basic nodes
- virtual void visit(const nodes::Number &) = 0;
- virtual void visit(const nodes::Symbol &) = 0;
- virtual void visit(const nodes::String &) = 0;
- virtual void visit(const nodes::In &) = 0;
- virtual void visit(const nodes::Neg &) = 0;
- virtual void visit(const nodes::Not &) = 0;
- virtual void visit(const nodes::If &) = 0;
- virtual void visit(const nodes::Error &) = 0;
+ virtual void visit(const nodes::Number &) = 0;
+ virtual void visit(const nodes::Symbol &) = 0;
+ virtual void visit(const nodes::String &) = 0;
+ virtual void visit(const nodes::In &) = 0;
+ virtual void visit(const nodes::Neg &) = 0;
+ virtual void visit(const nodes::Not &) = 0;
+ virtual void visit(const nodes::If &) = 0;
+ virtual void visit(const nodes::Error &) = 0;
// tensor nodes
- virtual void visit(const nodes::TensorMap &) = 0;
- virtual void visit(const nodes::TensorJoin &) = 0;
- virtual void visit(const nodes::TensorMerge &) = 0;
- virtual void visit(const nodes::TensorReduce &) = 0;
- virtual void visit(const nodes::TensorRename &) = 0;
- virtual void visit(const nodes::TensorConcat &) = 0;
- virtual void visit(const nodes::TensorCellCast &) = 0;
- virtual void visit(const nodes::TensorCreate &) = 0;
- virtual void visit(const nodes::TensorLambda &) = 0;
- virtual void visit(const nodes::TensorPeek &) = 0;
+ virtual void visit(const nodes::TensorMap &) = 0;
+ virtual void visit(const nodes::TensorMapSubspaces &) = 0;
+ virtual void visit(const nodes::TensorJoin &) = 0;
+ virtual void visit(const nodes::TensorMerge &) = 0;
+ virtual void visit(const nodes::TensorReduce &) = 0;
+ virtual void visit(const nodes::TensorRename &) = 0;
+ virtual void visit(const nodes::TensorConcat &) = 0;
+ virtual void visit(const nodes::TensorCellCast &) = 0;
+ virtual void visit(const nodes::TensorCreate &) = 0;
+ virtual void visit(const nodes::TensorLambda &) = 0;
+ virtual void visit(const nodes::TensorPeek &) = 0;
// operator nodes
- virtual void visit(const nodes::Add &) = 0;
- virtual void visit(const nodes::Sub &) = 0;
- virtual void visit(const nodes::Mul &) = 0;
- virtual void visit(const nodes::Div &) = 0;
- virtual void visit(const nodes::Mod &) = 0;
- virtual void visit(const nodes::Pow &) = 0;
- virtual void visit(const nodes::Equal &) = 0;
- virtual void visit(const nodes::NotEqual &) = 0;
- virtual void visit(const nodes::Approx &) = 0;
- virtual void visit(const nodes::Less &) = 0;
- virtual void visit(const nodes::LessEqual &) = 0;
- virtual void visit(const nodes::Greater &) = 0;
- virtual void visit(const nodes::GreaterEqual &) = 0;
- virtual void visit(const nodes::And &) = 0;
- virtual void visit(const nodes::Or &) = 0;
+ virtual void visit(const nodes::Add &) = 0;
+ virtual void visit(const nodes::Sub &) = 0;
+ virtual void visit(const nodes::Mul &) = 0;
+ virtual void visit(const nodes::Div &) = 0;
+ virtual void visit(const nodes::Mod &) = 0;
+ virtual void visit(const nodes::Pow &) = 0;
+ virtual void visit(const nodes::Equal &) = 0;
+ virtual void visit(const nodes::NotEqual &) = 0;
+ virtual void visit(const nodes::Approx &) = 0;
+ virtual void visit(const nodes::Less &) = 0;
+ virtual void visit(const nodes::LessEqual &) = 0;
+ virtual void visit(const nodes::Greater &) = 0;
+ virtual void visit(const nodes::GreaterEqual &) = 0;
+ virtual void visit(const nodes::And &) = 0;
+ virtual void visit(const nodes::Or &) = 0;
// call nodes
- virtual void visit(const nodes::Cos &) = 0;
- virtual void visit(const nodes::Sin &) = 0;
- virtual void visit(const nodes::Tan &) = 0;
- virtual void visit(const nodes::Cosh &) = 0;
- virtual void visit(const nodes::Sinh &) = 0;
- virtual void visit(const nodes::Tanh &) = 0;
- virtual void visit(const nodes::Acos &) = 0;
- virtual void visit(const nodes::Asin &) = 0;
- virtual void visit(const nodes::Atan &) = 0;
- virtual void visit(const nodes::Exp &) = 0;
- virtual void visit(const nodes::Log10 &) = 0;
- virtual void visit(const nodes::Log &) = 0;
- virtual void visit(const nodes::Sqrt &) = 0;
- virtual void visit(const nodes::Ceil &) = 0;
- virtual void visit(const nodes::Fabs &) = 0;
- virtual void visit(const nodes::Floor &) = 0;
- virtual void visit(const nodes::Atan2 &) = 0;
- virtual void visit(const nodes::Ldexp &) = 0;
- virtual void visit(const nodes::Pow2 &) = 0;
- virtual void visit(const nodes::Fmod &) = 0;
- virtual void visit(const nodes::Min &) = 0;
- virtual void visit(const nodes::Max &) = 0;
- virtual void visit(const nodes::IsNan &) = 0;
- virtual void visit(const nodes::Relu &) = 0;
- virtual void visit(const nodes::Sigmoid &) = 0;
- virtual void visit(const nodes::Elu &) = 0;
- virtual void visit(const nodes::Erf &) = 0;
- virtual void visit(const nodes::Bit &) = 0;
- virtual void visit(const nodes::Hamming &) = 0;
+ virtual void visit(const nodes::Cos &) = 0;
+ virtual void visit(const nodes::Sin &) = 0;
+ virtual void visit(const nodes::Tan &) = 0;
+ virtual void visit(const nodes::Cosh &) = 0;
+ virtual void visit(const nodes::Sinh &) = 0;
+ virtual void visit(const nodes::Tanh &) = 0;
+ virtual void visit(const nodes::Acos &) = 0;
+ virtual void visit(const nodes::Asin &) = 0;
+ virtual void visit(const nodes::Atan &) = 0;
+ virtual void visit(const nodes::Exp &) = 0;
+ virtual void visit(const nodes::Log10 &) = 0;
+ virtual void visit(const nodes::Log &) = 0;
+ virtual void visit(const nodes::Sqrt &) = 0;
+ virtual void visit(const nodes::Ceil &) = 0;
+ virtual void visit(const nodes::Fabs &) = 0;
+ virtual void visit(const nodes::Floor &) = 0;
+ virtual void visit(const nodes::Atan2 &) = 0;
+ virtual void visit(const nodes::Ldexp &) = 0;
+ virtual void visit(const nodes::Pow2 &) = 0;
+ virtual void visit(const nodes::Fmod &) = 0;
+ virtual void visit(const nodes::Min &) = 0;
+ virtual void visit(const nodes::Max &) = 0;
+ virtual void visit(const nodes::IsNan &) = 0;
+ virtual void visit(const nodes::Relu &) = 0;
+ virtual void visit(const nodes::Sigmoid &) = 0;
+ virtual void visit(const nodes::Elu &) = 0;
+ virtual void visit(const nodes::Erf &) = 0;
+ virtual void visit(const nodes::Bit &) = 0;
+ virtual void visit(const nodes::Hamming &) = 0;
virtual ~NodeVisitor() {}
};
@@ -95,68 +96,69 @@ struct NodeVisitor {
* of all types not specifically handled.
**/
struct EmptyNodeVisitor : NodeVisitor {
- void visit(const nodes::Number &) override {}
- void visit(const nodes::Symbol &) override {}
- void visit(const nodes::String &) override {}
- void visit(const nodes::In &) override {}
- void visit(const nodes::Neg &) override {}
- void visit(const nodes::Not &) override {}
- void visit(const nodes::If &) override {}
- void visit(const nodes::Error &) override {}
- void visit(const nodes::TensorMap &) override {}
- void visit(const nodes::TensorJoin &) override {}
- void visit(const nodes::TensorMerge &) override {}
- void visit(const nodes::TensorReduce &) override {}
- void visit(const nodes::TensorRename &) override {}
- void visit(const nodes::TensorConcat &) override {}
- void visit(const nodes::TensorCellCast &) override {}
- void visit(const nodes::TensorCreate &) override {}
- void visit(const nodes::TensorLambda &) override {}
- void visit(const nodes::TensorPeek &) override {}
- void visit(const nodes::Add &) override {}
- void visit(const nodes::Sub &) override {}
- void visit(const nodes::Mul &) override {}
- void visit(const nodes::Div &) override {}
- void visit(const nodes::Mod &) override {}
- void visit(const nodes::Pow &) override {}
- void visit(const nodes::Equal &) override {}
- void visit(const nodes::NotEqual &) override {}
- void visit(const nodes::Approx &) override {}
- void visit(const nodes::Less &) override {}
- void visit(const nodes::LessEqual &) override {}
- void visit(const nodes::Greater &) override {}
- void visit(const nodes::GreaterEqual &) override {}
- void visit(const nodes::And &) override {}
- void visit(const nodes::Or &) override {}
- void visit(const nodes::Cos &) override {}
- void visit(const nodes::Sin &) override {}
- void visit(const nodes::Tan &) override {}
- void visit(const nodes::Cosh &) override {}
- void visit(const nodes::Sinh &) override {}
- void visit(const nodes::Tanh &) override {}
- void visit(const nodes::Acos &) override {}
- void visit(const nodes::Asin &) override {}
- void visit(const nodes::Atan &) override {}
- void visit(const nodes::Exp &) override {}
- void visit(const nodes::Log10 &) override {}
- void visit(const nodes::Log &) override {}
- void visit(const nodes::Sqrt &) override {}
- void visit(const nodes::Ceil &) override {}
- void visit(const nodes::Fabs &) override {}
- void visit(const nodes::Floor &) override {}
- void visit(const nodes::Atan2 &) override {}
- void visit(const nodes::Ldexp &) override {}
- void visit(const nodes::Pow2 &) override {}
- void visit(const nodes::Fmod &) override {}
- void visit(const nodes::Min &) override {}
- void visit(const nodes::Max &) override {}
- void visit(const nodes::IsNan &) override {}
- void visit(const nodes::Relu &) override {}
- void visit(const nodes::Sigmoid &) override {}
- void visit(const nodes::Elu &) override {}
- void visit(const nodes::Erf &) override {}
- void visit(const nodes::Bit &) override {}
- void visit(const nodes::Hamming &) override {}
+ void visit(const nodes::Number &) override {}
+ void visit(const nodes::Symbol &) override {}
+ void visit(const nodes::String &) override {}
+ void visit(const nodes::In &) override {}
+ void visit(const nodes::Neg &) override {}
+ void visit(const nodes::Not &) override {}
+ void visit(const nodes::If &) override {}
+ void visit(const nodes::Error &) override {}
+ void visit(const nodes::TensorMap &) override {}
+ void visit(const nodes::TensorMapSubspaces &) override {}
+ void visit(const nodes::TensorJoin &) override {}
+ void visit(const nodes::TensorMerge &) override {}
+ void visit(const nodes::TensorReduce &) override {}
+ void visit(const nodes::TensorRename &) override {}
+ void visit(const nodes::TensorConcat &) override {}
+ void visit(const nodes::TensorCellCast &) override {}
+ void visit(const nodes::TensorCreate &) override {}
+ void visit(const nodes::TensorLambda &) override {}
+ void visit(const nodes::TensorPeek &) override {}
+ void visit(const nodes::Add &) override {}
+ void visit(const nodes::Sub &) override {}
+ void visit(const nodes::Mul &) override {}
+ void visit(const nodes::Div &) override {}
+ void visit(const nodes::Mod &) override {}
+ void visit(const nodes::Pow &) override {}
+ void visit(const nodes::Equal &) override {}
+ void visit(const nodes::NotEqual &) override {}
+ void visit(const nodes::Approx &) override {}
+ void visit(const nodes::Less &) override {}
+ void visit(const nodes::LessEqual &) override {}
+ void visit(const nodes::Greater &) override {}
+ void visit(const nodes::GreaterEqual &) override {}
+ void visit(const nodes::And &) override {}
+ void visit(const nodes::Or &) override {}
+ void visit(const nodes::Cos &) override {}
+ void visit(const nodes::Sin &) override {}
+ void visit(const nodes::Tan &) override {}
+ void visit(const nodes::Cosh &) override {}
+ void visit(const nodes::Sinh &) override {}
+ void visit(const nodes::Tanh &) override {}
+ void visit(const nodes::Acos &) override {}
+ void visit(const nodes::Asin &) override {}
+ void visit(const nodes::Atan &) override {}
+ void visit(const nodes::Exp &) override {}
+ void visit(const nodes::Log10 &) override {}
+ void visit(const nodes::Log &) override {}
+ void visit(const nodes::Sqrt &) override {}
+ void visit(const nodes::Ceil &) override {}
+ void visit(const nodes::Fabs &) override {}
+ void visit(const nodes::Floor &) override {}
+ void visit(const nodes::Atan2 &) override {}
+ void visit(const nodes::Ldexp &) override {}
+ void visit(const nodes::Pow2 &) override {}
+ void visit(const nodes::Fmod &) override {}
+ void visit(const nodes::Min &) override {}
+ void visit(const nodes::Max &) override {}
+ void visit(const nodes::IsNan &) override {}
+ void visit(const nodes::Relu &) override {}
+ void visit(const nodes::Sigmoid &) override {}
+ void visit(const nodes::Elu &) override {}
+ void visit(const nodes::Erf &) override {}
+ void visit(const nodes::Bit &) override {}
+ void visit(const nodes::Hamming &) override {}
};
}
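
Since EmptyNodeVisitor now also provides a no-op overload for the new TensorMapSubspaces node, downstream visitors that derive from it stay source-compatible and only need to override the cases they actually care about. A minimal sketch, assuming the usual vespalib::eval namespace and include paths for these headers; the counting visitor itself is hypothetical and not part of this change:

    #include <vespa/eval/eval/node_visitor.h>
    #include <vespa/eval/eval/tensor_nodes.h>
    #include <cstddef>

    // Hypothetical helper: counts map_subspaces nodes in an expression tree,
    // relying on EmptyNodeVisitor for no-op handling of every other node type.
    struct CountMapSubspaces : vespalib::eval::EmptyNodeVisitor {
        size_t count = 0;
        void visit(const vespalib::eval::nodes::TensorMapSubspaces &) override { ++count; }
    };
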
diff --git a/eval/src/vespa/eval/eval/tensor_function.cpp b/eval/src/vespa/eval/eval/tensor_function.cpp
index b258b6c824e..14d486aeb48 100644
--- a/eval/src/vespa/eval/eval/tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/tensor_function.cpp
@@ -12,6 +12,7 @@
#include <vespa/eval/instruction/generic_join.h>
#include <vespa/eval/instruction/generic_lambda.h>
#include <vespa/eval/instruction/generic_map.h>
+#include <vespa/eval/instruction/generic_map_subspaces.h>
#include <vespa/eval/instruction/generic_merge.h>
#include <vespa/eval/instruction/generic_peek.h>
#include <vespa/eval/instruction/generic_reduce.h>
@@ -172,6 +173,20 @@ Map::visit_self(vespalib::ObjectVisitor &visitor) const
//-----------------------------------------------------------------------------
+InterpretedFunction::Instruction
+MapSubspaces::compile_self(const ValueBuilderFactory &factory, Stash &stash) const
+{
+ return instruction::GenericMapSubspaces::make_instruction(*this, factory, stash);
+}
+
+void
+MapSubspaces::visit_self(vespalib::ObjectVisitor &visitor) const
+{
+ Super::visit_self(visitor);
+}
+
+//-----------------------------------------------------------------------------
+
Instruction
Join::compile_self(const ValueBuilderFactory &factory, Stash &stash) const
{
@@ -455,6 +470,11 @@ const TensorFunction &map(const TensorFunction &child, map_fun_t function, Stash
return stash.create<Map>(result_type, child, function);
}
+const TensorFunction &map_subspaces(const TensorFunction &child, const Function &function, NodeTypes node_types, Stash &stash) {
+ auto result_type = child.result_type().strip_indexed_dimensions().wrap(node_types.get_type(function.root()));
+ return stash.create<MapSubspaces>(result_type, child, function, std::move(node_types));
+}
+
const TensorFunction &join(const TensorFunction &lhs, const TensorFunction &rhs, join_fun_t function, Stash &stash) {
ValueType result_type = ValueType::join(lhs.result_type(), rhs.result_type());
return stash.create<Join>(result_type, lhs, rhs, function);
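
The result type built in map_subspaces() above keeps only the mapped ("outer") dimensions of the child and wraps them around whatever type the lambda produces for a single dense subspace. A rough sketch of that calculation using spec strings; the concrete types are illustrative assumptions, only the strip_indexed_dimensions()/wrap() combination is taken from the code above:

    using vespalib::eval::ValueType;

    // inside some function; types chosen for illustration only
    ValueType child_type  = ValueType::from_spec("tensor(key{},x[3])");
    ValueType lambda_type = ValueType::from_spec("tensor(x[3])"); // assumed lambda result per subspace
    ValueType result_type = child_type.strip_indexed_dimensions().wrap(lambda_type);
    // result_type.to_spec() is expected to be "tensor(key{},x[3])"
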
diff --git a/eval/src/vespa/eval/eval/tensor_function.h b/eval/src/vespa/eval/eval/tensor_function.h
index 24548bfae4d..2c703fbdfef 100644
--- a/eval/src/vespa/eval/eval/tensor_function.h
+++ b/eval/src/vespa/eval/eval/tensor_function.h
@@ -249,6 +249,29 @@ public:
//-----------------------------------------------------------------------------
+class MapSubspaces : public Op1
+{
+ using Super = Op1;
+private:
+ ValueType _inner_type;
+ std::shared_ptr<Function const> _lambda;
+ NodeTypes _lambda_types;
+public:
+ MapSubspaces(const ValueType &result_type_in, const TensorFunction &child_in, const Function &lambda_in, NodeTypes lambda_types_in)
+ : Super(result_type_in, child_in),
+ _inner_type(child_in.result_type().strip_mapped_dimensions()),
+ _lambda(lambda_in.shared_from_this()),
+ _lambda_types(std::move(lambda_types_in)) {}
+ const ValueType &inner_type() const { return _inner_type; }
+ const Function &lambda() const { return *_lambda; }
+ const NodeTypes &types() const { return _lambda_types; }
+ bool result_is_mutable() const override { return true; }
+ InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &factory, Stash &stash) const final override;
+ void visit_self(vespalib::ObjectVisitor &visitor) const override;
+};
+
+//-----------------------------------------------------------------------------
+
class Join : public Op2
{
using Super = Op2;
@@ -463,6 +486,7 @@ const TensorFunction &const_value(const Value &value, Stash &stash);
const TensorFunction &inject(const ValueType &type, size_t param_idx, Stash &stash);
const TensorFunction &reduce(const TensorFunction &child, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash);
const TensorFunction &map(const TensorFunction &child, map_fun_t function, Stash &stash);
+const TensorFunction &map_subspaces(const TensorFunction &child, const Function &function, NodeTypes node_types, Stash &stash);
const TensorFunction &join(const TensorFunction &lhs, const TensorFunction &rhs, join_fun_t function, Stash &stash);
const TensorFunction &merge(const TensorFunction &lhs, const TensorFunction &rhs, join_fun_t function, Stash &stash);
const TensorFunction &concat(const TensorFunction &lhs, const TensorFunction &rhs, const vespalib::string &dimension, Stash &stash);
diff --git a/eval/src/vespa/eval/eval/tensor_nodes.cpp b/eval/src/vespa/eval/eval/tensor_nodes.cpp
index bfcd1f979e2..ef2718234b2 100644
--- a/eval/src/vespa/eval/eval/tensor_nodes.cpp
+++ b/eval/src/vespa/eval/eval/tensor_nodes.cpp
@@ -5,15 +5,16 @@
namespace vespalib::eval::nodes {
-void TensorMap ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorJoin ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorMerge ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorReduce ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorRename ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorConcat ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorCellCast::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorCreate ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorLambda ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
-void TensorPeek ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorMap ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorMapSubspaces::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorJoin ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorMerge ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorReduce ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorRename ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorConcat ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorCellCast ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorCreate ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorLambda ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
+void TensorPeek ::accept(NodeVisitor &visitor) const { visitor.visit(*this); }
}
diff --git a/eval/src/vespa/eval/eval/tensor_nodes.h b/eval/src/vespa/eval/eval/tensor_nodes.h
index 33f9cc5e39c..6ed19e81712 100644
--- a/eval/src/vespa/eval/eval/tensor_nodes.h
+++ b/eval/src/vespa/eval/eval/tensor_nodes.h
@@ -44,6 +44,36 @@ public:
}
};
+class TensorMapSubspaces : public Node {
+private:
+ Node_UP _child;
+ std::shared_ptr<Function const> _lambda;
+public:
+ TensorMapSubspaces(Node_UP child, std::shared_ptr<Function const> lambda)
+ : _child(std::move(child)), _lambda(std::move(lambda)) {}
+ const Node &child() const { return *_child; }
+ const Function &lambda() const { return *_lambda; }
+ vespalib::string dump(DumpContext &ctx) const override {
+ vespalib::string str;
+ str += "map_subspaces(";
+ str += _child->dump(ctx);
+ str += ",";
+ str += _lambda->dump_as_lambda();
+ str += ")";
+ return str;
+ }
+ void accept(NodeVisitor &visitor) const override;
+ size_t num_children() const override { return 1; }
+ const Node &get_child(size_t idx) const override {
+ (void) idx;
+ assert(idx == 0);
+ return *_child;
+ }
+ void detach_children(NodeHandler &handler) override {
+ handler.handle(std::move(_child));
+ }
+};
+
class TensorJoin : public Node {
private:
Node_UP _lhs;
diff --git a/eval/src/vespa/eval/eval/tensor_spec.cpp b/eval/src/vespa/eval/eval/tensor_spec.cpp
index c9401606600..323f9eaf0fe 100644
--- a/eval/src/vespa/eval/eval/tensor_spec.cpp
+++ b/eval/src/vespa/eval/eval/tensor_spec.cpp
@@ -7,6 +7,7 @@
#include "value.h"
#include "value_codec.h"
#include "value_type.h"
+#include <vespa/vespalib/util/require.h>
#include <vespa/vespalib/util/overload.h>
#include <vespa/vespalib/util/visit_ranges.h>
#include <vespa/vespalib/util/stringfmt.h>
@@ -182,19 +183,19 @@ struct NormalizeTensorSpec {
size_t dense_key = 0;
auto binding = entry.first.begin();
for (const auto &dim : type.dimensions()) {
- assert(binding != entry.first.end());
- assert(dim.name == binding->first);
- assert(dim.is_mapped() == binding->second.is_mapped());
+ REQUIRE(binding != entry.first.end());
+ REQUIRE(dim.name == binding->first);
+ REQUIRE(dim.is_mapped() == binding->second.is_mapped());
if (dim.is_mapped()) {
sparse_key.push_back(binding->second.name);
} else {
- assert(binding->second.index < dim.size);
+ REQUIRE(binding->second.index < dim.size);
dense_key = (dense_key * dim.size) + binding->second.index;
}
++binding;
}
- assert(binding == entry.first.end());
- assert(dense_key < map.values_per_entry());
+ REQUIRE(binding == entry.first.end());
+ REQUIRE(dense_key < map.values_per_entry());
auto [tag, ignore] = map.lookup_or_add_entry(ConstArrayRef<vespalib::stringref>(sparse_key));
map.get_values(tag)[dense_key] = entry.second;
}
@@ -212,7 +213,7 @@ struct NormalizeTensorSpec {
address.emplace(dim.name, *sparse_addr_iter++);
}
}
- assert(sparse_addr_iter == keys.end());
+ REQUIRE(sparse_addr_iter == keys.end());
for (size_t i = 0; i < values.size(); ++i) {
size_t dense_key = i;
for (auto dim = type.dimensions().rbegin();
@@ -364,7 +365,12 @@ TensorSpec::normalize() const
if (my_type.is_error()) {
return TensorSpec(my_type.to_spec());
}
- return typify_invoke<1,TypifyCellType,NormalizeTensorSpec>(my_type.cell_type(), my_type, *this);
+ try {
+ return typify_invoke<1,TypifyCellType,NormalizeTensorSpec>(my_type.cell_type(), my_type, *this);
+ } catch (RequireFailedException &e) {
+ fprintf(stderr, "TensorSpec::normalize: invalid spec: %s\n", to_string().c_str());
+ assert(false); // preserve crashing behavior
+ abort(); // not reached when asserts are enabled; avoids falling off the end of a non-void function when NDEBUG removes the assert
+ }
}
vespalib::string
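
Switching these checks from assert to REQUIRE means a malformed spec now raises RequireFailedException instead of aborting on the spot, which is what lets normalize() above log the offending spec before terminating. A minimal sketch of the mechanism, assuming the exception lives in the vespalib namespace as the catch above suggests; the helper and values are made up:

    #include <vespa/vespalib/util/require.h>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical helper: validates a dense key the way NormalizeTensorSpec now does.
    void check_dense_key(size_t dense_key, size_t values_per_entry) {
        REQUIRE(dense_key < values_per_entry); // throws instead of asserting
    }

    void demo() {
        try {
            check_dense_key(7, 4);
        } catch (const vespalib::RequireFailedException &) {
            fprintf(stderr, "rejected invalid dense key\n");
        }
    }
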
diff --git a/eval/src/vespa/eval/eval/test/eval_spec.cpp b/eval/src/vespa/eval/eval/test/eval_spec.cpp
index af88b2a526a..72664168114 100644
--- a/eval/src/vespa/eval/eval/test/eval_spec.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_spec.cpp
@@ -198,6 +198,8 @@ void
EvalSpec::add_tensor_operation_cases() {
add_rule({"a", -1.0, 1.0}, "map(a,f(x)(sin(x)))", [](double x){ return std::sin(x); });
add_rule({"a", -1.0, 1.0}, "map(a,f(x)(x*x*3))", [](double x){ return ((x * x) * 3); });
+ add_rule({"a", -1.0, 1.0}, "map_subspaces(a,f(x)(sin(x)))", [](double x){ return std::sin(x); });
+ add_rule({"a", -1.0, 1.0}, "map_subspaces(a,f(x)(x*x*3))", [](double x){ return ((x * x) * 3); });
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "join(a,b,f(x,y)(x+y))", [](double x, double y){ return (x + y); });
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "join(a,b,f(x,y)(x*y*3))", [](double x, double y){ return ((x * y) * 3); });
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "merge(a,b,f(x,y)(x+y))", [](double x, double y){ return (x + y); });
diff --git a/eval/src/vespa/eval/eval/test/reference_evaluation.cpp b/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
index 9bfa314493a..5a1fd2041dd 100644
--- a/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
+++ b/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
@@ -136,6 +136,13 @@ struct EvalNode : public NodeVisitor {
result = ReferenceOperations::peek(spec, children);
}
+ void eval_map_subspaces(const Node &node, const Node &lambda) {
+ auto fun = [&](const TensorSpec &subspace) {
+ return eval_node(lambda, {subspace});
+ };
+ result = ReferenceOperations::map_subspaces(eval_node(node, params), fun);
+ }
+
//-------------------------------------------------------------------------
void visit(const Number &node) override {
@@ -176,6 +183,9 @@ struct EvalNode : public NodeVisitor {
};
eval_map(node.child(), my_op1);
}
+ void visit(const TensorMapSubspaces &node) override {
+ eval_map_subspaces(node.child(), node.lambda().root());
+ }
void visit(const TensorJoin &node) override {
auto my_op2 = [&](double a, double b) {
return ReferenceEvaluation::eval(node.lambda(), {num(a), num(b)}).as_double();
diff --git a/eval/src/vespa/eval/eval/test/reference_operations.cpp b/eval/src/vespa/eval/eval/test/reference_operations.cpp
index 5d79f168aaa..6771eda91e3 100644
--- a/eval/src/vespa/eval/eval/test/reference_operations.cpp
+++ b/eval/src/vespa/eval/eval/test/reference_operations.cpp
@@ -176,6 +176,52 @@ TensorSpec ReferenceOperations::map(const TensorSpec &in_a, map_fun_t func) {
}
+TensorSpec ReferenceOperations::map_subspaces(const TensorSpec &a, map_subspace_fun_t fun) {
+ auto type = ValueType::from_spec(a.type());
+ auto outer_type = type.strip_indexed_dimensions();
+ auto inner_type = type.strip_mapped_dimensions();
+ auto inner_type_str = inner_type.to_spec();
+ auto lambda_res_type = ValueType::from_spec(fun(TensorSpec(inner_type_str).normalize()).type());
+ auto res_type = outer_type.wrap(lambda_res_type);
+ auto split = [](const auto &addr) {
+ TensorSpec::Address outer;
+ TensorSpec::Address inner;
+ for (const auto &[name, label]: addr) {
+ if (label.is_mapped()) {
+ outer.insert_or_assign(name, label);
+ } else {
+ inner.insert_or_assign(name, label);
+ }
+ }
+ return std::make_pair(outer, inner);
+ };
+ auto combine = [](const auto &outer, const auto &inner) {
+ TensorSpec::Address addr;
+ for (const auto &[name, label]: outer) {
+ addr.insert_or_assign(name, label);
+ }
+ for (const auto &[name, label]: inner) {
+ addr.insert_or_assign(name, label);
+ }
+ return addr;
+ };
+ std::map<TensorSpec::Address,TensorSpec> subspaces;
+ for (const auto &[addr, value]: a.cells()) {
+ auto [outer, inner] = split(addr);
+ auto &subspace = subspaces.try_emplace(outer, inner_type_str).first->second;
+ subspace.add(inner, value);
+ }
+ TensorSpec result(res_type.to_spec());
+ for (const auto &[outer, subspace]: subspaces) {
+ auto mapped = fun(subspace);
+ for (const auto &[inner, value]: mapped.cells()) {
+ result.add(combine(outer, inner), value);
+ }
+ }
+ return result.normalize();
+}
+
+
TensorSpec ReferenceOperations::merge(const TensorSpec &in_a, const TensorSpec &in_b, join_fun_t fun) {
auto a = in_a.normalize();
auto b = in_b.normalize();
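
The new ReferenceOperations::map_subspaces above splits each cell address into its mapped ("outer") and indexed ("inner") parts, groups the cells into one TensorSpec per outer address, applies the lambda to every such dense subspace, and glues the outer address back onto the lambda output. A hedged usage sketch; namespace qualifiers are omitted and the choice of lambda is illustrative, but reduce() is the existing reference operation declared in the same header:

    // TensorSpec, Aggr and ReferenceOperations are assumed to come from the eval headers.
    TensorSpec input = TensorSpec("tensor(key{},x[2])")
        .add({{"key","a"},{"x",0}}, 1.0)
        .add({{"key","a"},{"x",1}}, 2.0)
        .add({{"key","b"},{"x",0}}, 3.0)
        .add({{"key","b"},{"x",1}}, 4.0);

    // The lambda sees one "tensor(x[2])" subspace at a time; summing it away should
    // leave a plain sparse tensor(key{}) once the outer dimensions are wrapped back on.
    TensorSpec result = ReferenceOperations::map_subspaces(input, [](const TensorSpec &subspace) {
        return ReferenceOperations::reduce(subspace, Aggr::SUM, {"x"});
    });
    // expected cells: {key:a} -> 3, {key:b} -> 7
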
diff --git a/eval/src/vespa/eval/eval/test/reference_operations.h b/eval/src/vespa/eval/eval/test/reference_operations.h
index 85aa73ec958..dd9c4b143ed 100644
--- a/eval/src/vespa/eval/eval/test/reference_operations.h
+++ b/eval/src/vespa/eval/eval/test/reference_operations.h
@@ -19,6 +19,7 @@ struct ReferenceOperations {
using map_fun_t = std::function<double(double)>;
using join_fun_t = std::function<double(double,double)>;
using lambda_fun_t = std::function<double(const std::vector<size_t> &dimension_indexes)>;
+ using map_subspace_fun_t = std::function<TensorSpec(const TensorSpec &subspace)>;
// mapping from cell address to index of child that computes the cell value
using CreateSpec = tensor_function::Create::Spec;
@@ -33,6 +34,7 @@ struct ReferenceOperations {
static TensorSpec create(const vespalib::string &type, const CreateSpec &spec, const std::vector<TensorSpec> &children);
static TensorSpec join(const TensorSpec &a, const TensorSpec &b, join_fun_t function);
static TensorSpec map(const TensorSpec &a, map_fun_t func);
+ static TensorSpec map_subspaces(const TensorSpec &a, map_subspace_fun_t fun);
static TensorSpec merge(const TensorSpec &a, const TensorSpec &b, join_fun_t fun);
static TensorSpec peek(const PeekSpec &spec, const std::vector<TensorSpec> &children);
static TensorSpec reduce(const TensorSpec &a, Aggr aggr, const std::vector<vespalib::string> &dims);
diff --git a/eval/src/vespa/eval/eval/value_type.cpp b/eval/src/vespa/eval/eval/value_type.cpp
index 1a83de9b0f9..fe70622de4e 100644
--- a/eval/src/vespa/eval/eval/value_type.cpp
+++ b/eval/src/vespa/eval/eval/value_type.cpp
@@ -138,6 +138,25 @@ struct Renamer {
bool matched_all() const { return (match_cnt == from.size()); }
};
+auto filter(const std::vector<Dimension> &dims, auto keep) {
+ std::vector<Dimension> result;
+ result.reserve(dims.size());
+ for (const auto &dim: dims) {
+ if (keep(dim)) {
+ result.push_back(dim);
+ }
+ }
+ return result;
+}
+
+auto strip(CellType old_cell_type, const std::vector<Dimension> &old_dims, auto discard) {
+ auto new_dims = filter(old_dims, [discard](const auto &dim){ return !discard(dim); });
+ if (new_dims.empty()) {
+ return ValueType::double_type();
+ }
+ return ValueType::make_type(old_cell_type, std::move(new_dims));
+}
+
} // namespace vespalib::eval::<unnamed>
constexpr ValueType::Dimension::size_type ValueType::Dimension::npos;
@@ -245,37 +264,19 @@ ValueType::dense_subspace_size() const
std::vector<ValueType::Dimension>
ValueType::nontrivial_indexed_dimensions() const
{
- std::vector<ValueType::Dimension> result;
- for (const auto &dim: dimensions()) {
- if (dim.is_indexed() && !dim.is_trivial()) {
- result.push_back(dim);
- }
- }
- return result;
+ return filter(_dimensions, [](const auto &dim){ return !dim.is_trivial() && dim.is_indexed(); });
}
std::vector<ValueType::Dimension>
ValueType::indexed_dimensions() const
{
- std::vector<ValueType::Dimension> result;
- for (const auto &dim: dimensions()) {
- if (dim.is_indexed()) {
- result.push_back(dim);
- }
- }
- return result;
+ return filter(_dimensions, [](const auto &dim){ return dim.is_indexed(); });
}
std::vector<ValueType::Dimension>
ValueType::mapped_dimensions() const
{
- std::vector<ValueType::Dimension> result;
- for (const auto &dim: dimensions()) {
- if (dim.is_mapped()) {
- result.push_back(dim);
- }
- }
- return result;
+ return filter(_dimensions, [](const auto &dim){ return dim.is_mapped(); });
}
size_t
@@ -312,6 +313,31 @@ ValueType::dimension_names() const
}
ValueType
+ValueType::strip_mapped_dimensions() const
+{
+ return error_if(_error, strip(_cell_type, _dimensions,
+ [](const auto &dim){ return dim.is_mapped(); }));
+}
+
+ValueType
+ValueType::strip_indexed_dimensions() const
+{
+ return error_if(_error, strip(_cell_type, _dimensions,
+ [](const auto &dim){ return dim.is_indexed(); }));
+}
+
+ValueType
+ValueType::wrap(const ValueType &inner) const
+{
+ MyJoin result(_dimensions, inner._dimensions);
+ auto meta = cell_meta().wrap(inner.cell_meta());
+ return error_if(_error || inner._error || result.mismatch ||
+ (count_indexed_dimensions() > 0) ||
+ (inner.count_mapped_dimensions() > 0),
+ make_type(meta.cell_type, std::move(result.dimensions)));
+}
+
+ValueType
ValueType::map() const
{
auto meta = cell_meta().map();
diff --git a/eval/src/vespa/eval/eval/value_type.h b/eval/src/vespa/eval/eval/value_type.h
index b35e23ee4e6..a65dde398c2 100644
--- a/eval/src/vespa/eval/eval/value_type.h
+++ b/eval/src/vespa/eval/eval/value_type.h
@@ -81,6 +81,9 @@ public:
}
bool operator!=(const ValueType &rhs) const noexcept { return !(*this == rhs); }
+ ValueType strip_mapped_dimensions() const;
+ ValueType strip_indexed_dimensions() const;
+ ValueType wrap(const ValueType &inner) const;
ValueType map() const;
ValueType reduce(const std::vector<vespalib::string> &dimensions_in) const;
ValueType peek(const std::vector<vespalib::string> &dimensions_in) const;
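
strip_mapped_dimensions() and strip_indexed_dimensions() split a mixed type into its dense and sparse parts, and wrap() puts them back together; wrap() degrades to the error type if the outer part still has indexed dimensions or the inner part still has mapped dimensions. A short sketch with an assumed type, inside some function:

    using vespalib::eval::ValueType;

    ValueType mixed = ValueType::from_spec("tensor(key{},x[3])");
    ValueType outer = mixed.strip_indexed_dimensions(); // tensor(key{})
    ValueType inner = mixed.strip_mapped_dimensions();  // tensor(x[3])
    ValueType again = outer.wrap(inner);                // tensor(key{},x[3]) again
    // inner.wrap(outer) would instead yield the error type, since the roles are reversed.
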
diff --git a/eval/src/vespa/eval/instruction/CMakeLists.txt b/eval/src/vespa/eval/instruction/CMakeLists.txt
index 22fa58a08fc..67203b6c7c8 100644
--- a/eval/src/vespa/eval/instruction/CMakeLists.txt
+++ b/eval/src/vespa/eval/instruction/CMakeLists.txt
@@ -24,6 +24,7 @@ vespa_add_library(eval_instruction OBJECT
generic_join.cpp
generic_lambda.cpp
generic_map.cpp
+ generic_map_subspaces.cpp
generic_merge.cpp
generic_peek.cpp
generic_reduce.cpp
diff --git a/eval/src/vespa/eval/instruction/generic_map_subspaces.cpp b/eval/src/vespa/eval/instruction/generic_map_subspaces.cpp
new file mode 100644
index 00000000000..1238d4f4e57
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/generic_map_subspaces.cpp
@@ -0,0 +1,118 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "generic_map_subspaces.h"
+
+using namespace vespalib::eval::tensor_function;
+
+namespace vespalib::eval::instruction {
+
+using Instruction = InterpretedFunction::Instruction;
+using State = InterpretedFunction::State;
+
+namespace {
+
+//-----------------------------------------------------------------------------
+
+struct InterpretedParams {
+ const ValueType &result_type;
+ const ValueType &inner_type;
+ InterpretedFunction fun;
+ size_t in_size;
+ size_t out_size;
+ bool direct_in;
+ bool direct_out;
+ InterpretedParams(const MapSubspaces &map_subspaces, const ValueBuilderFactory &factory)
+ : result_type(map_subspaces.result_type()),
+ inner_type(map_subspaces.inner_type()),
+ fun(factory, map_subspaces.lambda().root(), map_subspaces.types()),
+ in_size(inner_type.dense_subspace_size()),
+ out_size(result_type.dense_subspace_size()),
+ direct_in(map_subspaces.child().result_type().cell_type() == inner_type.cell_type()),
+ direct_out(map_subspaces.types().get_type(map_subspaces.lambda().root()).cell_type() == result_type.cell_type())
+ {
+ assert(direct_in || (in_size == 1));
+ assert(direct_out || (out_size == 1));
+ }
+};
+
+struct ParamView final : Value, LazyParams {
+ const ValueType &my_type;
+ TypedCells my_cells;
+ double value;
+ bool direct;
+public:
+ ParamView(const ValueType &type_in, bool direct_in)
+ : my_type(type_in), my_cells(), value(0.0), direct(direct_in) {}
+ const ValueType &type() const final override { return my_type; }
+ template <typename ICT>
+ void adjust(const ICT *cells, size_t size) {
+ if (direct) {
+ my_cells = TypedCells(cells, get_cell_type<ICT>(), size);
+ } else {
+ value = cells[0];
+ my_cells = TypedCells(&value, CellType::DOUBLE, 1);
+ }
+ }
+ TypedCells cells() const final override { return my_cells; }
+ const Index &index() const final override { return TrivialIndex::get(); }
+ MemoryUsage get_memory_usage() const final override { return self_memory_usage<ParamView>(); }
+ const Value &resolve(size_t, Stash &) const final override { return *this; }
+};
+
+template <typename OCT>
+struct ResultFiller {
+ OCT *dst;
+ bool direct;
+public:
+ ResultFiller(OCT *dst_in, bool direct_out)
+ : dst(dst_in), direct(direct_out) {}
+ void fill(const Value &value) {
+ if (direct) {
+ auto cells = value.cells();
+ memcpy(dst, cells.data, sizeof(OCT) * cells.size);
+ dst += cells.size;
+ } else {
+ *dst++ = value.as_double();
+ }
+ }
+};
+
+template <typename ICT, typename OCT>
+void my_generic_map_subspaces_op(InterpretedFunction::State &state, uint64_t param) {
+ const InterpretedParams &params = unwrap_param<InterpretedParams>(param);
+ InterpretedFunction::Context ctx(params.fun);
+ const Value &input = state.peek(0);
+ const ICT *src = input.cells().typify<ICT>().data();
+ size_t num_subspaces = input.index().size();
+ auto res_cells = state.stash.create_uninitialized_array<OCT>(num_subspaces * params.out_size);
+ ResultFiller result_filler(res_cells.data(), params.direct_out);
+ ParamView param_view(params.inner_type, params.direct_in);
+ for (size_t i = 0; i < num_subspaces; ++i) {
+ param_view.adjust(src, params.in_size);
+ src += params.in_size;
+ result_filler.fill(params.fun.eval(ctx, param_view));
+ }
+ state.pop_push(state.stash.create<ValueView>(params.result_type, input.index(), TypedCells(res_cells)));
+}
+
+struct SelectGenericMapSubspacesOp {
+ template <typename ICT, typename OCT> static auto invoke() {
+ return my_generic_map_subspaces_op<ICT,OCT>;
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+} // namespace <unnamed>
+
+Instruction
+GenericMapSubspaces::make_instruction(const tensor_function::MapSubspaces &map_subspaces_in,
+ const ValueBuilderFactory &factory, Stash &stash)
+{
+ InterpretedParams &params = stash.create<InterpretedParams>(map_subspaces_in, factory);
+ auto op = typify_invoke<2,TypifyCellType,SelectGenericMapSubspacesOp>(map_subspaces_in.child().result_type().cell_type(),
+ params.result_type.cell_type());
+ return Instruction(op, wrap_param<InterpretedParams>(params));
+}
+
+} // namespace
diff --git a/eval/src/vespa/eval/instruction/generic_map_subspaces.h b/eval/src/vespa/eval/instruction/generic_map_subspaces.h
new file mode 100644
index 00000000000..f95ded60a1b
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/generic_map_subspaces.h
@@ -0,0 +1,17 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/interpreted_function.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/eval/value_type.h>
+
+namespace vespalib::eval::instruction {
+
+struct GenericMapSubspaces {
+ static InterpretedFunction::Instruction
+ make_instruction(const tensor_function::MapSubspaces &map_subspaces_in,
+ const ValueBuilderFactory &factory, Stash &stash);
+};
+
+} // namespace
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 9c2669eebe0..d701a35c2ef 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -423,6 +423,15 @@ public class Flags {
"Whether to send cloud trial email notifications",
"Takes effect immediately");
+ public static final UnboundLongFlag MERGING_MAX_MEMORY_USAGE_PER_NODE = defineLongFlag(
+ "merging-max-memory-usage-per-node", -1,
+ List.of("vekterli"), "2023-11-03", "2024-03-01",
+ "Soft limit of the maximum amount of memory that can be used across merge operations on a content node. " +
+ "Value semantics: < 0: unlimited (legacy behavior), == 0: auto-deduced from node HW and config," +
+ " > 0: explicit memory usage limit in bytes.",
+ "Takes effect at redeployment",
+ INSTANCE_ID);
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java b/flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java
index 3f6b20ccfa4..66356d979a4 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java
@@ -41,9 +41,16 @@ public class SharedHost {
return resources.isEmpty() ? null : resources;
}
+ /** Whether there are any shared hosts specifically for the given cluster type, or without a cluster type restriction. */
@JsonIgnore
- public boolean isEnabled(String clusterType) {
- return resources.stream().anyMatch(hr -> hr.satisfiesClusterType(clusterType));
+ public boolean supportsClusterType(String clusterType) {
+ return resources.stream().anyMatch(resource -> resource.clusterType().map(clusterType::equalsIgnoreCase).orElse(true));
+ }
+
+ /** Whether there are any shared hosts specifically for the given cluster type. */
+ @JsonIgnore
+ public boolean hasClusterType(String clusterType) {
+ return resources.stream().anyMatch(resource -> resource.clusterType().map(clusterType::equalsIgnoreCase).orElse(false));
}
@JsonIgnore
diff --git a/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java b/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
index db7b7fec494..f52147c1e17 100644
--- a/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
@@ -96,6 +96,7 @@ public enum StorageMetrics implements VespaMetrics {
VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME("vds.mergethrottler.averagequeuewaitingtime", Unit.MILLISECOND, "Time merges spent in the throttler queue"),
VDS_MERGETHROTTLER_QUEUESIZE("vds.mergethrottler.queuesize", Unit.INSTANCE, "Length of merge queue"),
VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE("vds.mergethrottler.active_window_size", Unit.INSTANCE, "Number of merges active within the pending window size"),
+ VDS_MERGETHROTTLER_ESTIMATED_MERGE_MEMORY_USAGE("vds.mergethrottler.estimated_merge_memory_usage", Unit.BYTE, "An estimated upper bound of the memory usage (in bytes) of the merges currently in the active window"),
VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE("vds.mergethrottler.bounced_due_to_back_pressure", Unit.INSTANCE, "Number of merges bounced due to resource exhaustion back-pressure"),
VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK("vds.mergethrottler.locallyexecutedmerges.ok", Unit.INSTANCE, "The number of successful merges for 'locallyexecutedmerges'"),
VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_ABORTED("vds.mergethrottler.locallyexecutedmerges.failures.aborted", Unit.OPERATION, "The number of merges that failed because the storage node was (most likely) shutting down"),
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
index 4174aa6cb53..b0286fd6e0f 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
@@ -530,6 +530,7 @@ public class Vespa9VespaMetricSet {
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ESTIMATED_MERGE_MEMORY_USAGE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate());
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate());
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate());
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
index a6ac18cf011..33f137c2b98 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
@@ -604,6 +604,7 @@ public class VespaMetricSet {
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ESTIMATED_MERGE_MEMORY_USAGE, EnumSet.of(max, sum, count));
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate());
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate());
addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index 449e1c07bf8..dfbe41e31d7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -201,6 +201,11 @@ public class NodeRepository extends AbstractComponent {
/** The number of nodes we should ensure has free capacity for node failures whenever possible */
public int spareCount() { return spareCount; }
+ /** Returns whether nodes must be allocated to hosts that are exclusive to the cluster type. */
+ public boolean exclusiveClusterType(ClusterSpec cluster) {
+ return sharedHosts.value().hasClusterType(cluster.type().name());
+ }
+
/**
* Returns whether nodes are allocated exclusively in this instance given this cluster spec.
* Exclusive allocation requires that the wanted node resources matches the advertised resources of the node
@@ -209,7 +214,7 @@ public class NodeRepository extends AbstractComponent {
public boolean exclusiveAllocation(ClusterSpec clusterSpec) {
return clusterSpec.isExclusive() ||
( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
- ( !zone().cloud().allowHostSharing() && !sharedHosts.value().isEnabled(clusterSpec.type().name()));
+ ( !zone().cloud().allowHostSharing() && !sharedHosts.value().supportsClusterType(clusterSpec.type().name()));
}
/** Whether the nodes of this cluster must be running on hosts that are specifically provisioned for the application. */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 8e8474c6a6d..738abddc31a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -78,6 +78,9 @@ public class Autoscaler {
if (target.isEmpty())
return Autoscaling.dontScale(Status.insufficient, "No allocations are possible within configured limits", model);
+ if (target.get().nodes() == 1)
+ return Autoscaling.dontScale(Status.unavailable, "Autoscaling is disabled in single node clusters", model);
+
if (! worthRescaling(model.current().realResources(), target.get().realResources())) {
if (target.get().fulfilment() < 0.9999999)
return Autoscaling.dontScale(Status.insufficient, "Configured limits prevents ideal scaling of this cluster", model);
@@ -86,7 +89,6 @@ public class Autoscaler {
else
return Autoscaling.dontScale(Status.ideal, "Cluster is ideally scaled (within configured limits)", model);
}
-
return Autoscaling.scaleTo(target.get().advertisedResources(), model);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index 3c42972ee0b..108f8d77837 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -301,6 +301,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
.stream()
.filter(node -> node.violatesExclusivity(cluster,
application,
+ nodeRepository().exclusiveClusterType(cluster),
nodeRepository().exclusiveAllocation(cluster),
false,
nodeRepository().zone().cloud().allowHostSharing(),
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
index 01da75f90b6..b6897d5b1c9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
@@ -14,9 +14,11 @@ import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
import java.time.Duration;
+import java.util.HashSet;
import java.util.Objects;
import java.util.Optional;
import java.util.Random;
+import java.util.Set;
import java.util.function.Predicate;
import java.util.logging.Level;
@@ -63,13 +65,15 @@ public class HostFlavorUpgrader extends NodeRepositoryMaintainer {
NodeList activeNodes = allNodes.nodeType(NodeType.tenant)
.state(Node.State.active)
.shuffle(random); // Shuffle to avoid getting stuck trying to upgrade the same host
+ Set<String> exhaustedFlavors = new HashSet<>();
for (var node : activeNodes) {
Optional<Node> parent = allNodes.parentOf(node);
if (parent.isEmpty()) continue;
+ if (exhaustedFlavors.contains(parent.get().flavor().name())) continue;
Allocation allocation = node.allocation().get();
Predicate<NodeResources> realHostResourcesWithinLimits = resources -> nodeRepository().nodeResourceLimits().isWithinRealLimits(resources, allocation.owner(), allocation.membership().cluster());
if (!hostProvisioner.canUpgradeFlavor(parent.get(), node, realHostResourcesWithinLimits)) continue;
- if (parent.get().status().wantToUpgradeFlavor()) continue; // Already upgrading
+ if (parent.get().status().wantToUpgradeFlavor() && allocation.membership().retired()) continue; // Already upgrading, and this node is already retired as part of it
boolean redeployed = false;
boolean deploymentValid = false;
@@ -85,6 +89,7 @@ public class HostFlavorUpgrader extends NodeRepositoryMaintainer {
return 1.0;
} catch (NodeAllocationException e) {
// Fine, no capacity for upgrade
+ exhaustedFlavors.add(parent.get().flavor().name());
} finally {
if (deploymentValid && !redeployed) { // Cancel upgrade if redeploy failed
upgradeFlavor(parent.get(), false);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index e1be5b48e2d..21340baf273 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -198,6 +198,7 @@ class NodeAllocation {
private NodeCandidate.ExclusivityViolation violatesExclusivity(NodeCandidate candidate) {
return candidate.violatesExclusivity(cluster, application,
+ nodeRepository.exclusiveClusterType(cluster),
nodeRepository.exclusiveAllocation(cluster),
nodeRepository.exclusiveProvisioning(cluster),
nodeRepository.zone().cloud().allowHostSharing(), allNodes, makeExclusive);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
index 1547a266e15..8c29b40bc26 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
@@ -595,7 +595,7 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
}
public ExclusivityViolation violatesExclusivity(ClusterSpec cluster, ApplicationId application,
- boolean exclusiveAllocation, boolean exclusiveProvisioning,
+ boolean exclusiveClusterType, boolean exclusiveAllocation, boolean exclusiveProvisioning,
boolean hostSharing, NodeList allNodes, boolean makeExclusive) {
if (parentHostname().isEmpty()) return ExclusivityViolation.NONE;
if (type() != NodeType.tenant) return ExclusivityViolation.NONE;
@@ -614,6 +614,10 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
if ( ! emptyOrEqual(parent.flatMap(Node::exclusiveToClusterType), cluster.type()))
return ExclusivityViolation.YES;
+ // this cluster requires a parent that was provisioned exclusively for this cluster type
+ if (exclusiveClusterType && parent.flatMap(Node::exclusiveToClusterType).isEmpty() && makeExclusive)
+ return ExclusivityViolation.YES;
+
// the parent is provisioned for another application
if ( ! emptyOrEqual(parent.flatMap(Node::provisionedForApplicationId), application))
return ExclusivityViolation.YES;
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 37e1390a673..d4d34ab66e5 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -843,9 +843,7 @@ public class AutoscalingTest {
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 200);
- fixture.tester().assertResources("Scale only to a single node and group since this is dev",
- 1, 1, 0.1, 22.9, 105.2,
- fixture.autoscale());
+ assertEquals("Don't autoscale: Autoscaling is disabled in single node clusters", fixture.autoscale().toString());
}
/** Same setup as test_autoscaling_in_dev(), just with required = true */
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java
index e4c12ede1d9..64c0135bd8e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgraderTest.java
@@ -68,12 +68,16 @@ class HostFlavorUpgraderTest {
.matching(node -> node.status().wantToUpgradeFlavor() || node.status().wantToRetire()),
"No hosts marked for upgrade or retirement");
- // First provision request fails, but second succeeds and a replacement host starts provisioning
+ // First provision request fails, but we only try once for the same flavor
hostProvisioner.with(Behaviour.failProvisionRequest, 1);
assertEquals(1, upgrader.maintain());
NodeList nodes = tester.nodeRepository().nodes().list();
- NodeList upgradingFlavor = nodes.matching(node -> node.status().wantToRetire() &&
- node.status().wantToUpgradeFlavor());
+ assertEquals(0, nodes.matching(node -> node.status().wantToUpgradeFlavor()).size());
+
+ // Second succeeds and a replacement host starts provisioning
+ assertEquals(1, upgrader.maintain());
+ nodes = tester.nodeRepository().nodes().list();
+ NodeList upgradingFlavor = nodes.matching(node -> node.status().wantToUpgradeFlavor());
assertEquals(1, upgradingFlavor.size());
assertEquals(1, nodes.state(Node.State.provisioned).size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
index 5927cb43c3a..abcef421b4c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
@@ -40,9 +40,14 @@ import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
+import static com.yahoo.config.provision.ClusterSpec.Type.container;
+import static com.yahoo.config.provision.ClusterSpec.Type.content;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.fast;
import static com.yahoo.config.provision.NodeResources.StorageType.local;
import static com.yahoo.config.provision.NodeResources.StorageType.remote;
+import static com.yahoo.vespa.hosted.provision.Node.State.active;
+import static com.yahoo.vespa.hosted.provision.Node.State.dirty;
+import static com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester.applicationId;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -60,35 +65,35 @@ public class DynamicProvisioningTest {
var tester = tester(true);
assertEquals(0, tester.nodeRepository().nodes().list().size());
- ApplicationId application1 = ProvisioningTester.applicationId("application1");
+ ApplicationId application1 = applicationId("application1");
NodeResources resources = new NodeResources(1, 4, 10, 1);
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources, tester);
// Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes
assertEquals(8, tester.nodeRepository().nodes().list().size());
- assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
+ assertEquals(4, tester.nodeRepository().nodes().list(active).nodeType(NodeType.host).size());
assertEquals(Set.of("host100-1", "host101-1", "host102-1", "host103-1"),
- tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).hostnames());
+ tester.nodeRepository().nodes().list(active).nodeType(NodeType.tenant).hostnames());
// Deploy new application
- ApplicationId application2 = ProvisioningTester.applicationId("application2");
+ ApplicationId application2 = applicationId("application2");
prepareAndActivate(application2, clusterSpec("mycluster"), 4, 1, resources, tester);
// Total of 12 nodes should now be in node-repo, 4 active hosts and 8 active nodes
assertEquals(12, tester.nodeRepository().nodes().list().size());
- assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
+ assertEquals(4, tester.nodeRepository().nodes().list(active).nodeType(NodeType.host).size());
assertEquals(Set.of("host100-1", "host100-2", "host101-1", "host101-2", "host102-1", "host102-2", "host103-1", "host103-2"),
- tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).hostnames());
+ tester.nodeRepository().nodes().list(active).nodeType(NodeType.tenant).hostnames());
// Deploy new exclusive application
- ApplicationId application3 = ProvisioningTester.applicationId("application3");
+ ApplicationId application3 = applicationId("application3");
NodeResources exclusiveResources = new NodeResources(2, 10, 20, 1);
prepareAndActivate(application3, clusterSpec("mycluster", true), 4, 1, exclusiveResources, tester);
// Total of 20 nodes should now be in node-repo, 8 active hosts and 12 active nodes
assertEquals(20, tester.nodeRepository().nodes().list().size());
- assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
- assertEquals(12, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(8, tester.nodeRepository().nodes().list(active).nodeType(NodeType.host).size());
+ assertEquals(12, tester.nodeRepository().nodes().list(active).nodeType(NodeType.tenant).size());
}
@Test
@@ -98,15 +103,15 @@ public class DynamicProvisioningTest {
NodeResources initialResources = new NodeResources(4, 80, 100, 1);
NodeResources smallResources = new NodeResources(2, 20, 50, 1);
- ApplicationId application1 = ProvisioningTester.applicationId();
+ ApplicationId application1 = applicationId();
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, initialResources, tester);
- ApplicationId application2 = ProvisioningTester.applicationId();
+ ApplicationId application2 = applicationId();
prepareAndActivate(application2, clusterSpec("mycluster", true), 4, 1, initialResources, tester);
// Total of 16 nodes should now be in node-repo, 8 active hosts and 8 active nodes
assertEquals(16, tester.nodeRepository().nodes().list().size());
- assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(8, tester.nodeRepository().nodes().list(active).nodeType(NodeType.tenant).size());
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, smallResources, tester);
prepareAndActivate(application2, clusterSpec("mycluster", true), 4, 1, smallResources, tester);
@@ -114,8 +119,8 @@ public class DynamicProvisioningTest {
// 24 nodes: 4 shared hosts with 4 app1 nodes + 8 exclusive hosts with 8 nodes of app2, 4 of which are retired
NodeList nodes = tester.nodeRepository().nodes().list();
assertEquals(24, nodes.size());
- assertEquals(12, nodes.nodeType(NodeType.host).state(Node.State.active).size());
- assertEquals(12, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ assertEquals(12, nodes.nodeType(NodeType.host).state(active).size());
+ assertEquals(12, nodes.nodeType(NodeType.tenant).state(active).size());
assertEquals(4, nodes.retired().size());
}
@@ -126,17 +131,17 @@ public class DynamicProvisioningTest {
NodeResources highResources = new NodeResources(4, 80, 100, 1);
NodeResources lowResources = new NodeResources(2, 20, 50, 1);
- ApplicationId application = ProvisioningTester.applicationId();
+ ApplicationId application = applicationId();
prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, highResources, tester);
// Total of 4 nodes should now be in node-repo, 2 active hosts and 2 active nodes.
assertEquals(4, tester.nodeRepository().nodes().list().size());
- assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(2, tester.nodeRepository().nodes().list(active).nodeType(NodeType.tenant).size());
// Redeploying the application causes no changes at all.
prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, highResources, tester);
assertEquals(4, tester.nodeRepository().nodes().list().size());
- assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(2, tester.nodeRepository().nodes().list(active).nodeType(NodeType.tenant).size());
// Deploying with a smaller node flavour causes new, smaller hosts to be provisioned.
prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, lowResources, tester);
@@ -144,8 +149,8 @@ public class DynamicProvisioningTest {
// Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes, of which 2 are retired.
NodeList nodes = tester.nodeRepository().nodes().list();
assertEquals(8, nodes.size());
- assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
- assertEquals(4, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(active).size());
+ assertEquals(4, nodes.nodeType(NodeType.tenant).state(active).size());
assertEquals(2, nodes.retired().size());
// Remove the child nodes, and redeploy with the original flavour. This should reuse the existing hosts.
@@ -155,8 +160,8 @@ public class DynamicProvisioningTest {
// Total of 6 nodes should now be in node-repo, 4 active hosts and 2 active nodes.
nodes = tester.nodeRepository().nodes().list();
assertEquals(6, nodes.size());
- assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
- assertEquals(2, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(active).size());
+ assertEquals(2, nodes.nodeType(NodeType.tenant).state(active).size());
assertEquals(0, nodes.retired().size());
// Deploy again with high resources.
@@ -164,8 +169,8 @@ public class DynamicProvisioningTest {
// Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes.
nodes = tester.nodeRepository().nodes().list();
assertEquals(8, nodes.size());
- assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
- assertEquals(4, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(active).size());
+ assertEquals(4, nodes.nodeType(NodeType.tenant).state(active).size());
}
@Test
@@ -176,27 +181,27 @@ public class DynamicProvisioningTest {
NodeResources resources = new NodeResources(2, 4, 10, 4);
- ApplicationId application1 = ProvisioningTester.applicationId();
+ ApplicationId application1 = applicationId();
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources, tester);
- ApplicationId application2 = ProvisioningTester.applicationId();
+ ApplicationId application2 = applicationId();
prepareAndActivate(application2, clusterSpec("mycluster"), 3, 1, resources, tester);
- ApplicationId application3 = ProvisioningTester.applicationId();
+ ApplicationId application3 = applicationId();
prepareAndActivate(application3, clusterSpec("mycluster"), 3, 1, resources, tester);
assertEquals(4, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
- ApplicationId application4 = ProvisioningTester.applicationId();
+ ApplicationId application4 = applicationId();
prepareAndActivate(application4, clusterSpec("mycluster"), 3, 1, resources, tester);
assertEquals(5, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
}
@Test
public void does_not_allocate_container_nodes_to_shared_hosts() {
- assertHostSharing(Environment.prod, ClusterSpec.Type.container, false);
- assertHostSharing(Environment.prod, ClusterSpec.Type.content, true);
- assertHostSharing(Environment.staging, ClusterSpec.Type.container, true);
- assertHostSharing(Environment.staging, ClusterSpec.Type.content, true);
+ assertHostSharing(Environment.prod, container, false);
+ assertHostSharing(Environment.prod, content, true);
+ assertHostSharing(Environment.staging, container, true);
+ assertHostSharing(Environment.staging, content, true);
}
private void assertHostSharing(Environment environment, ClusterSpec.Type clusterType, boolean expectShared) {
@@ -206,7 +211,7 @@ public class DynamicProvisioningTest {
tester.makeReadyHosts(2, new NodeResources(12, 12, 200, 12));
tester.flagSource().withJacksonFlag(PermanentFlags.SHARED_HOST.id(), new SharedHost(List.of(new HostResources(4.0, 16.0, 50.0, 0.3, "fast", "local", null, 10, "x86_64"))), SharedHost.class);
- ApplicationId application = ProvisioningTester.applicationId();
+ ApplicationId application = applicationId();
ClusterSpec cluster = ClusterSpec.request(clusterType, ClusterSpec.Id.from("default")).vespaVersion("6.42").build();
tester.prepare(application, cluster, 2, 1, new NodeResources(2., 10., 20, 1));
assertEquals(expectShared ? 2 : 4, tester.nodeRepository().nodes().list().nodeType(NodeType.host).size());
@@ -216,7 +221,7 @@ public class DynamicProvisioningTest {
public void retires_on_exclusivity_violation() {
var tester = tester(false);
tester.flagSource().withJacksonFlag(PermanentFlags.SHARED_HOST.id(), new SharedHost(List.of(new HostResources(1., 1., 1., 1., "fast", "local", null, 10, "x86_64"))), SharedHost.class);
- ApplicationId application1 = ProvisioningTester.applicationId();
+ ApplicationId application1 = applicationId();
NodeResources resources = new NodeResources(4, 80, 100, 1);
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources, tester);
NodeList initialNodes = tester.nodeRepository().nodes().list().owner(application1);
@@ -239,7 +244,7 @@ public class DynamicProvisioningTest {
NodeList exclusiveViolators = nodes.owner(application1).not().retired().first(2);
List<Node> parents = exclusiveViolators.mapToList(node -> nodes.parentOf(node).get());
tester.patchNode(parents.get(0), node -> node.withProvisionedForApplicationId(ApplicationId.defaultId()));
- tester.patchNode(parents.get(1), node -> node.withExclusiveToClusterType(ClusterSpec.Type.container));
+ tester.patchNode(parents.get(1), node -> node.withExclusiveToClusterType(container));
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, smallerExclusiveResources, tester);
assertEquals(10, tester.nodeRepository().nodes().list().owner(application1).size());
@@ -250,7 +255,7 @@ public class DynamicProvisioningTest {
public void node_indices_are_unique_even_when_a_node_is_left_in_reserved_state() {
var tester = tester(true);
NodeResources resources = new NodeResources(10, 10, 10, 10);
- ApplicationId app = ProvisioningTester.applicationId();
+ ApplicationId app = applicationId();
Function<Node, Node> retireNode = node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, Instant.now()));
Function<Integer, Node> getNodeInGroup = group -> tester.nodeRepository().nodes().list().owner(app).stream()
@@ -295,8 +300,8 @@ public class DynamicProvisioningTest {
tester.activateTenantHosts();
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+ ApplicationId app1 = applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
// Deploy using real memory amount (17)
try {
@@ -333,32 +338,60 @@ public class DynamicProvisioningTest {
.flagSource(flagSource)
.build();
- ApplicationId app = ProvisioningTester.applicationId("a1");
- ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("8").build();
+ ApplicationId app = applicationId("a1");
+ ClusterSpec cluster = ClusterSpec.request(content, new ClusterSpec.Id("cluster1")).vespaVersion("8").build();
Capacity capacity = Capacity.from(new ClusterResources(4, 2, new NodeResources(2, 8, 50, 0.1, DiskSpeed.any, StorageType.any, Architecture.any)));
- hostProvisioner.setHostFlavor("x86", ClusterSpec.Type.content);
+ hostProvisioner.setHostFlavor("x86", content);
tester.activate(app, cluster, capacity);
NodeList nodes = tester.nodeRepository().nodes().list();
- assertEquals(4, nodes.owner(app).state(Node.State.active).size());
- assertEquals(Set.of("x86"), nodes.parentsOf(nodes.owner(app).state(Node.State.active)).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
+ assertEquals(4, nodes.owner(app).state(active).size());
+ assertEquals(Set.of("x86"), nodes.parentsOf(nodes.owner(app).state(active)).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
- hostProvisioner.setHostFlavor("arm", ClusterSpec.Type.content);
+ hostProvisioner.setHostFlavor("arm", content);
flagSource.withStringFlag(PermanentFlags.HOST_FLAVOR.id(), "arm");
tester.activate(app, cluster, capacity);
nodes = tester.nodeRepository().nodes().list();
- assertEquals(4, nodes.owner(app).state(Node.State.active).retired().size());
- assertEquals(4, nodes.owner(app).state(Node.State.active).not().retired().size());
- assertEquals(Set.of("x86"), nodes.parentsOf(tester.getNodes(app, Node.State.active).retired()).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
- assertEquals(Set.of("arm"), nodes.parentsOf(tester.getNodes(app, Node.State.active).not().retired()).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
+ assertEquals(4, nodes.owner(app).state(active).retired().size());
+ assertEquals(4, nodes.owner(app).state(active).not().retired().size());
+ assertEquals(Set.of("x86"), nodes.parentsOf(tester.getNodes(app, active).retired()).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
+ assertEquals(Set.of("arm"), nodes.parentsOf(tester.getNodes(app, active).not().retired()).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
flagSource.removeFlag(PermanentFlags.HOST_FLAVOR.id()); // Resetting flag does not move the nodes back
tester.activate(app, cluster, capacity);
nodes = tester.nodeRepository().nodes().list();
- assertEquals(4, nodes.owner(app).state(Node.State.active).retired().size());
- assertEquals(4, nodes.owner(app).state(Node.State.active).not().retired().size());
- assertEquals(Set.of("x86"), nodes.parentsOf(tester.getNodes(app, Node.State.active).retired()).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
- assertEquals(Set.of("arm"), nodes.parentsOf(tester.getNodes(app, Node.State.active).not().retired()).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
+ assertEquals(4, nodes.owner(app).state(active).retired().size());
+ assertEquals(4, nodes.owner(app).state(active).not().retired().size());
+ assertEquals(Set.of("x86"), nodes.parentsOf(tester.getNodes(app, active).retired()).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
+ assertEquals(Set.of("arm"), nodes.parentsOf(tester.getNodes(app, active).not().retired()).stream().map(n -> n.flavor().name()).collect(Collectors.toSet()));
+ }
+
+
+ @Test
+ public void reduces_container_node_count() {
+ List<Flavor> flavors = List.of(new Flavor("default", new NodeResources(2, 8, 50, 0.1, fast, local, Architecture.x86_64)));
+ MockHostProvisioner hostProvisioner = new MockHostProvisioner(flavors);
+ ProvisioningTester tester = new ProvisioningTester.Builder()
+ .dynamicProvisioning(true, false)
+ .flavors(flavors)
+ .hostProvisioner(hostProvisioner)
+ .build();
+
+ ApplicationId app = applicationId("a1");
+ ClusterSpec cluster = ClusterSpec.request(container, new ClusterSpec.Id("cluster1")).vespaVersion("8").build();
+ Capacity capacity = Capacity.from(resources(4, 1, 2, 8, 50));
+
+ tester.activate(app, cluster, capacity);
+ NodeList nodes = tester.nodeRepository().nodes().list();
+ assertEquals(4, nodes.owner(app).state(active).size());
+ assertEquals(0, nodes.owner(app).state(dirty).size());
+
+ // Go from 4 to 2 nodes, 2 nodes will go directly to dirty
+ capacity = Capacity.from(resources(2, 1, 2, 8, 50));
+ tester.activate(app, cluster, capacity);
+ nodes = tester.nodeRepository().nodes().list();
+ assertEquals(2, nodes.owner(app).state(active).size());
+ assertEquals(2, nodes.owner(app).state(dirty).size());
}
@Test
@@ -377,8 +410,8 @@ public class DynamicProvisioningTest {
tester.activateTenantHosts();
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+ ApplicationId app1 = applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
// Limits where each number is within flavor limits but which don't contain any flavor lead to an error
try {
@@ -452,8 +485,8 @@ public class DynamicProvisioningTest {
tester.activateTenantHosts();
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+ ApplicationId app1 = applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200, fast, local),
resources(6, 3, 3, 25, 400, fast, local)));
@@ -487,8 +520,8 @@ public class DynamicProvisioningTest {
tester.activateTenantHosts();
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.container, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+ ApplicationId app1 = applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(container, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200, fast, StorageType.any),
resources(6, 3, 3, 25, 400, fast, StorageType.any)));
@@ -515,7 +548,7 @@ public class DynamicProvisioningTest {
NodeResources resources = new NodeResources(4, 16, 125, 0.3,
NodeResources.DiskSpeed.any, NodeResources.StorageType.any,
NodeResources.Architecture.x86_64, new NodeResources.GpuResources(1, 16));
- tester.prepare(ProvisioningTester.applicationId(), ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("id1"))
+ tester.prepare(applicationId(), ClusterSpec.request(container, ClusterSpec.Id.from("id1"))
.vespaVersion("8.0").build(),
2, 1, resources);
}
@@ -555,7 +588,7 @@ public class DynamicProvisioningTest {
}
private static ClusterSpec clusterSpec(String clusterId, boolean exclusive) {
- return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId)).vespaVersion("6.42").exclusive(exclusive).build();
+ return ClusterSpec.request(content, ClusterSpec.Id.from(clusterId)).vespaVersion("6.42").exclusive(exclusive).build();
}
private static ClusterResources resources(int nodes, int groups, double vcpu, double memory, double disk) {
diff --git a/screwdriver.yaml b/screwdriver.yaml
index 924e01a90f4..e1ee1bcff8c 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -528,7 +528,26 @@ jobs:
dnf install -y epel-release
dnf install -y vespa
+ mirror-copr-rpms-to-archive:
+ image: quay.io/centos/centos:stream8
+ annotations:
+ screwdriver.cd/cpu: LOW
+ screwdriver.cd/ram: LOW
+ screwdriver.cd/disk: HIGH
+ screwdriver.cd/timeout: 60
+ screwdriver.cd/buildPeriodically: H 6 * * *
+ secrets:
+ - CLOUDSMITH_API_CREDS
+ steps:
+ - install: |
+ dnf install -y dnf-plugins-core jq
+ - mirror-x86-64: |
+ screwdriver/publish-unpublished-rpms-to-archive.sh x86_64
+ - mirror-aarch64: |
+ screwdriver/publish-unpublished-rpms-to-archive.sh aarch64
+
mirror-copr-rpms-to-artifactory:
+ image: quay.io/centos/centos:stream8
annotations:
screwdriver.cd/cpu: LOW
screwdriver.cd/ram: LOW
@@ -538,6 +557,8 @@ jobs:
secrets:
- JFROG_API_TOKEN
steps:
+ - install: |
+ dnf install -y dnf-plugins-core
- mirror: |
screwdriver/publish-unpublished-rpms-to-jfrog-cloud.sh
diff --git a/screwdriver/publish-unpublished-rpms-to-archive.sh b/screwdriver/publish-unpublished-rpms-to-archive.sh
new file mode 100755
index 00000000000..1e4b74b6f78
--- /dev/null
+++ b/screwdriver/publish-unpublished-rpms-to-archive.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+set -euo pipefail
+set -x
+
+if (( $# < 1 )); then
+ echo "Usage: $0 <RPM architecture>"
+ exit 1
+fi
+
+RPMARCH=$1
+ALLOWED_ARCHS=("x86_64" "aarch64")
+
+if [[ ! " ${ALLOWED_ARCHS[*]} " =~ " $RPMARCH " ]]; then
+ echo "Architecture $RPMARCH not in allowed archs: ${ALLOWED_ARCHS[@]}"
+ exit 1
+fi
+
+readonly MYDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# Copr repo
+dnf config-manager --add-repo https://copr.fedorainfracloud.org/coprs/g/vespa/vespa/repo/epel-8/group_vespa-vespa-epel-8.repo
+sed -i "s,\$basearch,$RPMARCH,g" /etc/yum.repos.d/group_vespa-vespa-epel-8.repo
+
+# Cloudsmith repo
+rpm --import 'https://dl.cloudsmith.io/public/vespa/vespa/gpg.0F3DA3C70D35DA7B.key'
+curl -1sLf 'https://dl.cloudsmith.io/public/vespa/vespa/config.rpm.txt?distro=el&codename=8' > /tmp/vespa-vespa.repo
+dnf config-manager --add-repo '/tmp/vespa-vespa.repo'
+rm -f /tmp/vespa-vespa.repo
+
+readonly COPR_PACKAGES=$(mktemp)
+readonly DLDIR=$(mktemp -d)
+# Bash keeps only the last EXIT trap, so clean up both temp paths in a single handler.
+trap 'rm -f "$COPR_PACKAGES"; rm -rf "$DLDIR"' EXIT
+
+cd $DLDIR
+
+readonly DNF="dnf -y -q --forcearch $RPMARCH"
+
+$DNF list --disablerepo='*' --enablerepo=copr:copr.fedorainfracloud.org:group_vespa:vespa --showduplicates 'vespa*' | grep "Available Packages" -A 100000 | tail -n +2 | sed '/\.src\ */d' | sed "s/\.$RPMARCH\ */-/" | awk '{print $1}' | grep -v '.src$' > $COPR_PACKAGES
+
+echo "Packages on Copr:"
+cat $COPR_PACKAGES
+echo
+
+for pv in $(cat $COPR_PACKAGES); do
+ if ! $DNF list --disablerepo='*' --enablerepo=vespa-* $pv &> /dev/null; then
+ echo "$pv not found in archive. Downloading..."
+ $DNF download --disablerepo='*' --enablerepo=copr:copr.fedorainfracloud.org:group_vespa:vespa $pv
+ echo "$pv downloaded."
+ fi
+done
+echo
+
+if ! ls *.rpm &> /dev/null; then
+ echo "All packages already in archive."
+ exit 0
+fi
+
+echo "RPMs missing in archive:"
+ls -lh *.rpm
+echo
+
+UPLOAD_FAILED=false
+if [[ -n ${SCREWDRIVER:-} ]] && [[ -z ${SD_PULL_REQUEST:-} ]]; then
+ for rpm in $(ls *.rpm); do
+ echo "Uploading $rpm ..."
+ if ! $MYDIR/upload-rpm-to-cloudsmith.sh $rpm ; then
+ echo "Could not upload $rpm"
+ UPLOAD_FAILED=true
+ else
+ echo "$rpm uploaded"
+ fi
+ done
+ echo
+fi
+
+if $UPLOAD_FAILED; then
+ echo "Some RPMs failed to upload"
+ exit 1
+fi
diff --git a/screwdriver/upload-rpm-to-cloudsmith.sh b/screwdriver/upload-rpm-to-cloudsmith.sh
new file mode 100755
index 00000000000..dd24185433b
--- /dev/null
+++ b/screwdriver/upload-rpm-to-cloudsmith.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+set -euo pipefail
+
+if (( $# < 1 )); then
+ echo "Usage: $0 <RPM file>"
+ exit 1
+fi
+
+if [[ -z ${CLOUDSMITH_API_CREDS:-} ]]; then
+ echo "Environment CLOUDSMITH_API_CREDS not set. Exiting."
+ exit 1
+fi
+
+RPM=$1
+OS_DISTRO=el
+RELEASEVER=8
+
+main() {
+
+ FID=$(curl -sLf \
+ --upload-file $RPM \
+ -u "$CLOUDSMITH_API_CREDS" \
+ -H "Content-Sha256: $(sha256sum $RPM | cut -f1 -d' ')" \
+ https://upload.cloudsmith.io/vespa/vespa/$RPM | jq -re '.identifier')
+
+ if [[ -n $FID ]]; then
+ curl -sLf -X POST -H "Content-Type: application/json" \
+ -u "$CLOUDSMITH_API_CREDS" \
+ -d "{\"package_file\": \"$FID\", \"distribution\": \"$OS_DISTRO/$RELEASEVER\"}" \
+ https://api-prd.cloudsmith.io/v1/packages/vespa/vespa/upload/rpm/
+ fi
+}
+
+main "$@"
diff --git a/searchcore/src/apps/proton/proton.cpp b/searchcore/src/apps/proton/proton.cpp
index 129091606b3..4c20c40b406 100644
--- a/searchcore/src/apps/proton/proton.cpp
+++ b/searchcore/src/apps/proton/proton.cpp
@@ -109,7 +109,8 @@ class ProtonServiceLayerProcess : public storage::ServiceLayerProcess {
public:
ProtonServiceLayerProcess(const config::ConfigUri & configUri,
proton::Proton & proton, FNET_Transport& transport,
- const vespalib::string& file_distributor_connection_spec);
+ const vespalib::string& file_distributor_connection_spec,
+ const vespalib::HwInfo& hw_info);
~ProtonServiceLayerProcess() override { shutdown(); }
void shutdown() override;
@@ -130,8 +131,9 @@ public:
ProtonServiceLayerProcess::ProtonServiceLayerProcess(const config::ConfigUri & configUri,
proton::Proton & proton, FNET_Transport& transport,
- const vespalib::string& file_distributor_connection_spec)
- : ServiceLayerProcess(configUri),
+ const vespalib::string& file_distributor_connection_spec,
+ const vespalib::HwInfo& hw_info)
+ : ServiceLayerProcess(configUri, hw_info),
_proton(proton),
_transport(transport),
_file_distributor_connection_spec(file_distributor_connection_spec),
@@ -259,18 +261,18 @@ App::startAndRun(FNET_Transport & transport, int argc, char **argv) {
proton.init(configSnapshot);
}
vespalib::string file_distributor_connection_spec = configSnapshot->getFiledistributorrpcConfig().connectionspec;
- configSnapshot.reset();
std::unique_ptr<ProtonServiceLayerProcess> spiProton;
if ( ! params.serviceidentity.empty()) {
spiProton = std::make_unique<ProtonServiceLayerProcess>(identityUri.createWithNewId(params.serviceidentity), proton, transport,
- file_distributor_connection_spec);
+ file_distributor_connection_spec, configSnapshot->getHwInfo());
spiProton->setupConfig(subscribeTimeout);
spiProton->createNode();
EV_STARTED("servicelayer");
} else {
proton.getMetricManager().init(identityUri);
}
+ configSnapshot.reset();
EV_STARTED("proton");
while (!(SIG::INT.check() || SIG::TERM.check() || (spiProton && spiProton->getNode().attemptedStopped()))) {
std::this_thread::sleep_for(1000ms);
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp
index 9a0a2968c69..808747034ac 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp
@@ -243,7 +243,7 @@ public:
MyServiceLayerProcess::MyServiceLayerProcess(const config::ConfigUri& configUri,
PersistenceProvider& provider,
std::unique_ptr<storage::IStorageChainBuilder> chain_builder)
- : ServiceLayerProcess(configUri),
+ : ServiceLayerProcess(configUri, vespalib::HwInfo()),
_provider(provider)
{
if (chain_builder) {
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoresaver.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoresaver.cpp
index 49d53aa38f0..ee5713c3c15 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoresaver.cpp
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoresaver.cpp
@@ -4,6 +4,7 @@
#include <vespa/searchlib/util/bufferwriter.h>
#include "document_meta_store_versions.h"
#include <vespa/searchlib/attribute/iattributesavetarget.h>
+#include <vespa/vespalib/btree/btreenode.hpp>
using vespalib::GenerationHandler;
using search::IAttributeSaveTarget;
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
index 9a734168260..150de1c9cec 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
@@ -21,6 +21,7 @@
#include <vespa/searchsummary/config/config-juniperrc.h>
#include <vespa/config/retriever/configsnapshot.hpp>
#include <vespa/vespalib/util/hw_info.h>
+#include <vespa/config.h>
#include <thread>
#include <cassert>
#include <cinttypes>
@@ -222,6 +223,18 @@ find_document_db_config_entry(const ProtonConfig::DocumentdbVector& document_dbs
return default_document_db_config_entry;
}
+[[nodiscard]] bool
+use_hw_memory_presized_target_num_docs([[maybe_unused]] ProtonConfig::Documentdb::Mode mode) noexcept {
+ // If sanitizers are enabled, mmap-allocations may be intercepted and allocated pages
+ // may be implicitly touched+committed. This tends to explode when testing locally, so
+ // fall back to configured initial num-docs if this is the case.
+#ifndef VESPA_USE_SANITIZER
+ return (mode != ProtonConfig::Documentdb::Mode::INDEX);
+#else
+ return false;
+#endif
+}
+
AllocConfig
build_alloc_config(const vespalib::HwInfo & hwInfo, const ProtonConfig& proton_config, const vespalib::string& doc_type_name)
{
@@ -230,7 +243,7 @@ build_alloc_config(const vespalib::HwInfo & hwInfo, const ProtonConfig& proton_c
auto& document_db_config_entry = find_document_db_config_entry(proton_config.documentdb, doc_type_name);
auto& alloc_config = document_db_config_entry.allocation;
- uint32_t target_numdocs = (document_db_config_entry.mode != ProtonConfig::Documentdb::Mode::INDEX)
+ uint32_t target_numdocs = use_hw_memory_presized_target_num_docs(document_db_config_entry.mode)
? (hwInfo.memory().sizeBytes() / (MIN_MEMORY_COST_PER_DOCUMENT * proton_config.distribution.searchablecopies))
: alloc_config.initialnumdocs;
auto& distribution_config = proton_config.distribution;
diff --git a/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp b/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp
index 2ac0e42cc79..62801e0b1ff 100644
--- a/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp
+++ b/searchlib/src/tests/attribute/reference_attribute/reference_attribute_test.cpp
@@ -14,6 +14,7 @@
#include <vespa/searchcommon/attribute/config.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/test/insertion_operators.h>
+#include <vespa/vespalib/btree/btreenode.hpp>
#include <cinttypes>
#include <filesystem>
diff --git a/searchlib/src/tests/query/querybuilder_test.cpp b/searchlib/src/tests/query/querybuilder_test.cpp
index 189e0f5f0b1..606d6a2474a 100644
--- a/searchlib/src/tests/query/querybuilder_test.cpp
+++ b/searchlib/src/tests/query/querybuilder_test.cpp
@@ -673,7 +673,7 @@ TEST("require that empty intermediate node can be added") {
}
TEST("control size of SimpleQueryStackDumpIterator") {
- EXPECT_EQUAL(128u, sizeof(SimpleQueryStackDumpIterator));
+ EXPECT_EQUAL(120u, sizeof(SimpleQueryStackDumpIterator));
}
TEST("test query parsing error") {
diff --git a/searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp b/searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp
index 89ab99754a5..18c6a9ba931 100644
--- a/searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/reference_mappings.cpp
@@ -4,6 +4,7 @@
#include "reference.h"
#include <vespa/vespalib/datastore/datastore.hpp>
#include <vespa/vespalib/btree/btreestore.hpp>
+#include <vespa/vespalib/btree/btreenode.hpp>
#include <vespa/vespalib/util/rcuvector.hpp>
namespace search::attribute {
diff --git a/searchlib/src/vespa/searchlib/common/bitvector.cpp b/searchlib/src/vespa/searchlib/common/bitvector.cpp
index c359f433d12..b79703a8e5c 100644
--- a/searchlib/src/vespa/searchlib/common/bitvector.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvector.cpp
@@ -49,7 +49,7 @@ BitVector::allocatePaddedAndAligned(Index start, Index end, Index capacity, cons
return alloc;
}
-BitVector::BitVector(void * buf, Index start, Index end) :
+BitVector::BitVector(void * buf, Index start, Index end) noexcept :
_words(static_cast<Word *>(buf) - wordNum(start)),
_startOffset(start),
_sz(end),
diff --git a/searchlib/src/vespa/searchlib/common/bitvector.h b/searchlib/src/vespa/searchlib/common/bitvector.h
index af1722e200c..943db5f06ba 100644
--- a/searchlib/src/vespa/searchlib/common/bitvector.h
+++ b/searchlib/src/vespa/searchlib/common/bitvector.h
@@ -40,18 +40,18 @@ public:
BitVector& operator = (const BitVector &) = delete;
virtual ~BitVector() = default;
bool operator == (const BitVector &right) const;
- const void * getStart() const { return _words; }
- void * getStart() { return _words; }
+ const void * getStart() const noexcept { return _words; }
+ void * getStart() noexcept { return _words; }
Range range() const noexcept { return {getStartIndex(), size()}; }
- Index size() const { return vespalib::atomic::load_ref_relaxed(_sz); }
- Index sizeBytes() const { return numBytes(getActiveSize()); }
+ Index size() const noexcept { return vespalib::atomic::load_ref_relaxed(_sz); }
+ Index sizeBytes() const noexcept { return numBytes(getActiveSize()); }
bool testBit(Index idx) const noexcept {
return ((load(_words[wordNum(idx)]) & mask(idx)) != 0);
}
Index getSizeAcquire() const {
return vespalib::atomic::load_ref_acquire(_sz);
}
- bool testBitAcquire(Index idx) const {
+ bool testBitAcquire(Index idx) const noexcept {
auto my_word = vespalib::atomic::load_ref_acquire(_words[wordNum(idx)]);
return (my_word & mask(idx)) != 0;
}
@@ -144,31 +144,31 @@ public:
}
vespalib::atomic::store_ref_release(_sz, sz);
}
- void set_bit_no_range_check(Index idx) {
+ void set_bit_no_range_check(Index idx) noexcept {
store_unchecked(_words[wordNum(idx)], _words[wordNum(idx)] | mask(idx));
}
- void clear_bit_no_range_check(Index idx) {
+ void clear_bit_no_range_check(Index idx) noexcept {
store_unchecked(_words[wordNum(idx)], _words[wordNum(idx)] & ~ mask(idx));
}
- void flip_bit_no_range_check(Index idx) {
+ void flip_bit_no_range_check(Index idx) noexcept {
store_unchecked(_words[wordNum(idx)], _words[wordNum(idx)] ^ mask(idx));
}
- void range_check(Index idx) const {
+ void range_check(Index idx) const noexcept {
#if VESPA_ENABLE_BITVECTOR_RANGE_CHECK
assert(!_enable_range_check || (idx >= _startOffset && idx < _sz));
#else
(void) idx;
#endif
}
- void setBit(Index idx) {
+ void setBit(Index idx) noexcept {
range_check(idx);
set_bit_no_range_check(idx);
}
- void clearBit(Index idx) {
+ void clearBit(Index idx) noexcept {
range_check(idx);
clear_bit_no_range_check(idx);
}
- void flipBit(Index idx) {
+ void flipBit(Index idx) noexcept {
range_check(idx);
flip_bit_no_range_check(idx);
}
@@ -283,20 +283,20 @@ public:
static void consider_enable_range_check();
protected:
using Alloc = vespalib::alloc::Alloc;
- VESPA_DLL_LOCAL BitVector(void * buf, Index start, Index end);
- BitVector(void * buf, Index sz) : BitVector(buf, 0, sz) { }
- BitVector() : BitVector(nullptr, 0) { }
+ VESPA_DLL_LOCAL BitVector(void * buf, Index start, Index end) noexcept;
+ BitVector(void * buf, Index sz) noexcept : BitVector(buf, 0, sz) { }
+ BitVector() noexcept : BitVector(nullptr, 0) { }
void init(void * buf, Index start, Index end);
- void updateCount() const { _numTrueBits.store(count(), std::memory_order_relaxed); }
- void setTrueBits(Index numTrueBits) { _numTrueBits.store(numTrueBits, std::memory_order_relaxed); }
+ void updateCount() const noexcept { _numTrueBits.store(count(), std::memory_order_relaxed); }
+ void setTrueBits(Index numTrueBits) noexcept { _numTrueBits.store(numTrueBits, std::memory_order_relaxed); }
VESPA_DLL_LOCAL void clearIntervalNoInvalidation(Range range);
- bool isValidCount() const { return isValidCount(_numTrueBits.load(std::memory_order_relaxed)); }
- static bool isValidCount(Index v) { return v != invalidCount(); }
- static Index numWords(Index bits) { return wordNum(bits + 1 + (WordLen - 1)); }
- static Index numBytes(Index bits) { return numWords(bits) * sizeof(Word); }
- size_t numWords() const { return numWords(size()); }
- static size_t getAlignment() { return 0x40u; }
- static size_t numActiveBytes(Index start, Index end) { return numActiveWords(start, end) * sizeof(Word); }
+ bool isValidCount() const noexcept { return isValidCount(_numTrueBits.load(std::memory_order_relaxed)); }
+ static bool isValidCount(Index v) noexcept { return v != invalidCount(); }
+ static Index numWords(Index bits) noexcept { return wordNum(bits + 1 + (WordLen - 1)); }
+ static Index numBytes(Index bits) noexcept { return numWords(bits) * sizeof(Word); }
+ size_t numWords() const noexcept { return numWords(size()); }
+ static constexpr size_t getAlignment() noexcept { return 0x40u; }
+ static size_t numActiveBytes(Index start, Index end) noexcept { return numActiveWords(start, end) * sizeof(Word); }
static Alloc allocatePaddedAndAligned(Index sz) {
return allocatePaddedAndAligned(0, sz);
}
@@ -308,29 +308,29 @@ protected:
private:
static Word load(const Word &word) noexcept { return vespalib::atomic::load_ref_relaxed(word); }
VESPA_DLL_LOCAL void store(Word &word, Word value);
- static void store_unchecked(Word &word, Word value) {
+ static void store_unchecked(Word &word, Word value) noexcept {
return vespalib::atomic::store_ref_relaxed(word, value);
}
friend PartialBitVector;
- const Word * getWordIndex(Index index) const { return static_cast<const Word *>(getStart()) + wordNum(index); }
- Word * getWordIndex(Index index) { return static_cast<Word *>(getStart()) + wordNum(index); }
- const Word * getActiveStart() const { return getWordIndex(getStartIndex()); }
- Word * getActiveStart() { return getWordIndex(getStartIndex()); }
- Index getStartWordNum() const { return wordNum(getStartIndex()); }
- Index getActiveSize() const { return size() - getStartIndex(); }
- size_t getActiveBytes() const { return numActiveBytes(getStartIndex(), size()); }
- size_t numActiveWords() const { return numActiveWords(getStartIndex(), size()); }
- static size_t numActiveWords(Index start, Index end) {
+ const Word * getWordIndex(Index index) const noexcept { return static_cast<const Word *>(getStart()) + wordNum(index); }
+ Word * getWordIndex(Index index) noexcept { return static_cast<Word *>(getStart()) + wordNum(index); }
+ const Word * getActiveStart() const noexcept { return getWordIndex(getStartIndex()); }
+ Word * getActiveStart() noexcept { return getWordIndex(getStartIndex()); }
+ Index getStartWordNum() const noexcept { return wordNum(getStartIndex()); }
+ Index getActiveSize() const noexcept { return size() - getStartIndex(); }
+ size_t getActiveBytes() const noexcept { return numActiveBytes(getStartIndex(), size()); }
+ size_t numActiveWords() const noexcept { return numActiveWords(getStartIndex(), size()); }
+ static size_t numActiveWords(Index start, Index end) noexcept {
return (end >= start) ? (numWords(end) - wordNum(start)) : 0;
}
- static Index invalidCount() { return std::numeric_limits<Index>::max(); }
- void setGuardBit() { set_bit_no_range_check(size()); }
- void incNumBits() {
+ static constexpr Index invalidCount() noexcept { return std::numeric_limits<Index>::max(); }
+ void setGuardBit() noexcept { set_bit_no_range_check(size()); }
+ void incNumBits() noexcept {
if ( isValidCount() ) {
_numTrueBits.store(_numTrueBits.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed);
}
}
- void decNumBits() {
+ void decNumBits() noexcept {
if ( isValidCount() ) {
_numTrueBits.store(_numTrueBits.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);
diff --git a/searchlib/src/vespa/searchlib/parsequery/parse.h b/searchlib/src/vespa/searchlib/parsequery/parse.h
index bb0b7b88caa..7029a3d4e12 100644
--- a/searchlib/src/vespa/searchlib/parsequery/parse.h
+++ b/searchlib/src/vespa/searchlib/parsequery/parse.h
@@ -25,7 +25,7 @@ class ParseItem
public:
/** The type of the item is from this set of values.
It is important that these defines match those in container-search/src/main/java/com/yahoo/prelude/query/Item.java */
- enum ItemType {
+ enum ItemType : uint8_t {
ITEM_OR = 0,
ITEM_AND = 1,
ITEM_NOT = 2,
diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp
index 0773958e535..6ed05e14519 100644
--- a/storage/src/tests/distributor/mergeoperationtest.cpp
+++ b/storage/src/tests/distributor/mergeoperationtest.cpp
@@ -45,6 +45,7 @@ struct MergeOperationTest : Test, DistributorStripeTestUtil {
void assert_simple_merge_bucket_command();
void assert_simple_delete_bucket_command();
MergeBucketMetricSet& get_merge_metrics();
+ [[nodiscard]] uint32_t merge_footprint(const std::string& db_state);
};
std::shared_ptr<MergeOperation>
@@ -86,7 +87,7 @@ MergeOperationTest::assert_simple_merge_bucket_command()
{
ASSERT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
"cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
- "reasons to start: ) => 0",
+ "estimated memory footprint: 2 bytes, reasons to start: ) => 0",
_sender.getLastCommand(true));
}
@@ -295,7 +296,7 @@ TEST_F(MergeOperationTest, do_not_remove_copies_with_pending_messages) {
std::string merge("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
"cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
- "reasons to start: ) => 0");
+ "estimated memory footprint: 2 bytes, reasons to start: ) => 0");
ASSERT_EQ(merge, _sender.getLastCommand(true));
@@ -356,8 +357,8 @@ TEST_F(MergeOperationTest, allow_deleting_active_source_only_replica) {
std::string merge(
"MergeBucketCommand(BucketId(0x4000000000000001), to time "
- "10000000, cluster state version: 0, nodes: [0, 2, 1 "
- "(source only)], chain: [], reasons to start: ) => 0");
+ "10000000, cluster state version: 0, nodes: [0, 2, 1 (source only)], chain: [], "
+ "estimated memory footprint: 2 bytes, reasons to start: ) => 0");
ASSERT_EQ(merge, _sender.getLastCommand(true));
sendReply(op);
@@ -580,14 +581,14 @@ TEST_F(MergeOperationTest, unordered_merges_only_sent_iff_config_enabled_and_all
setup_simple_merge_op({1, 2, 3}); // Note: these will be re-ordered in ideal state order internally
ASSERT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
"cluster state version: 0, nodes: [2, 1, 3], chain: [], "
- "reasons to start: ) => 1",
+ "estimated memory footprint: 2 bytes, reasons to start: ) => 1",
_sender.getLastCommand(true));
// All involved nodes support unordered merging; merges should be unordered (sent to ideal node 2)
setup_simple_merge_op({1, 2});
ASSERT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000001, "
"cluster state version: 0, nodes: [2, 1], chain: [] (unordered forwarding), "
- "reasons to start: ) => 2",
+ "estimated memory footprint: 2 bytes, reasons to start: ) => 2",
_sender.getLastCommand(true));
_sender.clear();
@@ -600,7 +601,7 @@ TEST_F(MergeOperationTest, unordered_merges_only_sent_iff_config_enabled_and_all
setup_simple_merge_op({2, 1});
ASSERT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000002, "
"cluster state version: 0, nodes: [2, 1], chain: [], "
- "reasons to start: ) => 1",
+ "estimated memory footprint: 2 bytes, reasons to start: ) => 1",
_sender.getLastCommand(true));
}
@@ -644,4 +645,60 @@ TEST_F(MergeOperationTest, no_delete_bucket_ops_sent_if_node_subset_cancelled) {
EXPECT_FALSE(op->ok());
}
+uint32_t MergeOperationTest::merge_footprint(const std::string& db_state) {
+ getClock().setAbsoluteTimeInSeconds(10);
+ addNodesToBucketDB(document::BucketId(16, 1), db_state);
+ enable_cluster_state("distributor:1 storage:3");
+ MergeOperation op(BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)), toVector<uint16_t>(0, 1, 2)));
+ op.setIdealStateManager(&getIdealStateManager());
+
+ _sender.clear();
+ op.start(_sender);
+ assert(!_sender.commands().empty());
+ auto cmd_as_merge = std::dynamic_pointer_cast<api::MergeBucketCommand>(_sender.commands()[0]);
+ assert(cmd_as_merge);
+ return cmd_as_merge->estimated_memory_footprint();
+}
+
+TEST_F(MergeOperationTest, memory_footprint_is_computed_from_replica_state) {
+ // Reminder of syntax: "index=checksum/doc count/doc size"
+ // {0,2} in sync, {1} out of sync; footprint is sum across "sync-ness groups"
+ EXPECT_EQ(merge_footprint("0=10/100/3000,1=20/200/7000,2=10/100/3000"), 10'000);
+ EXPECT_EQ(merge_footprint("0=10/100/7000,1=20/200/3000,2=10/100/7000"), 10'000);
+ // All replicas mutually out of sync
+ EXPECT_EQ(merge_footprint("0=10/100/3000,1=20/200/7000,2=30/100/5000"), 15'000);
+ // One replica empty
+ EXPECT_EQ(merge_footprint("0=20/200/4000,1=20/200/4000,2=1/0/0"), 4'000);
+}
+
+TEST_F(MergeOperationTest, memory_footprint_is_bounded_by_max_expected_merge_chunk_size) {
+ auto cfg = make_config();
+ cfg->setSplitSize(20'000); // proxy for max merge chunk size
+ configure_stripe(cfg);
+
+ EXPECT_EQ(merge_footprint("0=10/100/5000,1=20/200/5000,2=30/100/9999"), 19'999);
+ EXPECT_EQ(merge_footprint("0=10/100/5000,1=20/200/5000,2=30/100/10000"), 20'000);
+ EXPECT_EQ(merge_footprint("0=10/100/5000,1=20/200/5000,2=30/100/10001"), 20'000);
+ EXPECT_EQ(merge_footprint("0=10/100/6000,1=20/200/7000,2=30/100/20000"), 20'000);
+}
+
+TEST_F(MergeOperationTest, memory_footprint_with_single_doc_replica_can_be_greater_than_max_expected_bucket_size) {
+ auto cfg = make_config();
+ cfg->setSplitSize(20'000);
+ configure_stripe(cfg);
+
+ EXPECT_EQ(merge_footprint("0=10/100/5000,1=20/200/5000,2=30/1/50000"), 50'000);
+ EXPECT_EQ(merge_footprint("0=10/100/5000,1=20/1/60000,2=30/1/50000"), 60'000);
+}
+
+TEST_F(MergeOperationTest, memory_footprint_estimation_saturates_instead_of_overflowing_u32_limits) {
+ auto cfg = make_config();
+ cfg->setSplitSize(1'234'567);
+ configure_stripe(cfg);
+ // Here we massively _undercount_ our estimate, but this is a wildly unrealistic replica state
+ // just for testing correctness of arithmetic ops.
+ // UINT32_MAX/3 * 3 + 1 will overflow to 0 if unchecked. Must be saturated instead.
+ EXPECT_EQ(merge_footprint("0=10/10/1431655765,1=20/10/1431655765,2=30/10/1431655766"), 1'234'567);
+}
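+
For orientation, the expectations above can be summarized as follows. This is a standalone sketch of one reading of the tests, not the code under test (which is only partially visible in this diff), and all names in the sketch are hypothetical: replicas sharing a checksum are in sync and counted once per group, the per-group document sizes are summed, the sum is capped by the expected merge chunk size (proxied by the split size), a replica holding a single document larger than that cap lifts the estimate to that document's size, and the arithmetic saturates rather than overflowing.

    #include <algorithm>
    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    struct ReplicaInfo {
        uint32_t checksum;
        uint32_t doc_count;
        uint32_t total_doc_size;
    };

    // Hypothetical helper mirroring the test expectations above; not the production code.
    uint32_t sketch_merge_footprint(const std::vector<ReplicaInfo>& replicas, uint32_t max_chunk_size) {
        std::unordered_set<uint32_t> seen_checksums;
        uint64_t sum = 0;                 // 64-bit accumulator stands in for saturating 32-bit math
        uint32_t largest_single_doc = 0;
        for (const auto& r : replicas) {
            if (seen_checksums.insert(r.checksum).second) {
                sum += r.total_doc_size;  // one representative per "sync-ness group"
            }
            if (r.doc_count == 1) {
                largest_single_doc = std::max(largest_single_doc, r.total_doc_size);
            }
        }
        uint64_t capped = std::min<uint64_t>(sum, max_chunk_size);   // bounded by expected merge chunk size
        capped = std::max<uint64_t>(capped, largest_single_doc);     // a single document must always fit
        return static_cast<uint32_t>(std::min<uint64_t>(capped, UINT32_MAX)); // saturate, never overflow
    }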
+
} // storage::distributor
diff --git a/storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp b/storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp
index addc80e4150..698d8dee573 100644
--- a/storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp
+++ b/storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp
@@ -79,7 +79,7 @@ struct StorageProtocolTest : TestWithParam<vespalib::Version> {
_protocol(_docMan.getTypeRepoSP())
{
}
- ~StorageProtocolTest();
+ ~StorageProtocolTest() override;
void set_dummy_bucket_info_reply_fields(BucketInfoReply& reply) {
reply.setBucketInfo(_dummy_bucket_info);
@@ -456,18 +456,12 @@ TEST_P(StorageProtocolTest, delete_bucket) {
TEST_P(StorageProtocolTest, merge_bucket) {
using Node = api::MergeBucketCommand::Node;
- std::vector<Node> nodes;
- nodes.push_back(Node(4, false));
- nodes.push_back(Node(13, true));
- nodes.push_back(Node(26, true));
-
- std::vector<uint16_t> chain;
- // Not a valid chain wrt. the nodes, but just want to have unique values
- chain.push_back(7);
- chain.push_back(14);
+ std::vector<Node> nodes = {{4, false}, {13, true}, {26, true}};
+ std::vector<uint16_t> chain = {7, 14}; // Not a valid chain wrt. the nodes, but just want to have unique values
auto cmd = std::make_shared<MergeBucketCommand>(_bucket, nodes, Timestamp(1234), 567, chain);
cmd->set_use_unordered_forwarding(true);
+ cmd->set_estimated_memory_footprint(123'456'789);
auto cmd2 = copyCommand(cmd);
EXPECT_EQ(_bucket, cmd2->getBucket());
EXPECT_EQ(nodes, cmd2->getNodes());
@@ -475,6 +469,7 @@ TEST_P(StorageProtocolTest, merge_bucket) {
EXPECT_EQ(uint32_t(567), cmd2->getClusterStateVersion());
EXPECT_EQ(chain, cmd2->getChain());
EXPECT_EQ(cmd2->use_unordered_forwarding(), cmd->use_unordered_forwarding());
+ EXPECT_EQ(cmd2->estimated_memory_footprint(), 123'456'789);
auto reply = std::make_shared<MergeBucketReply>(*cmd);
auto reply2 = copyReply(reply);
@@ -485,6 +480,17 @@ TEST_P(StorageProtocolTest, merge_bucket) {
EXPECT_EQ(chain, reply2->getChain());
}
+TEST_P(StorageProtocolTest, merge_bucket_estimated_memory_footprint_is_zero_by_default) {
+ using Node = api::MergeBucketCommand::Node;
+ std::vector<Node> nodes = {{4, false}, {13, true}, {26, true}};
+ std::vector<uint16_t> chain = {7, 14};
+
+ auto cmd = std::make_shared<MergeBucketCommand>(_bucket, nodes, Timestamp(1234), 567, chain);
+ cmd->set_use_unordered_forwarding(true);
+ auto cmd2 = copyCommand(cmd);
+ EXPECT_EQ(cmd2->estimated_memory_footprint(), 0);
+}
+
TEST_P(StorageProtocolTest, split_bucket) {
auto cmd = std::make_shared<SplitBucketCommand>(_bucket);
EXPECT_EQ(0u, cmd->getMinSplitBits());
diff --git a/storage/src/tests/storageserver/mergethrottlertest.cpp b/storage/src/tests/storageserver/mergethrottlertest.cpp
index 7a7f2551c2d..6f80ffe0727 100644
--- a/storage/src/tests/storageserver/mergethrottlertest.cpp
+++ b/storage/src/tests/storageserver/mergethrottlertest.cpp
@@ -1,17 +1,18 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <tests/common/testhelper.h>
#include <tests/common/dummystoragelink.h>
+#include <tests/common/testhelper.h>
#include <tests/common/teststorageapp.h>
#include <vespa/config/helper/configgetter.hpp>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/messagebus/dynamicthrottlepolicy.h>
-#include <vespa/storage/storageserver/mergethrottler.h>
#include <vespa/storage/persistence/messages.h>
+#include <vespa/storage/storageserver/mergethrottler.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/state.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/size_literals.h>
#include <unordered_set>
#include <memory>
#include <iterator>
@@ -30,22 +31,33 @@ namespace storage {
namespace {
using StorServerConfig = vespa::config::content::core::StorServerConfig;
+using StorServerConfigBuilder = vespa::config::content::core::StorServerConfigBuilder;
vespalib::string _storage("storage");
+std::unique_ptr<StorServerConfig> default_server_config() {
+ vdstestlib::DirConfig dir_config(getStandardConfig(true));
+ auto cfg_uri = ::config::ConfigUri(dir_config.getConfigId());
+ return config_from<StorServerConfig>(cfg_uri);
+}
+
struct MergeBuilder {
- document::BucketId _bucket;
- api::Timestamp _maxTimestamp;
- std::vector<uint16_t> _nodes;
- std::vector<uint16_t> _chain;
+ document::BucketId _bucket;
+ api::Timestamp _maxTimestamp;
+ std::vector<uint16_t> _nodes;
+ std::vector<uint16_t> _chain;
std::unordered_set<uint16_t> _source_only;
- uint64_t _clusterStateVersion;
+ uint64_t _clusterStateVersion;
+ uint32_t _memory_usage;
+ bool _unordered;
- MergeBuilder(const document::BucketId& bucket)
+ explicit MergeBuilder(const document::BucketId& bucket)
: _bucket(bucket),
_maxTimestamp(1234),
_chain(),
- _clusterStateVersion(1)
+ _clusterStateVersion(1),
+ _memory_usage(0),
+ _unordered(false)
{
nodes(0, 1, 2);
}
@@ -100,6 +112,14 @@ struct MergeBuilder {
_source_only.insert(node);
return *this;
}
+ MergeBuilder& memory_usage(uint32_t usage_bytes) {
+ _memory_usage = usage_bytes;
+ return *this;
+ }
+ MergeBuilder& unordered(bool is_unordered) {
+ _unordered = is_unordered;
+ return *this;
+ }
api::MergeBucketCommand::SP create() const {
std::vector<api::MergeBucketCommand::Node> n;
@@ -112,6 +132,8 @@ struct MergeBuilder {
makeDocumentBucket(_bucket), n, _maxTimestamp,
_clusterStateVersion, _chain);
cmd->setAddress(StorageMessageAddress::create(&_storage, lib::NodeType::STORAGE, _nodes[0]));
+ cmd->set_estimated_memory_footprint(_memory_usage);
+ cmd->set_use_unordered_forwarding(_unordered);
return cmd;
}
};
@@ -137,20 +159,26 @@ struct MergeThrottlerTest : Test {
std::vector<DummyStorageLink*> _bottomLinks;
MergeThrottlerTest();
- ~MergeThrottlerTest();
+ ~MergeThrottlerTest() override;
void SetUp() override;
void TearDown() override;
+ MergeThrottler& throttler(size_t idx) noexcept {
+ assert(idx < _throttlers.size());
+ return *_throttlers[idx];
+ }
+
api::MergeBucketCommand::SP sendMerge(const MergeBuilder&);
- void sendAndExpectReply(
+ void send_and_expect_reply(
const std::shared_ptr<api::StorageMessage>& msg,
const api::MessageType& expectedReplyType,
api::ReturnCode::Result expectedResultCode);
+ std::shared_ptr<api::StorageMessage> send_and_expect_forwarding(const std::shared_ptr<api::StorageMessage>& msg);
+
void fill_throttler_queue_with_n_commands(uint16_t throttler_index, size_t queued_count);
- void fill_up_throttler_active_window_and_queue(uint16_t node_idx);
void receive_chained_merge_with_full_queue(bool disable_queue_limits, bool unordered_fwd = false);
std::shared_ptr<api::MergeBucketCommand> peek_throttler_queue_top(size_t throttler_idx) {
@@ -170,20 +198,17 @@ MergeThrottlerTest::~MergeThrottlerTest() = default;
void
MergeThrottlerTest::SetUp()
{
- vdstestlib::DirConfig dir_config(getStandardConfig(true));
- auto cfg_uri = ::config::ConfigUri(dir_config.getConfigId());
- auto config = config_from<StorServerConfig>(cfg_uri);
-
+ auto config = default_server_config();
for (int i = 0; i < _storageNodeCount; ++i) {
auto server = std::make_unique<TestServiceLayerApp>(NodeIndex(i));
server->setClusterState(lib::ClusterState("distributor:100 storage:100 version:1"));
std::unique_ptr<DummyStorageLink> top;
top = std::make_unique<DummyStorageLink>();
- MergeThrottler* throttler = new MergeThrottler(*config, server->getComponentRegister());
+ auto* throttler = new MergeThrottler(*config, server->getComponentRegister(), vespalib::HwInfo());
// MergeThrottler will be sandwiched in between two dummy links
top->push_back(std::unique_ptr<StorageLink>(throttler));
- DummyStorageLink* bottom = new DummyStorageLink;
+ auto* bottom = new DummyStorageLink;
throttler->push_back(std::unique_ptr<StorageLink>(bottom));
_servers.push_back(std::shared_ptr<TestServiceLayerApp>(server.release()));
@@ -291,6 +316,7 @@ TEST_F(MergeThrottlerTest, chain) {
cmd->setAddress(StorageMessageAddress::create(&_storage, lib::NodeType::STORAGE, 0));
const uint16_t distributorIndex = 123;
cmd->setSourceIndex(distributorIndex); // Dummy distributor index that must be forwarded
+ cmd->set_estimated_memory_footprint(456'789);
StorageMessage::SP fwd = cmd;
StorageMessage::SP fwdToExec;
@@ -322,10 +348,12 @@ TEST_F(MergeThrottlerTest, chain) {
}
EXPECT_TRUE(checkChain(fwd, chain.begin(), chain.end()));
}
- // Ensure priority, cluster state version and timeout is correctly forwarded
+ // Ensure operation properties are forwarded as expected
EXPECT_EQ(7, static_cast<int>(fwd->getPriority()));
- EXPECT_EQ(123, dynamic_cast<const MergeBucketCommand&>(*fwd).getClusterStateVersion());
- EXPECT_EQ(54321ms, dynamic_cast<const StorageCommand&>(*fwd).getTimeout());
+ auto& as_merge = dynamic_cast<const MergeBucketCommand&>(*fwd);
+ EXPECT_EQ(as_merge.getClusterStateVersion(), 123);
+ EXPECT_EQ(as_merge.getTimeout(), 54321ms);
+ EXPECT_EQ(as_merge.estimated_memory_footprint(), 456'789);
}
_topLinks[lastNodeIdx]->sendDown(fwd);
@@ -1209,7 +1237,7 @@ void
MergeThrottlerTest::receive_chained_merge_with_full_queue(bool disable_queue_limits, bool unordered_fwd)
{
// Note: uses node with index 1 to not be the first node in chain
- _throttlers[1]->set_disable_queue_limits_for_chained_merges(disable_queue_limits);
+ _throttlers[1]->set_disable_queue_limits_for_chained_merges_locking(disable_queue_limits);
size_t max_pending = throttler_max_merges_pending(1);
size_t max_enqueued = _throttlers[1]->getMaxQueueSize();
for (size_t i = 0; i < max_pending + max_enqueued; ++i) {
@@ -1350,7 +1378,7 @@ TEST_F(MergeThrottlerTest, broken_cycle) {
}
void
-MergeThrottlerTest::sendAndExpectReply(
+MergeThrottlerTest::send_and_expect_reply(
const std::shared_ptr<api::StorageMessage>& msg,
const api::MessageType& expectedReplyType,
api::ReturnCode::Result expectedResultCode)
@@ -1362,13 +1390,22 @@ MergeThrottlerTest::sendAndExpectReply(
ASSERT_EQ(expectedResultCode, storageReply.getResult().getResult());
}
+std::shared_ptr<api::StorageMessage>
+MergeThrottlerTest::send_and_expect_forwarding(const std::shared_ptr<api::StorageMessage>& msg)
+{
+ _topLinks[0]->sendDown(msg);
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ return _topLinks[0]->getAndRemoveMessage(MessageType::MERGEBUCKET);
+}
+
TEST_F(MergeThrottlerTest, get_bucket_diff_command_not_in_active_set_is_rejected) {
document::BucketId bucket(16, 1234);
std::vector<api::GetBucketDiffCommand::Node> nodes;
auto getDiffCmd = std::make_shared<api::GetBucketDiffCommand>(
makeDocumentBucket(bucket), nodes, api::Timestamp(1234));
- ASSERT_NO_FATAL_FAILURE(sendAndExpectReply(getDiffCmd,
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_reply(
+ getDiffCmd,
api::MessageType::GETBUCKETDIFF_REPLY,
api::ReturnCode::ABORTED));
ASSERT_EQ(0, _bottomLinks[0]->getNumCommands());
@@ -1379,7 +1416,8 @@ TEST_F(MergeThrottlerTest, apply_bucket_diff_command_not_in_active_set_is_reject
std::vector<api::GetBucketDiffCommand::Node> nodes;
auto applyDiffCmd = std::make_shared<api::ApplyBucketDiffCommand>(makeDocumentBucket(bucket), nodes);
- ASSERT_NO_FATAL_FAILURE(sendAndExpectReply(applyDiffCmd,
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_reply(
+ applyDiffCmd,
api::MessageType::APPLYBUCKETDIFF_REPLY,
api::ReturnCode::ABORTED));
ASSERT_EQ(0, _bottomLinks[0]->getNumCommands());
@@ -1411,7 +1449,8 @@ TEST_F(MergeThrottlerTest, new_cluster_state_aborts_all_outdated_active_merges)
auto getDiffCmd = std::make_shared<api::GetBucketDiffCommand>(
makeDocumentBucket(bucket), std::vector<api::GetBucketDiffCommand::Node>(), api::Timestamp(123));
- ASSERT_NO_FATAL_FAILURE(sendAndExpectReply(getDiffCmd,
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_reply(
+ getDiffCmd,
api::MessageType::GETBUCKETDIFF_REPLY,
api::ReturnCode::ABORTED));
}
@@ -1428,7 +1467,8 @@ TEST_F(MergeThrottlerTest, backpressure_busy_bounces_merges_for_configured_durat
EXPECT_EQ(0, _throttlers[0]->getMetrics().bounced_due_to_back_pressure.getValue());
EXPECT_EQ(uint64_t(0), _throttlers[0]->getMetrics().local.failures.busy.getValue());
- ASSERT_NO_FATAL_FAILURE(sendAndExpectReply(MergeBuilder(bucket).create(),
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_reply(
+ MergeBuilder(bucket).create(),
api::MessageType::MERGEBUCKET_REPLY,
api::ReturnCode::BUSY));
@@ -1480,6 +1520,159 @@ TEST_F(MergeThrottlerTest, backpressure_evicts_all_queued_merges) {
EXPECT_EQ(ReturnCode::BUSY, dynamic_cast<const MergeBucketReply&>(*reply).getResult().getResult());
}
+TEST_F(MergeThrottlerTest, exceeding_memory_soft_limit_rejects_merges_even_with_available_active_window_slots) {
+ ASSERT_GT(throttler_max_merges_pending(0), 1); // Sanity check for the test itself
+
+ throttler(0).set_max_merge_memory_usage_bytes_locking(10_Mi);
+
+ ASSERT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 0);
+
+ std::shared_ptr<api::StorageMessage> fwd_cmd;
+ ASSERT_NO_FATAL_FAILURE(fwd_cmd = send_and_expect_forwarding(
+ MergeBuilder(document::BucketId(16, 0)).nodes(0, 1, 2).memory_usage(5_Mi).create()));
+
+ EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 5_Mi);
+
+ // Accepting this merge would exceed memory limits. It is sent as part of a forwarded unordered
+ // merge and can therefore NOT be enqueued; it must be bounced immediately.
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_reply(
+ MergeBuilder(document::BucketId(16, 1))
+ .nodes(2, 1, 0).chain(2, 1).unordered(true)
+ .memory_usage(8_Mi).create(),
+ MessageType::MERGEBUCKET_REPLY, ReturnCode::BUSY));
+
+ EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 5_Mi); // Unchanged
+
+ // Fail the forwarded merge. This shall immediately free up the memory usage, allowing a new merge in.
+ auto fwd_reply = dynamic_cast<api::MergeBucketCommand&>(*fwd_cmd).makeReply();
+ fwd_reply->setResult(ReturnCode(ReturnCode::ABORTED, "node stumbled into a ravine"));
+
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_reply(
+ std::shared_ptr<api::StorageReply>(std::move(fwd_reply)),
+ MessageType::MERGEBUCKET_REPLY, ReturnCode::ABORTED)); // Unwind reply for failed merge
+
+ ASSERT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 0);
+
+ // New merge is accepted and forwarded
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_forwarding(
+ MergeBuilder(document::BucketId(16, 2)).nodes(0, 1, 2).unordered(true).memory_usage(9_Mi).create()));
+
+ EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 9_Mi);
+}
+
+TEST_F(MergeThrottlerTest, exceeding_memory_soft_limit_can_enqueue_unordered_merge_sent_directly_from_distributor) {
+ throttler(0).set_max_merge_memory_usage_bytes_locking(10_Mi);
+
+ ASSERT_NO_FATAL_FAILURE(send_and_expect_forwarding(
+ MergeBuilder(document::BucketId(16, 0)).nodes(0, 1, 2).memory_usage(5_Mi).create()));
+
+ EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 5_Mi);
+
+ // Accepting this merge would exceed memory limits. It is sent directly from a distributor and
+ // can therefore be enqueued.
+ _topLinks[0]->sendDown(MergeBuilder(document::BucketId(16, 1)).nodes(0, 1, 2).unordered(true).memory_usage(8_Mi).create());
+ waitUntilMergeQueueIs(throttler(0), 1, _messageWaitTime); // Should end up in queue
+
+ EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 5_Mi); // Unchanged
+}
+
+TEST_F(MergeThrottlerTest, at_least_one_merge_is_accepted_even_if_exceeding_memory_soft_limit) {
+ throttler(0).set_max_merge_memory_usage_bytes_locking(5_Mi);
+
+ _topLinks[0]->sendDown(MergeBuilder(document::BucketId(16, 0)).nodes(0, 1, 2).unordered(true).memory_usage(100_Mi).create());
+ _topLinks[0]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime); // Forwarded, _not_ bounced
+
+ EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 100_Mi);
+}
+
+TEST_F(MergeThrottlerTest, queued_merges_are_not_counted_towards_memory_usage) {
+ // Our utility function for filling queues uses bucket IDs {16, x} where x is increasing
+ // from 0 to the max pending. Ensure we don't accidentally overlap with the bucket ID
+ // we send a merge for below in the test code.
+ ASSERT_LT(throttler_max_merges_pending(0), 1000);
+
+ throttler(0).set_max_merge_memory_usage_bytes_locking(50_Mi);
+ // Fill up active window on node 0. Note: these merges do not have any associated memory cost.
+ fill_throttler_queue_with_n_commands(0, 0);
+
+ EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 0_Mi);
+
+ _topLinks[0]->sendDown(MergeBuilder(document::BucketId(16, 1000)).nodes(0, 1, 2).unordered(true).memory_usage(10_Mi).create());
+ waitUntilMergeQueueIs(throttler(0), 1, _messageWaitTime); // Should end up in queue
+
+ EXPECT_EQ(throttler(0).getMetrics().estimated_merge_memory_usage.getLast(), 0_Mi);
+}
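+
Read together, the soft-limit tests above suggest an admission rule along the following lines. This is a sketch under one reading of the tests; the names are hypothetical and the actual MergeThrottler logic is not shown in this diff. A merge that cannot be admitted is bounced with BUSY, unless it arrived directly from the distributor, in which case it may be queued instead; queued merges do not count towards usage.

    #include <cstdint>

    // Hypothetical sketch of the soft-limit admission check exercised above.
    bool may_admit_merge(uint64_t current_usage_bytes,
                         uint32_t estimated_footprint_bytes,
                         uint64_t soft_limit_bytes) {
        if (soft_limit_bytes == 0) {
            return true;                  // zero limit means "no memory limit"
        }
        if (current_usage_bytes == 0) {
            return true;                  // always let at least one merge through
        }
        return current_usage_bytes + estimated_footprint_bytes <= soft_limit_bytes;
    }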
+
+namespace {
+
+vespalib::HwInfo make_mem_info(uint64_t mem_size) {
+ return {{0, false, false}, {mem_size}, {1}};
+}
+
+}
+
+TEST_F(MergeThrottlerTest, memory_limit_can_be_auto_deduced_from_hw_info) {
+ StorServerConfigBuilder cfg(*default_server_config());
+ auto& cfg_limit = cfg.mergeThrottlingMemoryLimit;
+ auto& mt = throttler(0);
+
+ // Enable auto-deduction of limits
+ cfg_limit.maxUsageBytes = 0;
+
+ cfg_limit.autoLowerBoundBytes = 100'000;
+ cfg_limit.autoUpperBoundBytes = 750'000;
+ cfg_limit.autoPhysMemScaleFactor = 0.5;
+
+ mt.set_hw_info_locking(make_mem_info(1'000'000));
+ mt.on_configure(cfg);
+ EXPECT_EQ(mt.max_merge_memory_usage_bytes_locking(), 500'000);
+ EXPECT_EQ(throttler(0).getMetrics().merge_memory_limit.getLast(), 500'000);
+
+ cfg_limit.autoPhysMemScaleFactor = 0.75;
+ mt.on_configure(cfg);
+ EXPECT_EQ(mt.max_merge_memory_usage_bytes_locking(), 750'000);
+ EXPECT_EQ(throttler(0).getMetrics().merge_memory_limit.getLast(), 750'000);
+
+ cfg_limit.autoPhysMemScaleFactor = 0.25;
+ mt.on_configure(cfg);
+ EXPECT_EQ(mt.max_merge_memory_usage_bytes_locking(), 250'000);
+
+ // Min-capped
+ cfg_limit.autoPhysMemScaleFactor = 0.05;
+ mt.on_configure(cfg);
+ EXPECT_EQ(mt.max_merge_memory_usage_bytes_locking(), 100'000);
+
+ // Max-capped
+ cfg_limit.autoPhysMemScaleFactor = 0.90;
+ mt.on_configure(cfg);
+ EXPECT_EQ(mt.max_merge_memory_usage_bytes_locking(), 750'000);
+}
+
+TEST_F(MergeThrottlerTest, memory_limit_can_be_set_explicitly) {
+ StorServerConfigBuilder cfg(*default_server_config());
+ auto& cfg_limit = cfg.mergeThrottlingMemoryLimit;
+ auto& mt = throttler(0);
+
+ cfg_limit.maxUsageBytes = 1'234'567;
+ mt.set_hw_info_locking(make_mem_info(1'000'000));
+ mt.on_configure(cfg);
+ EXPECT_EQ(mt.max_merge_memory_usage_bytes_locking(), 1'234'567);
+ EXPECT_EQ(throttler(0).getMetrics().merge_memory_limit.getLast(), 1'234'567);
+}
+
+TEST_F(MergeThrottlerTest, memory_limit_can_be_set_to_unlimited) {
+ StorServerConfigBuilder cfg(*default_server_config());
+ auto& cfg_limit = cfg.mergeThrottlingMemoryLimit;
+ auto& mt = throttler(0);
+
+ cfg_limit.maxUsageBytes = -1;
+ mt.set_hw_info_locking(make_mem_info(1'000'000));
+ mt.on_configure(cfg);
+ // Zero implies infinity
+ EXPECT_EQ(mt.max_merge_memory_usage_bytes_locking(), 0);
+ EXPECT_EQ(throttler(0).getMetrics().merge_memory_limit.getLast(), 0);
+}
+
// TODO test message queue aborting (use rendezvous functionality--make guard)
} // namespace storage
diff --git a/storage/src/tests/storageserver/service_layer_error_listener_test.cpp b/storage/src/tests/storageserver/service_layer_error_listener_test.cpp
index edb13eea5af..63d8eec6dc3 100644
--- a/storage/src/tests/storageserver/service_layer_error_listener_test.cpp
+++ b/storage/src/tests/storageserver/service_layer_error_listener_test.cpp
@@ -40,7 +40,8 @@ struct Fixture {
vdstestlib::DirConfig config{getStandardConfig(true)};
TestServiceLayerApp app;
ServiceLayerComponent component{app.getComponentRegister(), "dummy"};
- MergeThrottler merge_throttler{*config_from<StorServerConfig>(config::ConfigUri(config.getConfigId())), app.getComponentRegister()};
+ MergeThrottler merge_throttler{*config_from<StorServerConfig>(config::ConfigUri(config.getConfigId())),
+ app.getComponentRegister(), vespalib::HwInfo()};
TestShutdownListener shutdown_listener;
ServiceLayerErrorListener error_listener{component, merge_throttler};
diff --git a/storage/src/vespa/storage/config/stor-server.def b/storage/src/vespa/storage/config/stor-server.def
index dcce3079c68..0d877d33277 100644
--- a/storage/src/vespa/storage/config/stor-server.def
+++ b/storage/src/vespa/storage/config/stor-server.def
@@ -46,6 +46,38 @@ merge_throttling_policy.min_window_size int default=16
merge_throttling_policy.max_window_size int default=128
merge_throttling_policy.window_size_increment double default=2.0
+## If positive, nodes enforce a soft limit on the estimated amount of memory that
+## can be used by merges touching a particular content node. If a merge arriving
+## at the node would violate the soft limit, it is bounced with BUSY.
+## Note that this also counts merges where the node is part of the source-only set,
+## since these use memory when/if data is read from the local node.
+##
+## Semantics:
+## > 0 explicit limit in bytes
+## == 0 limit automatically deduced by content node
+## < 0 unlimited (legacy behavior)
+merge_throttling_memory_limit.max_usage_bytes long default=-1
+
+## If merge_throttling_memory_limit.max_usage_bytes == 0, this factor is used
+## as a multiplier to automatically deduce a memory limit for merges on the
+## content node. Note that the result of this multiplication is capped at both
+## ends by the auto_(lower|upper)_bound_bytes config values.
+##
+## Default: 3% of physical memory
+merge_throttling_memory_limit.auto_phys_mem_scale_factor double default=0.03
+
+## The absolute minimum memory limit that can be set when automatically
+## deducing the limit from physical memory on the node.
+##
+## Default: 128MiB
+merge_throttling_memory_limit.auto_lower_bound_bytes long default=134217728
+
+## The absolute maximum memory limit that can be set when automatically
+## deducing the limit from physical memory on the node.
+##
+## Default: 2GiB
+merge_throttling_memory_limit.auto_upper_bound_bytes long default=2147483648
+
## If the persistence provider indicates that it has exhausted one or more
## of its internal resources during a mutating operation, new merges will
## be bounced for this duration. Not allowing further merges helps take
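For reference, a minimal standalone sketch (not part of the patch) of how these config values are expected to resolve into an effective limit, mirroring the on_configure()/deduced_memory_limit() logic further down in this diff. The struct and function names below are illustrative only.

#include <algorithm>
#include <cstdint>

struct MemoryLimitCfg {
    int64_t max_usage_bytes        = -1;           // > 0 explicit, == 0 auto-deduced, < 0 unlimited
    double  auto_phys_mem_scale    = 0.03;         // 3% of physical memory
    int64_t auto_lower_bound_bytes = 128ll << 20;  // 128 MiB
    int64_t auto_upper_bound_bytes = 2ll << 30;    // 2 GiB
};

// Returns 0 to signal "unlimited", matching the throttler's internal convention.
uint64_t effective_merge_memory_limit(const MemoryLimitCfg& cfg, uint64_t phys_mem_bytes) {
    if (cfg.max_usage_bytes > 0) {
        return static_cast<uint64_t>(cfg.max_usage_bytes); // explicit limit
    }
    if ((cfg.max_usage_bytes == 0) && (phys_mem_bytes > 0)) {
        // Auto-deduce: scale physical memory, then clamp to [lower bound, upper bound].
        const auto lo = static_cast<uint64_t>(std::max<int64_t>(cfg.auto_lower_bound_bytes, 1));
        const auto hi = std::max(static_cast<uint64_t>(std::max<int64_t>(cfg.auto_upper_bound_bytes, 1)), lo);
        const auto scaled = static_cast<uint64_t>(static_cast<double>(phys_mem_bytes) *
                                                  std::max(cfg.auto_phys_mem_scale, 0.0));
        return std::min(std::max(scaled, lo), hi);
    }
    return 0; // negative limit (or unknown physical memory) ==> unlimited
}

Under the defaults, a node with 64 GiB of physical memory would get an auto-deduced limit of roughly 0.03 * 64 GiB ≈ 1.9 GiB, which lies inside the [128 MiB, 2 GiB] clamp.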
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
index 7ce034abfee..68f305fe94e 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
@@ -9,6 +9,7 @@
#include <vespa/storageframework/generic/clock/clock.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
+#include <vespa/vespalib/stllike/hash_set.h>
#include <array>
#include <vespa/log/bufferedlogger.h>
@@ -121,7 +122,7 @@ MergeOperation::onStart(DistributorStripeMessageSender& sender)
}
const lib::ClusterState& clusterState(_bucketSpace->getClusterState());
- std::vector<std::unique_ptr<BucketCopy> > newCopies;
+ std::vector<std::unique_ptr<BucketCopy>> newCopies;
std::vector<MergeMetaData> nodes;
for (uint16_t node : getNodes()) {
@@ -139,6 +140,8 @@ MergeOperation::onStart(DistributorStripeMessageSender& sender)
_mnodes.emplace_back(node._nodeIndex, node._sourceOnly);
}
+ const auto estimated_memory_footprint = estimate_merge_memory_footprint_upper_bound(nodes);
+
if (_mnodes.size() > 1) {
auto msg = std::make_shared<api::MergeBucketCommand>(getBucket(), _mnodes,
_manager->operation_context().generate_unique_timestamp(),
@@ -153,6 +156,7 @@ MergeOperation::onStart(DistributorStripeMessageSender& sender)
} else {
msg->set_use_unordered_forwarding(true);
}
+ msg->set_estimated_memory_footprint(estimated_memory_footprint);
LOG(debug, "Sending %s to storage node %u", msg->toString().c_str(), _mnodes[0].index);
@@ -367,6 +371,40 @@ bool MergeOperation::all_involved_nodes_support_unordered_merge_chaining() const
return true;
}
+uint32_t MergeOperation::estimate_merge_memory_footprint_upper_bound(const std::vector<MergeMetaData>& nodes) const noexcept {
+ vespalib::hash_set<uint32_t> seen_checksums;
+ uint32_t worst_case_footprint_across_nodes = 0;
+ uint32_t largest_single_doc_contribution = 0;
+ for (const auto& node : nodes) {
+ if (!seen_checksums.contains(node.checksum())) {
+ seen_checksums.insert(node.checksum());
+ const uint32_t replica_size = node._copy->getUsedFileSize();
+ // We don't know the overlap of document sets across replicas, so we have to assume the
+ // worst and treat the replicas as entirely disjoint. In this case, the _sum_ of all disjoint
+ // replica group footprints gives us the upper bound.
+ // Note: saturate-on-overflow check requires all types to be _unsigned_ to work.
+ if (worst_case_footprint_across_nodes + replica_size >= worst_case_footprint_across_nodes) {
+ worst_case_footprint_across_nodes += replica_size;
+ } else {
+ worst_case_footprint_across_nodes = UINT32_MAX;
+ }
+            // Special case to ensure that a replica containing a single massive document
+            // is not capped by the max configured bucket size.
+ if (node._copy->getDocumentCount() == 1) {
+ largest_single_doc_contribution = std::max(replica_size, largest_single_doc_contribution);
+ }
+ }
+ }
+ // We know that simply adding up replica sizes is likely to massively over-count in the common
+ // case (due to the intersection set between replicas rarely being empty), so we cap it by the
+ // max expected merge chunk size (which is expected to be configured equal to the split limit).
+ // _Except_ if we have single-doc replicas, as these are known not to overlap, and we know that
+ // the worst case must be the max of the chunk size and the biggest single doc size.
+ const uint32_t expected_max_merge_chunk_size = _manager->operation_context().distributor_config().getSplitSize();
+ return std::max(std::min(worst_case_footprint_across_nodes, expected_max_merge_chunk_size),
+ largest_single_doc_contribution);
+}
+
MergeBucketMetricSet*
MergeOperation::get_merge_metrics()
{
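As a quick sanity check of the formula above, here is a worked example with illustrative numbers (not taken from the patch):

// Three replicas with distinct checksums of 20, 24 and 30 MiB, a split limit of
// 32 MiB, and no single-document replicas:
//   disjoint sum    = 20 + 24 + 30 = 74 MiB (worst case across nodes)
//   capped estimate = max(min(74, 32), 0)  = 32 MiB
// If one replica instead holds a single 48 MiB document, the single-doc floor applies:
//   capped estimate = max(min(74, 32), 48) = 48 MiB
#include <algorithm>
#include <cstdint>

constexpr uint32_t MiB = 1024u * 1024u;

constexpr uint32_t footprint_estimate(uint32_t disjoint_sum, uint32_t split_limit,
                                      uint32_t largest_single_doc) {
    return std::max(std::min(disjoint_sum, split_limit), largest_single_doc);
}

static_assert(footprint_estimate(74 * MiB, 32 * MiB,  0 * MiB) == 32 * MiB);
static_assert(footprint_estimate(74 * MiB, 32 * MiB, 48 * MiB) == 48 * MiB);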
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
index ff21e3d1594..8f54aea33be 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
@@ -63,8 +63,9 @@ private:
void deleteSourceOnlyNodes(const BucketDatabase::Entry& currentState,
DistributorStripeMessageSender& sender);
- bool is_global_bucket_merge() const noexcept;
- bool all_involved_nodes_support_unordered_merge_chaining() const noexcept;
+ [[nodiscard]] bool is_global_bucket_merge() const noexcept;
+ [[nodiscard]] bool all_involved_nodes_support_unordered_merge_chaining() const noexcept;
+ [[nodiscard]] uint32_t estimate_merge_memory_footprint_upper_bound(const std::vector<MergeMetaData>& nodes) const noexcept;
MergeBucketMetricSet* get_merge_metrics();
};
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.cpp b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
index 4cc2a7a89ab..b341c676fc9 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.cpp
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
@@ -29,7 +29,7 @@ namespace {
struct NodeComparator {
bool operator()(const api::MergeBucketCommand::Node& a,
- const api::MergeBucketCommand::Node& b) const
+ const api::MergeBucketCommand::Node& b) const noexcept
{
return a.index < b.index;
}
@@ -41,6 +41,7 @@ MergeThrottler::ChainedMergeState::ChainedMergeState()
: _cmd(),
_cmdString(),
_clusterStateVersion(0),
+ _estimated_memory_usage(0),
_inCycle(false),
_executingLocally(false),
_unwinding(false),
@@ -52,6 +53,7 @@ MergeThrottler::ChainedMergeState::ChainedMergeState(const api::StorageMessage::
: _cmd(cmd),
_cmdString(cmd->toString()),
_clusterStateVersion(static_cast<const api::MergeBucketCommand&>(*cmd).getClusterStateVersion()),
+ _estimated_memory_usage(static_cast<const api::MergeBucketCommand&>(*cmd).estimated_memory_footprint()),
_inCycle(false),
_executingLocally(executing),
_unwinding(false),
@@ -65,6 +67,9 @@ MergeThrottler::Metrics::Metrics(metrics::MetricSet* owner)
averageQueueWaitingTime("averagequeuewaitingtime", {}, "Average time a merge spends in the throttler queue", this),
queueSize("queuesize", {}, "Length of merge queue", this),
active_window_size("active_window_size", {}, "Number of merges active within the pending window size", this),
+ estimated_merge_memory_usage("estimated_merge_memory_usage", {}, "An estimated upper bound of the "
+ "memory usage (in bytes) of the merges currently in the active window", this),
+ merge_memory_limit("merge_memory_limit", {}, "The active soft limit (in bytes) for memory used by merge operations on this node", this),
bounced_due_to_back_pressure("bounced_due_to_back_pressure", {}, "Number of merges bounced due to resource exhaustion back-pressure", this),
chaining("mergechains", this),
local("locallyexecutedmerges", this)
@@ -180,9 +185,11 @@ MergeThrottler::MergeNodeSequence::chain_contains_this_node() const noexcept
MergeThrottler::MergeThrottler(
const StorServerConfig& bootstrap_config,
- StorageComponentRegister& compReg)
+ StorageComponentRegister& comp_reg,
+ const vespalib::HwInfo& hw_info)
: StorageLink("Merge Throttler"),
framework::HtmlStatusReporter("merges", "Merge Throttler"),
+ _hw_info(hw_info),
_merges(),
_queue(),
_maxQueueSize(1024),
@@ -191,11 +198,13 @@ MergeThrottler::MergeThrottler(
_messageLock(),
_stateLock(),
_metrics(std::make_unique<Metrics>()),
- _component(compReg, "mergethrottler"),
+ _component(comp_reg, "mergethrottler"),
_thread(),
_rendezvous(RendezvousState::NONE),
_throttle_until_time(),
_backpressure_duration(std::chrono::seconds(30)),
+ _active_merge_memory_used_bytes(0),
+ _max_merge_memory_usage_bytes(0), // 0 ==> unlimited
_use_dynamic_throttling(false),
_disable_queue_limits_for_chained_merges(false),
_closing(false)
@@ -244,6 +253,14 @@ MergeThrottler::on_configure(const StorServerConfig& new_config)
_backpressure_duration = std::chrono::duration_cast<std::chrono::steady_clock::duration>(
std::chrono::duration<double>(new_config.resourceExhaustionMergeBackPressureDurationSecs));
_disable_queue_limits_for_chained_merges = new_config.disableQueueLimitsForChainedMerges;
+ if (new_config.mergeThrottlingMemoryLimit.maxUsageBytes > 0) {
+ _max_merge_memory_usage_bytes = static_cast<size_t>(new_config.mergeThrottlingMemoryLimit.maxUsageBytes);
+ } else if ((new_config.mergeThrottlingMemoryLimit.maxUsageBytes == 0) && (_hw_info.memory().sizeBytes() > 0)) {
+ _max_merge_memory_usage_bytes = deduced_memory_limit(new_config);
+ } else {
+ _max_merge_memory_usage_bytes = 0; // Implies unlimited
+ }
+ _metrics->merge_memory_limit.set(static_cast<int64_t>(_max_merge_memory_usage_bytes));
}
MergeThrottler::~MergeThrottler()
@@ -373,16 +390,19 @@ MergeThrottler::forwardCommandToNode(
fwdMerge->setPriority(mergeCmd.getPriority());
fwdMerge->setTimeout(mergeCmd.getTimeout());
fwdMerge->set_use_unordered_forwarding(mergeCmd.use_unordered_forwarding());
+ fwdMerge->set_estimated_memory_footprint(mergeCmd.estimated_memory_footprint());
msgGuard.sendUp(fwdMerge);
}
void
MergeThrottler::removeActiveMerge(ActiveMergeMap::iterator mergeIter)
{
- LOG(debug, "Removed merge for %s from internal state",
- mergeIter->first.toString().c_str());
+ LOG(debug, "Removed merge for %s from internal state", mergeIter->first.toString().c_str());
+ assert(_active_merge_memory_used_bytes >= mergeIter->second._estimated_memory_usage);
+ _active_merge_memory_used_bytes -= mergeIter->second._estimated_memory_usage;
_merges.erase(mergeIter);
update_active_merge_window_size_metric();
+ update_active_merge_memory_usage_metric();
}
api::StorageMessage::SP
@@ -714,6 +734,21 @@ bool MergeThrottler::allow_merge_despite_full_window(const api::MergeBucketComma
return !_use_dynamic_throttling;
}
+bool MergeThrottler::accepting_merge_is_within_memory_limits(const api::MergeBucketCommand& cmd) const noexcept {
+ // Soft-limit on expected memory usage, but always let at least one merge into the active window.
+ if ((_max_merge_memory_usage_bytes > 0) && !_merges.empty()) {
+ size_t future_usage = _active_merge_memory_used_bytes + cmd.estimated_memory_footprint();
+ if (future_usage > _max_merge_memory_usage_bytes) {
+ LOG(spam, "Adding merge with memory footprint %u would exceed node soft limit of %zu. "
+ "Current memory usage is %zu, future usage would have been %zu",
+ cmd.estimated_memory_footprint(), _max_merge_memory_usage_bytes,
+ _active_merge_memory_used_bytes, future_usage);
+ return false;
+ }
+ }
+ return true;
+}
+
bool MergeThrottler::may_allow_into_queue(const api::MergeBucketCommand& cmd) const noexcept {
// We cannot let forwarded unordered merges fall into the queue, as that might lead to a deadlock.
// Consider the following scenario, with two nodes C0 and C1, each with a low window size of 1 (low
@@ -761,7 +796,10 @@ MergeThrottler::handleMessageDown(
if (isMergeAlreadyKnown(msg)) {
processCycledMergeCommand(msg, msgGuard);
- } else if (canProcessNewMerge() || allow_merge_despite_full_window(mergeCmd)) {
+ } else if (accepting_merge_is_within_memory_limits(mergeCmd)
+ && (canProcessNewMerge()
+ || allow_merge_despite_full_window(mergeCmd)))
+ {
processNewMergeCommand(msg, msgGuard);
} else if (may_allow_into_queue(mergeCmd)) {
enqueue_merge_for_later_processing(msg, msgGuard);
@@ -864,9 +902,10 @@ MergeThrottler::processNewMergeCommand(
assert(_merges.find(mergeCmd.getBucket()) == _merges.end());
auto state = _merges.emplace(mergeCmd.getBucket(), ChainedMergeState(msg)).first;
update_active_merge_window_size_metric();
+ _active_merge_memory_used_bytes += mergeCmd.estimated_memory_footprint();
+ update_active_merge_memory_usage_metric();
- LOG(debug, "Added merge %s to internal state",
- mergeCmd.toString().c_str());
+ LOG(debug, "Added merge %s to internal state", mergeCmd.toString().c_str());
DummyMbusRequest dummyMsg;
_throttlePolicy->processMessage(dummyMsg);
@@ -889,7 +928,7 @@ MergeThrottler::processNewMergeCommand(
} else {
if (!nodeSeq.isLastNode()) {
// When we're not the last node and haven't seen the merge before,
- // we cannot possible execute the merge yet. Forward to next.
+ // we cannot possibly execute the merge yet. Forward to next.
uint16_t nextNodeInChain = nodeSeq.getNextNodeInChain();
LOG(debug, "Forwarding merge %s to storage node %u",
mergeCmd.toString().c_str(), nextNodeInChain);
@@ -1291,17 +1330,52 @@ MergeThrottler::markActiveMergesAsAborted(uint32_t minimumStateVersion)
}
void
-MergeThrottler::set_disable_queue_limits_for_chained_merges(bool disable_limits) noexcept {
+MergeThrottler::set_disable_queue_limits_for_chained_merges_locking(bool disable_limits) noexcept {
std::lock_guard lock(_stateLock);
_disable_queue_limits_for_chained_merges = disable_limits;
}
void
+MergeThrottler::set_max_merge_memory_usage_bytes_locking(uint32_t max_memory_bytes) noexcept {
+ std::lock_guard lock(_stateLock);
+ _max_merge_memory_usage_bytes = max_memory_bytes;
+}
+
+uint32_t
+MergeThrottler::max_merge_memory_usage_bytes_locking() const noexcept {
+ std::lock_guard lock(_stateLock);
+ return _max_merge_memory_usage_bytes;
+}
+
+void
+MergeThrottler::set_hw_info_locking(const vespalib::HwInfo& hw_info) {
+ std::lock_guard lock(_stateLock);
+ _hw_info = hw_info;
+}
+
+size_t
+MergeThrottler::deduced_memory_limit(const StorServerConfig& cfg) const noexcept {
+ const auto min_limit = static_cast<size_t>(std::max(cfg.mergeThrottlingMemoryLimit.autoLowerBoundBytes, 1L));
+ const auto max_limit = std::max(static_cast<size_t>(std::max(cfg.mergeThrottlingMemoryLimit.autoUpperBoundBytes, 1L)), min_limit);
+ const auto mem_scale_factor = std::max(cfg.mergeThrottlingMemoryLimit.autoPhysMemScaleFactor, 0.0);
+
+ const auto node_mem = static_cast<double>(_hw_info.memory().sizeBytes());
+ const auto scaled_mem = static_cast<size_t>(node_mem * mem_scale_factor);
+
+ return std::min(std::max(scaled_mem, min_limit), max_limit);
+}
+
+void
MergeThrottler::update_active_merge_window_size_metric() noexcept {
_metrics->active_window_size.set(static_cast<int64_t>(_merges.size()));
}
void
+MergeThrottler::update_active_merge_memory_usage_metric() noexcept {
+ _metrics->estimated_merge_memory_usage.set(static_cast<int64_t>(_active_merge_memory_used_bytes));
+}
+
+void
MergeThrottler::print(std::ostream& out, bool /*verbose*/,
const std::string& /*indent*/) const
{
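The admission rule added above can be summarized with a small sketch (a simplification under assumed names, not the patch's API): the soft limit is ignored while the active window is empty, so a single oversized merge can always make progress.

#include <cstddef>
#include <cstdint>

bool admit_merge_into_window(size_t limit_bytes, size_t active_usage_bytes,
                             size_t active_merge_count, uint32_t incoming_footprint_bytes) {
    if (limit_bytes == 0 || active_merge_count == 0) {
        return true; // 0 ==> unlimited; an empty window always admits at least one merge
    }
    return active_usage_bytes + incoming_footprint_bytes <= limit_bytes;
}

For example, with a 500'000 byte limit and 400'000 bytes already active in the window, a merge estimated at 150'000 bytes is rejected here and instead queued or bounced with BUSY, while the same merge is admitted when the window is empty.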
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.h b/storage/src/vespa/storage/storageserver/mergethrottler.h
index 5362c2f6df8..a5559c159bf 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.h
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.h
@@ -1,26 +1,24 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
/**
- * @class storage::MergeThrottler
- * @ingroup storageserver
- *
- * @brief Throttler and forwarder of merge commands
+ * Throttler and forwarder of merge commands
*/
#pragma once
-#include <vespa/storage/config/config-stor-server.h>
-#include <vespa/storage/common/message_guard.h>
-#include <vespa/storage/common/storagelink.h>
-#include <vespa/storage/common/storagecomponent.h>
-#include <vespa/storageframework/generic/status/htmlstatusreporter.h>
-#include <vespa/storageframework/generic/thread/runnable.h>
-#include <vespa/storageapi/message/bucket.h>
+#include <vespa/config/helper/ifetchercallback.h>
#include <vespa/document/bucket/bucket.h>
+#include <vespa/metrics/countmetric.h>
#include <vespa/metrics/metricset.h>
+#include <vespa/metrics/metrictimer.h>
#include <vespa/metrics/summetric.h>
-#include <vespa/metrics/countmetric.h>
#include <vespa/metrics/valuemetric.h>
-#include <vespa/metrics/metrictimer.h>
-#include <vespa/config/helper/ifetchercallback.h>
+#include <vespa/storage/common/message_guard.h>
+#include <vespa/storage/common/storagecomponent.h>
+#include <vespa/storage/common/storagelink.h>
+#include <vespa/storage/config/config-stor-server.h>
+#include <vespa/storageapi/message/bucket.h>
+#include <vespa/storageframework/generic/status/htmlstatusreporter.h>
+#include <vespa/storageframework/generic/thread/runnable.h>
+#include <vespa/vespalib/util/hw_info.h>
#include <chrono>
@@ -71,6 +69,8 @@ public:
metrics::DoubleAverageMetric averageQueueWaitingTime;
metrics::LongValueMetric queueSize;
metrics::LongValueMetric active_window_size;
+ metrics::LongValueMetric estimated_merge_memory_usage;
+ metrics::LongValueMetric merge_memory_limit;
metrics::LongCountMetric bounced_due_to_back_pressure;
MergeOperationMetrics chaining;
MergeOperationMetrics local;
@@ -113,6 +113,7 @@ private:
api::StorageMessage::SP _cmd;
std::string _cmdString; // For being able to print message even when we don't own it
uint64_t _clusterStateVersion;
+ uint32_t _estimated_memory_usage;
bool _inCycle;
bool _executingLocally;
bool _unwinding;
@@ -154,9 +155,7 @@ private:
// Use a set rather than a priority_queue, since we want to be
// able to iterate over the collection during status rendering
- using MergePriorityQueue = std::set<
- StablePriorityOrderingWrapper<api::StorageMessage::SP>
- >;
+ using MergePriorityQueue = std::set<StablePriorityOrderingWrapper<api::StorageMessage::SP>>;
enum class RendezvousState {
NONE,
@@ -165,32 +164,37 @@ private:
RELEASED
};
- ActiveMergeMap _merges;
- MergePriorityQueue _queue;
- size_t _maxQueueSize;
- std::unique_ptr<mbus::DynamicThrottlePolicy> _throttlePolicy;
- uint64_t _queueSequence; // TODO: move into a stable priority queue class
- mutable std::mutex _messageLock;
- std::condition_variable _messageCond;
- mutable std::mutex _stateLock;
+ vespalib::HwInfo _hw_info;
+ ActiveMergeMap _merges;
+ MergePriorityQueue _queue;
+ size_t _maxQueueSize;
+ std::unique_ptr<mbus::DynamicThrottlePolicy> _throttlePolicy;
+ uint64_t _queueSequence; // TODO: move into a stable priority queue class
+ mutable std::mutex _messageLock;
+ std::condition_variable _messageCond;
+ mutable std::mutex _stateLock;
// Messages pending to be processed by the worker thread
- std::vector<api::StorageMessage::SP> _messagesDown;
- std::vector<api::StorageMessage::SP> _messagesUp;
- std::unique_ptr<Metrics> _metrics;
- StorageComponent _component;
- std::unique_ptr<framework::Thread> _thread;
- RendezvousState _rendezvous;
+ std::vector<api::StorageMessage::SP> _messagesDown;
+ std::vector<api::StorageMessage::SP> _messagesUp;
+ std::unique_ptr<Metrics> _metrics;
+ StorageComponent _component;
+ std::unique_ptr<framework::Thread> _thread;
+ RendezvousState _rendezvous;
mutable std::chrono::steady_clock::time_point _throttle_until_time;
- std::chrono::steady_clock::duration _backpressure_duration;
- bool _use_dynamic_throttling;
- bool _disable_queue_limits_for_chained_merges;
- bool _closing;
+ std::chrono::steady_clock::duration _backpressure_duration;
+ size_t _active_merge_memory_used_bytes;
+ size_t _max_merge_memory_usage_bytes;
+ bool _use_dynamic_throttling;
+ bool _disable_queue_limits_for_chained_merges;
+ bool _closing;
public:
/**
* windowSizeIncrement used for allowing unit tests to start out with more
* than 1 as their window size.
*/
- MergeThrottler(const StorServerConfig& bootstrap_config, StorageComponentRegister&);
+ MergeThrottler(const StorServerConfig& bootstrap_config,
+ StorageComponentRegister& comp_reg,
+ const vespalib::HwInfo& hw_info);
~MergeThrottler() override;
/** Implements document::Runnable::run */
@@ -223,7 +227,10 @@ public:
// For unit testing only
const mbus::DynamicThrottlePolicy& getThrottlePolicy() const { return *_throttlePolicy; }
mbus::DynamicThrottlePolicy& getThrottlePolicy() { return *_throttlePolicy; }
- void set_disable_queue_limits_for_chained_merges(bool disable_limits) noexcept;
+ void set_disable_queue_limits_for_chained_merges_locking(bool disable_limits) noexcept;
+ void set_max_merge_memory_usage_bytes_locking(uint32_t max_memory_bytes) noexcept;
+ [[nodiscard]] uint32_t max_merge_memory_usage_bytes_locking() const noexcept;
+ void set_hw_info_locking(const vespalib::HwInfo& hw_info);
// For unit testing only
std::mutex& getStateLock() { return _stateLock; }
@@ -363,6 +370,7 @@ private:
[[nodiscard]] bool backpressure_mode_active_no_lock() const;
void backpressure_bounce_all_queued_merges(MessageGuard& guard);
[[nodiscard]] bool allow_merge_despite_full_window(const api::MergeBucketCommand& cmd) const noexcept;
+ [[nodiscard]] bool accepting_merge_is_within_memory_limits(const api::MergeBucketCommand& cmd) const noexcept;
[[nodiscard]] bool may_allow_into_queue(const api::MergeBucketCommand& cmd) const noexcept;
void sendReply(const api::MergeBucketCommand& cmd,
@@ -404,7 +412,10 @@ private:
void rejectOperationsInThreadQueue(MessageGuard&, uint32_t minimumStateVersion);
void markActiveMergesAsAborted(uint32_t minimumStateVersion);
+ [[nodiscard]] size_t deduced_memory_limit(const StorServerConfig& cfg) const noexcept;
+
void update_active_merge_window_size_metric() noexcept;
+ void update_active_merge_memory_usage_metric() noexcept;
// const function, but metrics are mutable
void updateOperationMetrics(
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.cpp b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
index 0cce2c27e95..7da75225b6c 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.cpp
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
@@ -30,8 +30,9 @@ ServiceLayerNode::ServiceLayerBootstrapConfigs::ServiceLayerBootstrapConfigs(Ser
ServiceLayerNode::ServiceLayerBootstrapConfigs&
ServiceLayerNode::ServiceLayerBootstrapConfigs::operator=(ServiceLayerBootstrapConfigs&&) noexcept = default;
-ServiceLayerNode::ServiceLayerNode(const config::ConfigUri & configUri,
+ServiceLayerNode::ServiceLayerNode(const config::ConfigUri& configUri,
ServiceLayerNodeContext& context,
+ const vespalib::HwInfo& hw_info,
ServiceLayerBootstrapConfigs bootstrap_configs,
ApplicationGenerationFetcher& generationFetcher,
spi::PersistenceProvider& persistenceProvider,
@@ -41,6 +42,7 @@ ServiceLayerNode::ServiceLayerNode(const config::ConfigUri & configUri,
_context(context),
_persistenceProvider(persistenceProvider),
_externalVisitors(externalVisitors),
+ _hw_info(hw_info),
_persistence_bootstrap_config(std::move(bootstrap_configs.persistence_cfg)),
_visitor_bootstrap_config(std::move(bootstrap_configs.visitor_cfg)),
_filestor_bootstrap_config(std::move(bootstrap_configs.filestor_cfg)),
@@ -172,7 +174,7 @@ ServiceLayerNode::createChain(IStorageChainBuilder &builder)
auto bouncer = std::make_unique<Bouncer>(compReg, bouncer_config());
_bouncer = bouncer.get();
builder.add(std::move(bouncer));
- auto merge_throttler_up = std::make_unique<MergeThrottler>(server_config(), compReg);
+ auto merge_throttler_up = std::make_unique<MergeThrottler>(server_config(), compReg, _hw_info);
_merge_throttler = merge_throttler_up.get();
builder.add(std::move(merge_throttler_up));
auto bucket_ownership_handler = std::make_unique<ChangedBucketOwnershipHandler>(*_persistence_bootstrap_config, compReg);
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.h b/storage/src/vespa/storage/storageserver/servicelayernode.h
index ae39bb0805e..bea09a1c9ce 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.h
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.h
@@ -12,6 +12,7 @@
#include <vespa/storage/common/visitorfactory.h>
#include <vespa/storage/visiting/config-stor-visitor.h>
#include <vespa/storage/visiting/visitormessagesessionfactory.h>
+#include <vespa/vespalib/util/hw_info.h>
namespace storage {
@@ -39,6 +40,7 @@ private:
ServiceLayerNodeContext& _context;
spi::PersistenceProvider& _persistenceProvider;
VisitorFactory::Map _externalVisitors;
+ vespalib::HwInfo _hw_info;
std::unique_ptr<PersistenceConfig> _persistence_bootstrap_config;
std::unique_ptr<StorVisitorConfig> _visitor_bootstrap_config;
std::unique_ptr<StorFilestorConfig> _filestor_bootstrap_config;
@@ -66,8 +68,9 @@ public:
ServiceLayerBootstrapConfigs& operator=(ServiceLayerBootstrapConfigs&&) noexcept;
};
- ServiceLayerNode(const config::ConfigUri & configUri,
+ ServiceLayerNode(const config::ConfigUri& configUri,
ServiceLayerNodeContext& context,
+ const vespalib::HwInfo& hw_info,
ServiceLayerBootstrapConfigs bootstrap_configs,
ApplicationGenerationFetcher& generationFetcher,
spi::PersistenceProvider& persistenceProvider,
diff --git a/storage/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto b/storage/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto
index 850b5db5c98..a32fbc3e4de 100644
--- a/storage/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto
+++ b/storage/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto
@@ -33,12 +33,13 @@ message MergeNode {
}
message MergeBucketRequest {
- Bucket bucket = 1;
- uint32 cluster_state_version = 2;
- uint64 max_timestamp = 3;
- repeated MergeNode nodes = 4;
- repeated uint32 node_chain = 5;
- bool unordered_forwarding = 6;
+ Bucket bucket = 1;
+ uint32 cluster_state_version = 2;
+ uint64 max_timestamp = 3;
+ repeated MergeNode nodes = 4;
+ repeated uint32 node_chain = 5;
+ bool unordered_forwarding = 6;
+ uint32 estimated_memory_footprint = 7;
}
message MergeBucketResponse {
diff --git a/storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp b/storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
index af62ec2b418..efbe8c9b42d 100644
--- a/storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
+++ b/storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
@@ -801,6 +801,7 @@ void ProtocolSerialization7::onEncode(GBBuf& buf, const api::MergeBucketCommand&
req.set_max_timestamp(msg.getMaxTimestamp());
req.set_cluster_state_version(msg.getClusterStateVersion());
req.set_unordered_forwarding(msg.use_unordered_forwarding());
+ req.set_estimated_memory_footprint(msg.estimated_memory_footprint());
for (uint16_t chain_node : msg.getChain()) {
req.add_node_chain(chain_node);
}
@@ -823,6 +824,7 @@ api::StorageCommand::UP ProtocolSerialization7::onDecodeMergeBucketCommand(BBuf&
}
cmd->setChain(std::move(chain));
cmd->set_use_unordered_forwarding(req.unordered_forwarding());
+ cmd->set_estimated_memory_footprint(req.estimated_memory_footprint());
return cmd;
});
}
diff --git a/storage/src/vespa/storageapi/message/bucket.cpp b/storage/src/vespa/storageapi/message/bucket.cpp
index 49295f54891..499d2f4abe2 100644
--- a/storage/src/vespa/storageapi/message/bucket.cpp
+++ b/storage/src/vespa/storageapi/message/bucket.cpp
@@ -107,6 +107,7 @@ MergeBucketCommand::MergeBucketCommand(
_nodes(nodes),
_maxTimestamp(maxTimestamp),
_clusterStateVersion(clusterStateVersion),
+ _estimated_memory_footprint(0),
_chain(chain),
_use_unordered_forwarding(false)
{}
@@ -132,6 +133,9 @@ MergeBucketCommand::print(std::ostream& out, bool verbose, const std::string& in
if (_use_unordered_forwarding) {
out << " (unordered forwarding)";
}
+ if (_estimated_memory_footprint > 0) {
+ out << ", estimated memory footprint: " << _estimated_memory_footprint << " bytes";
+ }
out << ", reasons to start: " << _reason;
out << ")";
if (verbose) {
diff --git a/storage/src/vespa/storageapi/message/bucket.h b/storage/src/vespa/storageapi/message/bucket.h
index d1fa00619ae..4aa2ff8b0c1 100644
--- a/storage/src/vespa/storageapi/message/bucket.h
+++ b/storage/src/vespa/storageapi/message/bucket.h
@@ -118,6 +118,7 @@ private:
std::vector<Node> _nodes;
Timestamp _maxTimestamp;
uint32_t _clusterStateVersion;
+ uint32_t _estimated_memory_footprint;
std::vector<uint16_t> _chain;
bool _use_unordered_forwarding;
@@ -140,6 +141,12 @@ public:
}
[[nodiscard]] bool use_unordered_forwarding() const noexcept { return _use_unordered_forwarding; }
[[nodiscard]] bool from_distributor() const noexcept { return _chain.empty(); }
+ void set_estimated_memory_footprint(uint32_t footprint_bytes) noexcept {
+ _estimated_memory_footprint = footprint_bytes;
+ }
+ [[nodiscard]] uint32_t estimated_memory_footprint() const noexcept {
+ return _estimated_memory_footprint;
+ }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
DECLARE_STORAGECOMMAND(MergeBucketCommand, onMergeBucket)
};
diff --git a/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp b/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp
index 8940c2a320e..245afb1c774 100644
--- a/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp
+++ b/storageserver/src/vespa/storageserver/app/dummyservicelayerprocess.cpp
@@ -7,7 +7,7 @@ namespace storage {
// DummyServiceLayerProcess implementation
DummyServiceLayerProcess::DummyServiceLayerProcess(const config::ConfigUri & configUri)
- : ServiceLayerProcess(configUri)
+ : ServiceLayerProcess(configUri, vespalib::HwInfo())
{
}
diff --git a/storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp b/storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp
index bb284bfc108..ebf320352eb 100644
--- a/storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp
+++ b/storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp
@@ -31,7 +31,7 @@ bucket_db_options_from_config(const config::ConfigUri& config_uri) {
}
-ServiceLayerProcess::ServiceLayerProcess(const config::ConfigUri& configUri)
+ServiceLayerProcess::ServiceLayerProcess(const config::ConfigUri& configUri, const vespalib::HwInfo& hw_info)
: Process(configUri),
_externalVisitors(),
_persistence_cfg_handle(),
@@ -39,6 +39,7 @@ ServiceLayerProcess::ServiceLayerProcess(const config::ConfigUri& configUri)
_filestor_cfg_handle(),
_node(),
_storage_chain_builder(),
+ _hw_info(hw_info),
_context(std::make_unique<framework::defaultimplementation::RealClock>(),
bucket_db_options_from_config(configUri))
{
@@ -106,7 +107,8 @@ ServiceLayerProcess::createNode()
sbc.visitor_cfg = _visitor_cfg_handle->getConfig();
sbc.filestor_cfg = _filestor_cfg_handle->getConfig();
- _node = std::make_unique<ServiceLayerNode>(_configUri, _context, std::move(sbc), *this, getProvider(), _externalVisitors);
+ _node = std::make_unique<ServiceLayerNode>(_configUri, _context, _hw_info, std::move(sbc),
+ *this, getProvider(), _externalVisitors);
if (_storage_chain_builder) {
_node->set_storage_chain_builder(std::move(_storage_chain_builder));
}
diff --git a/storageserver/src/vespa/storageserver/app/servicelayerprocess.h b/storageserver/src/vespa/storageserver/app/servicelayerprocess.h
index dcc56f373c4..add5a38ca9d 100644
--- a/storageserver/src/vespa/storageserver/app/servicelayerprocess.h
+++ b/storageserver/src/vespa/storageserver/app/servicelayerprocess.h
@@ -7,6 +7,7 @@
#include <vespa/storage/common/visitorfactory.h>
#include <vespa/storage/storageserver/servicelayernodecontext.h>
#include <vespa/storage/visiting/config-stor-visitor.h>
+#include <vespa/vespalib/util/hw_info.h>
namespace config { class ConfigUri; }
@@ -29,14 +30,15 @@ private:
std::unique_ptr<config::ConfigHandle<StorVisitorConfig>> _visitor_cfg_handle;
std::unique_ptr<config::ConfigHandle<StorFilestorConfig>> _filestor_cfg_handle;
- std::unique_ptr<ServiceLayerNode> _node;
+ std::unique_ptr<ServiceLayerNode> _node;
std::unique_ptr<IStorageChainBuilder> _storage_chain_builder;
protected:
+ vespalib::HwInfo _hw_info;
ServiceLayerNodeContext _context;
public:
- explicit ServiceLayerProcess(const config::ConfigUri & configUri);
+ ServiceLayerProcess(const config::ConfigUri & configUri, const vespalib::HwInfo& hw_info);
~ServiceLayerProcess() override;
void shutdown() override;
diff --git a/vespalib/src/tests/net/tls/openssl_impl/openssl_impl_test.cpp b/vespalib/src/tests/net/tls/openssl_impl/openssl_impl_test.cpp
index a75c7dff150..6d5c5fa6308 100644
--- a/vespalib/src/tests/net/tls/openssl_impl/openssl_impl_test.cpp
+++ b/vespalib/src/tests/net/tls/openssl_impl/openssl_impl_test.cpp
@@ -455,8 +455,8 @@ struct CertFixture : Fixture {
{}
~CertFixture();
- CertKeyWrapper create_ca_issued_peer_cert(const std::vector<vespalib::string>& common_names,
- const std::vector<vespalib::string>& sans) {
+ static X509Certificate::SubjectInfo make_subject_info(const std::vector<vespalib::string>& common_names,
+ const std::vector<vespalib::string>& sans) {
auto dn = X509Certificate::DistinguishedName()
.country("US").state("CA").locality("Sunnyvale")
.organization("Wile E. Coyote, Ltd.")
@@ -468,12 +468,27 @@ struct CertFixture : Fixture {
for (auto& san : sans) {
subject.add_subject_alt_name(san);
}
+ return subject;
+ }
+
+ CertKeyWrapper create_ca_issued_peer_cert(const std::vector<vespalib::string>& common_names,
+ const std::vector<vespalib::string>& sans) const {
+ auto subject = make_subject_info(common_names, sans);
auto key = PrivateKey::generate_p256_ec_key();
auto params = X509Certificate::Params::issued_by(std::move(subject), key, root_ca.cert, root_ca.key);
auto cert = X509Certificate::generate_from(std::move(params));
return {std::move(cert), std::move(key)};
}
+ CertKeyWrapper create_self_signed_peer_cert(const std::vector<vespalib::string>& common_names,
+ const std::vector<vespalib::string>& sans) const {
+ auto subject = make_subject_info(common_names, sans);
+ auto key = PrivateKey::generate_p256_ec_key();
+ auto params = X509Certificate::Params::self_signed(std::move(subject), key);
+ auto cert = X509Certificate::generate_from(std::move(params));
+ return {std::move(cert), std::move(key)};
+ }
+
static std::unique_ptr<OpenSslCryptoCodecImpl> create_openssl_codec_with_authz_mode(
const TransportSecurityOptions& opts,
std::shared_ptr<CertificateVerificationCallback> cert_verify_callback,
@@ -663,6 +678,34 @@ TEST_F("Only DNS and URI SANs are enumerated", CertFixture) {
EXPECT_EQUAL(0u, server_cb->creds.uri_sans.size());
}
+// A server must only trust the actual verified peer certificate, not any other random
+// certificate that the client decides to include in its certificate chain. See CVE-2023-2422.
+// Note: this is a preemptive test; we are not--and have never been--vulnerable to this issue.
+TEST_F("Certificate credential extraction is not vulnerable to CVE-2023-2422", CertFixture) {
+ auto good_ck = f.create_ca_issued_peer_cert({}, {{"DNS:legit.example.com"}});
+ auto evil_ck = f.create_self_signed_peer_cert({"rudolf.example.com"}, {{"DNS:blodstrupmoen.example.com"}});
+
+ auto ts_params = TransportSecurityOptions::Params().
+ ca_certs_pem(f.root_ca.cert->to_pem()).
+ // Concatenate CA-signed good cert with self-signed cert with different credentials.
+ // We should only ever look at the good cert.
+ cert_chain_pem(good_ck.cert->to_pem() + evil_ck.cert->to_pem()).
+ private_key_pem(good_ck.key->private_to_pem() + evil_ck.key->private_to_pem()).
+ authorized_peers(AuthorizedPeers::allow_all_authenticated());
+
+ f.client = f.create_openssl_codec(TransportSecurityOptions(std::move(ts_params)),
+ std::make_shared<PrintingCertificateCallback>(),
+ CryptoCodec::Mode::Client);
+ auto server_cb = std::make_shared<MockCertificateCallback>();
+ f.reset_server_with_cert_opts(good_ck, server_cb);
+ ASSERT_TRUE(f.handshake());
+
+ auto& creds = server_cb->creds;
+ EXPECT_EQUAL("", creds.common_name);
+ ASSERT_EQUAL(1u, creds.dns_sans.size());
+ EXPECT_EQUAL("legit.example.com", creds.dns_sans[0]);
+}
+
// We don't test too many combinations of peer policies here, only that
// the wiring is set up. Verification logic is tested elsewhere.
diff --git a/vespalib/src/vespa/vespalib/btree/btreenode.h b/vespalib/src/vespa/vespalib/btree/btreenode.h
index f7e0b535b33..7a4fa8030d3 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenode.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenode.h
@@ -39,7 +39,7 @@ public:
static constexpr uint8_t LEAF_LEVEL = 0;
protected:
uint16_t _validSlots;
- BTreeNode(uint8_t level) noexcept
+ explicit BTreeNode(uint8_t level) noexcept
: _level(level),
_isFrozen(false),
_validSlots(0)
@@ -161,7 +161,7 @@ class BTreeNodeAggregatedWrap<NoAggregated>
static NoAggregated _instance;
public:
- BTreeNodeAggregatedWrap() noexcept {}
+ BTreeNodeAggregatedWrap() noexcept = default;
NoAggregated &getAggregated() { return _instance; }
const NoAggregated &getAggregated() const { return _instance; }
@@ -174,7 +174,7 @@ template <typename KeyT, uint32_t NumSlots>
class BTreeNodeT : public BTreeNode {
protected:
KeyT _keys[NumSlots];
- BTreeNodeT(uint8_t level) noexcept
+ explicit BTreeNodeT(uint8_t level) noexcept
: BTreeNode(level),
_keys()
{}
@@ -247,7 +247,7 @@ public:
using DataWrapType::setData;
using DataWrapType::copyData;
protected:
- BTreeNodeTT(uint8_t level) noexcept
+ explicit BTreeNodeTT(uint8_t level) noexcept
: ParentType(level),
DataWrapType()
{}
@@ -380,54 +380,17 @@ public:
void cleanFrozen();
template <typename NodeStoreType, typename FunctionType>
- void foreach_key(NodeStoreType &store, FunctionType func) const {
- const BTreeNode::ChildRef *it = this->_data;
- const BTreeNode::ChildRef *ite = it + _validSlots;
- if (this->getLevel() > 1u) {
- for (; it != ite; ++it) {
- store.mapInternalRef(it->load_acquire())->foreach_key(store, func);
- }
- } else {
- for (; it != ite; ++it) {
- store.mapLeafRef(it->load_acquire())->foreach_key(func);
- }
- }
- }
+ void foreach_key(NodeStoreType &store, FunctionType func) const;
/**
* Call func with leaf entry key value as argument for all leaf entries in subtrees
* for children [start_idx, end_idx).
*/
template <typename NodeStoreType, typename FunctionType>
- void foreach_key_range(NodeStoreType &store, uint32_t start_idx, uint32_t end_idx, FunctionType func) const {
- const BTreeNode::ChildRef *it = this->_data;
- const BTreeNode::ChildRef *ite = it + end_idx;
- it += start_idx;
- if (this->getLevel() > 1u) {
- for (; it != ite; ++it) {
- store.mapInternalRef(it->load_acquire())->foreach_key(store, func);
- }
- } else {
- for (; it != ite; ++it) {
- store.mapLeafRef(it->load_acquire())->foreach_key(func);
- }
- }
- }
+ void foreach_key_range(NodeStoreType &store, uint32_t start_idx, uint32_t end_idx, FunctionType func) const;
template <typename NodeStoreType, typename FunctionType>
- void foreach(NodeStoreType &store, FunctionType func) const {
- const BTreeNode::ChildRef *it = this->_data;
- const BTreeNode::ChildRef *ite = it + _validSlots;
- if (this->getLevel() > 1u) {
- for (; it != ite; ++it) {
- store.mapInternalRef(it->load_acquire())->foreach(store, func);
- }
- } else {
- for (; it != ite; ++it) {
- store.mapLeafRef(it->load_acquire())->foreach(func);
- }
- }
- }
+ void foreach(NodeStoreType &store, FunctionType func) const;
};
template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots = 16>
@@ -490,44 +453,23 @@ public:
const DataT &getLastData() const { return this->getData(validSlots() - 1); }
void writeData(uint32_t idx, const DataT &data) { this->setData(idx, data); }
- uint32_t validLeaves() const { return validSlots(); }
+ uint32_t validLeaves() const noexcept { return validSlots(); }
template <typename FunctionType>
- void foreach_key(FunctionType func) const {
- const KeyT *it = _keys;
- const KeyT *ite = it + _validSlots;
- for (; it != ite; ++it) {
- func(*it);
- }
- }
+ void foreach_key(FunctionType func) const;
/**
* Call func with leaf entry key value as argument for leaf entries [start_idx, end_idx).
*/
template <typename FunctionType>
- void foreach_key_range(uint32_t start_idx, uint32_t end_idx, FunctionType func) const {
- const KeyT *it = _keys;
- const KeyT *ite = it + end_idx;
- it += start_idx;
- for (; it != ite; ++it) {
- func(*it);
- }
- }
+ void foreach_key_range(uint32_t start_idx, uint32_t end_idx, FunctionType func) const;
template <typename FunctionType>
- void foreach(FunctionType func) const {
- const KeyT *it = _keys;
- const KeyT *ite = it + _validSlots;
- uint32_t idx = 0;
- for (; it != ite; ++it) {
- func(*it, this->getData(idx++));
- }
- }
+ void foreach(FunctionType func) const;
};
-template <typename KeyT, typename DataT, typename AggrT,
- uint32_t NumSlots = 16>
+template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots = 16>
class BTreeLeafNodeTemp : public BTreeLeafNode<KeyT, DataT, AggrT, NumSlots>
{
public:
diff --git a/vespalib/src/vespa/vespalib/btree/btreenode.hpp b/vespalib/src/vespa/vespalib/btree/btreenode.hpp
index ab33fd2c059..12b5c985ca6 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenode.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreenode.hpp
@@ -16,7 +16,7 @@ private:
uint32_t _median;
bool _medianBumped;
public:
- SplitInsertHelper(uint32_t idx, uint32_t validSlots) :
+ SplitInsertHelper(uint32_t idx, uint32_t validSlots) noexcept :
_idx(idx),
_median(validSlots / 2),
_medianBumped(false)
@@ -26,8 +26,8 @@ public:
_medianBumped = true;
}
}
- uint32_t getMedian() const { return _median; }
- bool insertInSplitNode() const {
+ uint32_t getMedian() const noexcept { return _median; }
+ bool insertInSplitNode() const noexcept {
if (_median >= _idx && !_medianBumped) {
return false;
}
@@ -361,6 +361,62 @@ BTreeInternalNode<KeyT, AggrT, NumSlots>::cleanFrozen()
_validLeaves = 0;
}
+template <typename KeyT, typename AggrT, uint32_t NumSlots>
+template <typename NodeStoreType, typename FunctionType>
+void
+BTreeInternalNode<KeyT, AggrT, NumSlots>::foreach_key(NodeStoreType &store, FunctionType func) const {
+ const BTreeNode::ChildRef *it = this->_data;
+ const BTreeNode::ChildRef *ite = it + _validSlots;
+ if (this->getLevel() > 1u) {
+ for (; it != ite; ++it) {
+ store.mapInternalRef(it->load_acquire())->foreach_key(store, func);
+ }
+ } else {
+ for (; it != ite; ++it) {
+ store.mapLeafRef(it->load_acquire())->foreach_key(func);
+ }
+ }
+}
+
+/**
+ * Call func with leaf entry key value as argument for all leaf entries in subtrees
+ * for children [start_idx, end_idx).
+ */
+template <typename KeyT, typename AggrT, uint32_t NumSlots>
+template <typename NodeStoreType, typename FunctionType>
+void
+BTreeInternalNode<KeyT, AggrT, NumSlots>::foreach_key_range(NodeStoreType &store, uint32_t start_idx, uint32_t end_idx, FunctionType func) const {
+ const BTreeNode::ChildRef *it = this->_data;
+ const BTreeNode::ChildRef *ite = it + end_idx;
+ it += start_idx;
+ if (this->getLevel() > 1u) {
+ for (; it != ite; ++it) {
+ store.mapInternalRef(it->load_acquire())->foreach_key(store, func);
+ }
+ } else {
+ for (; it != ite; ++it) {
+ store.mapLeafRef(it->load_acquire())->foreach_key(func);
+ }
+ }
+}
+
+template <typename KeyT, typename AggrT, uint32_t NumSlots>
+template <typename NodeStoreType, typename FunctionType>
+void
+BTreeInternalNode<KeyT, AggrT, NumSlots>::foreach(NodeStoreType &store, FunctionType func) const {
+ const BTreeNode::ChildRef *it = this->_data;
+ const BTreeNode::ChildRef *ite = it + _validSlots;
+ if (this->getLevel() > 1u) {
+ for (; it != ite; ++it) {
+ store.mapInternalRef(it->load_acquire())->foreach(store, func);
+ }
+ } else {
+ for (; it != ite; ++it) {
+ store.mapLeafRef(it->load_acquire())->foreach(func);
+ }
+ }
+}
+
template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots>
BTreeLeafNode<KeyT, DataT, AggrT, NumSlots>::
@@ -376,4 +432,43 @@ BTreeLeafNode(const KeyDataType *smallArray, uint32_t arraySize) noexcept
freeze();
}
+
+template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots>
+template <typename FunctionType>
+void
+BTreeLeafNode<KeyT, DataT, AggrT, NumSlots>::foreach_key(FunctionType func) const {
+ const KeyT *it = _keys;
+ const KeyT *ite = it + _validSlots;
+ for (; it != ite; ++it) {
+ func(*it);
+ }
+}
+
+/**
+ * Call func with leaf entry key value as argument for leaf entries [start_idx, end_idx).
+ */
+template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots>
+template <typename FunctionType>
+void
+BTreeLeafNode<KeyT, DataT, AggrT, NumSlots>::foreach_key_range(uint32_t start_idx, uint32_t end_idx, FunctionType func) const {
+ const KeyT *it = _keys;
+ const KeyT *ite = it + end_idx;
+ it += start_idx;
+ for (; it != ite; ++it) {
+ func(*it);
+ }
+}
+
+template <typename KeyT, typename DataT, typename AggrT, uint32_t NumSlots>
+template <typename FunctionType>
+void
+BTreeLeafNode<KeyT, DataT, AggrT, NumSlots>::foreach(FunctionType func) const {
+ const KeyT *it = _keys;
+ const KeyT *ite = it + _validSlots;
+ uint32_t idx = 0;
+ for (; it != ite; ++it) {
+ func(*it, this->getData(idx++));
+ }
+}
+
}