summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--client/go/internal/cli/cmd/cert_test.go12
-rw-r--r--client/go/internal/cli/cmd/deploy_test.go4
-rw-r--r--client/go/internal/cli/cmd/prod.go2
-rw-r--r--client/go/internal/cli/cmd/prod_test.go47
-rw-r--r--client/go/internal/cli/cmd/testdata/applications/withTarget/target/application/.gitkeep0
-rw-r--r--client/go/internal/vespa/application.go120
-rw-r--r--client/go/internal/vespa/deploy_test.go14
-rw-r--r--client/js/app/yarn.lock26
-rw-r--r--config-lib/abi-spec.json23
-rw-r--r--config-lib/src/main/java/com/yahoo/config/LeafNodeMaps.java9
-rw-r--r--config-lib/src/main/java/com/yahoo/config/LeafNodeVector.java8
-rw-r--r--config-lib/src/main/java/com/yahoo/config/OptionalPathNode.java84
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/DataplaneProxy.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java12
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java18
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudTokenDataPlaneFilterTest.java5
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/utils/FileSenderTest.java10
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/ConfigDefinition.java31
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/ConfigDefinitionBuilder.java10
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/ConfigPayloadApplier.java20
-rw-r--r--config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java1
-rw-r--r--config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java2
-rw-r--r--config/src/test/java/com/yahoo/config/subscription/FunctionTest.java4
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionBuilderTest.java11
-rwxr-xr-xconfig/src/test/java/com/yahoo/vespa/config/ConfigDefinitionTest.java4
-rw-r--r--config/src/test/resources/configs/def-files/function-test.def2
-rw-r--r--config/src/test/resources/configs/function-test/variableaccess.txt3
-rw-r--r--configdefinitions/src/vespa/dataplane-proxy.def3
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java45
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java11
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/DefLine.java14
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java12
-rw-r--r--configgen/src/test/java/com/yahoo/config/codegen/DefLineParsingTest.java21
-rw-r--r--configgen/src/test/java/com/yahoo/config/codegen/DefParserTest.java4
-rw-r--r--configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java2
-rw-r--r--configgen/src/test/java/com/yahoo/config/codegen/NormalizedDefinitionTest.java2
-rw-r--r--configgen/src/test/resources/allfeatures.reference28
-rw-r--r--configgen/src/test/resources/configgen.allfeatures.def1
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java16
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/ReindexingResponse.java46
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java24
-rw-r--r--container-core/src/main/java/com/yahoo/container/handler/threadpool/ContainerThreadpoolImpl.java2
-rw-r--r--container-core/src/test/java/com/yahoo/container/jdisc/HttpRequestTestCase.java2
-rw-r--r--container-core/src/test/java/com/yahoo/container/jdisc/HttpResponseTestCase.java2
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/HttpResponseTestCase.java30
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/DataplaneProxyService.java11
-rw-r--r--container-search/abi-spec.json3
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java4
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/EquivItem.java5
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java12
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/semantics/engine/Evaluation.java54
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/semantics/engine/RuleEvaluation.java4
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/semantics/rule/LiteralPhraseProduction.java12
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/semantics/rule/TermProduction.java4
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/semantics/test/ExpansionTestCase.java3
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/semantics/test/OrPhraseTestCase.java18
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/equiv.sr4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java16
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java17
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java47
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java7
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java18
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArtifactExpirer.java6
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java47
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java35
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java14
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json10
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json45
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json20
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json34
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-runs.json25
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-job.json10
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java9
-rw-r--r--dependency-versions/pom.xml8
-rw-r--r--dist/vespa.spec220
-rw-r--r--document/src/main/java/com/yahoo/document/json/JsonWriter.java2
-rw-r--r--eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp8
-rw-r--r--eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp220
-rw-r--r--eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp2
-rw-r--r--eval/src/vespa/eval/instruction/dense_join_reduce_plan.h5
-rw-r--r--eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h2
-rw-r--r--eval/src/vespa/eval/instruction/universal_dot_product.cpp191
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java11
-rw-r--r--maven-plugins/allowed-maven-dependencies.txt6
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java4
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java4
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java7
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java4
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/CommandLine.java17
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java7
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java13
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java41
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java39
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java7
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java10
-rw-r--r--parent/pom.xml5
-rw-r--r--storage/src/tests/distributor/bucketstateoperationtest.cpp23
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test.cpp298
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test_util.cpp30
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test_util.h3
-rw-r--r--storage/src/tests/distributor/joinbuckettest.cpp28
-rw-r--r--storage/src/tests/distributor/mergeoperationtest.cpp24
-rw-r--r--storage/src/tests/distributor/putoperationtest.cpp79
-rw-r--r--storage/src/tests/distributor/removebucketoperationtest.cpp51
-rw-r--r--storage/src/tests/distributor/removelocationtest.cpp2
-rw-r--r--storage/src/tests/distributor/splitbuckettest.cpp64
-rw-r--r--storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp91
-rw-r--r--storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp14
-rw-r--r--storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp6
-rw-r--r--storage/src/vespa/storage/common/messagebucket.cpp2
-rw-r--r--storage/src/vespa/storage/config/distributorconfiguration.cpp4
-rw-r--r--storage/src/vespa/storage/config/distributorconfiguration.h11
-rw-r--r--storage/src/vespa/storage/config/stor-distributormanager.def5
-rw-r--r--storage/src/vespa/storage/distributor/CMakeLists.txt1
-rw-r--r--storage/src/vespa/storage/distributor/bucket_ownership_calculator.cpp41
-rw-r--r--storage/src/vespa/storage/distributor/bucket_ownership_calculator.h44
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.cpp115
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.h13
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_interface.h7
-rw-r--r--storage/src/vespa/storage/distributor/messagetracker.h2
-rw-r--r--storage/src/vespa/storage/distributor/operationowner.cpp21
-rw-r--r--storage/src/vespa/storage/distributor/operationowner.h16
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/check_condition.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.h3
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp7
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h14
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.h3
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp11
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/updateoperation.h21
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp5
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h3
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp1
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h3
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp9
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp13
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp11
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/operations/operation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/operation.h9
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.cpp100
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.h78
-rw-r--r--storage/src/vespa/storage/distributor/sentmessagemap.cpp29
-rw-r--r--storage/src/vespa/storage/distributor/sentmessagemap.h5
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp72
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h22
-rw-r--r--storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp2
-rw-r--r--storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp10
-rw-r--r--storage/src/vespa/storage/persistence/filestorage/filestormanager.h1
-rw-r--r--storage/src/vespa/storage/persistence/persistencehandler.cpp2
-rw-r--r--storage/src/vespa/storage/persistence/persistenceutil.cpp3
-rw-r--r--storage/src/vespa/storage/persistence/simplemessagehandler.cpp12
-rw-r--r--storage/src/vespa/storage/persistence/simplemessagehandler.h1
-rw-r--r--storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp1
-rw-r--r--storage/src/vespa/storageapi/mbusprot/protocolserialization.cpp15
-rw-r--r--storage/src/vespa/storageapi/mbusprot/protocolserialization.h6
-rw-r--r--storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp36
-rw-r--r--storage/src/vespa/storageapi/mbusprot/protocolserialization7.h6
-rw-r--r--storage/src/vespa/storageapi/mbusprot/storagemessage.h2
-rw-r--r--storage/src/vespa/storageapi/message/persistence.cpp45
-rw-r--r--storage/src/vespa/storageapi/message/persistence.h33
-rw-r--r--storage/src/vespa/storageapi/messageapi/messagehandler.h4
-rw-r--r--storage/src/vespa/storageapi/messageapi/storagemessage.cpp2
-rw-r--r--storage/src/vespa/storageapi/messageapi/storagemessage.h6
-rw-r--r--valgrind-suppressions.txt9
-rw-r--r--vespa-athenz/pom.xml46
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java180
-rw-r--r--vespa-dependencies-enforcer/allowed-maven-dependencies.txt16
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java75
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java22
-rw-r--r--zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java14
-rw-r--r--zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyStats.java19
186 files changed, 2538 insertions, 1613 deletions
diff --git a/client/go/internal/cli/cmd/cert_test.go b/client/go/internal/cli/cmd/cert_test.go
index 0a20ab8eb1a..491857f2bf2 100644
--- a/client/go/internal/cli/cmd/cert_test.go
+++ b/client/go/internal/cli/cmd/cert_test.go
@@ -50,12 +50,6 @@ func testCert(t *testing.T, subcommand []string) {
}
func TestCertCompressedPackage(t *testing.T) {
- t.Run("auth cert", func(t *testing.T) {
- testCertCompressedPackage(t, []string{"auth", "cert"})
- })
-}
-
-func testCertCompressedPackage(t *testing.T, subcommand []string) {
_, pkgDir := mock.ApplicationPackageDir(t, true, false)
zipFile := filepath.Join(pkgDir, "target", "application.zip")
err := os.MkdirAll(filepath.Dir(zipFile), 0755)
@@ -68,16 +62,14 @@ func testCertCompressedPackage(t *testing.T, subcommand []string) {
stdout.Reset()
stderr.Reset()
- args := append(subcommand, pkgDir)
- err = cli.Run(args...)
+ err = cli.Run("auth", "cert", zipFile)
assert.NotNil(t, err)
assert.Contains(t, stderr.String(), "Error: cannot add certificate to compressed application package")
err = os.Remove(zipFile)
assert.Nil(t, err)
- args = append(subcommand, "-f", pkgDir)
- err = cli.Run(args...)
+ err = cli.Run("auth", "cert", "-f", pkgDir)
assert.Nil(t, err)
assert.Contains(t, stdout.String(), "Success: Certificate written to")
assert.Contains(t, stdout.String(), "Success: Private key written to")
diff --git a/client/go/internal/cli/cmd/deploy_test.go b/client/go/internal/cli/cmd/deploy_test.go
index d578b2a4629..d2aa52bc08f 100644
--- a/client/go/internal/cli/cmd/deploy_test.go
+++ b/client/go/internal/cli/cmd/deploy_test.go
@@ -133,7 +133,7 @@ func TestDeployApplicationDirectoryWithSource(t *testing.T) {
}
func TestDeployApplicationDirectoryWithPomAndTarget(t *testing.T) {
- assertDeploy("testdata/applications/withTarget/target/application.zip",
+ assertDeploy("testdata/applications/withTarget/target/application",
[]string{"deploy", "--wait=0", "testdata/applications/withTarget"}, t)
}
@@ -141,7 +141,7 @@ func TestDeployApplicationDirectoryWithPomAndEmptyTarget(t *testing.T) {
cli, _, stderr := newTestCLI(t)
assert.NotNil(t, cli.Run("deploy", "--wait=0", "testdata/applications/withEmptyTarget"))
assert.Equal(t,
- "Error: found pom.xml, but target/application.zip does not exist: run 'mvn package' first\n",
+ "Error: found pom.xml, but testdata/applications/withEmptyTarget/target/application does not exist: run 'mvn package' first\n",
stderr.String())
}
diff --git a/client/go/internal/cli/cmd/prod.go b/client/go/internal/cli/cmd/prod.go
index 1a2f88311b6..5337a346654 100644
--- a/client/go/internal/cli/cmd/prod.go
+++ b/client/go/internal/cli/cmd/prod.go
@@ -147,7 +147,7 @@ $ vespa prod deploy`,
if err != nil {
return err
}
- if !pkg.HasDeployment() {
+ if !pkg.HasDeploymentSpec() {
return errHint(fmt.Errorf("no deployment.xml found"), "Try creating one with vespa prod init")
}
if err := verifyTests(cli, pkg); err != nil {
diff --git a/client/go/internal/cli/cmd/prod_test.go b/client/go/internal/cli/cmd/prod_test.go
index 944f09b3d42..4cca54a76c8 100644
--- a/client/go/internal/cli/cmd/prod_test.go
+++ b/client/go/internal/cli/cmd/prod_test.go
@@ -50,12 +50,12 @@ func TestProdInit(t *testing.T) {
assert.Nil(t, cli.Run("prod", "init", pkgDir))
// Verify contents
- deploymentPath := filepath.Join(pkgDir, "src", "main", "application", "deployment.xml")
+ deploymentPath := filepath.Join(pkgDir, "deployment.xml")
deploymentXML := readFileString(t, deploymentPath)
assert.Contains(t, deploymentXML, `<region>aws-us-west-2a</region>`)
assert.Contains(t, deploymentXML, `<region>aws-eu-west-1a</region>`)
- servicesPath := filepath.Join(pkgDir, "src", "main", "application", "services.xml")
+ servicesPath := filepath.Join(pkgDir, "services.xml")
servicesXML := readFileString(t, servicesPath)
containerFragment := `<container id="qrs" version="1.0">
<document-api></document-api>
@@ -80,6 +80,7 @@ func TestProdInit(t *testing.T) {
}
func readFileString(t *testing.T, filename string) string {
+ t.Helper()
content, err := os.ReadFile(filename)
if err != nil {
t.Fatal(err)
@@ -88,12 +89,15 @@ func readFileString(t *testing.T, filename string) string {
}
func createApplication(t *testing.T, pkgDir string, java bool, skipTests bool) {
- appDir := filepath.Join(pkgDir, "src", "main", "application")
- targetDir := filepath.Join(pkgDir, "target")
+ appDir := pkgDir
+ testsDir := pkgDir
+ if java {
+ appDir = filepath.Join(pkgDir, "target", "application")
+ testsDir = filepath.Join(pkgDir, "target", "application-test")
+ }
if err := os.MkdirAll(appDir, 0755); err != nil {
t.Fatal(err)
}
-
deploymentXML := `<deployment version="1.0">
<prod>
<region>aws-us-east-1c</region>
@@ -102,7 +106,6 @@ func createApplication(t *testing.T, pkgDir string, java bool, skipTests bool) {
if err := os.WriteFile(filepath.Join(appDir, "deployment.xml"), []byte(deploymentXML), 0644); err != nil {
t.Fatal(err)
}
-
servicesXML := `<services version="1.0" xmlns:deploy="vespa" xmlns:preprocess="properties">
<container id="qrs" version="1.0">
<document-api/>
@@ -123,19 +126,16 @@ func createApplication(t *testing.T, pkgDir string, java bool, skipTests bool) {
if err := os.WriteFile(filepath.Join(appDir, "services.xml"), []byte(servicesXML), 0644); err != nil {
t.Fatal(err)
}
- if err := os.MkdirAll(targetDir, 0755); err != nil {
- t.Fatal(err)
- }
if java {
- if skipTests {
- t.Fatalf("skipTests=%t has no effect when java=%t", skipTests, java)
- }
if err := os.WriteFile(filepath.Join(pkgDir, "pom.xml"), []byte(""), 0644); err != nil {
t.Fatal(err)
}
- } else if !skipTests {
- testsDir := filepath.Join(pkgDir, "src", "test", "application", "tests")
- testBytes, _ := io.ReadAll(strings.NewReader("{\"steps\":[{}]}"))
+ }
+ if !skipTests {
+ if err := os.MkdirAll(testsDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+ testBytes := []byte("{\"steps\":[{}]}")
writeTest(filepath.Join(testsDir, "system-test", "test.json"), testBytes, t)
writeTest(filepath.Join(testsDir, "staging-setup", "test.json"), testBytes, t)
writeTest(filepath.Join(testsDir, "staging-test", "test.json"), testBytes, t)
@@ -203,23 +203,17 @@ func TestProdDeployWithJava(t *testing.T) {
httpClient := &mock.HTTPClient{}
httpClient.NextResponseString(200, `ok`)
- cli, stdout, _ := newTestCLI(t, "CI=true")
+ cli, stdout, stderr := newTestCLI(t, "CI=true")
cli.httpClient = httpClient
assert.Nil(t, cli.Run("config", "set", "application", "t1.a1.i1"))
assert.Nil(t, cli.Run("config", "set", "target", "cloud"))
assert.Nil(t, cli.Run("auth", "api-key"))
assert.Nil(t, cli.Run("auth", "cert", "--no-add"))
- // Copy an application package pre-assembled with mvn package
- testAppDir := filepath.Join("testdata", "applications", "withDeployment", "target")
- zipFile := filepath.Join(testAppDir, "application.zip")
- copyFile(t, filepath.Join(pkgDir, "target", "application.zip"), zipFile)
- testZipFile := filepath.Join(testAppDir, "application-test.zip")
- copyFile(t, filepath.Join(pkgDir, "target", "application-test.zip"), testZipFile)
-
stdout.Reset()
cli.Environment["VESPA_CLI_API_KEY_FILE"] = filepath.Join(cli.config.homeDir, "t1.api-key.pem")
- assert.Nil(t, cli.Run("prod", "deploy", pkgDir))
+ assert.Nil(t, cli.Run("prod", "deploy", "--add-cert", pkgDir))
+ assert.Equal(t, "", stderr.String())
assert.Contains(t, stdout.String(), "Success: Deployed")
assert.Contains(t, stdout.String(), "See https://console.vespa-cloud.com/tenant/t1/application/a1/prod/deployment for deployment progress")
}
@@ -240,11 +234,8 @@ func TestProdDeployInvalidZip(t *testing.T) {
// Copy an invalid application package containing relative file names
testAppDir := filepath.Join("testdata", "applications", "withInvalidEntries", "target")
zipFile := filepath.Join(testAppDir, "application.zip")
- copyFile(t, filepath.Join(pkgDir, "target", "application.zip"), zipFile)
- testZipFile := filepath.Join(testAppDir, "application-test.zip")
- copyFile(t, filepath.Join(pkgDir, "target", "application-test.zip"), testZipFile)
- assert.NotNil(t, cli.Run("prod", "deploy", pkgDir))
+ assert.NotNil(t, cli.Run("prod", "deploy", zipFile))
assert.Equal(t, "Error: found invalid path inside zip: ../../../../../../../tmp/foo\n", stderr.String())
}
diff --git a/client/go/internal/cli/cmd/testdata/applications/withTarget/target/application/.gitkeep b/client/go/internal/cli/cmd/testdata/applications/withTarget/target/application/.gitkeep
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/client/go/internal/cli/cmd/testdata/applications/withTarget/target/application/.gitkeep
diff --git a/client/go/internal/vespa/application.go b/client/go/internal/vespa/application.go
index b6b5b9427b3..dd1b580517b 100644
--- a/client/go/internal/vespa/application.go
+++ b/client/go/internal/vespa/application.go
@@ -17,49 +17,57 @@ type ApplicationPackage struct {
TestPath string
}
-func (ap *ApplicationPackage) HasCertificate() bool {
- return ap.hasFile(filepath.Join("security", "clients.pem"), "security/clients.pem")
-}
+func (ap *ApplicationPackage) HasCertificate() bool { return ap.hasFile("security", "clients.pem") }
-func (ap *ApplicationPackage) HasDeployment() bool { return ap.hasFile("deployment.xml", "") }
+func (ap *ApplicationPackage) HasDeploymentSpec() bool { return ap.hasFile("deployment.xml", "") }
-func (ap *ApplicationPackage) hasFile(filename, zipName string) bool {
- if zipName == "" {
- zipName = filename
+func (ap *ApplicationPackage) hasFile(pathSegment ...string) bool {
+ if !ap.IsZip() {
+ return util.PathExists(filepath.Join(append([]string{ap.Path}, pathSegment...)...))
}
- if ap.IsZip() {
- r, err := zip.OpenReader(ap.Path)
- if err != nil {
- return false
- }
- defer r.Close()
- for _, f := range r.File {
- if f.Name == zipName {
- return true
- }
- }
+ zipName := filepath.Join(pathSegment...)
+ return ap.hasZipEntry(func(name string) bool { return zipName == name })
+}
+
+func (ap *ApplicationPackage) hasZipEntry(matcher func(zipName string) bool) bool {
+ r, err := zip.OpenReader(ap.Path)
+ if err != nil {
return false
}
- return util.PathExists(filepath.Join(ap.Path, filename))
+ defer r.Close()
+ for _, f := range r.File {
+ if matcher(f.Name) {
+ return true
+ }
+ }
+ return false
}
func (ap *ApplicationPackage) IsZip() bool { return isZip(ap.Path) }
func (ap *ApplicationPackage) IsJava() bool {
if ap.IsZip() {
- r, err := zip.OpenReader(ap.Path)
- if err != nil {
- return false
- }
- defer r.Close()
- for _, f := range r.File {
- if filepath.Ext(f.Name) == ".jar" {
- return true
- }
+ return ap.hasZipEntry(func(name string) bool { return filepath.Ext(name) == ".jar" })
+ }
+ return util.PathExists(filepath.Join(ap.Path, "pom.xml"))
+}
+
+func (ap *ApplicationPackage) Validate() error {
+ if !ap.IsZip() {
+ return nil
+ }
+ invalidPath := ""
+ invalid := ap.hasZipEntry(func(name string) bool {
+ if !validPath(name) {
+ invalidPath = name
+ return true
}
return false
+ })
+ if invalid {
+ return fmt.Errorf("found invalid path inside zip: %s", invalidPath)
}
- return util.PathExists(filepath.Join(ap.Path, "pom.xml"))
+ return nil
}
func isZip(filename string) bool { return filepath.Ext(filename) == ".zip" }
@@ -166,9 +174,6 @@ func (ap *ApplicationPackage) Unzip(test bool) (string, error) {
}
defer f.Close()
for _, f := range f.File {
- if !validPath(f.Name) {
- return "", fmt.Errorf("found invalid path inside zip: %s", f.Name)
- }
dst := filepath.Join(tmp, f.Name)
if f.FileInfo().IsDir() {
if err := os.Mkdir(dst, f.FileInfo().Mode()); err != nil {
@@ -220,36 +225,42 @@ func copyFile(src *zip.File, dst string) error {
// Package to use is preferred in this order:
// 1. Given path, if it's a zip
// 2. target/application
-// 3. target/application.zip
-// 4. src/main/application
-// 5. Given path, if it contains services.xml
+// 3. src/main/application
+// 4. Given path, if it contains services.xml
func FindApplicationPackage(zipOrDir string, requirePackaging bool) (ApplicationPackage, error) {
+ pkg, err := findApplicationPackage(zipOrDir, requirePackaging)
+ if err != nil {
+ return ApplicationPackage{}, err
+ }
+ if err := pkg.Validate(); err != nil {
+ return ApplicationPackage{}, err
+ }
+ return pkg, nil
+}
+
+func findApplicationPackage(zipOrDir string, requirePackaging bool) (ApplicationPackage, error) {
if isZip(zipOrDir) {
return ApplicationPackage{Path: zipOrDir}, nil
}
- // Prefer uncompressed application because this allows us to add security/clients.pem to the package on-demand
- if path := filepath.Join(zipOrDir, "target", "application"); util.PathExists(path) {
- return ApplicationPackage{Path: path}, nil
- }
- appZip := filepath.Join(zipOrDir, "target", "application.zip")
- if util.PathExists(filepath.Join(zipOrDir, "pom.xml")) || util.PathExists(appZip) {
- if util.PathExists(appZip) {
- if testZip := filepath.Join(zipOrDir, "target", "application-test.zip"); util.PathExists(testZip) {
- return ApplicationPackage{Path: appZip, TestPath: testZip}, nil
- }
- return ApplicationPackage{Path: appZip}, nil
+ // Pre-packaged application. We prefer the uncompressed application because this allows us to add
+ // security/clients.pem to the package on-demand
+ hasPOM := util.PathExists(filepath.Join(zipOrDir, "pom.xml"))
+ if hasPOM {
+ path := filepath.Join(zipOrDir, "target", "application")
+ if util.PathExists(path) {
+ testPath := existingPath(filepath.Join(zipOrDir, "target", "application-test"))
+ return ApplicationPackage{Path: path, TestPath: testPath}, nil
}
if requirePackaging {
- return ApplicationPackage{}, errors.New("found pom.xml, but target/application.zip does not exist: run 'mvn package' first")
+ return ApplicationPackage{}, fmt.Errorf("found pom.xml, but %s does not exist: run 'mvn package' first", path)
}
}
+ // Application with Maven directory structure, but with no POM or no hard requirement on packaging
if path := filepath.Join(zipOrDir, "src", "main", "application"); util.PathExists(path) {
- testPath := ""
- if d := filepath.Join(zipOrDir, "src", "test", "application"); util.PathExists(d) {
- testPath = d
- }
+ testPath := existingPath(filepath.Join(zipOrDir, "src", "test", "application"))
return ApplicationPackage{Path: path, TestPath: testPath}, nil
}
+ // Application without Java components
if util.PathExists(filepath.Join(zipOrDir, "services.xml")) {
testPath := ""
if util.PathExists(filepath.Join(zipOrDir, "tests")) {
@@ -259,3 +270,10 @@ func FindApplicationPackage(zipOrDir string, requirePackaging bool) (Application
}
return ApplicationPackage{}, fmt.Errorf("could not find an application package source in '%s'", zipOrDir)
}
+
+func existingPath(path string) string {
+ if util.PathExists(path) {
+ return path
+ }
+ return ""
+}
diff --git a/client/go/internal/vespa/deploy_test.go b/client/go/internal/vespa/deploy_test.go
index 693d4527624..ff278578e8a 100644
--- a/client/go/internal/vespa/deploy_test.go
+++ b/client/go/internal/vespa/deploy_test.go
@@ -151,18 +151,14 @@ func TestFindApplicationPackage(t *testing.T) {
fail: true,
})
assertFindApplicationPackage(t, dir, pkgFixture{
- expectedPath: filepath.Join(dir, "target", "application.zip"),
- existingFiles: []string{filepath.Join(dir, "pom.xml"), filepath.Join(dir, "target", "application.zip")},
- requirePackaging: true,
- })
- assertFindApplicationPackage(t, dir, pkgFixture{
- expectedPath: filepath.Join(dir, "target", "application.zip"),
- existingFiles: []string{filepath.Join(dir, "target", "application.zip")},
- })
- assertFindApplicationPackage(t, dir, pkgFixture{
expectedPath: filepath.Join(dir, "target", "application"),
existingFiles: []string{filepath.Join(dir, "target", "application"), filepath.Join(dir, "target", "application.zip")},
})
+ assertFindApplicationPackage(t, dir, pkgFixture{
+ expectedPath: filepath.Join(dir, "target", "application"),
+ expectedTestPath: filepath.Join(dir, "target", "application-test"),
+ existingFiles: []string{filepath.Join(dir, "target", "application"), filepath.Join(dir, "target", "application-test")},
+ })
zip := filepath.Join(dir, "myapp.zip")
assertFindApplicationPackage(t, zip, pkgFixture{
expectedPath: zip,
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index 706ecbcc97e..e5146674b8a 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -593,10 +593,10 @@
minimatch "^3.1.2"
strip-json-comments "^3.1.1"
-"@eslint/js@8.48.0":
- version "8.48.0"
- resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.48.0.tgz#642633964e217905436033a2bd08bf322849b7fb"
- integrity sha512-ZSjtmelB7IJfWD2Fvb7+Z+ChTIKWq6kjda95fLcQKNS5aheVHn4IkfgRQE3sIIzTcSLwLcLZUD9UBt+V7+h+Pw==
+"@eslint/js@8.49.0":
+ version "8.49.0"
+ resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.49.0.tgz#86f79756004a97fa4df866835093f1df3d03c333"
+ integrity sha512-1S8uAY/MTJqVx0SC4epBq+N2yhuwtNwLbJYNZyhL2pO1ZVKn5HFXav5T41Ryzy9K9V7ZId2JB2oy/W4aCd9/2w==
"@floating-ui/core@^1.4.1":
version "1.4.1"
@@ -667,10 +667,10 @@
dependencies:
prop-types "^15.8.1"
-"@humanwhocodes/config-array@^0.11.10":
- version "0.11.10"
- resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.10.tgz#5a3ffe32cc9306365fb3fd572596cd602d5e12d2"
- integrity sha512-KVVjQmNUepDVGXNuoRRdmmEjruj0KfiGSbS8LVc12LMsWDQzRXJ0qdhN8L8uUigKpfEHRhlaQFY0ib1tnUbNeQ==
+"@humanwhocodes/config-array@^0.11.11":
+ version "0.11.11"
+ resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.11.tgz#88a04c570dbbc7dd943e4712429c3df09bc32844"
+ integrity sha512-N2brEuAadi0CcdeMXUkhbZB84eskAc8MEX1By6qEchoVywSgXPIjou4rYsl0V3Hj0ZnuGycGCjdNgockbzeWNA==
dependencies:
"@humanwhocodes/object-schema" "^1.2.1"
debug "^4.1.1"
@@ -2302,15 +2302,15 @@ eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4
integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==
eslint@^8:
- version "8.48.0"
- resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.48.0.tgz#bf9998ba520063907ba7bfe4c480dc8be03c2155"
- integrity sha512-sb6DLeIuRXxeM1YljSe1KEx9/YYeZFQWcV8Rq9HfigmdDEugjLEVEa1ozDjL6YDjBpQHPJxJzze+alxi4T3OLg==
+ version "8.49.0"
+ resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.49.0.tgz#09d80a89bdb4edee2efcf6964623af1054bf6d42"
+ integrity sha512-jw03ENfm6VJI0jA9U+8H5zfl5b+FvuU3YYvZRdZHOlU2ggJkxrlkJH4HcDrZpj6YwD8kuYqvQM8LyesoazrSOQ==
dependencies:
"@eslint-community/eslint-utils" "^4.2.0"
"@eslint-community/regexpp" "^4.6.1"
"@eslint/eslintrc" "^2.1.2"
- "@eslint/js" "8.48.0"
- "@humanwhocodes/config-array" "^0.11.10"
+ "@eslint/js" "8.49.0"
+ "@humanwhocodes/config-array" "^0.11.11"
"@humanwhocodes/module-importer" "^1.0.1"
"@nodelib/fs.walk" "^1.2.8"
ajv "^6.12.4"
diff --git a/config-lib/abi-spec.json b/config-lib/abi-spec.json
index caae2ebf1cf..07e61c63237 100644
--- a/config-lib/abi-spec.json
+++ b/config-lib/abi-spec.json
@@ -273,6 +273,7 @@
"public static java.util.Map asNodeMap(java.util.Map, com.yahoo.config.LeafNode)",
"public static java.util.Map asFileNodeMap(java.util.Map)",
"public static java.util.Map asPathNodeMap(java.util.Map)",
+ "public static java.util.Map asOptionalPathNodeMap(java.util.Map)",
"public static java.util.Map asUrlNodeMap(java.util.Map)",
"public static java.util.Map asModelNodeMap(java.util.Map)"
],
@@ -289,6 +290,7 @@
"public java.util.List asList()",
"public static com.yahoo.config.LeafNodeVector createFileNodeVector(java.util.Collection)",
"public static com.yahoo.config.LeafNodeVector createPathNodeVector(java.util.Collection)",
+ "public static com.yahoo.config.LeafNodeVector createOptionalPathNodeVector(java.util.Collection)",
"public static com.yahoo.config.LeafNodeVector createUrlNodeVector(java.util.Collection)",
"public static com.yahoo.config.LeafNodeVector createModelNodeVector(java.util.Collection)"
],
@@ -420,6 +422,27 @@
"protected final java.util.ArrayList vector"
]
},
+ "com.yahoo.config.OptionalPathNode" : {
+ "superClass" : "com.yahoo.config.LeafNode",
+ "interfaces" : [ ],
+ "attributes" : [
+ "public"
+ ],
+ "methods" : [
+ "public void <init>()",
+ "public void <init>(com.yahoo.config.FileReference)",
+ "public void <init>(java.util.Optional)",
+ "public java.util.Optional value()",
+ "public java.lang.String getValue()",
+ "public java.lang.String toString()",
+ "protected boolean doSetValue(java.lang.String)",
+ "public java.util.Optional getFileReference()",
+ "public static java.util.List toFileReferences(java.util.List)",
+ "public static java.util.Map toFileReferenceMap(java.util.Map)",
+ "public bridge synthetic java.lang.Object value()"
+ ],
+ "fields" : [ ]
+ },
"com.yahoo.config.PathNode" : {
"superClass" : "com.yahoo.config.LeafNode",
"interfaces" : [ ],
diff --git a/config-lib/src/main/java/com/yahoo/config/LeafNodeMaps.java b/config-lib/src/main/java/com/yahoo/config/LeafNodeMaps.java
index 82663fa8bfd..214d8c52caa 100644
--- a/config-lib/src/main/java/com/yahoo/config/LeafNodeMaps.java
+++ b/config-lib/src/main/java/com/yahoo/config/LeafNodeMaps.java
@@ -4,6 +4,7 @@ package com.yahoo.config;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
+import java.util.Optional;
import java.util.stream.Collectors;
/**
@@ -60,6 +61,14 @@ public class LeafNodeMaps {
return Collections.unmodifiableMap(pathNodeMap);
}
+ public static Map<String, OptionalPathNode> asOptionalPathNodeMap(Map<String, Optional<FileReference>> fileReferenceMap) {
+ Map<String, OptionalPathNode> pathNodeMap = new LinkedHashMap<>();
+ for (Map.Entry<String, Optional<FileReference>> e : fileReferenceMap.entrySet()) {
+ pathNodeMap.put(e.getKey(), new OptionalPathNode(e.getValue()));
+ }
+ return Collections.unmodifiableMap(pathNodeMap);
+ }
+
public static Map<String, UrlNode> asUrlNodeMap(Map<String, UrlReference> urlReferenceMap) {
return Collections.unmodifiableMap(
urlReferenceMap.entrySet().stream().collect(
diff --git a/config-lib/src/main/java/com/yahoo/config/LeafNodeVector.java b/config-lib/src/main/java/com/yahoo/config/LeafNodeVector.java
index a4fea95088d..cfb8cd4eebd 100644
--- a/config-lib/src/main/java/com/yahoo/config/LeafNodeVector.java
+++ b/config-lib/src/main/java/com/yahoo/config/LeafNodeVector.java
@@ -8,6 +8,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Optional;
/**
* A vector of leaf nodes.
@@ -71,6 +72,13 @@ public class LeafNodeVector<REAL, NODE extends LeafNode<REAL>> extends NodeVecto
return new LeafNodeVector<>(paths, new PathNode());
}
+ public static LeafNodeVector<Optional<Path>, OptionalPathNode> createOptionalPathNodeVector(Collection<Optional<FileReference>> values) {
+ List<Optional<Path>> paths = new ArrayList<>();
+ for (Optional<FileReference> fileReference : values)
+ paths.add(fileReference.map(reference -> Paths.get(reference.value())));
+ return new LeafNodeVector<>(paths, new OptionalPathNode());
+ }
+
public static LeafNodeVector<File, UrlNode> createUrlNodeVector(Collection<UrlReference> values) {
List<File> files = new ArrayList<>();
for (UrlReference urlReference : values)
diff --git a/config-lib/src/main/java/com/yahoo/config/OptionalPathNode.java b/config-lib/src/main/java/com/yahoo/config/OptionalPathNode.java
new file mode 100644
index 00000000000..8a6414d798f
--- /dev/null
+++ b/config-lib/src/main/java/com/yahoo/config/OptionalPathNode.java
@@ -0,0 +1,84 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+/**
+ * Represents a 'path' in a {@link ConfigInstance}, usually a filename, which can be optional
+ *
+ * @author hmusum
+ */
+public class OptionalPathNode extends LeafNode<Optional<Path>> {
+
+ private final Optional<FileReference> fileReference;
+
+ public OptionalPathNode() {
+ fileReference = Optional.empty();
+ }
+
+ public OptionalPathNode(FileReference fileReference) {
+ super(true);
+ this.value = Optional.of(Path.of(fileReference.value()));
+ this.fileReference = Optional.of(fileReference);
+ }
+
+ public OptionalPathNode(Optional<FileReference> fileReference) {
+ super(true);
+ this.value = fileReference.map(reference -> Path.of(reference.value()));
+ this.fileReference = fileReference;
+ }
+
+ public Optional<Path> value() {
+ return value;
+ }
+
+ @Override
+ public String getValue() {
+ return value.toString();
+ }
+
+ @Override
+ public String toString() {
+ return (value.isEmpty()) ? "(empty)" : '"' + value.get().toString() + '"';
+ }
+
+ @Override
+ protected boolean doSetValue(String stringVal) {
+ throw new UnsupportedOperationException("doSetValue should not be necessary anymore!");
+ }
+
+ @Override
+ void serialize(String name, Serializer serializer) {
+ value.ifPresent(path -> serializer.serialize(name, path.toString()));
+ }
+
+ @Override
+ void serialize(Serializer serializer) {
+ value.ifPresent(path -> serializer.serialize(path.toString()));
+ }
+
+ public Optional<FileReference> getFileReference() {
+ return fileReference;
+ }
+
+ public static List<Optional<FileReference>> toFileReferences(List<OptionalPathNode> pathNodes) {
+ List<Optional<FileReference>> fileReferences = new ArrayList<>();
+ for (OptionalPathNode pathNode : pathNodes)
+ fileReferences.add(pathNode.getFileReference());
+ return fileReferences;
+ }
+
+ public static Map<String, Optional<FileReference>> toFileReferenceMap(Map<String, OptionalPathNode> map) {
+ Map<String, Optional<FileReference>> ret = new LinkedHashMap<>();
+ for (Map.Entry<String, OptionalPathNode> e : map.entrySet()) {
+ ret.put(e.getKey(), e.getValue().getFileReference());
+ }
+ return ret;
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/DataplaneProxy.java b/config-model/src/main/java/com/yahoo/vespa/model/container/DataplaneProxy.java
index 13aa65909bd..3361793ec1a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/DataplaneProxy.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/DataplaneProxy.java
@@ -5,19 +5,23 @@ import com.yahoo.cloud.config.DataplaneProxyConfig;
import com.yahoo.container.jdisc.DataplaneProxyConfigurator;
import com.yahoo.vespa.model.container.component.SimpleComponent;
+import java.util.Set;
+
public class DataplaneProxy extends SimpleComponent implements DataplaneProxyConfig.Producer {
private final int mtlsPort;
private final int tokenPort;
private final String serverCertificate;
private final String serverKey;
+ private final Set<String> tokenEndpoints;
- public DataplaneProxy(int mtlsPort, int tokenPort, String serverCertificate, String serverKey) {
+ public DataplaneProxy(int mtlsPort, int tokenPort, String serverCertificate, String serverKey, Set<String> tokenEndpoints) {
super(DataplaneProxyConfigurator.class.getName());
this.mtlsPort = mtlsPort;
this.tokenPort = tokenPort;
this.serverCertificate = serverCertificate;
this.serverKey = serverKey;
+ this.tokenEndpoints = tokenEndpoints;
}
@Override
@@ -26,6 +30,7 @@ public class DataplaneProxy extends SimpleComponent implements DataplaneProxyCon
builder.tokenPort(tokenPort);
builder.serverCertificate(serverCertificate);
builder.serverKey(serverKey);
+ builder.tokenEndpoints(tokenEndpoints);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 459c54a2805..2baf8f053c9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -627,9 +627,16 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
private void addCloudTokenSupport(DeployState state, ApplicationContainerCluster cluster) {
var server = cluster.getHttp().getHttpServer().get();
+ Set<String> tokenEndpoints = state.getEndpoints().stream()
+ .filter(endpoint -> endpoint.authMethod() == ApplicationClusterEndpoint.AuthMethod.token)
+ .map(ContainerEndpoint::names)
+ .flatMap(Collection::stream)
+ .collect(Collectors.toSet());
+
boolean enableTokenSupport = state.isHosted() && state.zone().system().isPublic()
&& state.featureFlags().enableDataplaneProxy()
- && cluster.getClients().stream().anyMatch(c -> !c.tokens().isEmpty());
+ && cluster.getClients().stream().anyMatch(c -> !c.tokens().isEmpty())
+ && ! tokenEndpoints.isEmpty();
if (!enableTokenSupport) return;
var endpointCert = state.endpointCertificateSecrets().orElseThrow();
int tokenPort = getTokenDataplanePort(state).orElseThrow();
@@ -641,7 +648,8 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
getMtlsDataplanePort(state),
tokenPort,
endpointCert.certificate(),
- endpointCert.key());
+ endpointCert.key(),
+ tokenEndpoints);
cluster.addComponent(dataplaneProxy);
// Setup dedicated connector
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
index 0e24c676b8f..2aa403791fc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
@@ -6,7 +6,6 @@ import com.yahoo.config.ModelReference;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.config.model.producer.AnyConfigProducer;
-import com.yahoo.config.model.producer.TreeConfigProducer;
import com.yahoo.config.model.producer.UserConfigRepo;
import com.yahoo.path.Path;
import com.yahoo.vespa.config.ConfigDefinition;
@@ -67,18 +66,19 @@ public class FileSender implements Serializable {
// Inspect fields at this level
sendEntries(builder, sentFiles, configDefinition.getFileDefs(), false);
sendEntries(builder, sentFiles, configDefinition.getPathDefs(), false);
+ sendEntries(builder, sentFiles, configDefinition.getOptionalPathDefs(), false);
sendEntries(builder, sentFiles, configDefinition.getModelDefs(), true);
// Inspect arrays
for (Map.Entry<String, ConfigDefinition.ArrayDef> entry : configDefinition.getArrayDefs().entrySet()) {
- if ( ! isAnyFileType(entry.getValue().getTypeSpec().getType())) continue;
+ if (isNotAnyFileType(entry.getValue().getTypeSpec().getType())) continue;
ConfigPayloadBuilder.Array array = builder.getArray(entry.getKey());
sendFileEntries(array.getElements(), sentFiles, "model".equals(entry.getValue().getTypeSpec().getType()));
}
// Inspect maps
for (Map.Entry<String, ConfigDefinition.LeafMapDef> entry : configDefinition.getLeafMapDefs().entrySet()) {
- if ( ! isAnyFileType(entry.getValue().getTypeSpec().getType())) continue;
+ if (isNotAnyFileType(entry.getValue().getTypeSpec().getType())) continue;
ConfigPayloadBuilder.MapBuilder map = builder.getMap(entry.getKey());
sendFileEntries(map.getElements(), sentFiles, "model".equals(entry.getValue().getTypeSpec().getType()));
}
@@ -101,16 +101,18 @@ public class FileSender implements Serializable {
}
}
- private static boolean isAnyFileType(String type) {
- return "file".equals(type) || "path".equals(type) || "model".equals(type);
+ private static boolean isNotAnyFileType(String type) {
+ return ! "file".equals(type) && ! "path".equals(type) && ! "model".equals(type);
}
private void sendEntries(ConfigPayloadBuilder builder,
Map<Path, FileReference> sentFiles,
Map<String, ?> entries,
boolean isModelType) {
- for (String name : entries.keySet()) {
+ for (Map.Entry<String, ?> entry : entries.entrySet()) {
+ String name = entry.getKey();
ConfigPayloadBuilder fileEntry = builder.getObject(name);
+ if (isEmptyOptionalPath(entry, fileEntry)) continue;
if (fileEntry.getValue() == null)
throw new IllegalArgumentException("Unable to send file for field '" + name +
"': Invalid config value " + fileEntry.getValue());
@@ -118,6 +120,10 @@ public class FileSender implements Serializable {
}
}
+ private static boolean isEmptyOptionalPath(Map.Entry<String, ?> entry, ConfigPayloadBuilder fileEntry) {
+ return entry.getValue() instanceof ConfigDefinition.OptionalPathDef && fileEntry.getValue() == null;
+ }
+
private void sendFileEntries(Collection<ConfigPayloadBuilder> builders, Map<Path, FileReference> sentFiles, boolean isModelType) {
for (ConfigPayloadBuilder builder : builders) {
sendFileEntry(builder, sentFiles, isModelType);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudTokenDataPlaneFilterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudTokenDataPlaneFilterTest.java
index 15e1d61c951..b4e2f53bb87 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudTokenDataPlaneFilterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/CloudTokenDataPlaneFilterTest.java
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container.xml;
+import com.yahoo.config.model.api.ApplicationClusterEndpoint;
+import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.EndpointCertificateSecrets;
import com.yahoo.config.model.builder.xml.test.DomBuilderTest;
import com.yahoo.config.model.deploy.DeployState;
@@ -27,6 +29,8 @@ import java.time.Instant;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
+import java.util.OptionalInt;
+import java.util.Set;
import static com.yahoo.vespa.model.container.xml.CloudDataPlaneFilterTest.createCertificate;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -99,6 +103,7 @@ public class CloudTokenDataPlaneFilterTest extends ContainerModelBuilderTestBase
new DataplaneToken.Version("myfingerprint2", "myaccesshash2", Optional.of(Instant.EPOCH.plus(Duration.ofDays(100000))))))))
.setHostedVespa(true))
.zone(new Zone(SystemName.PublicCd, Environment.dev, RegionName.defaultName()))
+ .endpoints(Set.of(new ContainerEndpoint("cluster", ApplicationClusterEndpoint.Scope.zone, List.of("name"), OptionalInt.empty(), ApplicationClusterEndpoint.RoutingMethod.exclusive, ApplicationClusterEndpoint.AuthMethod.token)))
.build();
return createModel(root, state, null, clusterElem);
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/utils/FileSenderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/utils/FileSenderTest.java
index c122aba9cf1..6dc9207a8cc 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/utils/FileSenderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/utils/FileSenderTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.model.utils;
import com.yahoo.config.FileNode;
import com.yahoo.config.FileReference;
import com.yahoo.config.ModelReference;
+import com.yahoo.config.OptionalPathNode;
import com.yahoo.config.UrlReference;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
@@ -39,7 +40,6 @@ public class FileSenderTest {
private List<AbstractService> serviceList;
private final MyFileRegistry fileRegistry = new MyFileRegistry();
private ConfigDefinition def;
- private TestService service;
private static class MyFileRegistry implements FileRegistry {
public Map<String, FileReference> pathToRef = new HashMap<>();
@@ -72,7 +72,7 @@ public class FileSenderTest {
public void setup() {
MockRoot root = new MockRoot();
producer = new SimpleConfigProducer<>(root, "test");
- service = new TestService(root, "service");
+ TestService service = new TestService(root, "service");
serviceList = new ArrayList<>();
serviceList.add(service);
ConfigDefinitionKey key = new ConfigDefinitionKey("myname", "mynamespace");
@@ -248,6 +248,12 @@ public class FileSenderTest {
});
}
+ @Test
+ void require_that_empty_optional_paths_are_not_sent() {
+ def.addOptionalPathDef("optionalPathVal");
+ fileSender().sendUserConfiguredFiles(producer);
+ }
+
private static class TestService extends AbstractService {
public TestService(TreeConfigProducer<?> parent, String name) {
diff --git a/config/src/main/java/com/yahoo/vespa/config/ConfigDefinition.java b/config/src/main/java/com/yahoo/vespa/config/ConfigDefinition.java
index 9d9e43de130..ba1040dbf36 100644
--- a/config/src/main/java/com/yahoo/vespa/config/ConfigDefinition.java
+++ b/config/src/main/java/com/yahoo/vespa/config/ConfigDefinition.java
@@ -38,6 +38,7 @@ public class ConfigDefinition {
private final Map<String, RefDef> referenceDefs = new LinkedHashMap<>();
private final Map<String, FileDef> fileDefs = new LinkedHashMap<>();
private final Map<String, PathDef> pathDefs = new LinkedHashMap<>();
+ private final Map<String, OptionalPathDef> optionalPathDefs = new LinkedHashMap<>();
private final Map<String, UrlDef> urlDefs = new LinkedHashMap<>();
private final Map<String, ModelDef> modelDefs = new LinkedHashMap<>();
private final Map<String, StructDef> structDefs = new LinkedHashMap<>();
@@ -98,6 +99,8 @@ public class ConfigDefinition {
verifyFile(id);
} else if (pathDefs.containsKey(id)) {
verifyPath(id);
+ } else if (optionalPathDefs.containsKey(id)) {
+ verifyOptionalPath(id);
} else if (urlDefs.containsKey(id)) {
verifyUrl(id);
} else if (modelDefs.containsKey(id)) {
@@ -540,6 +543,19 @@ public class ConfigDefinition {
}
}
+ public static class OptionalPathDef implements DefaultValued<String> {
+ private final String defVal;
+
+ OptionalPathDef(String defVal) {
+ this.defVal = defVal;
+ }
+
+ @Override
+ public String getDefVal() {
+ return defVal;
+ }
+ }
+
public static class UrlDef implements DefaultValued<String> {
private final String defVal;
@@ -659,6 +675,14 @@ public class ConfigDefinition {
pathDefs.put(refId, new PathDef(null));
}
+ public void addOptionalPathDef(String refId, String defVal) {
+ optionalPathDefs.put(refId, new OptionalPathDef(defVal));
+ }
+
+ public void addOptionalPathDef(String refId) {
+ optionalPathDefs.put(refId, new OptionalPathDef(null));
+ }
+
public void addUrlDef(String url, String defVal) {
urlDefs.put(url, new UrlDef(defVal));
}
@@ -701,6 +725,8 @@ public class ConfigDefinition {
public Map<String, PathDef> getPathDefs() { return pathDefs; }
+ public Map<String, OptionalPathDef> getOptionalPathDefs() { return optionalPathDefs; }
+
public Map<String, UrlDef> getUrlDefs() { return urlDefs; }
public Map<String, ModelDef> getModelDefs() { return modelDefs; }
@@ -868,6 +894,11 @@ public class ConfigDefinition {
throw new IllegalArgumentException("No such path in " + verifyWarning(id));
}
+ private void verifyOptionalPath(String id) {
+ if ( ! optionalPathDefs.containsKey(id))
+ throw new IllegalArgumentException("No such optional path in " + verifyWarning(id));
+ }
+
private void verifyUrl(String id) {
if ( ! urlDefs.containsKey(id))
throw new IllegalArgumentException("No such url in " + verifyWarning(id));
diff --git a/config/src/main/java/com/yahoo/vespa/config/ConfigDefinitionBuilder.java b/config/src/main/java/com/yahoo/vespa/config/ConfigDefinitionBuilder.java
index da5048a99e8..9cf9fb41839 100644
--- a/config/src/main/java/com/yahoo/vespa/config/ConfigDefinitionBuilder.java
+++ b/config/src/main/java/com/yahoo/vespa/config/ConfigDefinitionBuilder.java
@@ -67,6 +67,8 @@ public class ConfigDefinitionBuilder {
addNode(def, (LeafCNode.FileLeaf) node);
} else if (node instanceof LeafCNode.PathLeaf) {
addNode(def, (LeafCNode.PathLeaf) node);
+ } else if (node instanceof LeafCNode.OptionalPathLeaf) {
+ addNode(def, (LeafCNode.OptionalPathLeaf) node);
} else if (node instanceof LeafCNode.UrlLeaf) {
addNode(def, (LeafCNode.UrlLeaf) node);
} else if (node instanceof LeafCNode.ModelLeaf) {
@@ -172,6 +174,14 @@ public class ConfigDefinitionBuilder {
}
}
+ private static void addNode(ConfigDefinition def, LeafCNode.OptionalPathLeaf leaf) {
+ if (leaf.getDefaultValue() != null) {
+ def.addOptionalPathDef(leaf.getName(), leaf.getDefaultValue().getValue());
+ } else {
+ def.addOptionalPathDef(leaf.getName(), null);
+ }
+ }
+
private static void addNode(ConfigDefinition def, LeafCNode.UrlLeaf leaf) {
if (leaf.getDefaultValue() != null) {
def.addUrlDef(leaf.getName(), leaf.getDefaultValue().getValue());
diff --git a/config/src/main/java/com/yahoo/vespa/config/ConfigPayloadApplier.java b/config/src/main/java/com/yahoo/vespa/config/ConfigPayloadApplier.java
index e86b13b2c98..0dbc40a246c 100644
--- a/config/src/main/java/com/yahoo/vespa/config/ConfigPayloadApplier.java
+++ b/config/src/main/java/com/yahoo/vespa/config/ConfigPayloadApplier.java
@@ -21,6 +21,7 @@ import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
+import java.util.Optional;
import java.util.Set;
import java.util.logging.Logger;
@@ -213,7 +214,10 @@ public class ConfigPayloadApplier<T extends ConfigInstance.Builder> {
Inspector value = (Inspector)rawValue;
if (isPathField(builder, methodName))
return resolvePath(value.asString());
- else if (isUrlField(builder, methodName))
+ if (isOptionalPathField(builder, methodName)) {
+ String v = value.asString();
+ return resolvePath(v.isEmpty() ? Optional.empty() : Optional.of(v));
+ } else if (isUrlField(builder, methodName))
return value.asString().isEmpty() ? "" : resolveUrl(value.asString());
else if (isModelField(builder, methodName))
return value.asString().isEmpty() ? "" : resolveModel(value.asString());
@@ -234,6 +238,10 @@ public class ConfigPayloadApplier<T extends ConfigInstance.Builder> {
return new FileReference(path.toString());
}
+ private Optional<FileReference> resolvePath(Optional<String> value) {
+ return value.isEmpty() ? Optional.empty() : Optional.of(resolvePath(value.get()));
+ }
+
private UrlReference resolveUrl(String url) {
if ( ! isClientside()) return new UrlReference(url);
File file = urlDownloader.waitFor(new UrlReference(url), 60 * 60);
@@ -319,6 +327,16 @@ public class ConfigPayloadApplier<T extends ConfigInstance.Builder> {
return isFieldType(pathFieldSet, builder, methodName, FileReference.class);
}
+ /**
+ * Checks if this field is of type 'optional path', in which
+ * case some special handling might be needed. Caches the result.
+ */
+ private final Set<String> optionalPathFieldSet = new HashSet<>();
+ private boolean isOptionalPathField(Object builder, String methodName) {
+ // Optional paths are stored as Optional<FileReference> in Builder.
+ return isFieldType(optionalPathFieldSet, builder, methodName, Optional.class);
+ }
+
private final Set<String> urlFieldSet = new HashSet<>();
private boolean isUrlField(Object builder, String methodName) {
// Urls are stored as UrlReference in Builder.
diff --git a/config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java b/config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java
index a6273ad5ccb..0e50be83e7a 100644
--- a/config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java
+++ b/config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java
@@ -34,6 +34,7 @@ public class CfgConfigPayloadBuilderTest {
" 'parent:'",
" ],",
" 'pathVal': 'src/test/resources/configs/def-files/function-test.def',",
+ " 'optionalPathVal': 'src/test/resources/configs/def-files/function-test.def',",
" 'string_val': 'foo',",
" 'myStructMap': {",
" 'one': {",
diff --git a/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java b/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java
index c656bfe1a60..f09462eb634 100644
--- a/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java
+++ b/config/src/test/java/com/yahoo/config/subscription/ConfigInstancePayloadTest.java
@@ -13,6 +13,7 @@ import org.junit.Test;
import java.io.File;
import java.util.Arrays;
import java.util.List;
+import java.util.Optional;
import static com.yahoo.foo.FunctionTestConfig.*;
import static org.junit.Assert.assertNotNull;
@@ -46,6 +47,7 @@ public class ConfigInstancePayloadTest {
refwithdef(":parent:").
fileVal("etc").
pathVal(FileReference.mockFileReferenceForUnitTesting(new File("src/test/resources/configs/def-files/function-test.def"))).
+ optionalPathVal(Optional.of(FileReference.mockFileReferenceForUnitTesting(new File("src/test/resources/configs/def-files/function-test.def")))).
boolarr(false).
longarr(9223372036854775807L).
longarr(-9223372036854775808L).
diff --git a/config/src/test/java/com/yahoo/config/subscription/FunctionTest.java b/config/src/test/java/com/yahoo/config/subscription/FunctionTest.java
index 8656c0e945f..7a3b0e437f2 100644
--- a/config/src/test/java/com/yahoo/config/subscription/FunctionTest.java
+++ b/config/src/test/java/com/yahoo/config/subscription/FunctionTest.java
@@ -30,7 +30,7 @@ public class FunctionTest {
public static final String PATH = "src/test/resources/configs/function-test/";
private FunctionTestConfig config;
- private ConfigSourceSet sourceSet = new ConfigSourceSet("function-test");
+ private final ConfigSourceSet sourceSet = new ConfigSourceSet("function-test");
public void configure(FunctionTestConfig config, ConfigSourceSet sourceSet) {
this.config = config;
@@ -222,6 +222,8 @@ public class FunctionTest {
assertEquals(":parent", config.refarr(1));
assertEquals("parent:", config.refarr(2));
assertEquals("bin", config.fileArr(0).value());
+ assertEquals("function-test.def", config.pathVal().toFile().getName());
+ assertEquals("function-test.def", config.optionalPathVal().get().toFile().getName()); // TODO: explain why Optional.get() is safe here (value set in fixture)
assertEquals("pom.xml", config.pathArr(0).toString());
assertEquals("pom.xml", config.pathMap("one").toString());
diff --git a/config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionBuilderTest.java b/config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionBuilderTest.java
index a9f09951d7e..523fc78bac0 100644
--- a/config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionBuilderTest.java
+++ b/config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionBuilderTest.java
@@ -77,10 +77,19 @@ public class ConfigDefinitionBuilderTest {
assertThat(def.getFileDefs().size(), is(1));
assertNotNull(def.getFileDefs().get("fileVal"));
- assertThat(def.getArrayDefs().size(), is(9));
+ assertThat(def.getPathDefs().size(), is(1));
+ assertNotNull(def.getPathDefs().get("pathVal"));
+ assertThat(def.getOptionalPathDefs().size(), is(1));
+ assertNotNull(def.getOptionalPathDefs().get("optionalPathVal"));
+
+ // An array does not have to have any elements set
+ assertThat(def.getArrayDefs().size(), is(10));
assertNotNull(def.getArrayDefs().get("boolarr"));
assertThat(def.getArrayDefs().get("boolarr").getTypeSpec().getType(), is("bool"));
+ assertNotNull(def.getArrayDefs().get("boolarrEmpty"));
+ assertThat(def.getArrayDefs().get("boolarrEmpty").getTypeSpec().getType(), is("bool"));
+
assertNotNull(def.getArrayDefs().get("enumarr"));
assertThat(def.getArrayDefs().get("enumarr").getTypeSpec().getType(), is("enum"));
assertThat(def.getArrayDefs().get("enumarr").getTypeSpec().getEnumVals().toString(), is("[ARRAY, VALUES]"));
diff --git a/config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionTest.java b/config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionTest.java
index fa85f582e99..62eb0095dd3 100755
--- a/config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionTest.java
+++ b/config/src/test/java/com/yahoo/vespa/config/ConfigDefinitionTest.java
@@ -124,6 +124,8 @@ public class ConfigDefinitionTest {
def.addEnumDef("enumval", new EnumDef(List.of("FOO"), "FOO"));
def.addReferenceDef("refval");
def.addFileDef("fileval");
+ def.addPathDef("pathVal");
+ def.addOptionalPathDef("optionalPathVal");
def.addInnerArrayDef("innerarr");
def.addLeafMapDef("leafmap");
ConfigDefinition.ArrayDef intArray = def.arrayDef("intArray");
@@ -162,6 +164,8 @@ public class ConfigDefinitionTest {
assertVerify(def, "enumval", "FOO");
assertVerify(def, "refval", "foobar");
assertVerify(def, "fileval", "foobar");
+ assertVerify(def, "pathVal", "foobar");
+ assertVerify(def, "optionalPathVal", "foobar");
assertVerifyComplex(def, "innerarr");
assertVerifyComplex(def, "leafmap");
diff --git a/config/src/test/resources/configs/def-files/function-test.def b/config/src/test/resources/configs/def-files/function-test.def
index 4c4cb6bf08b..b97713b18f3 100644
--- a/config/src/test/resources/configs/def-files/function-test.def
+++ b/config/src/test/resources/configs/def-files/function-test.def
@@ -42,8 +42,10 @@ refval reference
refwithdef reference default=":parent:"
fileVal file
pathVal path
+optionalPathVal path optional
boolarr[] bool
+boolarrEmpty[] bool
intarr[] int
longarr[] long
doublearr[] double
diff --git a/config/src/test/resources/configs/function-test/variableaccess.txt b/config/src/test/resources/configs/function-test/variableaccess.txt
index 997de21750d..8c2cadcdbbc 100644
--- a/config/src/test/resources/configs/function-test/variableaccess.txt
+++ b/config/src/test/resources/configs/function-test/variableaccess.txt
@@ -14,7 +14,8 @@ enumwithdef BAR2
refval :parent:
refwithdef ":parent:"
fileVal "etc"
-pathVal "pom.xml"
+pathVal "function-test.def"
+optionalPathVal "function-test.def"
boolarr[1]
boolarr[0] false
intarr[0]
diff --git a/configdefinitions/src/vespa/dataplane-proxy.def b/configdefinitions/src/vespa/dataplane-proxy.def
index dd1d734a91c..eff5ae8c3a9 100644
--- a/configdefinitions/src/vespa/dataplane-proxy.def
+++ b/configdefinitions/src/vespa/dataplane-proxy.def
@@ -8,3 +8,6 @@ mtlsPort int
# Server certificate and key to be used when creating server socket
serverCertificate string
serverKey string
+
+# Host names that should be considered token endpoints
+tokenEndpoints[] string
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java b/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java
index 6cd344466e4..12469d7a3ef 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/BuilderGenerator.java
@@ -4,10 +4,12 @@ package com.yahoo.config.codegen;
import com.yahoo.config.codegen.LeafCNode.FileLeaf;
import com.yahoo.config.codegen.LeafCNode.ModelLeaf;
import com.yahoo.config.codegen.LeafCNode.PathLeaf;
+import com.yahoo.config.codegen.LeafCNode.OptionalPathLeaf;
import com.yahoo.config.codegen.LeafCNode.UrlLeaf;
import java.util.ArrayList;
import java.util.List;
+import java.util.Set;
import java.util.stream.Collectors;
import static com.yahoo.config.codegen.ConfigGenerator.boxedDataType;
@@ -89,7 +91,9 @@ public class BuilderGenerator {
private static String getUninitializedScalars(InnerCNode node) {
List<String> scalarsWithoutDefault = new ArrayList<>();
for (CNode child : node.getChildren()) {
- if (child instanceof LeafCNode && (!child.isArray && !child.isMap && ((LeafCNode) child).getDefaultValue() == null)) {
+ if (child instanceof LeafCNode
+ && (!child.isArray && !child.isMap && ((LeafCNode) child).getDefaultValue() == null)
+ && (! (child instanceof OptionalPathLeaf))) {
scalarsWithoutDefault.add("\"" + child.getName() + "\"");
}
}
@@ -109,7 +113,11 @@ public class BuilderGenerator {
} else if (node instanceof InnerCNode) {
return String.format("public %s %s = new %s();", builderType(node), node.getName(), builderType(node));
} else if (node instanceof LeafCNode) {
- return String.format("private %s %s = null;", boxedBuilderType((LeafCNode) node), node.getName());
+ String boxedBuilderType = boxedBuilderType((LeafCNode) node);
+ if (boxedBuilderType.startsWith("Optional<"))
+ return String.format("private %s %s = Optional.empty();", boxedBuilderType, node.getName());
+ else
+ return String.format("private %s %s = null;", boxedBuilderType, node.getName());
} else {
throw new IllegalStateException("Cannot produce builder field definition for node"); // Should not happen
}
@@ -207,6 +215,11 @@ public class BuilderGenerator {
private static String privateLeafNodeSetter(LeafCNode n) {
if ("String".equals(builderType(n)) || "FileReference".equals(builderType(n))) {
return "";
+ } else if ("Optional<FileReference>".equals(builderType(n))) {
+ return "\n\n" + //
+ "private Builder " + n.getName() + "(String " + INTERNAL_PREFIX + "value) {\n" + //
+ " return " + n.getName() + "(" + builderType(n) + ".of(" + INTERNAL_PREFIX + "value));\n" + //
+ "}";
} else {
return "\n\n" + //
"private Builder " + n.getName() + "(String " + INTERNAL_PREFIX + "value) {\n" + //
@@ -270,14 +283,24 @@ public class BuilderGenerator {
: "";
String bType = builderType(n);
- String stringSetter = "";
- if ( ! "String".equals(bType) && ! "FileReference".equals(bType) && ! "ModelReference".equals(bType)) {
+ String privateSetter = "";
+ if ( ! Set.of("String", "FileReference", "ModelReference", "Optional<FileReference>").contains(bType)) {
String type = boxedDataType(n);
if ("UrlReference".equals(bType))
type = bType;
- stringSetter = String.format("\nprivate Builder %s(String %svalue) {\n" +
- " return %s(%s.valueOf(%svalue));\n" + //
- "}", name, INTERNAL_PREFIX, name, type, INTERNAL_PREFIX);
+ //
+ privateSetter = String.format("""
+
+ private Builder %s(String %svalue) {
+ return %s(%s.valueOf(%svalue));
+ }""", name, INTERNAL_PREFIX, name, type, INTERNAL_PREFIX);
+ } else if ("Optional<FileReference>".equals(bType)) {
+ //
+ privateSetter = String.format("""
+
+ private Builder %s(FileReference %svalue) {
+ return %s(Optional.of(%svalue));
+ }""", name, INTERNAL_PREFIX, name, INTERNAL_PREFIX);
}
String getNullGuard = bType.equals(boxedBuilderType(n)) ? String.format(
@@ -286,7 +309,7 @@ public class BuilderGenerator {
return String.format("public Builder %s(%s %svalue) {%s\n" +
" %s = %svalue;\n" + //
"%s", name, bType, INTERNAL_PREFIX, getNullGuard, name, INTERNAL_PREFIX, signalInitialized) +
- " return this;" + "\n}\n" + stringSetter;
+ " return this;" + "\n}\n" + privateSetter;
}
}
@@ -307,6 +330,8 @@ public class BuilderGenerator {
return name + "(" + nodeClass(child) + ".toFileReferenceMap(config." + name + "));";
} else if (child instanceof PathLeaf) {
return name + "(config." + name + ".getFileReference());";
+ } else if (child instanceof OptionalPathLeaf) {
+ return name + "(config." + name + ".getFileReference());";
} else if (child instanceof UrlLeaf && isArray) {
return name + "(" + nodeClass(child) + ".toUrlReferences(config." + name + "));";
} else if (child instanceof UrlLeaf && isMap) {
@@ -408,6 +433,8 @@ public class BuilderGenerator {
return "String";
} else if (node instanceof PathLeaf) {
return "FileReference";
+ } else if (node instanceof OptionalPathLeaf) {
+ return "Optional<FileReference>";
} else if (node instanceof UrlLeaf) {
return "UrlReference";
} else if (node instanceof ModelLeaf) {
@@ -424,6 +451,8 @@ public class BuilderGenerator {
return "String";
} else if (node instanceof PathLeaf) {
return "FileReference";
+ } else if (node instanceof OptionalPathLeaf) {
+ return "Optional<FileReference>";
} else if (node instanceof UrlLeaf) {
return "UrlReference";
} else if (node instanceof ModelLeaf) {
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java b/configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java
index cb10ffdc2be..903d8dc0865 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java
@@ -7,6 +7,7 @@ import com.yahoo.config.codegen.LeafCNode.EnumLeaf;
import com.yahoo.config.codegen.LeafCNode.FileLeaf;
import com.yahoo.config.codegen.LeafCNode.IntegerLeaf;
import com.yahoo.config.codegen.LeafCNode.LongLeaf;
+import com.yahoo.config.codegen.LeafCNode.OptionalPathLeaf;
import com.yahoo.config.codegen.LeafCNode.PathLeaf;
import com.yahoo.config.codegen.LeafCNode.ReferenceLeaf;
import com.yahoo.config.codegen.LeafCNode.StringLeaf;
@@ -165,6 +166,8 @@ public class ConfigGenerator {
return name + " = LeafNodeVector.createFileNodeVector(builder." + name + ");";
} else if (child instanceof PathLeaf && isArray) {
return name + " = LeafNodeVector.createPathNodeVector(builder." + name + ");";
+ } else if (child instanceof OptionalPathLeaf && isArray) {
+ return name + " = LeafNodeVector.createOptionalPathNodeVector(builder." + name + ");";
} else if (child instanceof UrlLeaf && isArray) {
return name + " = LeafNodeVector.createUrlNodeVector(builder." + name + ");";
} else if (child instanceof ModelLeaf && isArray) {
@@ -175,6 +178,8 @@ public class ConfigGenerator {
return name + " = LeafNodeMaps.asFileNodeMap(builder." + name + ");";
} else if (child instanceof PathLeaf && isMap) {
return name + " = LeafNodeMaps.asPathNodeMap(builder." + name + ");";
+ } else if (child instanceof OptionalPathLeaf && isMap) {
+ return name + " = LeafNodeMaps.asOptionalPathNodeMap(builder." + name + ");";
} else if (child instanceof UrlLeaf && isMap) {
return name + " = LeafNodeMaps.asUrlNodeMap(builder." + name + ");";
} else if (child instanceof ModelLeaf && isMap) {
@@ -401,6 +406,8 @@ public class ConfigGenerator {
return "FileNode";
} else if (node instanceof PathLeaf) {
return "PathNode";
+ } else if (node instanceof OptionalPathLeaf) {
+ return "OptionalPathNode";
} else if (node instanceof UrlLeaf) {
return "UrlNode";
} else if (node instanceof ModelLeaf) {
@@ -431,6 +438,8 @@ public class ConfigGenerator {
return "FileReference";
} else if (node instanceof PathLeaf) {
return "Path";
+ } else if (node instanceof OptionalPathLeaf) {
+ return "Optional<Path>";
} else if (node instanceof UrlLeaf) {
return "File";
} else if (node instanceof ModelLeaf) {
@@ -456,6 +465,8 @@ public class ConfigGenerator {
return "Integer";
} else if (rawType.toLowerCase().equals(rawType)) {
return ConfiggenUtil.capitalize(rawType);
+ } else if (rawType.startsWith("Optional<")) {
+ return "Optional";
} else {
return rawType;
}
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/DefLine.java b/configgen/src/main/java/com/yahoo/config/codegen/DefLine.java
index 385c7f1979e..d6bffe349a8 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/DefLine.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/DefLine.java
@@ -7,6 +7,7 @@ import java.util.regex.Pattern;
public class DefLine {
private final static Pattern defaultPattern = Pattern.compile("^\\s*default\\s*=\\s*(\\S+)");
+ private final static Pattern optionalPattern = Pattern.compile("^\\s*optional\\s*");
private final static Pattern rangePattern = Pattern.compile("^\\s*range\\s*=\\s*([\\(\\[].*?[\\)\\]])");
private final static Pattern restartPattern = Pattern.compile("^\\s*restart\\s*");
private final static Pattern wordPattern = Pattern.compile("\\S+");
@@ -21,6 +22,7 @@ public class DefLine {
private final Type type = new Type();
private DefaultValue defaultValue = null;
+ private boolean optional = false;
private String range = null;
private boolean restart = false;
@@ -74,6 +76,9 @@ public class DefLine {
}
public Type getType() {
+ if (optional && type.name.equals("path"))
+ type.name = "optionalPath";
+
return type;
}
@@ -89,6 +94,8 @@ public class DefLine {
return enumArray;
}
+ public boolean isOptional() { return optional; }
+
/**
* Special function that searches through s and returns the index
* of the first occurrence of " that is not escaped.
@@ -114,6 +121,7 @@ public class DefLine {
private int parseOptions(CharSequence string) {
Matcher defaultNullMatcher = defaultNullPattern.matcher(string);
Matcher defaultMatcher = defaultPattern.matcher(string);
+ Matcher optionalMatcher = optionalPattern.matcher(string);
Matcher rangeMatcher = rangePattern.matcher(string);
Matcher restartMatcher = restartPattern.matcher(string);
@@ -133,6 +141,12 @@ public class DefLine {
defaultValue = new DefaultValue(deflt, type);
}
return defaultMatcher.end();
+ } else if (optionalMatcher.find()) {
+ if ( ! type.name.equals("path"))
+ throw new IllegalArgumentException("optional can only be used for 'path'");
+ optional = true;
+ type.name = "optionalPath";
+ return optionalMatcher.end();
} else if (rangeMatcher.find()) {
range = rangeMatcher.group(1);
return rangeMatcher.end();
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java b/configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java
index afd6acfbabf..c2470b0c703 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java
@@ -26,6 +26,7 @@ public abstract class LeafCNode extends CNode {
case "reference" -> new ReferenceLeaf(parent, name);
case "file" -> new FileLeaf(parent, name);
case "path" -> new PathLeaf(parent, name);
+ case "optionalPath" -> new OptionalPathLeaf(parent, name);
case "enum" -> new EnumLeaf(parent, name, type.enumArray);
case "url" -> new UrlLeaf(parent, name);
case "model" -> new ModelLeaf(parent, name);
@@ -217,6 +218,17 @@ public abstract class LeafCNode extends CNode {
}
}
+ public static class OptionalPathLeaf extends NoClassLeafCNode {
+ OptionalPathLeaf(InnerCNode parent, String name) {
+ super(parent, name);
+ }
+
+ @Override
+ public String getType() {
+ return "optionalPath";
+ }
+ }
+
public static class UrlLeaf extends NoClassLeafCNode {
UrlLeaf(InnerCNode parent, String name) {
super(parent, name);
diff --git a/configgen/src/test/java/com/yahoo/config/codegen/DefLineParsingTest.java b/configgen/src/test/java/com/yahoo/config/codegen/DefLineParsingTest.java
index 0e2f6cc4d05..bd71fe1b064 100644
--- a/configgen/src/test/java/com/yahoo/config/codegen/DefLineParsingTest.java
+++ b/configgen/src/test/java/com/yahoo/config/codegen/DefLineParsingTest.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.codegen;
+import java.util.Optional;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.*;
@@ -246,4 +247,24 @@ public class DefLineParsingTest {
assertTrue(r1.getRestart());
}
+ @Test
+ void testParsePath() {
+ DefLine l = new DefLine("somePath path");
+
+ assertEquals("somePath", l.getName());
+ assertNull(l.getDefault());
+ assertFalse(l.isOptional());
+ assertEquals("path", l.getType().getName());
+ }
+
+ @Test
+ void testParseOptionalPath() {
+ DefLine l = new DefLine("anOptionalPath path optional");
+
+ assertEquals("anOptionalPath", l.getName());
+ assertNull(l.getDefault());
+ assertTrue(l.isOptional());
+ assertEquals("optionalPath", l.getType().getName());
+ }
+
}
diff --git a/configgen/src/test/java/com/yahoo/config/codegen/DefParserTest.java b/configgen/src/test/java/com/yahoo/config/codegen/DefParserTest.java
index 45d1f21763c..e5227282c05 100644
--- a/configgen/src/test/java/com/yahoo/config/codegen/DefParserTest.java
+++ b/configgen/src/test/java/com/yahoo/config/codegen/DefParserTest.java
@@ -28,7 +28,7 @@ public class DefParserTest {
CNode root = new DefParser("test", new FileReader(defFile)).getTree();
assertNotNull(root);
CNode[] children = root.getChildren();
- assertEquals(37, children.length);
+ assertEquals(38, children.length);
int numGrandChildren = 0;
int numGreatGrandChildren = 0;
@@ -70,7 +70,7 @@ public class DefParserTest {
void testMd5Sum() throws IOException {
File defFile = new File(DEF_NAME);
CNode root = new DefParser("test", new FileReader(defFile)).getTree();
- assertEquals("0501f9e2c4ecc8c283e100e0b1178ca4", root.defMd5);
+ assertEquals("ee37973499305fde315da46256e64b2e", root.defMd5);
}
@Test
diff --git a/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java b/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java
index 428576e340f..c3145c03fff 100644
--- a/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java
+++ b/configgen/src/test/java/com/yahoo/config/codegen/JavaClassBuilderTest.java
@@ -120,7 +120,7 @@ public class JavaClassBuilderTest {
}
for (int i = 0; i < referenceClassLines.size(); i++) {
if (configClassLines.length <= i)
- fail("Missing lines i generated config class. First missing line:\n" + referenceClassLines.get(i));
+ fail("Missing lines in generated config class. First missing line:\n" + referenceClassLines.get(i));
assertEquals(referenceClassLines.get(i), configClassLines[i], "Line " + i);
}
}
diff --git a/configgen/src/test/java/com/yahoo/config/codegen/NormalizedDefinitionTest.java b/configgen/src/test/java/com/yahoo/config/codegen/NormalizedDefinitionTest.java
index 57b3ed962eb..18608102ffa 100644
--- a/configgen/src/test/java/com/yahoo/config/codegen/NormalizedDefinitionTest.java
+++ b/configgen/src/test/java/com/yahoo/config/codegen/NormalizedDefinitionTest.java
@@ -70,7 +70,7 @@ public class NormalizedDefinitionTest {
}
assertNotNull(out);
- assertEquals(75, out.size());
+ assertEquals(76, out.size());
assertNotNull(fileReader);
fileReader.close();
diff --git a/configgen/src/test/resources/allfeatures.reference b/configgen/src/test/resources/allfeatures.reference
index b7a79f663e7..79508b3a25f 100644
--- a/configgen/src/test/resources/allfeatures.reference
+++ b/configgen/src/test/resources/allfeatures.reference
@@ -35,7 +35,7 @@ import com.yahoo.config.*;
*/
public final class AllfeaturesConfig extends ConfigInstance {
- public final static String CONFIG_DEF_MD5 = "0501f9e2c4ecc8c283e100e0b1178ca4";
+ public final static String CONFIG_DEF_MD5 = "ee37973499305fde315da46256e64b2e";
public final static String CONFIG_DEF_NAME = "allfeatures";
public final static String CONFIG_DEF_NAMESPACE = "configgen";
public final static String[] CONFIG_DEF_SCHEMA = {
@@ -56,6 +56,7 @@ public final class AllfeaturesConfig extends ConfigInstance {
"refwithdef reference default=\":parent:\"",
"fileVal file",
"pathVal path",
+ "optionalPathVal path optional",
"urlVal url",
"modelVal model",
"boolarr[] bool",
@@ -130,6 +131,7 @@ public final class AllfeaturesConfig extends ConfigInstance {
private String refwithdef = null;
private String fileVal = null;
private FileReference pathVal = null;
+ private Optional<FileReference> optionalPathVal = Optional.empty();
private UrlReference urlVal = null;
private ModelReference modelVal = null;
public List<Boolean> boolarr = new ArrayList<>();
@@ -171,6 +173,7 @@ public final class AllfeaturesConfig extends ConfigInstance {
refwithdef(config.refwithdef());
fileVal(config.fileVal().value());
pathVal(config.pathVal.getFileReference());
+ optionalPathVal(config.optionalPathVal.getFileReference());
urlVal(config.urlVal.getUrlReference());
modelVal(config.modelVal.getModelReference());
boolarr(config.boolarr());
@@ -231,6 +234,8 @@ public final class AllfeaturesConfig extends ConfigInstance {
fileVal(__superior.fileVal);
if (__superior.pathVal != null)
pathVal(__superior.pathVal);
+ if (__superior.optionalPathVal != null)
+ optionalPathVal(__superior.optionalPathVal);
if (__superior.urlVal != null)
urlVal(__superior.urlVal);
if (__superior.modelVal != null)
@@ -412,6 +417,17 @@ public final class AllfeaturesConfig extends ConfigInstance {
}
+ public Builder optionalPathVal(Optional<FileReference> __value) {
+ if (__value == null) throw new IllegalArgumentException("Null value is not allowed.");
+ optionalPathVal = __value;
+ __uninitialized.remove("optionalPathVal");
+ return this;
+ }
+
+ private Builder optionalPathVal(FileReference __value) {
+ return optionalPathVal(Optional.of(__value));
+ }
+
public Builder urlVal(UrlReference __value) {
if (__value == null) throw new IllegalArgumentException("Null value is not allowed.");
urlVal = __value;
@@ -759,6 +775,7 @@ public final class AllfeaturesConfig extends ConfigInstance {
private final ReferenceNode refwithdef;
private final FileNode fileVal;
private final PathNode pathVal;
+ private final OptionalPathNode optionalPathVal;
private final UrlNode urlVal;
private final ModelNode modelVal;
private final LeafNodeVector<Boolean, BooleanNode> boolarr;
@@ -822,6 +839,8 @@ public final class AllfeaturesConfig extends ConfigInstance {
new FileNode() : new FileNode(builder.fileVal);
pathVal = (builder.pathVal == null) ?
new PathNode() : new PathNode(builder.pathVal);
+ optionalPathVal = (builder.optionalPathVal == null) ?
+ new OptionalPathNode() : new OptionalPathNode(builder.optionalPathVal);
urlVal = (builder.urlVal == null) ?
new UrlNode() : new UrlNode(builder.urlVal);
modelVal = (builder.modelVal == null) ?
@@ -960,6 +979,13 @@ public final class AllfeaturesConfig extends ConfigInstance {
}
/**
+ * @return allfeatures.optionalPathVal
+ */
+ public Optional<Path> optionalPathVal() {
+ return optionalPathVal.value();
+ }
+
+ /**
* @return allfeatures.urlVal
*/
public File urlVal() {
diff --git a/configgen/src/test/resources/configgen.allfeatures.def b/configgen/src/test/resources/configgen.allfeatures.def
index 1f93e29b73b..eee39dc18f3 100644
--- a/configgen/src/test/resources/configgen.allfeatures.def
+++ b/configgen/src/test/resources/configgen.allfeatures.def
@@ -39,6 +39,7 @@ refVal reference
refwithdef reference default=":parent:"
fileVal file
pathVal path
+optionalPathVal path optional
urlVal url
modelVal model
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java
index 5d1d8da3648..f3dc095193b 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/LogRetriever.java
@@ -2,10 +2,13 @@
package com.yahoo.vespa.config.server.http;
import ai.vespa.util.http.hc5.VespaHttpClientBuilder;
+import com.yahoo.container.jdisc.EmptyResponse;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.yolean.Exceptions;
import org.apache.hc.client5.http.classic.methods.HttpGet;
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
+import org.apache.hc.core5.util.Timeout;
+
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
@@ -18,7 +21,10 @@ import java.util.Optional;
*/
public class LogRetriever {
- private final CloseableHttpClient httpClient = VespaHttpClientBuilder.custom().buildClient();
+ private final CloseableHttpClient httpClient = VespaHttpClientBuilder.custom()
+ .connectTimeout(Timeout.ofSeconds(5))
+ .socketTimeout(Timeout.ofSeconds(45))
+ .buildClient();
@SuppressWarnings("deprecation")
public HttpResponse getLogs(String logServerUri, Optional<Instant> deployTime) {
@@ -29,12 +35,8 @@ public class LogRetriever {
// It takes some time before nodes are up after first-time deployment, return empty log for up to 2 minutes
// if getting logs fail
if (deployTime.isPresent() && Instant.now().isBefore(deployTime.get().plus(Duration.ofMinutes(2))))
- return new HttpResponse(200) {
- @Override
- public void render(OutputStream outputStream) throws IOException {
- outputStream.write("".getBytes(StandardCharsets.UTF_8));
- }
- };
+ return new EmptyResponse();
+
return HttpErrorResponse.internalServerError("Failed to get logs: " + Exceptions.toMessageString(e));
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/ReindexingResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/ReindexingResponse.java
index 3ad09ea6345..47619536ae6 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/ReindexingResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/ReindexingResponse.java
@@ -5,9 +5,12 @@ import com.yahoo.jdisc.Response;
import com.yahoo.slime.Cursor;
import com.yahoo.vespa.config.server.application.ApplicationReindexing;
import com.yahoo.vespa.config.server.application.ClusterReindexing;
+import com.yahoo.vespa.config.server.application.ClusterReindexing.State;
import com.yahoo.vespa.config.server.http.JSONResponse;
+import java.time.Instant;
import java.util.Map;
+import java.util.Optional;
import java.util.Set;
public class ReindexingResponse extends JSONResponse {
@@ -23,32 +26,35 @@ public class ReindexingResponse extends JSONResponse {
for (String type : types) {
Cursor statusObject = readyObject.setObject(type);
+ Instant readyAt = Instant.EPOCH;
+ State state = null;
if (reindexing.clusters().containsKey(cluster)) {
- if (reindexing.clusters().get(cluster).pending().containsKey(type))
+ if (reindexing.clusters().get(cluster).pending().containsKey(type)) {
pendingObject.setLong(type, reindexing.clusters().get(cluster).pending().get(type));
-
- if (reindexing.clusters().get(cluster).ready().containsKey(type))
- setStatus(statusObject, reindexing.clusters().get(cluster).ready().get(type));
+ state = State.PENDING;
+ }
+
+ if (reindexing.clusters().get(cluster).ready().containsKey(type)) {
+ ApplicationReindexing.Status readyStatus = reindexing.clusters().get(cluster).ready().get(type);
+ readyAt = readyStatus.ready();
+ statusObject.setLong("readyMillis", readyStatus.ready().toEpochMilli());
+ statusObject.setDouble("speed", readyStatus.speed());
+ statusObject.setString("cause", readyStatus.cause());
+ }
}
if (clusters.containsKey(cluster))
- if (clusters.get(cluster).documentTypeStatus().containsKey(type))
- setStatus(statusObject, clusters.get(cluster).documentTypeStatus().get(type));
+ if (clusters.get(cluster).documentTypeStatus().containsKey(type)) {
+ ClusterReindexing.Status status = clusters.get(cluster).documentTypeStatus().get(type);
+ statusObject.setLong("startedMillis", status.startedAt().toEpochMilli());
+ status.endedAt().ifPresent(endedAt -> statusObject.setLong("endedMillis", endedAt.toEpochMilli()));
+ if (status.startedAt().isAfter(readyAt) && status.state().isPresent()) state = status.state().get();
+ status.message().ifPresent(message -> statusObject.setString("message", message));
+ status.progress().ifPresent(progress -> statusObject.setDouble("progress", progress));
+ }
+ if (readyAt != Instant.EPOCH && state == null) state = State.PENDING;
+ if (state != null) statusObject.setString("state", state.asString());
}
});
}
- private static void setStatus(Cursor object, ApplicationReindexing.Status readyStatus) {
- object.setLong("readyMillis", readyStatus.ready().toEpochMilli());
- object.setDouble("speed", readyStatus.speed());
- object.setString("cause", readyStatus.cause());
- }
-
- private static void setStatus(Cursor object, ClusterReindexing.Status status) {
- object.setLong("startedMillis", status.startedAt().toEpochMilli());
- status.endedAt().ifPresent(endedAt -> object.setLong("endedMillis", endedAt.toEpochMilli()));
- status.state().map(ClusterReindexing.State::asString).ifPresent(state -> object.setString("state", state));
- status.message().ifPresent(message -> object.setString("message", message));
- status.progress().ifPresent(progress -> object.setDouble("progress", progress));
- }
-
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java
index 7a0ab6d2a23..951ef9df2f4 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java
@@ -318,7 +318,8 @@ public class ApplicationHandlerTest {
" \"bar\": {" +
" \"readyMillis\": " + (now - 1000) + ", " +
" \"speed\": 1.0," +
- " \"cause\": \"reindexing\"" +
+ " \"cause\": \"reindexing\"," +
+ " \"state\": \"pending\"" +
" }" +
" }" +
" }," +
@@ -328,17 +329,20 @@ public class ApplicationHandlerTest {
" \"bar\": {" +
" \"readyMillis\": " + now + ", " +
" \"speed\": 0.1," +
- " \"cause\": \"reindexing\"" +
+ " \"cause\": \"reindexing\"," +
+ " \"state\": \"pending\"" +
" }," +
" \"bax\": {" +
" \"readyMillis\": " + (now - 1000) + ", " +
" \"speed\": 1.0," +
- " \"cause\": \"reindexing\"" +
+ " \"cause\": \"reindexing\"," +
+ " \"state\": \"pending\"" +
" }," +
" \"baz\": {" +
" \"readyMillis\": " + now + ", " +
" \"speed\": 0.1," +
- " \"cause\": \"reindexing\"" +
+ " \"cause\": \"reindexing\"," +
+ " \"state\": \"pending\"" +
" }" +
" }" +
" }" +
@@ -579,9 +583,9 @@ public class ApplicationHandlerTest {
"baz": {
"startedMillis": 124456,
"endedMillis": 125456,
- "state": "failed",
"message": "message",
- "progress": 0.1
+ "progress": 0.1,
+ "state": "failed"
}
}
},
@@ -590,7 +594,9 @@ public class ApplicationHandlerTest {
"bar": 123
},
"ready": {
- "bar": {},
+ "bar": {
+ "state": "pending"
+ },
"hax": {}
}
},
@@ -606,9 +612,9 @@ public class ApplicationHandlerTest {
"cause": "reindexing",
"startedMillis": 124456,
"endedMillis": 125456,
- "state": "failed",
"message": "message",
- "progress": 0.1
+ "progress": 0.1,
+ "state": "failed"
}
}
}
diff --git a/container-core/src/main/java/com/yahoo/container/handler/threadpool/ContainerThreadpoolImpl.java b/container-core/src/main/java/com/yahoo/container/handler/threadpool/ContainerThreadpoolImpl.java
index 83e9e496411..303febe1e39 100644
--- a/container-core/src/main/java/com/yahoo/container/handler/threadpool/ContainerThreadpoolImpl.java
+++ b/container-core/src/main/java/com/yahoo/container/handler/threadpool/ContainerThreadpoolImpl.java
@@ -50,7 +50,7 @@ public class ContainerThreadpoolImpl extends AbstractComponent implements AutoCl
ThreadPoolMetric threadPoolMetric = new ThreadPoolMetric(metric, name);
WorkerCompletionTimingThreadPoolExecutor executor =
new WorkerCompletionTimingThreadPoolExecutor(minThreads, maxThreads,
- (int)config.keepAliveTime() * 1000, TimeUnit.MILLISECONDS,
+ (long) config.keepAliveTime() * 1000, TimeUnit.MILLISECONDS,
createQueue(queueSize),
ThreadFactoryFactory.getThreadFactory(name),
threadPoolMetric);
diff --git a/container-core/src/test/java/com/yahoo/container/jdisc/HttpRequestTestCase.java b/container-core/src/test/java/com/yahoo/container/jdisc/HttpRequestTestCase.java
index ca3c24113ff..4a71b28d578 100644
--- a/container-core/src/test/java/com/yahoo/container/jdisc/HttpRequestTestCase.java
+++ b/container-core/src/test/java/com/yahoo/container/jdisc/HttpRequestTestCase.java
@@ -19,7 +19,7 @@ import com.yahoo.text.Utf8;
/**
* API control of HttpRequest.
*
- * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a>
+ * @author Steinar Knutsen
*/
public class HttpRequestTestCase {
private static final String X_RAY_YANKEE_ZULU = "x-ray yankee zulu";
diff --git a/container-core/src/test/java/com/yahoo/container/jdisc/HttpResponseTestCase.java b/container-core/src/test/java/com/yahoo/container/jdisc/HttpResponseTestCase.java
index 963c98c2469..388f2e83b31 100644
--- a/container-core/src/test/java/com/yahoo/container/jdisc/HttpResponseTestCase.java
+++ b/container-core/src/test/java/com/yahoo/container/jdisc/HttpResponseTestCase.java
@@ -17,7 +17,7 @@ import com.yahoo.text.Utf8;
/**
* API test for HttpResponse.
*
- * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a>
+ * @author Steinar Knutsen
*/
public class HttpResponseTestCase {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/HttpResponseTestCase.java b/container-core/src/test/java/com/yahoo/jdisc/http/HttpResponseTestCase.java
index 09682e73630..2e9c43bc6c7 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/HttpResponseTestCase.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/HttpResponseTestCase.java
@@ -1,13 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http;
-import com.yahoo.jdisc.Container;
-import com.yahoo.jdisc.Request;
import com.yahoo.jdisc.Response;
-import com.yahoo.jdisc.service.CurrentContainer;
import org.junit.jupiter.api.Test;
-import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -15,7 +11,6 @@ import java.util.List;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
/**
* @author Simon Thoresen Hult
@@ -24,7 +19,7 @@ public class HttpResponseTestCase {
@Test
void requireThatAccessorsWork() throws Exception {
- final HttpResponse response = newResponse(6, "foo");
+ final HttpResponse response = newResponse(6);
assertEquals(6, response.getStatus());
assertEquals("foo", response.getMessage());
assertNull(response.getError());
@@ -83,7 +78,7 @@ public class HttpResponseTestCase {
@Test
void requireThatCookieHeaderCanBeEncoded() throws Exception {
- final HttpResponse response = newResponse(69, "foo");
+ final HttpResponse response = newResponse(69);
final List<Cookie> cookies = Collections.singletonList(new Cookie("foo", "bar"));
response.encodeSetCookieHeader(cookies);
final List<String> headers = response.headers().get(HttpHeaders.Names.SET_COOKIE);
@@ -93,7 +88,7 @@ public class HttpResponseTestCase {
@Test
void requireThatMultipleCookieHeadersCanBeEncoded() throws Exception {
- final HttpResponse response = newResponse(69, "foo");
+ final HttpResponse response = newResponse(69);
final List<Cookie> cookies = Arrays.asList(new Cookie("foo", "bar"), new Cookie("baz", "cox"));
response.encodeSetCookieHeader(cookies);
final List<String> headers = response.headers().get(HttpHeaders.Names.SET_COOKIE);
@@ -104,7 +99,7 @@ public class HttpResponseTestCase {
@Test
void requireThatCookieHeaderCanBeDecoded() throws Exception {
- final HttpResponse response = newResponse(69, "foo");
+ final HttpResponse response = newResponse(69);
final List<Cookie> cookies = Collections.singletonList(new Cookie("foo", "bar"));
response.encodeSetCookieHeader(cookies);
assertEquals(cookies, response.decodeSetCookieHeader());
@@ -112,25 +107,14 @@ public class HttpResponseTestCase {
@Test
void requireThatMultipleCookieHeadersCanBeDecoded() throws Exception {
- final HttpResponse response = newResponse(69, "foo");
+ final HttpResponse response = newResponse(69);
final List<Cookie> cookies = Arrays.asList(new Cookie("foo", "bar"), new Cookie("baz", "cox"));
response.encodeSetCookieHeader(cookies);
assertEquals(cookies, response.decodeSetCookieHeader());
}
- private static HttpResponse newResponse(final int status, final String message) throws Exception {
- final Request request = HttpRequest.newServerRequest(
- mockContainer(),
- new URI("http://localhost:1234/status.html"),
- HttpRequest.Method.GET,
- HttpRequest.Version.HTTP_1_1);
- return HttpResponse.newInstance(status, message);
+ private static HttpResponse newResponse(final int status) {
+ return HttpResponse.newInstance(status, "foo");
}
- private static CurrentContainer mockContainer() {
- final CurrentContainer currentContainer = mock(CurrentContainer.class);
- when(currentContainer.newReference(any(URI.class))).thenReturn(mock(Container.class));
- when(currentContainer.newReference(any(URI.class), any(Object.class))).thenReturn(mock(Container.class));
- return currentContainer;
- }
}
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/DataplaneProxyService.java b/container-disc/src/main/java/com/yahoo/container/jdisc/DataplaneProxyService.java
index 74e6954e1e1..ed3149d5406 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/DataplaneProxyService.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/DataplaneProxyService.java
@@ -11,10 +11,12 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
+import java.util.List;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
/**
* Configures a data plane proxy. Currently using Nginx.
@@ -105,8 +107,8 @@ public class DataplaneProxyService extends AbstractComponent {
serverKeyFile,
config.mtlsPort(),
config.tokenPort(),
- root
- ));
+ config.tokenEndpoints(),
+ root));
if (configChanged && state == NginxState.RUNNING) {
changeState(NginxState.RELOAD_REQUIRED);
}
@@ -194,6 +196,7 @@ public class DataplaneProxyService extends AbstractComponent {
Path serverKey,
int vespaMtlsPort,
int vespaTokenPort,
+ List<String> tokenEndpoints,
Path root) {
try {
@@ -205,6 +208,10 @@ public class DataplaneProxyService extends AbstractComponent {
nginxTemplate = replace(nginxTemplate, "vespa_mtls_port", Integer.toString(vespaMtlsPort));
nginxTemplate = replace(nginxTemplate, "vespa_token_port", Integer.toString(vespaTokenPort));
nginxTemplate = replace(nginxTemplate, "prefix", root.toString());
+ String tokenmapping = tokenEndpoints.stream()
+ .map(" %s vespatoken;"::formatted)
+ .collect(Collectors.joining("\n"));
+ nginxTemplate = replace(nginxTemplate, "vespa_token_endpoints", tokenmapping);
// TODO: verify that all template vars have been expanded
return nginxTemplate;
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index 57d455a02f0..ffae12867fd 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -454,6 +454,7 @@
"public boolean isLocked()",
"public int getTermCount()",
"public java.util.Optional extractSingleChild()",
+ "public boolean acceptsItemsOfType(com.yahoo.prelude.query.Item$ItemType)",
"public bridge synthetic com.yahoo.prelude.query.Item clone()",
"public bridge synthetic java.lang.Object clone()"
],
@@ -509,6 +510,7 @@
"public com.yahoo.prelude.query.Item$ItemType getItemType()",
"public java.lang.String getName()",
"protected void adding(com.yahoo.prelude.query.Item)",
+ "public boolean acceptsItemsOfType(com.yahoo.prelude.query.Item$ItemType)",
"public static boolean acceptsChildrenOfType(com.yahoo.prelude.query.Item$ItemType)"
],
"fields" : [ ]
@@ -1109,6 +1111,7 @@
"public void setExplicit(boolean)",
"public boolean isExplicit()",
"public void addItem(com.yahoo.prelude.query.Item)",
+ "public boolean acceptsItemsOfType(com.yahoo.prelude.query.Item$ItemType)",
"public void addItem(int, com.yahoo.prelude.query.Item)",
"public com.yahoo.prelude.query.Item setItem(int, com.yahoo.prelude.query.Item)",
"public java.util.Optional extractSingleChild()",
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java b/container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java
index 30c02944f19..407e763c1e5 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/CompositeItem.java
@@ -311,6 +311,10 @@ public abstract class CompositeItem extends Item {
return getItemCount() == 1 ? Optional.of(getItem(0)) : Optional.empty();
}
+ public boolean acceptsItemsOfType(ItemType itemType) {
+ return true;
+ }
+
/** Handles mutator calls correctly */
private static class ListIteratorWrapper implements ListIterator<Item> {
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/EquivItem.java b/container-search/src/main/java/com/yahoo/prelude/query/EquivItem.java
index 43258db8963..de317ac317e 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/EquivItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/EquivItem.java
@@ -86,6 +86,11 @@ public class EquivItem extends CompositeTaggableItem {
acceptsChildrenOfType(item.getItemType()));
}
+ @Override
+ public boolean acceptsItemsOfType(ItemType itemType) {
+ return acceptsChildrenOfType(itemType);
+ }
+
/** Returns true if this accepts child items of the given type */
public static boolean acceptsChildrenOfType(ItemType itemType) {
return itemType == ItemType.WORD ||
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java b/container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java
index 379dfd6bb30..09be603740f 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/PhraseItem.java
@@ -93,6 +93,15 @@ public class PhraseItem extends CompositeIndexedItem {
}
@Override
+ public boolean acceptsItemsOfType(ItemType itemType) {
+ return itemType == ItemType.WORD ||
+ itemType == ItemType.WORD_ALTERNATIVES ||
+ itemType == ItemType.INT ||
+ itemType == ItemType.EXACT ||
+ itemType == ItemType.PHRASE;
+ }
+
+ @Override
public void addItem(int index, Item item) {
if (item instanceof WordItem || item instanceof PhraseSegmentItem) {
addIndexedItem(index, (IndexedItem) item);
@@ -115,8 +124,7 @@ public class PhraseItem extends CompositeIndexedItem {
return setIndexedItem(index, (IndexedItem) item);
} else if (item instanceof IntItem) {
return setIndexedItem(index, convertIntToWord(item));
- } else if (item instanceof PhraseItem) {
- PhraseItem phrase = (PhraseItem) item;
+ } else if (item instanceof PhraseItem phrase) {
Iterator<Item> i = phrase.getItemIterator();
// we assume we don't try to add empty phrases
IndexedItem firstItem = (IndexedItem) i.next();
diff --git a/container-search/src/main/java/com/yahoo/prelude/semantics/engine/Evaluation.java b/container-search/src/main/java/com/yahoo/prelude/semantics/engine/Evaluation.java
index d5ed89b0724..76f42bf0d9f 100644
--- a/container-search/src/main/java/com/yahoo/prelude/semantics/engine/Evaluation.java
+++ b/container-search/src/main/java/com/yahoo/prelude/semantics/engine/Evaluation.java
@@ -199,9 +199,8 @@ public class Evaluation {
CompositeItem converted;
if (item instanceof AndSegmentItem) {
converted = new AndItem();
- } else if (item instanceof PhraseSegmentItem) {
+ } else if (item instanceof PhraseSegmentItem old) {
PhraseItem p = new PhraseItem();
- PhraseSegmentItem old = (PhraseSegmentItem) item;
p.setIndexName(old.getIndexName());
converted = p;
} else {
@@ -250,7 +249,7 @@ public class Evaluation {
* @param index the index at which to insert these into the parent
* @param desiredParentType the desired type of the composite which contains items when this returns
*/
- public void insertItems(List<Item> items, CompositeItem parent, int index, TermType desiredParentType) {
+ public void insertItems(List<Item> items, CompositeItem parent, int index, TermType desiredParentType, boolean replacing) {
if (isEmpty(parent)) {
if (items.size() == 1 && desiredParentType.hasItemClass(items.get(0).getClass())) {
query.getModel().getQueryTree().setRoot(items.get(0));
@@ -286,7 +285,7 @@ public class Evaluation {
addItem(parent, index, items.get(0), desiredParentType);
}
else {
- insertWithDesiredParentType(items, parent, desiredParentType);
+ insertWithDesiredParentType(items, index, parent, desiredParentType, replacing);
}
}
@@ -329,36 +328,50 @@ public class Evaluation {
/** A special purpose check used to simplify the above */
private boolean equalIndexNameIfParentIsPhrase(List<Item> items, CompositeItem parent) {
- if ( ! (parent instanceof PhraseItem)) return true;
- var phrase = (PhraseItem)parent;
+ if ( ! (parent instanceof PhraseItem phrase)) return true;
for (Item item : items) {
- if ( ! (item instanceof IndexedItem)) continue;
- var indexedItem = (IndexedItem)item;
+ if ( ! (item instanceof IndexedItem indexedItem)) continue;
if (! indexedItem.getIndexName().equals(phrase.getIndexName())) return false;
}
return true;
}
- private void insertWithDesiredParentType(List<Item> items, CompositeItem parent, TermType desiredParentType) {
+ private void insertWithDesiredParentType(List<Item> items, int index, CompositeItem parent, TermType desiredParentType, boolean replacing) {
CompositeItem parentsParent = parent.getParent();
CompositeItem newParent = newParent(desiredParentType);
- if (! (parentsParent instanceof QueryTree) && parentsParent.getItemType() == newParent.getItemType()) { // Collapse
+ if (parentsParent != null && (! (parentsParent instanceof QueryTree) && parentsParent.getItemType() == newParent.getItemType())) { // Collapse
newParent = parentsParent;
}
for (Item item : items)
newParent.addItem(item);
- if (desiredParentType == TermType.EQUIV || desiredParentType == TermType.PHRASE) { // insert new parent below the current
- parent.addItem(newParent);
+ Item current = parent;
+ if (parent instanceof QueryTree && parent.getItemCount() > 0)
+ current = parent.getItem(0);
+ if (current instanceof CompositeItem && !replacing) { // insert new parent below the current
+ if (parent.getItemCount() > index) {
+ var combinedItem = combineItems(newParent, parent.getItem(index), desiredParentType);
+ parent.setItem(index, combinedItem);
+ }
+ else{
+ parent.addItem(newParent);
+ }
+ }
+ else if (newParent.acceptsItemsOfType(current.getItemType())) { // insert new parent above the current
+ newParent.addItem(current);
+ if (newParent != parentsParent) { // Insert new parent as root or child of old parent's parent
+ if (parentsParent != null)
+ parentsParent.setItem(parentsParent.getItemIndex(current), newParent);
+ else
+ parent.setItem(0, newParent);
+ }
}
- else { // insert new parent above the current
- newParent.addItem(parent);
- if (newParent != parentsParent) // Insert new parent as root or child of old parent's parent
- parentsParent.setItem(parentsParent.getItemIndex(parent), newParent);
+ else {
+ ((CompositeItem)current).addItem(newParent); // not an acceptable child -> composite
}
}
@@ -369,8 +382,7 @@ public class Evaluation {
private Item combineItems(Item first, Item second, TermType termType) {
if (first instanceof NullItem) {
return second;
- } else if (first instanceof NotItem) {
- NotItem notItem = (NotItem)first;
+ } else if (first instanceof NotItem notItem) {
if (termType == TermType.NOT) {
notItem.addNegativeItem(second);
}
@@ -380,8 +392,7 @@ public class Evaluation {
}
return notItem;
}
- else if (first instanceof CompositeItem) {
- CompositeItem composite = (CompositeItem)first;
+ else if (first instanceof CompositeItem composite) {
CompositeItem combined = createType(termType);
if (combined.getClass().equals(composite.getClass())) {
composite.addItem(second);
@@ -419,6 +430,9 @@ public class Evaluation {
phrase.setIndexName(index);
return phrase;
}
+ else if ((item instanceof CompositeItem) && ((CompositeItem)item).getItemCount() == 1) {
+ return makeEquivCompatible(((CompositeItem)item).getItem(0));
+ }
else {
return item; // Compatible, or can't be made so
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/semantics/engine/RuleEvaluation.java b/container-search/src/main/java/com/yahoo/prelude/semantics/engine/RuleEvaluation.java
index efb02034db9..214dec3920c 100644
--- a/container-search/src/main/java/com/yahoo/prelude/semantics/engine/RuleEvaluation.java
+++ b/container-search/src/main/java/com/yahoo/prelude/semantics/engine/RuleEvaluation.java
@@ -270,8 +270,8 @@ public class RuleEvaluation {
* @param index the index at which to insert this into the parent
* @param termType the kind of item to index, this decides the resulting structure
*/
- public void insertItems(List<Item> items, CompositeItem parent, int index, TermType termType) {
- evaluation.insertItems(items, parent, index, termType);
+ public void insertItems(List<Item> items, CompositeItem parent, int index, TermType termType, boolean replacing) {
+ evaluation.insertItems(items, parent, index, termType, replacing);
}
/** Returns a read-only view of the items of this */
diff --git a/container-search/src/main/java/com/yahoo/prelude/semantics/rule/LiteralPhraseProduction.java b/container-search/src/main/java/com/yahoo/prelude/semantics/rule/LiteralPhraseProduction.java
index af7aab23b85..aafb740de1d 100644
--- a/container-search/src/main/java/com/yahoo/prelude/semantics/rule/LiteralPhraseProduction.java
+++ b/container-search/src/main/java/com/yahoo/prelude/semantics/rule/LiteralPhraseProduction.java
@@ -50,16 +50,8 @@ public class LiteralPhraseProduction extends TermProduction {
for (String term : terms)
newPhrase.addItem(new WordItem(term));
- if (replacing) {
- Match matched = e.getNonreferencedMatch(0);
- insertMatch(e, matched, List.of(newPhrase), offset);
- }
- else {
- newPhrase.setWeight(getWeight());
- if (e.getTraceLevel() >= 6)
- e.trace(6, "Adding '" + newPhrase + "'");
- e.addItems(List.of(newPhrase), getTermType());
- }
+ Match matched = e.getNonreferencedMatch(0);
+ insertMatch(e, matched, List.of(newPhrase), offset);
}
public String toInnerTermString() {
diff --git a/container-search/src/main/java/com/yahoo/prelude/semantics/rule/TermProduction.java b/container-search/src/main/java/com/yahoo/prelude/semantics/rule/TermProduction.java
index 41d15bc9262..932908a32dc 100644
--- a/container-search/src/main/java/com/yahoo/prelude/semantics/rule/TermProduction.java
+++ b/container-search/src/main/java/com/yahoo/prelude/semantics/rule/TermProduction.java
@@ -58,7 +58,7 @@ public abstract class TermProduction extends Production {
}
/**
- * Inserts newItem at the position of this match
+ * Inserts newItems at the position of this match
* TODO: Move to ruleevaluation
*/
protected void insertMatch(RuleEvaluation e, Match matched, List<Item> newItems, int offset) {
@@ -73,7 +73,7 @@ public abstract class TermProduction extends Production {
insertPosition = matched.getParent().getItemCount();
}
- e.insertItems(newItems, matched.getParent(), insertPosition, getTermType());
+ e.insertItems(newItems, matched.getParent(), insertPosition, getTermType(), replacing);
if (e.getTraceLevel() >= 6)
e.trace(6, "Inserted items '" + newItems + "' at position " + insertPosition + " producing " +
e.getEvaluation().getQuery().getModel().getQueryTree());
diff --git a/container-search/src/test/java/com/yahoo/prelude/semantics/test/ExpansionTestCase.java b/container-search/src/test/java/com/yahoo/prelude/semantics/test/ExpansionTestCase.java
index 13b65716ffc..f83ad354c89 100644
--- a/container-search/src/test/java/com/yahoo/prelude/semantics/test/ExpansionTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/semantics/test/ExpansionTestCase.java
@@ -24,10 +24,11 @@ public class ExpansionTestCase extends RuleBaseAbstractTestCase {
assertSemantics("EQUIV testfield:e1 testfield:e2 testfield:e3", "testfield:foo");
}
+ // No equiv: Not optimal, but not wrong either
@Test
void testEquivExpansion3() {
assertSemantics("AND testfield:e1 testfield:e2 testfield:e3 testfield:e1 testfield:e2 testfield:e3",
- "testfield:foo testfield:bar");
+ "testfield:foo testfield:bar");
}
}
diff --git a/container-search/src/test/java/com/yahoo/prelude/semantics/test/OrPhraseTestCase.java b/container-search/src/test/java/com/yahoo/prelude/semantics/test/OrPhraseTestCase.java
index 045d8698547..370ca20e612 100644
--- a/container-search/src/test/java/com/yahoo/prelude/semantics/test/OrPhraseTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/semantics/test/OrPhraseTestCase.java
@@ -15,12 +15,26 @@ public class OrPhraseTestCase extends RuleBaseAbstractTestCase {
@Test
void testReplacing1() {
assertSemantics("OR title:\"software engineer\" (AND new york)", "software engineer new york");
- assertSemantics("title:\"software engineer\"", "software engineer"); // Skip or when there is nothing else
+ assertSemantics("title:\"software engineer\"", "software engineer"); // Skip OR when there is nothing else
}
@Test
void testReplacing2() {
- assertSemantics("OR lotr \"lord of the rings\"", "lotr");
+ assertSemantics("OR \"lord of the rings\" lotr", "lotr");
+ }
+
+ @Test
+ void testReplacing2WithFollowingQuery() {
+ assertSemantics("AND (OR \"lord of the rings\" lotr) is a movie", "lotr is a movie");
+ }
+
+ @Test
+ void testReplacing2WithPrecedingQuery() {
+ assertSemantics("AND a movie is (OR \"lord of the rings\" lotr)", "a movie is lotr");
+ }
+ @Test
+ void testReplacing2WithSurroundingQuery() {
+ assertSemantics("AND a movie is (OR \"lord of the rings\" lotr) yes", "a movie is lotr yes");
}
}
diff --git a/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/equiv.sr b/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/equiv.sr
new file mode 100644
index 00000000000..99102fcd03f
--- /dev/null
+++ b/container-search/src/test/java/com/yahoo/prelude/semantics/test/rulebases/equiv.sr
@@ -0,0 +1,4 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@default
+
+lotr +> ="lord of the rings";
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index 8bba92f36e3..1272bf4d00d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -35,6 +35,8 @@ import com.yahoo.vespa.hosted.controller.routing.GeneratedEndpoints;
import com.yahoo.vespa.hosted.controller.routing.PreparedEndpoints;
import com.yahoo.vespa.hosted.controller.routing.RoutingId;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicies;
+import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
+import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyList;
import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext;
import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext.ExclusiveDeploymentRoutingContext;
import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext.SharedDeploymentRoutingContext;
@@ -133,12 +135,20 @@ public class RoutingController {
if (randomizedEndpointsEnabled(deployment.applicationId())) { // TODO(mpolden): Remove this guard once config-models < 8.220 are gone
boolean includeTokenEndpoint = tokenEndpointEnabled(deployment.applicationId());
Map<ClusterSpec.Id, List<GeneratedEndpoint>> generatedEndpointsByCluster = new HashMap<>();
+ RoutingPolicyList deploymentPolicies = policies().read(deployment);
for (var container : services.containers()) {
ClusterSpec.Id clusterId = ClusterSpec.Id.from(container.id());
boolean tokenSupported = includeTokenEndpoint && container.authMethods().contains(BasicServicesXml.Container.AuthMethod.token);
- List<GeneratedEndpoint> generatedForCluster = certificate.flatMap(EndpointCertificate::randomizedId)
- .map(id -> generateEndpoints(id, deployment.applicationId(), tokenSupported))
- .orElseGet(List::of);
+ // Use already existing generated endpoints, if any
+ List<GeneratedEndpoint> generatedForCluster = deploymentPolicies.cluster(clusterId)
+ .first()
+ .map(RoutingPolicy::generatedEndpoints)
+ .orElseGet(List::of);
+ if (generatedForCluster.isEmpty()) {
+ generatedForCluster = certificate.flatMap(EndpointCertificate::randomizedId)
+ .map(id -> generateEndpoints(id, deployment.applicationId(), tokenSupported))
+ .orElseGet(List::of);
+ }
if (!generatedForCluster.isEmpty()) {
generatedEndpointsByCluster.put(clusterId, generatedForCluster);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
index ac896338643..02ecdcaad21 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
@@ -26,6 +26,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.deployment.Run.Reason;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion.Confidence;
@@ -308,7 +309,8 @@ public class DeploymentStatus {
jobs.merge(job, List.of(new Job(typeWithZone,
versions,
readiness.okAt(now) && jobs().get(job).get().isRunning() ? readiness.running() : readiness,
- change)), DeploymentStatus::union);
+ change,
+ null)), DeploymentStatus::union);
}
});
});
@@ -574,7 +576,7 @@ public class DeploymentStatus {
// which is the case when the next versions to run that test with is not the same as we want to deploy here.
List<Job> tests = job.type().isTest() ? null : jobs.get(new JobId(job.application(), JobType.productionTestOf(job.type().zone())));
readiness = tests != null && ! versions.targetsMatch(tests.get(0).versions) && readiness.okAt(now) ? readiness.blocked() : readiness;
- toRun.add(new Job(job.type(), versions, readiness, partial));
+ toRun.add(new Job(job.type(), versions, readiness, partial, null));
// Assume first partial change is applied before the second.
existingPlatform = Optional.of(versions.targetPlatform());
existingRevision = Optional.of(versions.targetRevision());
@@ -700,7 +702,8 @@ public class DeploymentStatus {
testJobs.merge(testJob, List.of(new Job(testJob.type(),
productionJob.versions(),
readiness.okAt(now) && jobs().get(testJob).get().isRunning() ? readiness.running() : readiness,
- productionJob.change)),
+ productionJob.change,
+ job)),
DeploymentStatus::union);
}
@@ -1192,12 +1195,14 @@ public class DeploymentStatus {
private final Versions versions;
private final Readiness readiness;
private final Change change;
+ private final JobId dependent;
- public Job(JobType type, Versions versions, Readiness readiness, Change change) {
+ public Job(JobType type, Versions versions, Readiness readiness, Change change, JobId dependent) {
this.type = type;
this.versions = type.isSystemTest() ? versions.withoutSources() : versions;
this.readiness = readiness;
this.change = change;
+ this.dependent = dependent;
}
public JobType type() {
@@ -1212,6 +1217,10 @@ public class DeploymentStatus {
return readiness;
}
+ public Reason reason() {
+ return new Reason(Optional.empty(), Optional.ofNullable(dependent), Optional.ofNullable(change));
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
index 81eaa17f95d..e247d6baa09 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
@@ -22,6 +22,7 @@ import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus.DelayCause;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus.Readiness;
+import com.yahoo.vespa.hosted.controller.deployment.Run.Reason;
import java.math.BigDecimal;
import java.time.Clock;
@@ -185,7 +186,12 @@ public class DeploymentTrigger {
/** Attempts to trigger the given job. */
private boolean trigger(Job job) {
try {
- trigger(job, null);
+ log.log(Level.FINE, () -> "Triggering " + job);
+ applications().lockApplicationOrThrow(TenantAndApplicationId.from(job.applicationId()), application -> {
+ jobs.start(job.applicationId(), job.jobType, job.versions, false, job.reason);
+ applications().store(application.with(job.applicationId().instance(), instance ->
+ instance.withJobPause(job.jobType, OptionalLong.empty())));
+ });
return true;
}
catch (Exception e) {
@@ -194,16 +200,6 @@ public class DeploymentTrigger {
}
}
- /** Attempts to trigger the given job. */
- private void trigger(Job job, String reason) {
- log.log(Level.FINE, () -> "Triggering " + job);
- applications().lockApplicationOrThrow(TenantAndApplicationId.from(job.applicationId()), application -> {
- jobs.start(job.applicationId(), job.jobType, job.versions, false, Optional.ofNullable(reason));
- applications().store(application.with(job.applicationId().instance(), instance ->
- instance.withJobPause(job.jobType, OptionalLong.empty())));
- });
- }
-
/** Force triggering of a job for given instance, with same versions as last run. */
public JobId reTrigger(ApplicationId applicationId, JobType jobType, String reason) {
Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId));
@@ -212,7 +208,8 @@ public class DeploymentTrigger {
JobStatus jobStatus = jobs.jobStatus(new JobId(applicationId, jobType));
Run last = jobStatus.lastTriggered()
.orElseThrow(() -> new IllegalArgumentException(job + " has never been triggered"));
- trigger(deploymentJob(instance, last.versions(), last.id().type(), jobStatus.isNodeAllocationFailure(), clock.instant()), reason);
+ trigger(deploymentJob(instance, last.versions(), last.id().type(), jobStatus.isNodeAllocationFailure(), clock.instant(),
+ new Reason(Optional.ofNullable(reason), last.reason().dependent(), last.reason().change())));
return job;
}
@@ -236,7 +233,7 @@ public class DeploymentTrigger {
if ( ! upgradeRevision && change.revision().isPresent()) change = change.withoutApplication();
if ( ! upgradePlatform && change.platform().isPresent()) change = change.withoutPlatform();
Versions versions = Versions.from(change, application, status.deploymentFor(job), status.fallbackPlatform(change, job));
- DeploymentStatus.Job toTrigger = new DeploymentStatus.Job(job.type(), versions, new Readiness(controller.clock().instant()), instance.change());
+ DeploymentStatus.Job toTrigger = new DeploymentStatus.Job(job.type(), versions, new Readiness(controller.clock().instant()), instance.change(), null);
Map<JobId, List<DeploymentStatus.Job>> testJobs = status.testJobs(Map.of(job, List.of(toTrigger)));
Map<JobId, List<DeploymentStatus.Job>> jobs = testJobs.isEmpty() || ! requireTests
@@ -245,13 +242,13 @@ public class DeploymentTrigger {
.filter(entry -> controller.jobController().last(entry.getKey()).map(Run::hasEnded).orElse(true))
.collect(toMap(Map.Entry::getKey, Map.Entry::getValue));
- jobs.forEach((jobId, versionsList) -> {
+ jobs.forEach((jobId, jobList) -> {
trigger(deploymentJob(application.require(jobId.application().instance()),
- versionsList.get(0).versions(),
+ jobList.get(0).versions(),
jobId.type(),
status.jobs().get(jobId).get().isNodeAllocationFailure(),
- clock.instant()),
- reason);
+ clock.instant(),
+ new Reason(Optional.of(reason), jobList.get(0).reason().dependent(), jobList.get(0).reason().change())));
});
return List.copyOf(jobs.keySet());
}
@@ -262,7 +259,7 @@ public class DeploymentTrigger {
last.versions().targetRevision(),
Optional.of(last.versions().targetPlatform()),
Optional.of(last.versions().targetRevision()));
- jobs.start(job.application(), job.type(), target, true, Optional.of(reason));
+ jobs.start(job.application(), job.type(), target, true, Reason.because(reason));
return List.of(job);
}
@@ -383,7 +380,8 @@ public class DeploymentTrigger {
job.versions(),
job.type(),
status.instanceJobs(jobId.application().instance()).get(jobId.type()).isNodeAllocationFailure(),
- job.readiness().at()));
+ job.readiness().at(),
+ job.reason()));
}
});
return Collections.unmodifiableList(jobs);
@@ -405,7 +403,7 @@ public class DeploymentTrigger {
private void abortIfOutdated(JobStatus job, List<DeploymentStatus.Job> jobs) {
job.lastTriggered()
- .filter(last -> ! last.hasEnded() && last.reason().isEmpty())
+ .filter(last -> ! last.hasEnded() && last.reason().reason().isEmpty())
.ifPresent(last -> {
if (jobs.stream().noneMatch(versions -> versions.versions().targetsMatch(last.versions())
&& versions.versions().sourcesMatchIfPresent(last.versions()))) {
@@ -458,8 +456,8 @@ public class DeploymentTrigger {
// ---------- Version and job helpers ----------
- private Job deploymentJob(Instance instance, Versions versions, JobType jobType, boolean isNodeAllocationFailure, Instant availableSince) {
- return new Job(instance, versions, jobType, availableSince, isNodeAllocationFailure, instance.change().revision().isPresent());
+ private Job deploymentJob(Instance instance, Versions versions, JobType jobType, boolean isNodeAllocationFailure, Instant availableSince, Reason reason) {
+ return new Job(instance, versions, jobType, availableSince, isNodeAllocationFailure, instance.change().revision().isPresent(), reason);
}
// ---------- Data containers ----------
@@ -473,15 +471,17 @@ public class DeploymentTrigger {
private final Instant availableSince;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
+ private final Run.Reason reason;
private Job(Instance instance, Versions versions, JobType jobType, Instant availableSince,
- boolean isRetry, boolean isApplicationUpgrade) {
+ boolean isRetry, boolean isApplicationUpgrade, Run.Reason reason) {
this.instanceId = instance.id();
this.jobType = jobType;
this.versions = versions;
this.availableSince = availableSince;
this.isRetry = isRetry;
this.isApplicationUpgrade = isApplicationUpgrade;
+ this.reason = reason;
}
ApplicationId applicationId() { return instanceId; }
@@ -489,6 +489,7 @@ public class DeploymentTrigger {
Instant availableSince() { return availableSince; } // TODO jvenstad: This is 95% broken now. Change.at() can restore it.
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
+ Reason reason() { return reason; }
@Override
public String toString() {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
index 5ccb59e3ebc..0773c95e1f2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
@@ -36,6 +36,7 @@ import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageDiff;
import com.yahoo.vespa.hosted.controller.application.pkg.TestPackage;
+import com.yahoo.vespa.hosted.controller.deployment.Run.Reason;
import com.yahoo.vespa.hosted.controller.notification.Notification;
import com.yahoo.vespa.hosted.controller.notification.Notification.Type;
import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
@@ -698,12 +699,12 @@ public class JobController {
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
- public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
+ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Reason reason) {
start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
- public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
+ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Reason reason) {
ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
if (revision.compileVersion()
.map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
@@ -775,7 +776,7 @@ public class JobController {
new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())),
false,
dryRun ? JobProfile.developmentDryRun : JobProfile.development,
- Optional.empty());
+ Reason.empty());
});
locked(id, type, __ -> {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
index 0c5fb3fb3cb..76ab154688f 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
@@ -2,7 +2,9 @@
package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.application.Change;
import java.security.cert.X509Certificate;
import java.time.Instant;
@@ -44,13 +46,13 @@ public class Run {
private final Optional<X509Certificate> testerCertificate;
private final boolean dryRun;
private final Optional<CloudAccount> cloudAccount;
- private final Optional<String> reason;
+ private final Reason reason;
// For deserialisation only -- do not use!
public Run(RunId id, Map<Step, StepInfo> steps, Versions versions, boolean isRedeployment, Instant start, Optional<Instant> end,
Optional<Instant> sleepUntil, RunStatus status, long lastTestRecord, Instant lastVespaLogTimestamp,
Optional<Instant> noNodesDownSince, Optional<ConvergenceSummary> convergenceSummary,
- Optional<X509Certificate> testerCertificate, boolean dryRun, Optional<CloudAccount> cloudAccount, Optional<String> reason) {
+ Optional<X509Certificate> testerCertificate, boolean dryRun, Optional<CloudAccount> cloudAccount, Reason reason) {
this.id = id;
this.steps = Collections.unmodifiableMap(new EnumMap<>(steps));
this.versions = versions;
@@ -69,12 +71,12 @@ public class Run {
this.reason = reason;
}
- public static Run initial(RunId id, Versions versions, boolean isRedeployment, Instant now, JobProfile profile, Optional<String> triggeredBy) {
+ public static Run initial(RunId id, Versions versions, boolean isRedeployment, Instant now, JobProfile profile, Reason reason) {
EnumMap<Step, StepInfo> steps = new EnumMap<>(Step.class);
profile.steps().forEach(step -> steps.put(step, StepInfo.initial(step)));
return new Run(id, steps, requireNonNull(versions), isRedeployment, requireNonNull(now), Optional.empty(),
Optional.empty(), running, -1, Instant.EPOCH, Optional.empty(), Optional.empty(),
- Optional.empty(), profile == JobProfile.developmentDryRun, Optional.empty(), triggeredBy);
+ Optional.empty(), profile == JobProfile.developmentDryRun, Optional.empty(), reason);
}
/** Returns a new Run with the status of the given completed step set accordingly. */
@@ -278,7 +280,7 @@ public class Run {
public Optional<CloudAccount> cloudAccount() { return cloudAccount; }
/** The specific reason for triggering this run, if any. This should be empty for jobs triggered by deployment orchestration. */
- public Optional<String> reason() {
+ public Reason reason() {
return reason;
}
@@ -342,4 +344,10 @@ public class Run {
throw new IllegalStateException("This run ended at " + end.get() + " -- it can't be further modified!");
}
+ public record Reason(Optional<String> reason, Optional<JobId> dependent, Optional<Change> change) {
+ private static final Reason empty = new Reason(Optional.empty(), Optional.empty(), Optional.empty());
+ public static Reason empty() { return empty; }
+ public static Reason because(String reason) { return new Reason(Optional.of(reason), Optional.empty(), Optional.empty()); }
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArtifactExpirer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArtifactExpirer.java
index 3cf87b9803b..1a2fc2f71c2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArtifactExpirer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArtifactExpirer.java
@@ -103,13 +103,13 @@ public class ArtifactExpirer extends ControllerMaintainer {
else if (system == SystemName.cd)
versions.addAll(versionsForSystem(SystemName.main));
- log.log(FINE, "model versions in use : " + versions);
+ log.log(FINE, "model versions in use: " + versions);
return versions;
}
private Set<Version> versionsForSystem(SystemName systemName) {
- var versions = readConfigModelVersionsForSystem(systemName.name());
- log.log(FINE, "versions for system " + systemName.name() + ": " + versions);
+ var versions = readConfigModelVersionsForSystem(systemName.name().toLowerCase());
+ log.log(FINE, "model versions in use in " + systemName.name() + ": " + versions);
return versions;
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java
index 8a3a2a11e09..82cac1e7520 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java
@@ -71,7 +71,7 @@ public class DeploymentUpgrader extends ControllerMaintainer {
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
attempts.incrementAndGet();
- controller().jobController().start(instance.id(), JobType.deploymentTo(deployment.zone()), target, true, Optional.of("automated upgrade"));
+ controller().jobController().start(instance.id(), JobType.deploymentTo(deployment.zone()), target, true, Run.Reason.because("automated upgrade"));
} catch (Exception e) {
failures.incrementAndGet();
log.log(Level.WARNING, "Failed upgrading " + deployment + " of " + instance +
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
index c95112268c5..d302df0bae3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
@@ -281,7 +281,7 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer {
.filter(c -> c.instance().isPresent())
.filter(c -> c.certificate().randomizedId().isEmpty())
.filter(c -> assignRandomizedId.with(FetchVector.Dimension.APPLICATION_ID, c.application().instance(c.instance().get()).serializedForm()).value())
- .limit(5)
+ .limit(10)
.forEach(c -> assignRandomizedId(c.application(), c.instance().get()));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
index 73d0bf6cad6..b1ca6c63816 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
@@ -11,11 +11,14 @@ import com.yahoo.slime.Inspector;
import com.yahoo.slime.ObjectTraverser;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.deployment.ConvergenceSummary;
import com.yahoo.vespa.hosted.controller.deployment.Run;
+import com.yahoo.vespa.hosted.controller.deployment.Run.Reason;
import com.yahoo.vespa.hosted.controller.deployment.RunStatus;
import com.yahoo.vespa.hosted.controller.deployment.Step;
import com.yahoo.vespa.hosted.controller.deployment.Step.Status;
@@ -101,6 +104,8 @@ class RunSerializer {
private static final String isDryRunField = "isDryRun";
private static final String cloudAccountField = "account";
private static final String reasonField = "reason";
+ private static final String dependentField = "dependent";
+ private static final String changeField = "change";
Run runFromSlime(Slime slime) {
return runFromSlime(slime.get());
@@ -147,7 +152,7 @@ class RunSerializer {
SlimeUtils.optionalString(runObject.field(testerCertificateField)).map(X509CertificateUtils::fromPem),
runObject.field(isDryRunField).valid() && runObject.field(isDryRunField).asBool(),
SlimeUtils.optionalString(runObject.field(cloudAccountField)).map(CloudAccount::from),
- SlimeUtils.optionalString(runObject.field(reasonField)));
+ reasonFrom(runObject));
}
private Versions versionsFromSlime(Inspector versionsObject, RunId id) {
@@ -241,7 +246,7 @@ class RunSerializer {
});
runObject.setBool(isDryRunField, run.isDryRun());
run.cloudAccount().ifPresent(account -> runObject.setString(cloudAccountField, account.value()));
- run.reason().ifPresent(reason -> runObject.setString(reasonField, reason));
+ toSlime(run.reason(), runObject);
}
private void toSlime(Version platformVersion, RevisionId revsion, Cursor versionsObject) {
@@ -372,4 +377,42 @@ class RunSerializer {
};
}
+ Reason reasonFrom(Inspector object) {
+ return new Reason(SlimeUtils.optionalString(object.field(reasonField)),
+ Optional.ofNullable(jobIdFrom(object.field(dependentField))),
+ Optional.ofNullable(toChange(object.field(changeField))));
+ }
+
+ void toSlime(Reason reason, Cursor object) {
+ reason.reason().ifPresent(value -> object.setString(reasonField, value));
+ reason.dependent().ifPresent(dependent -> toSlime(dependent, object.setObject(dependentField)));
+ reason.change().ifPresent(change -> toSlime(change, object.setObject(changeField)));
+ }
+
+ JobId jobIdFrom(Inspector object) {
+ if ( ! object.valid()) return null;
+ return new JobId(ApplicationId.fromSerializedForm(object.field(applicationField).asString()),
+ JobType.ofSerialized(object.field(jobTypeField).asString()));
+ }
+
+ void toSlime(JobId jobId, Cursor object) {
+ object.setString(applicationField, jobId.application().serializedForm());
+ object.setString(jobTypeField, jobId.type().serialized());
+ }
+
+ Change toChange(Inspector object) {
+ if ( ! object.valid()) return null;
+ Change change = Change.empty();
+ if (object.field(platformVersionField).valid())
+ change = change.with(Version.fromString(object.field(platformVersionField).asString()));
+ if (object.field(buildField).valid())
+ change = change.with(RevisionId.forProduction(object.field(buildField).asLong()));
+ return change;
+ }
+
+ void toSlime(Change change, Cursor object) {
+ change.platform().ifPresent(version -> object.setString(platformVersionField, version.toString()));
+ change.revision().ifPresent(revision -> object.setLong(buildField, revision.number()));
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
index 982e81d92b1..0edfdb51055 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
@@ -6,7 +6,6 @@ import com.yahoo.config.application.api.DeploymentSpec.ChangeBlocker;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
-import com.yahoo.restapi.MessageResponse;
import com.yahoo.restapi.SlimeJsonResponse;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;
@@ -31,6 +30,7 @@ import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus.Readiness;
import com.yahoo.vespa.hosted.controller.deployment.JobController;
import com.yahoo.vespa.hosted.controller.deployment.JobStatus;
import com.yahoo.vespa.hosted.controller.deployment.Run;
+import com.yahoo.vespa.hosted.controller.deployment.Run.Reason;
import com.yahoo.vespa.hosted.controller.deployment.RunLog;
import com.yahoo.vespa.hosted.controller.deployment.RunStatus;
import com.yahoo.vespa.hosted.controller.deployment.Step;
@@ -125,7 +125,14 @@ class JobControllerApiHandlerHelper {
Run run = jobController.run(runId);
detailsObject.setBool("active", ! run.hasEnded());
detailsObject.setString("status", nameOf(run.status()));
- run.reason().ifPresent(reason -> detailsObject.setString("reason", reason));
+ run.reason().reason().ifPresent(reason -> detailsObject.setString("reason", reason));
+ run.reason().dependent().ifPresent(dependent -> {
+ Cursor dependentObject = detailsObject.setObject("dependent");
+ dependentObject.setString("instance", dependent.application().instance().value());
+ dependentObject.setString("region", dependent.type().zone().region().value());
+ run.reason().change().flatMap(Change::platform).ifPresent(platform -> dependentObject.setString("platform", platform.toFullString()));
+ run.reason().change().flatMap(Change::revision).ifPresent(revision -> dependentObject.setLong("build", revision.number()));
+ });
try {
jobController.updateTestLog(runId);
jobController.updateVespaLog(runId);
@@ -391,18 +398,18 @@ class JobControllerApiHandlerHelper {
JobStatus jobStatus = status.jobs().get(job).get();
Cursor toRunArray = stepObject.setArray("toRun");
showDelayCause = readiness.cause() == DelayCause.paused;
- for (DeploymentStatus.Job versions : jobsToRun.getOrDefault(job, List.of())) {
+ for (DeploymentStatus.Job jobToRun : jobsToRun.getOrDefault(job, List.of())) {
boolean running = jobStatus.lastTriggered()
.map(run -> jobStatus.isRunning()
- && versions.versions().targetsMatch(run.versions())
- && (job.type().isProduction() || versions.versions().sourcesMatchIfPresent(run.versions())))
+ && jobToRun.versions().targetsMatch(run.versions())
+ && (job.type().isProduction() || jobToRun.versions().sourcesMatchIfPresent(run.versions())))
.orElse(false);
if (running)
continue; // Run will be contained in the "runs" array.
showDelayCause = true;
Cursor runObject = toRunArray.addObject();
- toSlime(runObject.setObject("versions"), versions.versions(), application);
+ toSlime(runObject, jobToRun.versions(), jobToRun.reason(), application);
}
if ( ! jobStatus.runs().isEmpty())
@@ -411,6 +418,7 @@ class JobControllerApiHandlerHelper {
status.application().deploymentSpec())
.ifPresent(cloudAccount -> stepObject.setObject("enclave").setString("cloudAccount", cloudAccount.value()));
+
toSlime(stepObject.setArray("runs"), jobStatus.runs().descendingMap().values(), application, 10, baseUriForJob);
}
stepObject.setString("delayCause",
@@ -490,6 +498,18 @@ class JobControllerApiHandlerHelper {
return candidates;
}
+ private static void toSlime(Cursor runObject, Versions versions, Reason reason, Application application) {
+ reason.reason().ifPresent(because -> runObject.setString("reason", because));
+ reason.dependent().ifPresent(dependent -> {
+ Cursor dependentObject = runObject.setObject("dependent");
+ dependentObject.setString("instance", dependent.application().instance().value());
+ dependentObject.setString("region", dependent.type().zone().region().value());
+ reason.change().flatMap(Change::platform).ifPresent(platform -> dependentObject.setString("platform", platform.toFullString()));
+ reason.change().flatMap(Change::revision).ifPresent(revision -> dependentObject.setLong("build", revision.number()));
+ });
+ toSlime(runObject.setObject("versions"), versions, application);
+ }
+
private static void toSlime(Cursor runsArray, Collection<Run> runs, Application application, int limit, URI baseUriForJob) {
runs.stream().limit(limit).forEach(run -> {
Cursor runObject = runsArray.addObject();
@@ -498,8 +518,7 @@ class JobControllerApiHandlerHelper {
runObject.setLong("start", run.start().toEpochMilli());
run.end().ifPresent(end -> runObject.setLong("end", end.toEpochMilli()));
runObject.setString("status", nameOf(run.status()));
- run.reason().ifPresent(reason -> runObject.setString("reason", reason));
- toSlime(runObject.setObject("versions"), run.versions(), application);
+ toSlime(runObject, run.versions(), run.reason(), application);
Cursor runStepsArray = runObject.setArray("steps");
run.steps().forEach((step, info) -> {
Cursor runStepObject = runStepsArray.addObject();
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java
index a5efc016c68..366c28a6be0 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.routing;
import com.yahoo.collections.AbstractFilteringList;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
@@ -46,6 +47,11 @@ public class RoutingPolicyList extends AbstractFilteringList<RoutingPolicy, Rout
return matching(policy -> policy.id().owner().equals(instance));
}
+ /** Returns the subset of policies applying to given cluster */
+ public RoutingPolicyList cluster(ClusterSpec.Id cluster) {
+ return matching(policy -> policy.id().cluster().equals(cluster));
+ }
+
/** Returns the subset of policies applying to given deployment */
public RoutingPolicyList deployment(DeploymentId deployment) {
return matching(policy -> policy.appliesTo(deployment));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
index 0c238ea7c9d..f39374a2a89 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
@@ -73,7 +73,7 @@ public class DeploymentExpirerTest {
// Dev application expires when enough time has passed since most recent attempt
// Redeployments done by DeploymentUpgrader do not affect this
tester.clock().advance(Duration.ofDays(12).plus(Duration.ofSeconds(1)));
- tester.jobs().start(devApp.instanceId(), DeploymentContext.devUsEast1, lastRun.versions(), true, Optional.of("upgrade"));
+ tester.jobs().start(devApp.instanceId(), DeploymentContext.devUsEast1, lastRun.versions(), true, Run.Reason.because("upgrade"));
expirer.maintain();
assertEquals(0, permanentDeployments(devApp.instance()));
assertEquals(1, permanentDeployments(prodApp.instance()));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
index b0fe2867ab7..3ee6c7aadc3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
@@ -14,6 +14,7 @@ import com.yahoo.vespa.hosted.controller.deployment.JobController;
import com.yahoo.vespa.hosted.controller.deployment.JobMetrics;
import com.yahoo.vespa.hosted.controller.deployment.JobProfile;
import com.yahoo.vespa.hosted.controller.deployment.Run;
+import com.yahoo.vespa.hosted.controller.deployment.Run.Reason;
import com.yahoo.vespa.hosted.controller.deployment.RunStatus;
import com.yahoo.vespa.hosted.controller.deployment.Step;
import com.yahoo.vespa.hosted.controller.deployment.Step.Status;
@@ -423,7 +424,7 @@ public class JobRunnerTest {
}
private void start(JobController jobs, ApplicationId id, JobType type) {
- jobs.start(id, type, versions, false, Optional.empty());
+ jobs.start(id, type, versions, false, Reason.empty());
}
public static ExecutorService inThreadExecutor() {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
index cae5037ab6f..fc1a694e0f7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
@@ -7,12 +7,15 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.CloudAccount;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.deployment.ConvergenceSummary;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext;
import com.yahoo.vespa.hosted.controller.deployment.JobProfile;
import com.yahoo.vespa.hosted.controller.deployment.Run;
+import com.yahoo.vespa.hosted.controller.deployment.Run.Reason;
import com.yahoo.vespa.hosted.controller.deployment.RunStatus;
import com.yahoo.vespa.hosted.controller.deployment.Step;
import com.yahoo.vespa.hosted.controller.deployment.StepInfo;
@@ -82,11 +85,15 @@ public class RunSerializerTest {
assertFalse(run.hasEnded());
assertEquals(running, run.status());
assertEquals(3, run.lastTestLogEntry());
- assertEquals(new Version(1, 2, 3), run.versions().targetPlatform());
+ Version version1 = new Version(1, 2, 3);
+ assertEquals(version1, run.versions().targetPlatform());
RevisionId revision1 = RevisionId.forDevelopment(123, id.job());
RevisionId revision2 = RevisionId.forProduction(122);
assertEquals(revision1, run.versions().targetRevision());
- assertEquals("because", run.reason().get());
+ assertEquals(new Reason(Optional.of("because"),
+ Optional.of(new JobId(id.application(), id.type())),
+ Optional.of(Change.of(version1).with(revision2))),
+ run.reason());
assertEquals(new Version(1, 2, 2), run.versions().sourcePlatform().get());
assertEquals(revision2, run.versions().sourceRevision().get());
assertEquals(Optional.of(new ConvergenceSummary(1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233)),
@@ -145,7 +152,8 @@ public class RunSerializerTest {
assertEquals(new String(SlimeUtils.toJsonBytes(serializer.toSlime(run).get(), false), UTF_8),
new String(SlimeUtils.toJsonBytes(serializer.toSlime(phoenix).get(), false), UTF_8));
- Run initial = Run.initial(id, run.versions(), run.isRedeployment(), run.start(), JobProfile.production, Optional.empty());
+ Run initial = Run.initial(id, run.versions(), run.isRedeployment(), run.start(), JobProfile.production,
+ new Reason(Optional.empty(), Optional.empty(), Optional.empty()));
assertEquals(initial, serializer.runFromSlime(serializer.toSlime(initial)));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json
index 1216bcefab6..618a7e66c5e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json
@@ -54,6 +54,14 @@
"deployedDirectly": false
}
},
- "reason": "because"
+ "reason": "because",
+ "dependent": {
+ "id": "tenant:application:default",
+ "type": "prod.us-east-3"
+ },
+ "change": {
+ "platform": "1.2.3",
+ "build": 122
+ }
}
] \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json
index 19b3d5dc2d7..f49b7d9ccae 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview-2.json
@@ -167,6 +167,11 @@
"start": 14403000,
"end": 14403000,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 3
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -225,6 +230,11 @@
"start": 1000,
"end": 1000,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 2
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -283,6 +293,11 @@
"start": 0,
"end": 0,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 1
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -351,6 +366,11 @@
"environment": "staging",
"toRun": [
{
+ "dependent": {
+ "instance": "default",
+ "region": "us-east-3",
+ "build": 3
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -376,6 +396,11 @@
"start": 14503000,
"end": 14503000,
"status": "installationFailed",
+ "dependent": {
+ "instance": "default",
+ "region": "us-east-3",
+ "build": 3
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -457,6 +482,11 @@
"start": 14403000,
"end": 14403000,
"status": "installationFailed",
+ "dependent": {
+ "instance": "default",
+ "region": "us-east-3",
+ "build": 3
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -538,6 +568,11 @@
"start": 14403000,
"end": 14403000,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 3
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -619,6 +654,11 @@
"start": 1000,
"end": 1000,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 2
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -700,6 +740,11 @@
"start": 0,
"end": 0,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 1
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
index 1d115049b35..617fb48a281 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
@@ -79,6 +79,11 @@
"url": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/2",
"start": 1600000000000,
"status": "running",
+ "dependent": {
+ "instance": "instance1",
+ "region": "us-central-1",
+ "build": 4
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -137,6 +142,11 @@
"start": 1600000000000,
"end": 1600000000000,
"status": "success",
+ "dependent": {
+ "instance": "instance1",
+ "region": "us-central-1",
+ "build": 1
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -209,6 +219,11 @@
"url": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/job/staging-test/run/2",
"start": 1600000000000,
"status": "running",
+ "dependent": {
+ "instance": "instance1",
+ "region": "us-central-1",
+ "build": 4
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -283,6 +298,11 @@
"start": 1600000000000,
"end": 1600000000000,
"status": "success",
+ "dependent": {
+ "instance": "instance1",
+ "region": "us-central-1",
+ "build": 1
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
index 9d82ed97849..ef9c8a608ab 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
@@ -5,11 +5,11 @@
"steps": [
{
"type": "instance",
- "dependencies": [],
+ "dependencies": [ ],
"declared": true,
"instance": "default",
"readyAt": 0,
- "deploying": {},
+ "deploying": { },
"latestVersions": {
"platform": {
"platform": "6.1.0",
@@ -21,7 +21,7 @@
"upgrade": false
}
],
- "blockers": []
+ "blockers": [ ]
},
"application": {
"application": {
@@ -42,21 +42,21 @@
}
}
],
- "blockers": []
+ "blockers": [ ]
}
},
"delayCause": null
},
{
"type": "test",
- "dependencies": [],
+ "dependencies": [ ],
"declared": true,
"instance": "default",
"readyAt": 0,
"jobName": "staging-test",
"url": "https://some.url:43/instance/default/job/staging-test",
"environment": "staging",
- "toRun": [],
+ "toRun": [ ],
"enclave": {
"cloudAccount": "aws:123456789012"
},
@@ -67,6 +67,11 @@
"start": 1600000000000,
"end": 1600000000000,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "aws-us-east-1c",
+ "build": 1
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -140,14 +145,14 @@
},
{
"type": "test",
- "dependencies": [],
+ "dependencies": [ ],
"declared": true,
"instance": "default",
"readyAt": 0,
"jobName": "system-test",
"url": "https://some.url:43/instance/default/job/system-test",
"environment": "test",
- "toRun": [],
+ "toRun": [ ],
"enclave": {
"cloudAccount": "aws:123456789012"
},
@@ -158,6 +163,11 @@
"start": 1600000000000,
"end": 1600000000000,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "aws-us-east-1c",
+ "build": 1
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -215,7 +225,11 @@
},
{
"type": "deployment",
- "dependencies": [0, 1, 2],
+ "dependencies": [
+ 0,
+ 1,
+ 2
+ ],
"declared": true,
"instance": "default",
"readyAt": 1600000000000,
@@ -230,7 +244,7 @@
"sourceUrl": "repository1/tree/commit1",
"commit": "commit1"
},
- "toRun": [],
+ "toRun": [ ],
"enclave": {
"cloudAccount": "aws:123456789012"
},
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-runs.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-runs.json
index 6c966f0de4d..6509611c3b3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-runs.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-runs.json
@@ -6,6 +6,11 @@
"start": 14503000,
"end": 14503000,
"status": "installationFailed",
+ "dependent": {
+ "instance": "default",
+ "region": "us-east-3",
+ "build": 3
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -87,6 +92,11 @@
"start": 14403000,
"end": 14403000,
"status": "installationFailed",
+ "dependent": {
+ "instance": "default",
+ "region": "us-east-3",
+ "build": 3
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -168,6 +178,11 @@
"start": 14403000,
"end": 14403000,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 3
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -249,6 +264,11 @@
"start": 1000,
"end": 1000,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 2
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -330,6 +350,11 @@
"start": 0,
"end": 0,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 1
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json
index ae44c851dc0..e825ca1f6ad 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/staging-test-log.json
@@ -1,6 +1,11 @@
{
"active": false,
"status": "installationFailed",
+ "dependent": {
+ "instance": "default",
+ "region": "us-east-3",
+ "build": 3
+ },
"log": {
"deployTester": [
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json
index c79dcc99ecf..bb8024ff3ca 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-details.json
@@ -1,6 +1,11 @@
{
"active": false,
"status": "success",
+ "dependent": {
+ "instance": "instance1",
+ "region": "us-central-1",
+ "build": 1
+ },
"log": {
"deployTester": [
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-job.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-job.json
index 1ac4658ce10..0d6ae341be4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-job.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-job.json
@@ -5,6 +5,11 @@
"url": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/job/system-test/run/2",
"start": 1600000000000,
"status": "running",
+ "dependent": {
+ "instance": "instance1",
+ "region": "us-central-1",
+ "build": 4
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
@@ -63,6 +68,11 @@
"start": 1600000000000,
"end": 1600000000000,
"status": "success",
+ "dependent": {
+ "instance": "instance1",
+ "region": "us-central-1",
+ "build": 1
+ },
"versions": {
"targetPlatform": "6.1.0",
"targetApplication": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json
index 830512f2fcd..17f158ac9fc 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/system-test-log.json
@@ -1,6 +1,11 @@
{
"active": false,
"status": "success",
+ "dependent": {
+ "instance": "default",
+ "region": "us-central-1",
+ "build": 1
+ },
"log": {
"deployTester": [
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index d029987707f..b9da87771c0 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -22,6 +22,7 @@ import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.LoadBalancer;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record.Type;
@@ -1091,6 +1092,7 @@ public class RoutingPoliciesTest {
assertEquals(2, generated.cluster(cluster1).size());
assertEquals(1, generated.cluster(cluster1).authMethod(AuthMethod.token).size());
}
+ Map<DeploymentId, Set<ContainerEndpoint>> containerEndpointsInProd = tester.containerEndpoints(Environment.prod);
// Ordinary endpoints point to expected targets
tester.assertTargets(context.instanceId(), EndpointId.of("foo"), cluster0, 0,
@@ -1109,6 +1111,7 @@ public class RoutingPoliciesTest {
// Next deployment does not change generated names
context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
assertEquals(expectedRecords, tester.recordNames());
+ assertEquals(containerEndpointsInProd, tester.containerEndpoints(Environment.prod));
}
private void addCertificateToPool(String id, UnassignedCertificate.State state, RoutingPoliciesTester tester) {
@@ -1200,6 +1203,12 @@ public class RoutingPoliciesTest {
}
}
+ public Map<DeploymentId, Set<ContainerEndpoint>> containerEndpoints(Environment environment) {
+ return tester.controllerTester().configServer().containerEndpoints().entrySet().stream()
+ .filter(kv -> kv.getKey().zoneId().environment() == environment)
+ .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+ }
+
public RoutingPolicies routingPolicies() {
return tester.controllerTester().controller().routing().policies();
}
diff --git a/dependency-versions/pom.xml b/dependency-versions/pom.xml
index 5ac932d0ee2..113426eeae6 100644
--- a/dependency-versions/pom.xml
+++ b/dependency-versions/pom.xml
@@ -84,7 +84,7 @@
<commons-io.vespa.version>2.13.0</commons-io.vespa.version>
<commons-lang3.vespa.version>3.13.0</commons-lang3.vespa.version>
<commons.math3.vespa.version>3.6.1</commons.math3.vespa.version>
- <commons-compress.vespa.version>1.23.0</commons-compress.vespa.version>
+ <commons-compress.vespa.version>1.24.0</commons-compress.vespa.version>
<curator.vespa.version>5.5.0</curator.vespa.version>
<dropwizard.metrics.vespa.version>4.2.19</dropwizard.metrics.vespa.version>
<eclipse-collections.vespa.version>11.1.0</eclipse-collections.vespa.version>
@@ -120,12 +120,12 @@
<org.json.vespa.version>20230618</org.json.vespa.version>
<org.lz4.vespa.version>1.8.0</org.lz4.vespa.version>
<prometheus.client.vespa.version>0.16.0</prometheus.client.vespa.version>
- <protobuf.vespa.version>3.24.2</protobuf.vespa.version>
+ <protobuf.vespa.version>3.24.3</protobuf.vespa.version>
<questdb.vespa.version>7.3.1</questdb.vespa.version>
<spifly.vespa.version>1.3.6</spifly.vespa.version>
<snappy.vespa.version>1.1.10.3</snappy.vespa.version>
<surefire.vespa.version>3.1.2</surefire.vespa.version>
- <wiremock.vespa.version>3.0.2</wiremock.vespa.version>
+ <wiremock.vespa.version>3.0.4</wiremock.vespa.version>
<xerces.vespa.version>2.12.2</xerces.vespa.version>
<zero-allocation-hashing.vespa.version>0.16</zero-allocation-hashing.vespa.version>
<zookeeper.client.vespa.version>3.8.0</zookeeper.client.vespa.version>
@@ -144,7 +144,7 @@
<maven-core.vespa.version>3.9.4</maven-core.vespa.version>
<maven-dependency-plugin.vespa.version>3.6.0</maven-dependency-plugin.vespa.version>
<maven-deploy-plugin.vespa.version>3.1.1</maven-deploy-plugin.vespa.version>
- <maven-enforcer-plugin.vespa.version>3.4.0</maven-enforcer-plugin.vespa.version>
+ <maven-enforcer-plugin.vespa.version>3.4.1</maven-enforcer-plugin.vespa.version>
<maven-failsafe-plugin.vespa.version>3.1.2</maven-failsafe-plugin.vespa.version>
<maven-install-plugin.vespa.version>3.1.1</maven-install-plugin.vespa.version>
<maven-jar-plugin.vespa.version>3.3.0</maven-jar-plugin.vespa.version>
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 7351ed3a74b..868580eb852 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -42,179 +42,80 @@ License: Commercial
URL: http://vespa.ai
Source0: vespa-%{version}.tar.gz
-%if 0%{?centos} || 0%{?rocky} || 0%{?oraclelinux}
-BuildRequires: epel-release
-%endif
+BuildRequires: vespa-build-dependencies = 1.0.1
+
+Requires: %{name}-base = %{version}-%{release}
+Requires: %{name}-base-libs = %{version}-%{release}
+Requires: %{name}-config-model-fat = %{version}-%{release}
+Requires: %{name}-clients = %{version}-%{release}
+Requires: %{name}-jars = %{version}-%{release}
+Requires: %{name}-libs = %{version}-%{release}
+Requires: %{name}-malloc = %{version}-%{release}
+Requires: %{name}-tools = %{version}-%{release}
+
+Requires: gdb
+Requires: initscripts
+Requires: hostname
+Requires: libedit
+Requires: nc
+Requires: net-tools
+Requires: nghttp2
+Requires: numactl
+Requires: which
+Requires: unzip
+Requires: zlib
+Requires: zstd
+
%if 0%{?el8}
%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0)
-BuildRequires: gcc-toolset-12-gcc-c++
-BuildRequires: gcc-toolset-12-binutils
-BuildRequires: gcc-toolset-12-libatomic-devel
-%define _devtoolset_enable /opt/rh/gcc-toolset-12/enable
-BuildRequires: maven
-BuildRequires: maven-openjdk17
-BuildRequires: vespa-pybind11-devel
-BuildRequires: python39-devel
-BuildRequires: python39-pip
-BuildRequires: glibc-langpack-en
-%endif
-%if 0%{?el9}
-%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0)
-BuildRequires: gcc-toolset-12-gcc-c++
-BuildRequires: gcc-toolset-12-binutils
-BuildRequires: gcc-toolset-12-libatomic-devel
%define _devtoolset_enable /opt/rh/gcc-toolset-12/enable
-BuildRequires: pybind11-devel
-BuildRequires: python3-pytest
-BuildRequires: python3-devel
-BuildRequires: glibc-langpack-en
-%endif
-%if 0%{?fedora}
-BuildRequires: gcc-c++
-BuildRequires: libatomic
-BuildRequires: pybind11-devel
-BuildRequires: python3-pytest
-BuildRequires: python3-devel
-BuildRequires: glibc-langpack-en
-%endif
-%if 0%{?el8}
-BuildRequires: cmake >= 3.11.4-3
+
+%define _use_vespa_gtest 1
+%define _use_vespa_openblas 1
+%define _use_vespa_openssl 1
+%define _use_vespa_protobuf 1
+
%if 0%{?centos} || 0%{?rocky} || 0%{?oraclelinux}
-%if 0%{?centos}
-# Current cmake on CentOS 8 is broken and manually requires libarchive install
-BuildRequires: libarchive
-%endif
%define _command_cmake cmake
%endif
-BuildRequires: llvm-devel
-BuildRequires: vespa-boost-devel >= 1.76.0-1
-BuildRequires: vespa-openssl-devel >= 1.1.1o-1
-%define _use_vespa_openssl 1
-BuildRequires: vespa-gtest = 1.13.0
-%define _use_vespa_gtest 1
-BuildRequires: vespa-lz4-devel >= 1.9.4-1
-BuildRequires: vespa-onnxruntime-devel = 1.15.1
-BuildRequires: vespa-protobuf-devel = 3.21.12
-%define _use_vespa_protobuf 1
-BuildRequires: vespa-libzstd-devel >= 1.5.4-1
+
+Requires: vespa-gtest = 1.13.0
+
%endif
+
%if 0%{?el9}
-BuildRequires: cmake >= 3.20.2
-BuildRequires: maven
-BuildRequires: maven-openjdk17
-BuildRequires: openssl-devel
-BuildRequires: vespa-lz4-devel >= 1.9.4-1
-BuildRequires: vespa-onnxruntime-devel = 1.15.1
-BuildRequires: vespa-libzstd-devel >= 1.5.4-1
-BuildRequires: vespa-protobuf-devel = 3.21.12
+%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0)
+%define _devtoolset_enable /opt/rh/gcc-toolset-12/enable
%define _use_vespa_protobuf 1
-BuildRequires: llvm-devel
-BuildRequires: boost-devel >= 1.75
-BuildRequires: gtest-devel
-BuildRequires: gmock-devel
+
+Requires: gtest
%endif
-%if 0%{?fedora}
-BuildRequires: cmake >= 3.9.1
-BuildRequires: maven
+
%if 0%{?amzn2023}
-BuildRequires: maven-amazon-corretto17
%define _java_home /usr/lib/jvm/java-17-amazon-corretto
-%else
-%if %{?fedora} >= 35
-BuildRequires: maven-openjdk17
-%endif
-%endif
-BuildRequires: openssl-devel
-BuildRequires: vespa-lz4-devel >= 1.9.4-1
-BuildRequires: vespa-onnxruntime-devel = 1.15.1
-BuildRequires: vespa-libzstd-devel >= 1.5.4-1
-BuildRequires: protobuf-devel
-BuildRequires: llvm-devel
-BuildRequires: boost-devel
-BuildRequires: gtest-devel
-BuildRequires: gmock-devel
-%endif
-%if 0%{?amzn2023}
-BuildRequires: vespa-xxhash-devel >= 0.8.1
+%define _use_vespa_re2 1
%define _use_vespa_xxhash 1
-%else
-BuildRequires: xxhash-devel >= 0.8.1
-%endif
-%if 0%{?el8}
-BuildRequires: vespa-openblas-devel >= 0.3.21
-%define _use_vespa_openblas 1
-%else
-BuildRequires: openblas-devel
+
+Requires: vespa-xxhash >= 0.8.1
%endif
-%if 0%{?amzn2023}
-BuildRequires: vespa-re2-devel = 20210801
-%define _use_vespa_re2 1
-%else
-BuildRequires: re2-devel
+
+%if 0%{?fedora}
+Requires: gtest
%endif
-BuildRequires: zlib-devel
-BuildRequires: libicu-devel
-%if 0%{?amzn2023}
-BuildRequires: java-17-amazon-corretto-devel
-BuildRequires: java-17-amazon-corretto
-%else
-BuildRequires: java-17-openjdk-devel
-%endif
-BuildRequires: rpm-build
-BuildRequires: make
-BuildRequires: git
-BuildRequires: golang
-BuildRequires: systemd
-BuildRequires: flex >= 2.5.0
-BuildRequires: bison >= 3.0.0
-BuildRequires: libedit-devel
-Requires: libedit
-Requires: which
-Requires: initscripts
+
%if ! 0%{?el9}
Requires: libcgroup-tools
%endif
-Requires: numactl
-BuildRequires: perl
-BuildRequires: valgrind
-BuildRequires: perf
-%if 0%{?amzn2023}
-Requires: vespa-xxhash >= 0.8.1
-%else
+
+%if ! 0%{?amzn2023}
Requires: xxhash-libs >= 0.8.1
%endif
-Requires: gdb
-Requires: hostname
-Requires: nc
-Requires: nghttp2
-Requires: net-tools
-Requires: unzip
-Requires: zlib
-Requires: zstd
-%if 0%{?el8}
-Requires: vespa-gtest = 1.13.0
-%endif
-%if 0%{?el9}
-Requires: gtest
-%endif
-%if 0%{?fedora}
-Requires: gtest
-%endif
-Requires: %{name}-base = %{version}-%{release}
-Requires: %{name}-base-libs = %{version}-%{release}
-Requires: %{name}-libs = %{version}-%{release}
-Requires: %{name}-clients = %{version}-%{release}
-Requires: %{name}-config-model-fat = %{version}-%{release}
-Requires: %{name}-jars = %{version}-%{release}
-Requires: %{name}-malloc = %{version}-%{release}
-Requires: %{name}-tools = %{version}-%{release}
# Ugly workaround because vespamalloc/src/vespamalloc/malloc/mmap.cpp uses the private
# _dl_sym function.
# Exclude automated requires for libraries in /opt/vespa-deps/lib64.
%global __requires_exclude ^lib(c\\.so\\.6\\(GLIBC_PRIVATE\\)|pthread\\.so\\.0\\(GLIBC_PRIVATE\\)|(lz4%{?_use_vespa_protobuf:|protobuf}|zstd|onnxruntime%{?_use_vespa_openssl:|crypto|ssl}%{?_use_vespa_openblas:|openblas}%{?_use_vespa_re2:|re2}%{?_use_vespa_xxhash:|xxhash}%{?_use_vespa_gtest:|(gtest|gmock)(_main)?})\\.so\\.[0-9.]*\\([A-Za-z._0-9]*\\))\\(64bit\\)$
-
%description
Vespa - The open big data serving engine
@@ -229,8 +130,6 @@ Requires: java-17-amazon-corretto
%else
Requires: java-17-openjdk-devel
%endif
-BuildRequires: perl
-BuildRequires: perl-Getopt-Long
Requires(pre): shadow-utils
%description base
@@ -250,7 +149,7 @@ Requires: vespa-xxhash >= 0.8.1
Requires: xxhash-libs >= 0.8.1
%endif
%if 0%{?el8}
-Requires: vespa-openssl >= 1.1.1o-1
+Requires: vespa-openssl >= 3.1.2
%else
Requires: openssl-libs
%endif
@@ -281,7 +180,7 @@ Summary: Vespa - The open big data serving engine - C++ libraries
Requires: %{name}-base-libs = %{version}-%{release}
Requires: libicu
%if 0%{?el8}
-Requires: vespa-openssl >= 1.1.1o-1
+Requires: vespa-openssl >= 3.1.2
%else
Requires: openssl-libs
%endif
@@ -370,6 +269,17 @@ Requires: perf
Vespa - The open big data serving engine - tools for system tests
+%package devel
+
+Summary: Vespa - The open big data serving engine - devel package
+
+Requires: %{name} = %{version}-%{release}
+Requires: %{name}-base-libs = %{version}-%{release}
+
+%description devel
+
+Vespa - The open big data serving engine - devel package
+
%package ann-benchmark
Summary: Vespa - The open big data serving engine - ann-benchmark
@@ -600,7 +510,6 @@ fi
%{_prefix}/etc/systemd
%{_prefix}/etc/vespa
%exclude %{_prefix}/etc/vespamalloc.conf
-%{_prefix}/include
%dir %{_prefix}/lib
%dir %{_prefix}/lib/jars
%{_prefix}/lib/jars/cloud-tenant-cd-jar-with-dependencies.jar
@@ -644,6 +553,7 @@ fi
%{_prefix}/man
%{_prefix}/sbin
%{_prefix}/share
+%exclude %{_prefix}/share/cmake
%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/var
%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/var/crash
%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/var/db
@@ -847,6 +757,12 @@ fi
%{_prefix}/bin/vespa-tensor-conformance
%{_prefix}/bin/vespa-tensor-instructions-benchmark
+%files devel
+%defattr(-,root,root,-)
+%dir %{_prefix}
+%{_prefix}/include
+%{_prefix}/share/cmake
+
%files ann-benchmark
%if %{_defattr_is_vespa_vespa}
%defattr(-,%{_vespa_user},%{_vespa_group},-)
diff --git a/document/src/main/java/com/yahoo/document/json/JsonWriter.java b/document/src/main/java/com/yahoo/document/json/JsonWriter.java
index 29d44dad761..725dcfc6167 100644
--- a/document/src/main/java/com/yahoo/document/json/JsonWriter.java
+++ b/document/src/main/java/com/yahoo/document/json/JsonWriter.java
@@ -146,8 +146,6 @@ public class JsonWriter implements DocumentWriter {
fieldNameIfNotNull(generator, field);
generator.writeStartObject();
- // this makes it impossible to refeed directly, not sure what's correct
- // perhaps just change to "put"?
generator.writeStringField("id", value.getId().toString());
writeFields(value);
diff --git a/eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp b/eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp
index 9851e209ba5..7faf7d57738 100644
--- a/eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp
+++ b/eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp
@@ -13,7 +13,7 @@ ValueType type(const vespalib::string &type_spec) {
TEST(DenseJoinReducePlanTest, make_trivial_plan) {
auto plan = DenseJoinReducePlan(type("double"), type("double"), type("double"));
- EXPECT_TRUE(plan.distinct_result());
+ EXPECT_TRUE(plan.is_distinct());
EXPECT_EQ(plan.lhs_size, 1);
EXPECT_EQ(plan.rhs_size, 1);
EXPECT_EQ(plan.res_size, 1);
@@ -39,7 +39,7 @@ TEST(DenseJoinReducePlanTest, make_simple_plan) {
SmallVector<size_t> expect_lhs_stride = {1,0};
SmallVector<size_t> expect_rhs_stride = {0,1};
SmallVector<size_t> expect_res_stride = {1,0};
- EXPECT_FALSE(plan.distinct_result());
+ EXPECT_FALSE(plan.is_distinct());
EXPECT_EQ(plan.lhs_size, 2);
EXPECT_EQ(plan.rhs_size, 3);
EXPECT_EQ(plan.res_size, 2);
@@ -69,7 +69,7 @@ TEST(DenseJoinReducePlanTest, make_distinct_plan) {
SmallVector<size_t> expect_lhs_stride = {1,0};
SmallVector<size_t> expect_rhs_stride = {0,1};
SmallVector<size_t> expect_res_stride = {3,1};
- EXPECT_TRUE(plan.distinct_result());
+ EXPECT_TRUE(plan.is_distinct());
EXPECT_EQ(plan.lhs_size, 2);
EXPECT_EQ(plan.rhs_size, 3);
EXPECT_EQ(plan.res_size, 6);
@@ -88,7 +88,7 @@ TEST(DenseJoinReducePlanTest, make_complex_plan) {
SmallVector<size_t> expect_lhs_stride = {6,0,2,1};
SmallVector<size_t> expect_rhs_stride = {4,1,0,0};
SmallVector<size_t> expect_res_stride = {12,3,1,0};
- EXPECT_FALSE(plan.distinct_result());
+ EXPECT_FALSE(plan.is_distinct());
EXPECT_EQ(plan.lhs_size, 180);
EXPECT_EQ(plan.rhs_size, 120);
EXPECT_EQ(plan.res_size, 360);
diff --git a/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp b/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp
index 6c0726dab37..e1967f012cb 100644
--- a/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp
+++ b/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp
@@ -27,31 +27,7 @@ using vespalib::make_string_short::fmt;
const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
bool bench = false;
double budget = 1.0;
-
-GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 33.0) / 16.0; };
-
-GenSpec G() { return GenSpec().seq(N_16ths); }
-
-const std::vector<GenSpec> layouts = {
- G(), G(),
- G().idx("x", 5), G().idx("x", 5),
- G().idx("x", 5), G().idx("y", 5),
- G().idx("x", 5), G().idx("x", 5).idx("y", 5),
- G().idx("y", 3), G().idx("x", 2).idx("z", 3),
- G().idx("x", 3).idx("y", 5), G().idx("y", 5).idx("z", 7),
- G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
- G().map("x", {"a","b","c"}), G().map("x", {"a","b"}),
- G().map("x", {"a","b","c"}), G().map("y", {"foo","bar","baz"}),
- G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar","baz"}),
- G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
- G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
- G().idx("x", 3).map("y", {"foo", "bar"}), G().map("y", {"foo", "bar"}).idx("z", 7),
- G().map("x", {"a","b","c"}).idx("y", 5), G().idx("y", 5).map("z", {"i","j","k","l"})
-};
-
-const std::vector<std::vector<vespalib::string>> reductions = {
- {}, {"x"}, {"y"}, {"z"}, {"x", "y"}, {"x", "z"}, {"y", "z"}
-};
+size_t verify_cnt = 0;
std::vector<std::string> ns_list = {
{"vespalib::eval::instruction::(anonymous namespace)::"},
@@ -76,14 +52,19 @@ std::string strip_ns(const vespalib::string &str) {
return tmp;
}
-TensorSpec make_spec(const vespalib::string &param_name, size_t idx) {
- return GenSpec::from_desc(param_name).cells_double().seq(N(1 + idx));
+using select_cell_type_t = std::function<CellType(size_t idx)>;
+CellType always_double(size_t) { return CellType::DOUBLE; }
+select_cell_type_t select(CellType lct) { return [lct](size_t)noexcept{ return lct; }; }
+select_cell_type_t select(CellType lct, CellType rct) { return [lct,rct](size_t idx)noexcept{ return idx ? rct : lct; }; }
+
+TensorSpec make_spec(const vespalib::string &param_name, size_t idx, select_cell_type_t select_cell_type) {
+ return GenSpec::from_desc(param_name).cells(select_cell_type(idx)).seq(N(1 + idx));
}
-TensorSpec eval_ref(const Function &fun) {
+TensorSpec eval_ref(const Function &fun, select_cell_type_t select_cell_type) {
std::vector<TensorSpec> params;
for (size_t i = 0; i < fun.num_params(); ++i) {
- params.push_back(make_spec(fun.param_name(i), i));
+ params.push_back(make_spec(fun.param_name(i), i, select_cell_type));
}
return ReferenceEvaluation::eval(fun, params);
}
@@ -134,19 +115,58 @@ Optimize universal_only() {
return Optimize::specific("universal_only", my_optimizer);
}
+void verify(const vespalib::string &expr, select_cell_type_t select_cell_type) {
+ ++verify_cnt;
+ auto fun = Function::parse(expr);
+ ASSERT_FALSE(fun->has_error());
+ std::vector<Value::UP> values;
+ for (size_t i = 0; i < fun->num_params(); ++i) {
+ auto value = value_from_spec(make_spec(fun->param_name(i), i, select_cell_type), prod_factory);
+ values.push_back(std::move(value));
+ }
+ SimpleObjectParams params({});
+ std::vector<ValueType> param_types;
+ for (auto &&up: values) {
+ params.params.emplace_back(*up);
+ param_types.push_back(up->type());
+ }
+ NodeTypes node_types(*fun, param_types);
+ const ValueType &expected_type = node_types.get_type(fun->root());
+ ASSERT_FALSE(expected_type.is_error());
+ Stash stash;
+ size_t count = 0;
+ const TensorFunction &plain_fun = make_tensor_function(prod_factory, fun->root(), node_types, stash);
+ const TensorFunction &optimized = apply_tensor_function_optimizer(plain_fun, universal_only().optimizer, stash, &count);
+ ASSERT_GT(count, 0);
+ InterpretedFunction ifun(prod_factory, optimized);
+ InterpretedFunction::Context ctx(ifun);
+ const Value &actual = ifun.eval(ctx, params);
+ EXPECT_EQ(actual.type(), expected_type);
+ EXPECT_EQ(actual.cells().type, expected_type.cell_type());
+ if (expected_type.count_mapped_dimensions() == 0) {
+ EXPECT_EQ(actual.index().size(), TrivialIndex::get().size());
+ EXPECT_EQ(actual.cells().size, expected_type.dense_subspace_size());
+ } else {
+ EXPECT_EQ(actual.cells().size, actual.index().size() * expected_type.dense_subspace_size());
+ }
+ auto expected = eval_ref(*fun, select_cell_type);
+ EXPECT_EQ(spec_from_value(actual), expected);
+}
+void verify(const vespalib::string &expr) { verify(expr, always_double); }
+
using cost_list_t = std::vector<std::pair<vespalib::string,double>>;
std::vector<std::pair<vespalib::string,cost_list_t>> benchmark_results;
void benchmark(const vespalib::string &expr, std::vector<Optimize> list) {
+ verify(expr);
auto fun = Function::parse(expr);
ASSERT_FALSE(fun->has_error());
- auto expected = eval_ref(*fun);
cost_list_t cost_list;
fprintf(stderr, "BENCH: %s\n", expr.c_str());
for (Optimize &optimize: list) {
std::vector<Value::UP> values;
for (size_t i = 0; i < fun->num_params(); ++i) {
- auto value = value_from_spec(make_spec(fun->param_name(i), i), prod_factory);
+ auto value = value_from_spec(make_spec(fun->param_name(i), i, always_double), prod_factory);
values.push_back(std::move(value));
}
SimpleObjectParams params({});
@@ -181,8 +201,6 @@ void benchmark(const vespalib::string &expr, std::vector<Optimize> list) {
InterpretedFunction ifun(prod_factory, *optimized, &ctf_meta);
InterpretedFunction::ProfiledContext pctx(ifun);
ASSERT_EQ(ctf_meta.steps.size(), ifun.program_size());
- EXPECT_EQ(spec_from_value(ifun.eval(pctx.context, params)), expected);
- EXPECT_EQ(spec_from_value(ifun.eval(pctx, params)), expected);
std::vector<duration> prev_time(ctf_meta.steps.size(), duration::zero());
std::vector<duration> min_time(ctf_meta.steps.size(), duration::max());
BenchmarkTimer timer(budget);
@@ -214,47 +232,98 @@ void benchmark(const vespalib::string &expr, std::vector<Optimize> list) {
benchmark_results.emplace_back(expr, std::move(cost_list));
}
-TensorSpec perform_dot_product(const TensorSpec &a, const TensorSpec &b, const std::vector<vespalib::string> &dims)
-{
- Stash stash;
- auto lhs = value_from_spec(a, prod_factory);
- auto rhs = value_from_spec(b, prod_factory);
- auto res_type = ValueType::join(lhs->type(), rhs->type()).reduce(dims);
- EXPECT_FALSE(res_type.is_error());
- UniversalDotProduct dot_product(res_type,
- tensor_function::inject(lhs->type(), 0, stash),
- tensor_function::inject(rhs->type(), 1, stash));
- auto my_op = dot_product.compile_self(prod_factory, stash);
- InterpretedFunction::EvalSingle single(prod_factory, my_op);
- return spec_from_value(single.eval(std::vector<Value::CREF>({*lhs,*rhs})));
+TEST(UniversalDotProductTest, test_select_cell_types) {
+ auto always = always_double;
+ EXPECT_EQ(always(0), CellType::DOUBLE);
+ EXPECT_EQ(always(1), CellType::DOUBLE);
+ EXPECT_EQ(always(0), CellType::DOUBLE);
+ EXPECT_EQ(always(1), CellType::DOUBLE);
+ for (CellType lct: CellTypeUtils::list_types()) {
+ auto sel1 = select(lct);
+ EXPECT_EQ(sel1(0), lct);
+ EXPECT_EQ(sel1(1), lct);
+ EXPECT_EQ(sel1(0), lct);
+ EXPECT_EQ(sel1(1), lct);
+ for (CellType rct: CellTypeUtils::list_types()) {
+ auto sel2 = select(lct, rct);
+ EXPECT_EQ(sel2(0), lct);
+ EXPECT_EQ(sel2(1), rct);
+ EXPECT_EQ(sel2(0), lct);
+ EXPECT_EQ(sel2(1), rct);
+ }
+ }
}
-TEST(UniversalDotProductTest, generic_dot_product_works_for_various_cases) {
- size_t test_cases = 0;
- ASSERT_TRUE((layouts.size() % 2) == 0);
- for (size_t i = 0; i < layouts.size(); i += 2) {
- const auto &l = layouts[i];
- const auto &r = layouts[i+1];
- for (CellType lct : CellTypeUtils::list_types()) {
- auto lhs = l.cpy().cells(lct);
- if (lhs.bad_scalar()) continue;
- for (CellType rct : CellTypeUtils::list_types()) {
- auto rhs = r.cpy().cells(rct);
- if (rhs.bad_scalar()) continue;
- for (const std::vector<vespalib::string> &dims: reductions) {
- if (ValueType::join(lhs.type(), rhs.type()).reduce(dims).is_error()) continue;
- ++test_cases;
- SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
- auto expect = ReferenceOperations::reduce(ReferenceOperations::join(lhs, rhs, operation::Mul::f), Aggr::SUM, dims);
- auto actual = perform_dot_product(lhs, rhs, dims);
- // fprintf(stderr, "\n===\nLHS: %s\nRHS: %s\n===\nRESULT: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str(), actual.to_string().c_str());
- EXPECT_EQ(actual, expect);
- }
- }
+TEST(UniversalDotProductTest, universal_dot_product_works_for_various_cases) {
+ // forward, distinct, single
+ verify("reduce(2.0*3.0, sum)");
+
+ for (CellType lct: CellTypeUtils::list_types()) {
+ for (CellType rct: CellTypeUtils::list_types()) {
+ auto sel2 = select(lct, rct);
+ // !forward, !distinct, !single
+ verify("reduce(a4_1x8*a2_1x8,sum,a,x)", sel2);
+
+ // !forward, !distinct, single
+ verify("reduce(a4_1x8*a2_1x8,sum,a)", sel2);
+
+ // !forward, distinct, !single
+ verify("reduce(a4_1x8*a2_1x8,sum,x)", sel2);
+
+ // forward, !distinct, !single
+ verify("reduce(a4_1x8*b2_1x8,sum,b,x)", sel2);
+
+ // forward, !distinct, single
+ verify("reduce(a4_1x8*b2_1x8,sum,b)", sel2);
+
+ // forward, distinct, !single
+ verify("reduce(a4_1x8*x8,sum,x)", sel2);
}
}
- EXPECT_GT(test_cases, 500);
- fprintf(stderr, "total test cases run: %zu\n", test_cases);
+ // !forward, distinct, single
+
+ // This case is not possible since 'distinct' implies '!single' as
+ // long as we reduce anything. The only expression allowed to
+ // reduce nothing is the scalar case.
+}
+
+TEST(UniversalDotProductTest, universal_dot_product_works_with_complex_dimension_nesting) {
+ verify("reduce(a4_1b4_1c4_1x4y3z2w1*a2_1c1_1x4z2,sum,b,c,x)");
+}
+
+TEST(UniversalDotProductTest, forwarding_empty_result) {
+ verify("reduce(x0_0*y8_1,sum,y)");
+ verify("reduce(x8_1*y0_0,sum,y)");
+ verify("reduce(x0_0z16*y8_1z16,sum,y)");
+ verify("reduce(x8_1z16*y0_0z16,sum,y)");
+}
+
+TEST(UniversalDotProductTest, nonforwarding_empty_result) {
+ verify("reduce(x0_0y8*x1_1y8,sum,y)");
+ verify("reduce(x1_1y8*x0_0y8,sum,y)");
+ verify("reduce(x1_7y8z2*x1_1y8z2,sum,y)");
+}
+
+TEST(UniversalDotProductTest, forwarding_expanding_reduce) {
+ verify("reduce(5.0*y0_0,sum,y)");
+ verify("reduce(5.0*y0_0z1,sum,y)");
+ verify("reduce(z16*y0_0,sum,y)");
+ verify("reduce(x1_1*y0_0,sum,y)");
+ verify("reduce(x0_0*y1_1,sum,y)");
+ verify("reduce(x1_1z16*y0_0,sum,y)");
+ verify("reduce(x0_0z16*y1_1,sum,y)");
+}
+
+TEST(UniversalDotProductTest, nonforwarding_expanding_reduce) {
+ verify("reduce(x0_0*y1_1,sum,x,y)");
+ verify("reduce(x1_1*y0_0,sum,x,y)");
+ verify("reduce(x1_1*y0_0z1,sum,x,y)");
+ verify("reduce(x0_0y16*x1_1y16,sum,x)");
+ verify("reduce(x1_1y16*x0_0y16,sum,x)");
+ verify("reduce(x1_7*y1_1,sum,x,y)");
+ verify("reduce(x1_1*y1_7,sum,x,y)");
+ verify("reduce(x1_7y16*x1_1y16,sum,x)");
+ verify("reduce(x1_1y16*x1_7y16,sum,x)");
}
TEST(UniversalDotProductTest, bench_vector_dot_product) {
@@ -264,8 +333,11 @@ TEST(UniversalDotProductTest, bench_vector_dot_product) {
}
auto optimize_list = std::vector<Optimize>({baseline(), with_universal(), universal_only()});
- benchmark("reduce(1.0*2.0,sum)", optimize_list);
+ benchmark("reduce(2.0*3.0,sum)", optimize_list);
benchmark("reduce(5.0*x128,sum,x)", optimize_list);
+ benchmark("reduce(a1*x128,sum,x)", optimize_list);
+ benchmark("reduce(a8*x128,sum,x)", optimize_list);
+ benchmark("reduce(a1_1b8*x128,sum,x)", optimize_list);
benchmark("reduce(x16*x16,sum,x)", optimize_list);
benchmark("reduce(x768*x768,sum,x)", optimize_list);
benchmark("reduce(y64*x8y64,sum,x,y)", optimize_list);
@@ -284,8 +356,6 @@ TEST(UniversalDotProductTest, bench_vector_dot_product) {
benchmark("reduce(b64_1x8y128*x8y128,sum,y)", optimize_list);
benchmark("reduce(b64_1x128*x128,sum,b,x)", optimize_list);
benchmark("reduce(a1_1x128*a2_1b64_1x128,sum,a,x)", optimize_list);
- benchmark("reduce(x0_0*y8_1,sum,y)", optimize_list);
- benchmark("reduce(x8_1*y0_0,sum,y)", optimize_list);
size_t max_expr_size = 0;
for (const auto &[expr, cost_list]: benchmark_results) {
@@ -347,5 +417,7 @@ int main(int argc, char **argv) {
--argc;
}
::testing::InitGoogleTest(&argc, argv);
- return RUN_ALL_TESTS();
+ int result = RUN_ALL_TESTS();
+ fprintf(stderr, "verify called %zu times\n", verify_cnt);
+ return result;
}
diff --git a/eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp b/eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp
index 20b7d3364a8..8d09abbfe15 100644
--- a/eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp
+++ b/eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp
@@ -82,7 +82,7 @@ DenseJoinReducePlan::DenseJoinReducePlan(const ValueType &lhs, const ValueType &
DenseJoinReducePlan::~DenseJoinReducePlan() = default;
bool
-DenseJoinReducePlan::distinct_result() const
+DenseJoinReducePlan::is_distinct() const
{
for (size_t stride: res_stride) {
if (stride == 0) {
diff --git a/eval/src/vespa/eval/instruction/dense_join_reduce_plan.h b/eval/src/vespa/eval/instruction/dense_join_reduce_plan.h
index 8f9d5218630..3cf55e9ace4 100644
--- a/eval/src/vespa/eval/instruction/dense_join_reduce_plan.h
+++ b/eval/src/vespa/eval/instruction/dense_join_reduce_plan.h
@@ -21,7 +21,10 @@ struct DenseJoinReducePlan {
template <typename F> void execute(size_t lhs, size_t rhs, size_t res, const F &f) const {
run_nested_loop(lhs, rhs, res, loop_cnt, lhs_stride, rhs_stride, res_stride, f);
}
- bool distinct_result() const;
+ template <typename F> void execute_distinct(size_t lhs, size_t rhs, const F &f) const {
+ run_nested_loop(lhs, rhs, loop_cnt, lhs_stride, rhs_stride, f);
+ }
+ bool is_distinct() const;
};
} // namespace
diff --git a/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h b/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h
index 75b8d329763..7176e6ea6e9 100644
--- a/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h
+++ b/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h
@@ -67,7 +67,7 @@ public:
SparseJoinReducePlan(const ValueType &lhs, const ValueType &rhs, const ValueType &res);
~SparseJoinReducePlan();
size_t res_dims() const { return _res_dims; }
- bool distinct_result() const { return _res_dims == _in_res.size(); }
+ bool is_distinct() const { return _res_dims == _in_res.size(); }
bool maybe_forward_lhs_index() const;
bool maybe_forward_rhs_index() const;
size_t estimate_result_size(const Value::Index &lhs, const Value::Index &rhs) const {
diff --git a/eval/src/vespa/eval/instruction/universal_dot_product.cpp b/eval/src/vespa/eval/instruction/universal_dot_product.cpp
index 3811508a543..414a54f09a8 100644
--- a/eval/src/vespa/eval/instruction/universal_dot_product.cpp
+++ b/eval/src/vespa/eval/instruction/universal_dot_product.cpp
@@ -42,87 +42,146 @@ struct UniversalDotProductParam {
}
};
-template <typename LCT, typename RCT, typename OCT>
-void my_universal_dot_product_op(InterpretedFunction::State &state, uint64_t param_in) {
- using dot_product = DotProduct<LCT,RCT>;
- const auto &param = unwrap_param<UniversalDotProductParam>(param_in);
- const auto &lhs = state.peek(1);
- const auto &rhs = state.peek(0);
- const auto &lhs_index = lhs.index();
- const auto &rhs_index = rhs.index();
- const auto lhs_cells = lhs.cells().typify<LCT>();
- const auto rhs_cells = rhs.cells().typify<RCT>();
- auto &stored_result = state.stash.create<std::unique_ptr<FastValue<OCT,true>>>(
- std::make_unique<FastValue<OCT,true>>(param.res_type, param.sparse_plan.res_dims(), param.dense_plan.res_size,
- param.sparse_plan.estimate_result_size(lhs_index, rhs_index)));
- auto &result = *(stored_result.get());
- ArrayRef<OCT> dst;
- auto dense_fun = [&](size_t lhs_idx, size_t rhs_idx, size_t dst_idx) {
- dst[dst_idx] += dot_product::apply(&lhs_cells[lhs_idx], &rhs_cells[rhs_idx], param.vector_size);
- };
- auto sparse_fun = [&](size_t lhs_subspace, size_t rhs_subspace, ConstArrayRef<string_id> res_addr) {
- bool first;
- std::tie(dst, first) = result.insert_subspace(res_addr);
- if (first) {
- std::fill(dst.begin(), dst.end(), OCT{});
- }
- param.dense_plan.execute(lhs_subspace * param.dense_plan.lhs_size,
- rhs_subspace * param.dense_plan.rhs_size,
- 0, dense_fun);
- };
- param.sparse_plan.execute(lhs_index, rhs_index, sparse_fun);
- state.pop_pop_push(result);
+template <typename OCT>
+const Value &create_empty_result(const UniversalDotProductParam &param, Stash &stash) {
+ if (param.sparse_plan.res_dims() == 0) {
+ auto zero_cells = stash.create_array<OCT>(param.dense_plan.res_size);
+ return stash.create<ValueView>(param.res_type, TrivialIndex::get(), TypedCells(zero_cells));
+ } else {
+ return stash.create<ValueView>(param.res_type, EmptyIndex::get(), TypedCells(nullptr, get_cell_type<OCT>(), 0));
+ }
}
-template <typename LCT, typename RCT, typename OCT>
-void my_universal_dense_dot_product_op(InterpretedFunction::State &state, uint64_t param_in) {
- using dot_product = DotProduct<LCT,RCT>;
- const auto &param = unwrap_param<UniversalDotProductParam>(param_in);
- const auto &lhs = state.peek(1);
- const auto &rhs = state.peek(0);
- size_t lhs_index_size = lhs.index().size();
- size_t rhs_index_size = rhs.index().size();
- if (rhs_index_size == 0 || lhs_index_size == 0) {
- const Value &empty = state.stash.create<ValueView>(param.res_type, EmptyIndex::get(), TypedCells(nullptr, get_cell_type<OCT>(), 0));
- state.pop_pop_push(empty);
- return;
+template <typename LCT, typename RCT, bool single> struct MyDotProduct;
+template <typename LCT, typename RCT> struct MyDotProduct<LCT, RCT, false> {
+ size_t vector_size;
+ MyDotProduct(size_t vector_size_in) : vector_size(vector_size_in) {}
+ auto operator()(const LCT *lhs, const RCT *rhs) const {
+ return DotProduct<LCT,RCT>::apply(lhs, rhs, vector_size);
+ }
+};
+template <typename LCT, typename RCT> struct MyDotProduct<LCT, RCT, true> {
+ MyDotProduct(size_t) {}
+ auto operator()(const LCT *lhs, const RCT *rhs) const {
+ return (*lhs) * (*rhs);
}
- const auto lhs_cells = lhs.cells().typify<LCT>();
- const auto rhs_cells = rhs.cells().typify<RCT>();
- auto dst_cells = state.stash.create_array<OCT>(lhs_index_size * param.dense_plan.res_size);
- auto dense_fun = [&](size_t lhs_idx, size_t rhs_idx, size_t dst_idx) {
- dst_cells[dst_idx] += dot_product::apply(&lhs_cells[lhs_idx], &rhs_cells[rhs_idx], param.vector_size);
- };
- for (size_t lhs_subspace = 0; lhs_subspace < lhs_index_size; ++lhs_subspace) {
- for (size_t rhs_subspace = 0; rhs_subspace < rhs_index_size; ++rhs_subspace) {
- param.dense_plan.execute(lhs_subspace * param.dense_plan.lhs_size,
- rhs_subspace * param.dense_plan.rhs_size,
- lhs_subspace * param.dense_plan.res_size, dense_fun);
+};
+
+template <typename LCT, typename RCT, typename OCT, bool distinct, bool single>
+struct DenseFun {
+ [[no_unique_address]] MyDotProduct<LCT,RCT,single> dot_product;
+ const LCT *lhs;
+ const RCT *rhs;
+ mutable OCT *dst;
+ DenseFun(size_t vector_size_in, const Value &lhs_in, const Value &rhs_in)
+ : dot_product(vector_size_in),
+ lhs(lhs_in.cells().typify<LCT>().data()),
+ rhs(rhs_in.cells().typify<RCT>().data()) {}
+ void operator()(size_t lhs_idx, size_t rhs_idx) const requires distinct {
+ *dst++ = dot_product(lhs + lhs_idx, rhs + rhs_idx);
+ }
+ void operator()(size_t lhs_idx, size_t rhs_idx, size_t dst_idx) const requires (!distinct) {
+ dst[dst_idx] += dot_product(lhs + lhs_idx, rhs + rhs_idx);
+ }
+};
+
+template <typename OCT, bool forward> struct Result {};
+template <typename OCT> struct Result<OCT, false> {
+ mutable FastValue<OCT,true> *fast;
+};
+
+template <typename LCT, typename RCT, typename OCT, bool forward, bool distinct, bool single>
+struct SparseFun {
+ const UniversalDotProductParam &param;
+ DenseFun<LCT,RCT,OCT,distinct,single> dense_fun;
+ [[no_unique_address]] Result<OCT, forward> result;
+ SparseFun(uint64_t param_in, const Value &lhs_in, const Value &rhs_in)
+ : param(unwrap_param<UniversalDotProductParam>(param_in)),
+ dense_fun(param.vector_size, lhs_in, rhs_in),
+ result() {}
+ void operator()(size_t lhs_subspace, size_t rhs_subspace, ConstArrayRef<string_id> res_addr) const requires (!forward && !distinct) {
+ auto [space, first] = result.fast->insert_subspace(res_addr);
+ if (first) {
+ std::fill(space.begin(), space.end(), OCT{});
+ }
+ dense_fun.dst = space.data();
+ param.dense_plan.execute(lhs_subspace * param.dense_plan.lhs_size,
+ rhs_subspace * param.dense_plan.rhs_size,
+ 0, dense_fun);
+ };
+ void operator()(size_t lhs_subspace, size_t rhs_subspace, ConstArrayRef<string_id> res_addr) const requires (!forward && distinct) {
+ dense_fun.dst = result.fast->add_subspace(res_addr).data();
+ param.dense_plan.execute_distinct(lhs_subspace * param.dense_plan.lhs_size,
+ rhs_subspace * param.dense_plan.rhs_size,
+ dense_fun);
+ };
+ void operator()(size_t lhs_subspace, size_t rhs_subspace) const requires (forward && !distinct) {
+ param.dense_plan.execute(lhs_subspace * param.dense_plan.lhs_size,
+ rhs_subspace * param.dense_plan.rhs_size,
+ lhs_subspace * param.dense_plan.res_size, dense_fun);
+ };
+ void operator()(size_t lhs_subspace, size_t rhs_subspace) const requires (forward && distinct) {
+ param.dense_plan.execute_distinct(lhs_subspace * param.dense_plan.lhs_size,
+ rhs_subspace * param.dense_plan.rhs_size, dense_fun);
+ };
+ const Value &calculate_result(const Value::Index &lhs, const Value::Index &rhs, Stash &stash) const requires (!forward) {
+ auto &stored_result = stash.create<std::unique_ptr<FastValue<OCT,true>>>(
+ std::make_unique<FastValue<OCT,true>>(param.res_type, param.sparse_plan.res_dims(), param.dense_plan.res_size,
+ param.sparse_plan.estimate_result_size(lhs, rhs)));
+ result.fast = stored_result.get();
+ param.sparse_plan.execute(lhs, rhs, *this);
+ if (result.fast->my_index.map.size() == 0 && param.sparse_plan.res_dims() == 0) {
+ auto empty = result.fast->add_subspace(ConstArrayRef<string_id>());
+ std::fill(empty.begin(), empty.end(), OCT{});
+ }
+ return *(result.fast);
+ }
+ const Value &calculate_result(const Value::Index &lhs, const Value::Index &rhs, Stash &stash) const requires forward {
+ size_t lhs_size = lhs.size();
+ size_t rhs_size = rhs.size();
+ if (lhs_size == 0 || rhs_size == 0) {
+ return create_empty_result<OCT>(param, stash);
+ }
+ auto dst_cells = (distinct)
+ ? stash.create_uninitialized_array<OCT>(lhs_size * param.dense_plan.res_size)
+ : stash.create_array<OCT>(lhs_size * param.dense_plan.res_size);
+ dense_fun.dst = dst_cells.data();
+ for (size_t lhs_idx = 0; lhs_idx < lhs_size; ++lhs_idx) {
+ for (size_t rhs_idx = 0; rhs_idx < rhs_size; ++rhs_idx) {
+ (*this)(lhs_idx, rhs_idx);
+ }
}
+ return stash.create<ValueView>(param.res_type, lhs, TypedCells(dst_cells));
}
- const Value &result = state.stash.create<ValueView>(param.res_type, lhs.index(), TypedCells(dst_cells));
- state.pop_pop_push(result);
+};
+
+template <typename LCT, typename RCT, typename OCT, bool forward, bool distinct, bool single>
+void my_universal_dot_product_op(InterpretedFunction::State &state, uint64_t param_in) {
+ SparseFun<LCT,RCT,OCT,forward,distinct,single> sparse_fun(param_in, state.peek(1), state.peek(0));
+ state.pop_pop_push(sparse_fun.calculate_result(state.peek(1).index(), state.peek(0).index(), state.stash));
}
struct SelectUniversalDotProduct {
- template <typename LCM, typename RCM, typename SCALAR> static auto invoke(const UniversalDotProductParam &param) {
+ template <typename LCM, typename RCM, typename SCALAR, typename FORWARD, typename DISTINCT, typename SINGLE>
+ static auto invoke() {
constexpr CellMeta ocm = CellMeta::join(LCM::value, RCM::value).reduce(SCALAR::value);
using LCT = CellValueType<LCM::value.cell_type>;
using RCT = CellValueType<RCM::value.cell_type>;
using OCT = CellValueType<ocm.cell_type>;
- if (param.sparse_plan.maybe_forward_lhs_index()) {
- return my_universal_dense_dot_product_op<LCT,RCT,OCT>;
+ if constexpr ((std::same_as<LCT,float> && std::same_as<RCT,float>) ||
+ (std::same_as<LCT,double> && std::same_as<RCT,double>))
+ {
+ return my_universal_dot_product_op<LCT,RCT,OCT,FORWARD::value,DISTINCT::value,SINGLE::value>;
}
- return my_universal_dot_product_op<LCT,RCT,OCT>;
+ return my_universal_dot_product_op<LCT,RCT,OCT,FORWARD::value,false,false>;
}
};
-bool check_types(const ValueType &res, const ValueType &lhs, const ValueType &rhs) {
- (void) res;
+bool check_types(const ValueType &lhs, const ValueType &rhs) {
if (lhs.is_double() || rhs.is_double()) {
return false;
}
- if (lhs.count_mapped_dimensions() > 0 || rhs.count_mapped_dimensions() > 0) {
+ if (lhs.count_mapped_dimensions() > 0 && rhs.count_mapped_dimensions() > 0) {
return true;
}
return false;
@@ -142,10 +201,12 @@ UniversalDotProduct::compile_self(const ValueBuilderFactory &, Stash &stash) con
{
auto &param = stash.create<UniversalDotProductParam>(result_type(), lhs().result_type(), rhs().result_type());
using MyTypify = TypifyValue<TypifyCellMeta,TypifyBool>;
- auto op = typify_invoke<3,MyTypify,SelectUniversalDotProduct>(lhs().result_type().cell_meta(),
+ auto op = typify_invoke<6,MyTypify,SelectUniversalDotProduct>(lhs().result_type().cell_meta(),
rhs().result_type().cell_meta(),
result_type().cell_meta().is_scalar,
- param);
+ param.sparse_plan.maybe_forward_lhs_index(),
+ param.sparse_plan.is_distinct() && param.dense_plan.is_distinct(),
+ param.vector_size == 1);
return InterpretedFunction::Instruction(op, wrap_param<UniversalDotProductParam>(param));
}
@@ -157,7 +218,7 @@ UniversalDotProduct::optimize(const TensorFunction &expr, Stash &stash, bool for
const ValueType &res_type = expr.result_type();
const ValueType &lhs_type = join->lhs().result_type();
const ValueType &rhs_type = join->rhs().result_type();
- if (force || check_types(res_type, lhs_type, rhs_type)) {
+ if (force || check_types(lhs_type, rhs_type)) {
SparseJoinReducePlan sparse_plan(lhs_type, rhs_type, res_type);
if (sparse_plan.maybe_forward_rhs_index() && !sparse_plan.maybe_forward_lhs_index()) {
return stash.create<UniversalDotProduct>(res_type, join->rhs(), join->lhs());
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index f1d639432cc..bd6d772c215 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -15,6 +15,8 @@ import java.util.function.Predicate;
import static com.yahoo.vespa.flags.FetchVector.Dimension.APPLICATION_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.CLOUD_ACCOUNT;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_ID;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_TYPE;
import static com.yahoo.vespa.flags.FetchVector.Dimension.CONSOLE_USER_EMAIL;
import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME;
import static com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE;
@@ -330,7 +332,7 @@ public class Flags {
APPLICATION_ID
);
public static final UnboundBooleanFlag ENABLE_NESTED_MULTIVALUE_GROUPING = defineFeatureFlag(
- "enable-nested-multivalue-grouping", false,
+ "enable-nested-multivalue-grouping", true,
List.of("baldersheim"), "2023-06-29", "2023-12-31",
"Should we enable proper nested multivalue grouping",
"Takes effect at redeployment",
@@ -375,6 +377,13 @@ public class Flags {
"Whether to write application data (active session id, last deployed session id etc. ) as json",
"Takes effect immediately");
+ public static final UnboundIntFlag MIN_EXCLUSIVE_ADVERTISED_MEMORY_GB = defineIntFlag(
+ "min-exclusive-advertised-memory-gb", 4,
+ List.of("freva"), "2023-09-08", "2023-11-01",
+ "Minimum amount of advertised memory for exclusive nodes",
+ "Takes effect immediately",
+ APPLICATION_ID, CLUSTER_ID, CLUSTER_TYPE);
+
public static final UnboundBooleanFlag ASSIGN_RANDOMIZED_ID = defineFeatureFlag(
"assign-randomized-id", false,
List.of("mortent"), "2023-08-31", "2024-02-01",
diff --git a/maven-plugins/allowed-maven-dependencies.txt b/maven-plugins/allowed-maven-dependencies.txt
index 4b47810ea74..8d943d1d19e 100644
--- a/maven-plugins/allowed-maven-dependencies.txt
+++ b/maven-plugins/allowed-maven-dependencies.txt
@@ -19,7 +19,7 @@ javax.annotation:javax.annotation-api:1.2
javax.inject:javax.inject:1
org.apache-extras.beanshell:bsh:2.0b6
org.apache.commons:commons-collections4:4.4
-org.apache.commons:commons-compress:1.23.0
+org.apache.commons:commons-compress:1.24.0
org.apache.commons:commons-lang3:3.13.0
org.apache.maven:maven-archiver:3.6.1
org.apache.maven:maven-artifact:3.9.4
@@ -32,8 +32,8 @@ org.apache.maven:maven-repository-metadata:3.9.4
org.apache.maven:maven-resolver-provider:3.9.4
org.apache.maven:maven-settings:3.9.4
org.apache.maven:maven-settings-builder:3.9.4
-org.apache.maven.enforcer:enforcer-api:3.4.0
-org.apache.maven.enforcer:enforcer-rules:3.4.0
+org.apache.maven.enforcer:enforcer-api:3.4.1
+org.apache.maven.enforcer:enforcer-rules:3.4.1
org.apache.maven.plugin-tools:maven-plugin-annotations:3.9.0
org.apache.maven.plugins:maven-shade-plugin:3.5.0
org.apache.maven.resolver:maven-resolver-api:1.9.14
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
index 2cfdcd85d9e..19e27162d74 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
@@ -321,10 +321,10 @@ public class Vespa9VespaMetricSet {
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count));
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count));
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS, EnumSet.of(max, sum, count));
// Executors shared between all document dbs
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count));
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
index 1b86819ddc0..916e8c23f21 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
@@ -355,10 +355,10 @@ public class VespaMetricSet {
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count));
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, count, average));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count));
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count));
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS, EnumSet.of(max, sum, count));
// Executors shared between all document dbs
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count));
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java
index 2aa1d12c491..68dab0b32fb 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java
@@ -7,6 +7,7 @@ import com.yahoo.vespa.hosted.node.admin.container.image.Image;
import com.yahoo.vespa.hosted.node.admin.nodeagent.ContainerData;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixUser;
+import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandLine;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import java.time.Duration;
@@ -48,7 +49,11 @@ public interface ContainerEngine {
CommandResult execute(NodeAgentContext context, UnixUser user, Duration timeout, String... command);
/** Execute command inside the container's network namespace. Throws on non-zero exit code */
- CommandResult executeInNetworkNamespace(NodeAgentContext context, String... command);
+ CommandResult executeInNetworkNamespace(NodeAgentContext context, CommandLine.Options options, String... command);
+
+ default CommandResult executeInNetworkNamespace(NodeAgentContext context, String... command) {
+ return executeInNetworkNamespace(context, new CommandLine.Options(), command);
+ }
/** Download given image */
void pullImage(TaskContext context, DockerImage image, RegistryCredentials registryCredentials);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
index fa933e9622a..cae47a88961 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
@@ -78,7 +78,11 @@ public class ContainerOperations {
/** Execute command in inside containers network namespace, identified by given context. Throws on non-zero exit code */
public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, String... command) {
- return containerEngine.executeInNetworkNamespace(context, command);
+ return executeCommandInNetworkNamespace(context, new CommandLine.Options(), command);
+ }
+
+ public CommandResult executeCommandInNetworkNamespace(NodeAgentContext context, CommandLine.Options options, String... command) {
+ return containerEngine.executeInNetworkNamespace(context, options, command);
}
/** Resume node. Resuming a node means that it is ready to receive traffic */
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java
index e8d10805a45..1cfe73e8937 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java
@@ -9,10 +9,10 @@ import com.yahoo.vespa.hosted.node.admin.task.util.file.Editor;
import com.yahoo.vespa.hosted.node.admin.task.util.file.LineEditor;
import com.yahoo.vespa.hosted.node.admin.task.util.network.IPAddresses;
import com.yahoo.vespa.hosted.node.admin.task.util.network.IPVersion;
+import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandLine;
import java.io.IOException;
import java.net.InetAddress;
-import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
@@ -89,7 +89,7 @@ public class AclMaintainer {
private Supplier<List<String>> listTable(NodeAgentContext context, String table, IPVersion ipVersion) {
return () -> containerOperations
- .executeCommandInNetworkNamespace(context, ipVersion.iptablesCmd(), "-S", "-t", table)
+ .executeCommandInNetworkNamespace(context, new CommandLine.Options().setSilent(true), ipVersion.iptablesCmd(), "-S", "-t", table)
.mapEachLine(String::trim);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/CommandLine.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/CommandLine.java
index 2153a15e76b..3d45f515d96 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/CommandLine.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/CommandLine.java
@@ -144,6 +144,23 @@ public class CommandLine {
return doExecute();
}
+ public static class Options {
+ private boolean silent = false;
+
+ public Options() {}
+
+ /** Invoke {@link #executeSilently()} instead of {@link #execute()} (default). */
+ public Options setSilent(boolean silent) {
+ this.silent = silent;
+ return this;
+ }
+ }
+
+ /** Convenience method to bundle up a bunch of calls on this into an options object. */
+ public CommandResult execute(Options options) {
+ return options.silent ? executeSilently() : execute();
+ }
+
/**
* Record an already executed executeSilently() as having modified the system.
* For instance with YUM it is not known until after a 'yum install' whether it
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java
index af869786504..28e733ac018 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java
@@ -8,6 +8,7 @@ import com.yahoo.vespa.hosted.node.admin.nodeagent.ContainerData;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixUser;
import com.yahoo.vespa.hosted.node.admin.task.util.fs.ContainerPath;
+import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandLine;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import com.yahoo.vespa.hosted.node.admin.task.util.process.TestTerminal;
@@ -158,13 +159,11 @@ public class ContainerEngineMock implements ContainerEngine {
}
@Override
- public CommandResult executeInNetworkNamespace(NodeAgentContext context, String... command) {
+ public CommandResult executeInNetworkNamespace(NodeAgentContext context, CommandLine.Options options, String... command) {
if (terminal == null) {
return new CommandResult(null, 0, "");
}
- return terminal.newCommandLine(context)
- .add(command)
- .executeSilently();
+ return terminal.newCommandLine(context).add(command).execute(options);
}
@Override
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java
index 827c6ebb6ec..32e82627d9a 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContextImpl;
import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixPath;
import com.yahoo.vespa.hosted.node.admin.task.util.network.IPAddressesMock;
import com.yahoo.vespa.hosted.node.admin.task.util.network.IPVersion;
+import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandLine;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import com.yahoo.vespa.test.file.TestFileSystem;
import org.junit.jupiter.api.BeforeEach;
@@ -64,7 +65,7 @@ public class AclMaintainerTest {
aclMaintainer.converge(context);
- verify(containerOperations, times(4)).executeCommandInNetworkNamespace(eq(context), any(), eq("-S"), eq("-t"), any());
+ verify(containerOperations, times(4)).executeCommandInNetworkNamespace(eq(context), any(CommandLine.Options.class), any(), eq("-S"), eq("-t"), any());
verify(containerOperations, times(2)).executeCommandInNetworkNamespace(eq(context), eq("iptables-restore"), any());
verify(containerOperations, times(2)).executeCommandInNetworkNamespace(eq(context), eq("ip6tables-restore"), any());
verifyNoMoreInteractions(containerOperations);
@@ -131,7 +132,7 @@ public class AclMaintainerTest {
aclMaintainer.converge(context);
- verify(containerOperations, times(2)).executeCommandInNetworkNamespace(eq(context), any(), eq("-S"), eq("-t"), any());
+ verify(containerOperations, times(2)).executeCommandInNetworkNamespace(eq(context), any(CommandLine.Options.class), any(), eq("-S"), eq("-t"), any());
verify(containerOperations, times(1)).executeCommandInNetworkNamespace(eq(context), eq("iptables-restore"), any());
verify(containerOperations, times(1)).executeCommandInNetworkNamespace(eq(context), eq("ip6tables-restore"), any());
verifyNoMoreInteractions(containerOperations);
@@ -188,7 +189,7 @@ public class AclMaintainerTest {
aclMaintainer.converge(context);
- verify(containerOperations, times(3)).executeCommandInNetworkNamespace(eq(context), any(), eq("-S"), eq("-t"), any());
+ verify(containerOperations, times(3)).executeCommandInNetworkNamespace(eq(context), any(CommandLine.Options.class), any(), eq("-S"), eq("-t"), any());
verify(containerOperations, times(1)).executeCommandInNetworkNamespace(eq(context), eq("iptables-restore"), any());
verify(containerOperations, never()).executeCommandInNetworkNamespace(eq(context), eq("ip6tables-restore"), any()); //we don't have a ip4 address for the container so no redirect
verifyNoMoreInteractions(containerOperations);
@@ -237,7 +238,7 @@ public class AclMaintainerTest {
aclMaintainer.converge(context);
- verify(containerOperations, times(3)).executeCommandInNetworkNamespace(eq(context), any(), eq("-S"), eq("-t"), any());
+ verify(containerOperations, times(3)).executeCommandInNetworkNamespace(eq(context), any(CommandLine.Options.class), any(), eq("-S"), eq("-t"), any());
verify(containerOperations, times(1)).executeCommandInNetworkNamespace(eq(context), eq("iptables-restore"), any());
verify(containerOperations, times(1)).executeCommandInNetworkNamespace(eq(context), eq("iptables"), eq("-F"), eq("-t"), eq("filter"));
verifyNoMoreInteractions(containerOperations);
@@ -271,7 +272,7 @@ public class AclMaintainerTest {
aclMaintainer.converge(context);
- verify(containerOperations, times(4)).executeCommandInNetworkNamespace(eq(context), any(), eq("-S"), eq("-t"), any());
+ verify(containerOperations, times(4)).executeCommandInNetworkNamespace(eq(context), any(CommandLine.Options.class), any(), eq("-S"), eq("-t"), any());
verify(containerOperations, times(2)).executeCommandInNetworkNamespace(eq(context), eq("iptables-restore"), any());
verify(containerOperations, times(2)).executeCommandInNetworkNamespace(eq(context), eq("ip6tables-restore"), any());
verifyNoMoreInteractions(containerOperations);
@@ -343,7 +344,7 @@ public class AclMaintainerTest {
private void whenListRules(NodeAgentContext context, String table, IPVersion ipVersion, String output) {
when(containerOperations.executeCommandInNetworkNamespace(
- eq(context), eq(ipVersion.iptablesCmd()), eq("-S"), eq("-t"), eq(table)))
+ eq(context), any(CommandLine.Options.class), eq(ipVersion.iptablesCmd()), eq("-S"), eq("-t"), eq(table)))
.thenReturn(new CommandResult(null, 0, output));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
index 15913fec5ed..a7e82250275 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
@@ -310,42 +310,47 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
}
private void updateLockMetrics() {
+ Set<Pair<Metric.Context, String>> currentNonZeroMetrics = new HashSet<>();
LockStats.getGlobal().getLockMetricsByPath()
.forEach((lockPath, lockMetrics) -> {
Metric.Context context = getContext(Map.of("lockPath", lockPath));
LatencyMetrics acquireLatencyMetrics = lockMetrics.getAndResetAcquireLatencyMetrics();
- setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_MAX_ACTIVE_LATENCY.baseName(), acquireLatencyMetrics.maxActiveLatencySeconds(), context);
- setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_HZ.baseName(), acquireLatencyMetrics.startHz(), context);
- setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_LOAD.baseName(), acquireLatencyMetrics.load(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_MAX_ACTIVE_LATENCY.baseName(), acquireLatencyMetrics.maxActiveLatencySeconds(), context, currentNonZeroMetrics);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_HZ.baseName(), acquireLatencyMetrics.startHz(), context, currentNonZeroMetrics);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_LOAD.baseName(), acquireLatencyMetrics.load(), context, currentNonZeroMetrics);
LatencyMetrics lockedLatencyMetrics = lockMetrics.getAndResetLockedLatencyMetrics();
- setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LATENCY.baseName(), lockedLatencyMetrics.maxLatencySeconds(), context);
- setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD.baseName(), lockedLatencyMetrics.load(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LATENCY.baseName(), lockedLatencyMetrics.maxLatencySeconds(), context, currentNonZeroMetrics);
+ lockedLatencyMetrics.loadByThread().forEach((name, load) -> {
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD.baseName(), load, getContext(Map.of("lockPath", lockPath, "thread", name)), currentNonZeroMetrics);
+ });
- setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_TIMED_OUT.baseName(), lockMetrics.getAndResetAcquireTimedOutCount(), context);
- setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_DEADLOCK.baseName(), lockMetrics.getAndResetDeadlockCount(), context);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ACQUIRE_TIMED_OUT.baseName(), lockMetrics.getAndResetAcquireTimedOutCount(), context, currentNonZeroMetrics);
+ setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_DEADLOCK.baseName(), lockMetrics.getAndResetDeadlockCount(), context, currentNonZeroMetrics);
// bucket for various rare errors - to reduce #metrics
setNonZero(ConfigServerMetrics.LOCK_ATTEMPT_ERRORS.baseName(),
- lockMetrics.getAndResetAcquireFailedCount() +
- lockMetrics.getAndResetReleaseFailedCount() +
- lockMetrics.getAndResetNakedReleaseCount() +
- lockMetrics.getAndResetAcquireWithoutReleaseCount() +
- lockMetrics.getAndResetForeignReleaseCount(),
- context);
+ lockMetrics.getAndResetAcquireFailedCount() +
+ lockMetrics.getAndResetReleaseFailedCount() +
+ lockMetrics.getAndResetNakedReleaseCount() +
+ lockMetrics.getAndResetAcquireWithoutReleaseCount() +
+ lockMetrics.getAndResetForeignReleaseCount(),
+ context,
+ currentNonZeroMetrics);
});
+ // Need to set the metric to 0 after it has been set to non-zero, to avoid carrying a non-zero 'last' from earlier periods.
+ nonZeroMetrics.removeIf(currentNonZeroMetrics::contains); // Retain those that turned zero for this period.
+ nonZeroMetrics.forEach(metricKey -> metric.set(metricKey.getSecond(), 0, metricKey.getFirst()));
+ nonZeroMetrics.clear();
+ nonZeroMetrics.addAll(currentNonZeroMetrics);
}
- private void setNonZero(String key, Number value, Metric.Context context) {
+ private void setNonZero(String key, Number value, Metric.Context context, Set<Pair<Metric.Context, String>> nonZeroMetrics) {
var metricKey = new Pair<>(context, key);
if (Double.compare(value.doubleValue(), 0.0) != 0) {
metric.set(key, value, context);
nonZeroMetrics.add(metricKey);
- } else if (nonZeroMetrics.remove(metricKey)) {
- // Need to set the metric to 0 after it has been set to non-zero, to avoid carrying
- // a non-zero 'last' from earlier periods.
- metric.set(key, value, context);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index ac13ee992c2..132cd0e6d67 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -15,6 +15,7 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.ProvisionLock;
import com.yahoo.config.provision.ProvisionLogger;
import com.yahoo.config.provision.Provisioner;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.jdisc.Metric;
import com.yahoo.transaction.Mutex;
@@ -119,9 +120,6 @@ public class NodeRepositoryProvisioner implements Provisioner {
if (!requested.minResources().nodeResources().gpuResources().equals(requested.maxResources().nodeResources().gpuResources()))
throw new IllegalArgumentException(requested + " is invalid: GPU capacity cannot have ranges");
- if (!requested.minResources().nodeResources().gpuResources().isZero() && !zone.system().isPublic())
- throw new IllegalArgumentException(requested + " is invalid: GPUs are not supported in " + zone);
-
logInsufficientDiskResources(cluster, requested, logger);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index 47c388f97a8..06ab9eb1a10 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -7,6 +7,9 @@ import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.flags.FetchVector;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.IntFlag;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.util.Locale;
@@ -20,19 +23,22 @@ import java.util.Locale;
public class NodeResourceLimits {
private final NodeRepository nodeRepository;
+ private final IntFlag minExclusiveAdvertisedMemoryGbFlag;
public NodeResourceLimits(NodeRepository nodeRepository) {
this.nodeRepository = nodeRepository;
+ this.minExclusiveAdvertisedMemoryGbFlag = Flags.MIN_EXCLUSIVE_ADVERTISED_MEMORY_GB.bindTo(nodeRepository.flagSource());
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ApplicationId applicationId, ClusterSpec cluster) {
- if (! requested.vcpuIsUnspecified() && requested.vcpu() < minAdvertisedVcpu(applicationId, cluster))
- illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(applicationId, cluster));
- if (! requested.memoryGbIsUnspecified() && requested.memoryGb() < minAdvertisedMemoryGb(cluster))
- illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(cluster));
- if (! requested.diskGbIsUnspecified() && requested.diskGb() < minAdvertisedDiskGb(requested, cluster.isExclusive()))
- illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, cluster.isExclusive()));
+ boolean exclusive = nodeRepository.exclusiveAllocation(cluster);
+ if (! requested.vcpuIsUnspecified() && requested.vcpu() < minAdvertisedVcpu(applicationId, cluster, exclusive))
+ illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(applicationId, cluster, exclusive));
+ if (! requested.memoryGbIsUnspecified() && requested.memoryGb() < minAdvertisedMemoryGb(applicationId, cluster, exclusive))
+ illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(applicationId, cluster, exclusive));
+ if (! requested.diskGbIsUnspecified() && requested.diskGb() < minAdvertisedDiskGb(requested, exclusive))
+ illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, exclusive));
}
// TODO: Remove this when we are ready to fail, not just warn on this. */
@@ -64,23 +70,28 @@ public class NodeResourceLimits {
if (followRecommendations) // TODO: Do unconditionally when we enforce this limit
requested = requested.withDiskGb(Math.max(minAdvertisedDiskGb(requested, cluster), requested.diskGb()));
- return requested.withVcpu(Math.max(minAdvertisedVcpu(applicationId, cluster), requested.vcpu()))
- .withMemoryGb(Math.max(minAdvertisedMemoryGb(cluster), requested.memoryGb()))
+ return requested.withVcpu(Math.max(minAdvertisedVcpu(applicationId, cluster, exclusive), requested.vcpu()))
+ .withMemoryGb(Math.max(minAdvertisedMemoryGb(applicationId, cluster, exclusive), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested, exclusive), requested.diskGb()));
}
- private double minAdvertisedVcpu(ApplicationId applicationId, ClusterSpec cluster) {
+ private double minAdvertisedVcpu(ApplicationId applicationId, ClusterSpec cluster, boolean exclusive) {
if (cluster.type() == ClusterSpec.Type.admin) return 0.1;
if (zone().environment().isProduction() && ! zone().system().isCd() &&
- nodeRepository.exclusiveAllocation(cluster) && ! applicationId.instance().isTester()) return 2;
+ exclusive && ! applicationId.instance().isTester()) return 2;
if (zone().environment().isProduction() && cluster.type().isContent()) return 1.0;
- if (zone().environment() == Environment.dev && ! nodeRepository.exclusiveAllocation(cluster)) return 0.1;
+ if (zone().environment() == Environment.dev && ! exclusive) return 0.1;
return 0.5;
}
- private double minAdvertisedMemoryGb(ClusterSpec cluster) {
+ private double minAdvertisedMemoryGb(ApplicationId applicationId, ClusterSpec cluster, boolean exclusive) {
if (cluster.type() == ClusterSpec.Type.admin) return 1;
- return 4;
+ if (!exclusive) return 4;
+ return minExclusiveAdvertisedMemoryGbFlag
+ .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm())
+ .with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
+ .with(FetchVector.Dimension.CLUSTER_TYPE, cluster.type().name())
+ .value();
}
private double minAdvertisedDiskGb(NodeResources requested, boolean exclusive) {
@@ -105,7 +116,7 @@ public class NodeResourceLimits {
}
private double minRealVcpu(ApplicationId applicationId, ClusterSpec cluster) {
- return minAdvertisedVcpu(applicationId, cluster);
+ return minAdvertisedVcpu(applicationId, cluster, nodeRepository.exclusiveAllocation(cluster));
}
private static double minRealMemoryGb(ClusterSpec cluster) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java
index 42904bb6d68..8e4bcd6d942 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java
@@ -115,6 +115,13 @@ public class LocksResponse extends HttpResponse {
setNonZeroDouble(cursor, name + "MaxActiveLatency", latencyMetrics.maxActiveLatencySeconds());
setNonZeroDouble(cursor, name + "Hz", latencyMetrics.endHz());
setNonZeroDouble(cursor, name + "Load", latencyMetrics.load());
+ if (latencyMetrics.loadByThread().isEmpty()) return;
+ Cursor loadByThreadCursor = cursor.setArray(name + "LoadByThread");
+ latencyMetrics.loadByThread().forEach((threadName, load) -> {
+ Cursor loadForThreadCursor = loadByThreadCursor.addObject();
+ loadForThreadCursor.setString("name", threadName);
+ loadForThreadCursor.setDouble("load", load);
+ });
}
private static void setNonZeroDouble(Cursor cursor, String fieldName, double value) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 4e19d04ffac..52d4c85bcaf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -13,7 +13,6 @@ import com.yahoo.config.provision.NodeResources.DiskSpeed;
import com.yahoo.config.provision.NodeResources.StorageType;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
-import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import org.junit.Test;
@@ -25,7 +24,6 @@ import java.util.Optional;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.fast;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.slow;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
/**
@@ -899,7 +897,7 @@ public class AutoscalingTest {
@Test
public void test_changing_exclusivity() {
- var min = new ClusterResources( 2, 1, new NodeResources( 3, 4, 100, 1));
+ var min = new ClusterResources( 2, 1, new NodeResources( 3, 8, 100, 1));
var max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
index 38b8836188b..54703b40781 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
@@ -321,8 +321,8 @@ public class DynamicProvisioningTest {
@Test
public void migrates_nodes_on_host_flavor_flag_change() {
InMemoryFlagSource flagSource = new InMemoryFlagSource();
- List<Flavor> flavors = List.of(new Flavor("x86", new NodeResources(2, 4, 50, 0.1, fast, local, Architecture.x86_64)),
- new Flavor("arm", new NodeResources(2, 4, 50, 0.1, fast, local, Architecture.arm64)));
+ List<Flavor> flavors = List.of(new Flavor("x86", new NodeResources(2, 8, 50, 0.1, fast, local, Architecture.x86_64)),
+ new Flavor("arm", new NodeResources(2, 8, 50, 0.1, fast, local, Architecture.arm64)));
MockHostProvisioner hostProvisioner = new MockHostProvisioner(flavors);
ProvisioningTester tester = new ProvisioningTester.Builder()
.dynamicProvisioning(true, false)
@@ -335,7 +335,7 @@ public class DynamicProvisioningTest {
ApplicationId app = ProvisioningTester.applicationId("a1");
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("8").build();
- Capacity capacity = Capacity.from(new ClusterResources(4, 2, new NodeResources(2, 4, 50, 0.1, DiskSpeed.any, StorageType.any, Architecture.any)));
+ Capacity capacity = Capacity.from(new ClusterResources(4, 2, new NodeResources(2, 8, 50, 0.1, DiskSpeed.any, StorageType.any, Architecture.any)));
hostProvisioner.setHostFlavor("x86", ClusterSpec.Type.content);
tester.activate(app, cluster, capacity);
@@ -391,10 +391,10 @@ public class DynamicProvisioningTest {
}
// Initial deployment
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 5, 20),
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 8, 20),
resources(6, 3, 4, 20, 40)));
tester.assertNodes("Initial allocation at first actual flavor above min (except for disk)",
- 4, 2, 2, 20, 20,
+ 4, 2, 2, 20, 24,
app1, cluster1);
@@ -413,7 +413,7 @@ public class DynamicProvisioningTest {
app1, cluster1);
// Widening window does not change allocation
- tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 5, 15),
+ tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 8, 15),
resources(8, 4, 4, 20, 30)));
tester.assertNodes("No change",
6, 2, 2, 20, 25,
@@ -421,7 +421,7 @@ public class DynamicProvisioningTest {
// Force 1 more groups: Reducing to 2 nodes per group to preserve node count is rejected
// since it will reduce total group memory from 60 to 40.
- tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 2, 5, 10),
+ tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 2, 8, 10),
resources(9, 3, 5, 20, 15)));
tester.assertNodes("Group size is preserved",
9, 3, 2, 20, 15,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index 6ec189d98c3..d64006a6e64 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -361,7 +361,7 @@ public class VirtualNodeProvisioningTest {
@Test
public void application_deployment_with_exclusive_app_first() {
NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
- NodeResources nodeResources = new NodeResources(2, 4, 100, 1);
+ NodeResources nodeResources = new NodeResources(2, 8, 100, 1);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyHosts(4, hostResources).activateTenantHosts();
ApplicationId application1 = ProvisioningTester.applicationId("app1");
@@ -380,7 +380,7 @@ public class VirtualNodeProvisioningTest {
@Test
public void application_deployment_with_exclusive_app_last() {
NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
- NodeResources nodeResources = new NodeResources(2, 4, 100, 1);
+ NodeResources nodeResources = new NodeResources(2, 8, 100, 1);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyHosts(4, hostResources).activateTenantHosts();
ApplicationId application1 = ProvisioningTester.applicationId("app1");
@@ -399,7 +399,7 @@ public class VirtualNodeProvisioningTest {
@Test
public void application_deployment_change_to_exclusive_and_back() {
NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
- NodeResources nodeResources = new NodeResources(2, 4, 100, 1);
+ NodeResources nodeResources = new NodeResources(2, 8, 100, 1);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyHosts(4, hostResources).activateTenantHosts();
@@ -426,7 +426,7 @@ public class VirtualNodeProvisioningTest {
ApplicationId application2 = ApplicationId.from("tenant2", "app2", "default");
ApplicationId application3 = ApplicationId.from("tenant1", "app3", "default");
NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
- NodeResources nodeResources = new NodeResources(2, 4, 100, 1);
+ NodeResources nodeResources = new NodeResources(2, 8, 100, 1);
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
tester.makeReadyHosts(4, hostResources).activateTenantHosts();
@@ -441,7 +441,7 @@ public class VirtualNodeProvisioningTest {
catch (Exception e) {
assertEquals("No room for 3 nodes as 2 of 4 hosts are exclusive",
"Could not satisfy request for 3 nodes with " +
- "[vcpu: 2.0, memory: 4.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
+ "[vcpu: 2.0, memory: 8.0 Gb, disk: 100.0 Gb, bandwidth: 1.0 Gbps, architecture: any] " +
"in tenant2.app2 container cluster 'my-container' 6.39: " +
"Not enough suitable nodes available due to host exclusivity constraints",
Exceptions.toMessageString(e));
diff --git a/parent/pom.xml b/parent/pom.xml
index b1ea1e0dab9..3297724aa89 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -671,6 +671,11 @@
</dependency>
<dependency>
<groupId>io.netty</groupId>
+ <artifactId>netty-resolver</artifactId>
+ <version>${netty.vespa.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>io.netty</groupId>
<artifactId>netty-transport-native-epoll</artifactId>
<version>${netty.vespa.version}</version>
</dependency>
diff --git a/storage/src/tests/distributor/bucketstateoperationtest.cpp b/storage/src/tests/distributor/bucketstateoperationtest.cpp
index c9fab0b37e5..44da88b4587 100644
--- a/storage/src/tests/distributor/bucketstateoperationtest.cpp
+++ b/storage/src/tests/distributor/bucketstateoperationtest.cpp
@@ -201,4 +201,27 @@ TEST_F(BucketStateOperationTest, bucket_db_not_updated_on_failure) {
EXPECT_FALSE(op.ok());
}
+TEST_F(BucketStateOperationTest, cancelled_node_does_not_update_bucket_db) {
+ document::BucketId bid(16, 1);
+ insertBucketInfo(bid, 0, 0xabc, 10, 1100, true, false);
+
+ BucketAndNodes bucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(0));
+ std::vector<uint16_t> active = {0};
+ SetBucketStateOperation op(dummy_cluster_context, bucketAndNodes, active);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender);
+ op.cancel(_sender, CancelScope::of_node_subset({0}));
+
+ ASSERT_EQ(_sender.commands().size(), 1);
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply());
+ op.receive(_sender, reply);
+
+ BucketDatabase::Entry entry = getBucket(bid);
+ ASSERT_TRUE(entry.valid());
+ EXPECT_FALSE(entry->getNodeRef(0).active()); // Should not be updated
+ EXPECT_FALSE(op.ok());
+}
+
} // namespace storage::distributor
diff --git a/storage/src/tests/distributor/distributor_stripe_test.cpp b/storage/src/tests/distributor/distributor_stripe_test.cpp
index 566fb704105..c87f5133997 100644
--- a/storage/src/tests/distributor/distributor_stripe_test.cpp
+++ b/storage/src/tests/distributor/distributor_stripe_test.cpp
@@ -14,6 +14,7 @@
#include <vespa/storageapi/message/visitor.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/text/stringtokenizer.h>
+#include <vespa/vespalib/util/stringfmt.h>
#include <gmock/gmock.h>
using document::Bucket;
@@ -47,8 +48,8 @@ struct DistributorStripeTest : Test, DistributorStripeTestUtil {
// Simple type aliases to make interfacing with certain utility functions
// easier. Note that this is only for readability and does not provide any
// added type safety.
- using NodeCount = int;
- using Redundancy = int;
+ using NodeCount = uint16_t;
+ using Redundancy = uint16_t;
using ConfigBuilder = vespa::config::content::core::StorDistributormanagerConfigBuilder;
@@ -137,82 +138,114 @@ struct DistributorStripeTest : Test, DistributorStripeTestUtil {
return _stripe->handleMessage(msg);
}
- void configure_stale_reads_enabled(bool enabled) {
+ template <typename Func>
+ void configure_stripe_with(Func f) {
ConfigBuilder builder;
- builder.allowStaleReadsDuringClusterStateTransitions = enabled;
+ f(builder);
configure_stripe(builder);
}
+ void configure_stale_reads_enabled(bool enabled) {
+ configure_stripe_with([&](auto& builder) {
+ builder.allowStaleReadsDuringClusterStateTransitions = enabled;
+ });
+ }
+
void configure_update_fast_path_restart_enabled(bool enabled) {
- ConfigBuilder builder;
- builder.restartWithFastUpdatePathIfAllGetTimestampsAreConsistent = enabled;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.restartWithFastUpdatePathIfAllGetTimestampsAreConsistent = enabled;
+ });
}
void configure_merge_operations_disabled(bool disabled) {
- ConfigBuilder builder;
- builder.mergeOperationsDisabled = disabled;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.mergeOperationsDisabled = disabled;
+ });
}
void configure_use_weak_internal_read_consistency(bool use_weak) {
- ConfigBuilder builder;
- builder.useWeakInternalReadConsistencyForClientGets = use_weak;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.useWeakInternalReadConsistencyForClientGets = use_weak;
+ });
}
void configure_metadata_update_phase_enabled(bool enabled) {
- ConfigBuilder builder;
- builder.enableMetadataOnlyFetchPhaseForInconsistentUpdates = enabled;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.enableMetadataOnlyFetchPhaseForInconsistentUpdates = enabled;
+ });
}
void configure_prioritize_global_bucket_merges(bool enabled) {
- ConfigBuilder builder;
- builder.prioritizeGlobalBucketMerges = enabled;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.prioritizeGlobalBucketMerges = enabled;
+ });
}
void configure_max_activation_inhibited_out_of_sync_groups(uint32_t n_groups) {
- ConfigBuilder builder;
- builder.maxActivationInhibitedOutOfSyncGroups = n_groups;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.maxActivationInhibitedOutOfSyncGroups = n_groups;
+ });
}
void configure_implicitly_clear_priority_on_schedule(bool implicitly_clear) {
- ConfigBuilder builder;
- builder.implicitlyClearBucketPriorityOnSchedule = implicitly_clear;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.implicitlyClearBucketPriorityOnSchedule = implicitly_clear;
+ });
}
void configure_use_unordered_merge_chaining(bool use_unordered) {
- ConfigBuilder builder;
- builder.useUnorderedMergeChaining = use_unordered;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.useUnorderedMergeChaining = use_unordered;
+ });
}
void configure_enable_two_phase_garbage_collection(bool use_two_phase) {
- ConfigBuilder builder;
- builder.enableTwoPhaseGarbageCollection = use_two_phase;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.enableTwoPhaseGarbageCollection = use_two_phase;
+ });
}
void configure_enable_condition_probing(bool enable_probing) {
- ConfigBuilder builder;
- builder.enableConditionProbing = enable_probing;
- configure_stripe(builder);
+ configure_stripe_with([&](auto& builder) {
+ builder.enableConditionProbing = enable_probing;
+ });
+ }
+
+ void configure_enable_operation_cancellation(bool enable_cancellation) {
+ configure_stripe_with([&](auto& builder) {
+ builder.enableOperationCancellation = enable_cancellation;
+ });
}
- bool scheduler_has_implicitly_clear_priority_on_schedule_set() const noexcept {
+ [[nodiscard]] bool scheduler_has_implicitly_clear_priority_on_schedule_set() const noexcept {
return _stripe->_scheduler->implicitly_clear_priority_on_schedule();
}
+ [[nodiscard]] bool distributor_owns_bucket_in_current_and_pending_states(document::BucketId bucket_id) const {
+ return (getDistributorBucketSpace().get_bucket_ownership_flags(bucket_id).owned_in_pending_state() &&
+ getDistributorBucketSpace().check_ownership_in_pending_and_current_state(bucket_id).isOwned());
+ }
+
void configureMaxClusterClockSkew(int seconds);
void configure_mutation_sequencing(bool enabled);
void configure_merge_busy_inhibit_duration(int seconds);
void set_up_and_start_get_op_with_stale_reads_enabled(bool enabled);
+ void simulate_cluster_state_transition(const vespalib::string& state_str, bool clear_pending);
+ static std::shared_ptr<api::RemoveReply> make_remove_reply_with_bucket_remap(api::StorageCommand& originator_cmd);
+
+ // TODO dedupe
+ auto sent_get_command(size_t idx) { return sent_command<api::GetCommand>(idx); }
+
+ auto make_get_reply(size_t idx, api::Timestamp ts, bool is_tombstone, bool condition_matched) {
+ return std::make_shared<api::GetReply>(*sent_get_command(idx), std::shared_ptr<document::Document>(), ts,
+ false, is_tombstone, condition_matched);
+ }
+
+ void set_up_for_bucket_ownership_cancellation(uint32_t superbucket_idx);
+ void do_test_cancelled_pending_op_with_bucket_ownership_change(bool clear_pending_state);
+ void do_test_not_cancelled_pending_op_without_bucket_ownership_change(bool clear_pending_state);
};
DistributorStripeTest::DistributorStripeTest()
@@ -224,6 +257,34 @@ DistributorStripeTest::DistributorStripeTest()
DistributorStripeTest::~DistributorStripeTest() = default;
+void
+DistributorStripeTest::simulate_cluster_state_transition(const vespalib::string& state_str, bool clear_pending)
+{
+ simulate_set_pending_cluster_state(state_str);
+ if (clear_pending) {
+ enable_cluster_state(state_str);
+ clear_pending_cluster_state_bundle();
+ }
+}
+
+std::shared_ptr<api::RemoveReply>
+DistributorStripeTest::make_remove_reply_with_bucket_remap(api::StorageCommand& originator_cmd)
+{
+ auto& cmd_as_remove = dynamic_cast<api::RemoveCommand&>(originator_cmd);
+ auto reply = std::dynamic_pointer_cast<api::RemoveReply>(std::shared_ptr<api::StorageReply>(cmd_as_remove.makeReply()));
+ reply->setOldTimestamp(100);
+ // Including a bucket remapping as part of the response is a pragmatic way to avoid false
+ // negatives when testing whether cancelled operations may mutate the DB. This is because
+ // non-remapped buckets are not created in the DB if they are already removed (which will
+ // be the case after bucket pruning on a cluster state change), but remapped buckets _are_
+ // implicitly created upon insert.
+ // We expect the original bucket is 16 bits and fake a remap to a split bucket one level
+ // below the original bucket.
+ reply->remapBucketId(BucketId(17, (cmd_as_remove.getBucketId().getId() & 0xFFFF) | 0x10000));
+ reply->setBucketInfo(api::BucketInfo(0x1234, 2, 300));
+ return reply;
+}
+
TEST_F(DistributorStripeTest, operation_generation)
{
setup_stripe(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
@@ -455,8 +516,7 @@ TEST_F(DistributorStripeTest, no_db_resurrection_for_bucket_not_owned_in_pending
simulate_set_pending_cluster_state("storage:10 distributor:10");
document::BucketId nonOwnedBucket(16, 3);
- EXPECT_FALSE(getDistributorBucketSpace().get_bucket_ownership_flags(nonOwnedBucket).owned_in_pending_state());
- EXPECT_FALSE(getDistributorBucketSpace().check_ownership_in_pending_and_current_state(nonOwnedBucket).isOwned());
+ ASSERT_FALSE(distributor_owns_bucket_in_current_and_pending_states(nonOwnedBucket));
std::vector<BucketCopy> copies;
copies.emplace_back(1234, 0, api::BucketInfo(0x567, 1, 2));
@@ -1052,4 +1112,168 @@ TEST_F(DistributorStripeTest, enable_condition_probing_config_is_propagated_to_i
EXPECT_TRUE(getConfig().enable_condition_probing());
}
+TEST_F(DistributorStripeTest, enable_operation_cancellation_config_is_propagated_to_internal_config) {
+ setup_stripe(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
+
+ EXPECT_FALSE(getConfig().enable_operation_cancellation()); // TODO switch default once ready
+
+ configure_enable_operation_cancellation(false);
+ EXPECT_FALSE(getConfig().enable_operation_cancellation());
+
+ configure_enable_operation_cancellation(true);
+ EXPECT_TRUE(getConfig().enable_operation_cancellation());
+}
+
+TEST_F(DistributorStripeTest, cluster_state_node_down_edge_cancels_pending_operations_on_unavailable_nodes) {
+ setup_stripe(Redundancy(1), NodeCount(1), "version:1 distributor:1 storage:1");
+ configure_enable_operation_cancellation(true); // Test will fail without cancellation enabled
+ addNodesToBucketDB(BucketId(16, 1), "0=3/4/5/t");
+
+ stripe_handle_message(makeDummyRemoveCommand()); // Remove is for bucket {16, 1}
+ ASSERT_EQ(_sender.getCommands(true), "Remove => 0");
+
+ // Oh no, node 0 goes down while we have a pending operation!
+ simulate_cluster_state_transition("version:2 distributor:1 storage:1 .0.s:d", true);
+ EXPECT_EQ("NONEXISTING", dumpBucket(BucketId(16, 1))); // Implicitly cleared
+
+ auto reply = make_remove_reply_with_bucket_remap(*_sender.command(0));
+ // Before we receive the reply, node 0 is back online. Even though the node is available in the
+ // cluster state, we should not apply its bucket info to our DB, as it may represent stale
+ // information (it's possible that our node froze up and that another distributor took over and
+ // mutated the bucket in the meantime; a classic ABA scenario).
+ simulate_cluster_state_transition("version:5 distributor:1 storage:1", true);
+
+ _stripe->handleReply(std::move(reply));
+ EXPECT_EQ("NONEXISTING", dumpBucket(BucketId(17, 0x10001)));
+}
+
+TEST_F(DistributorStripeTest, distribution_config_change_edge_cancels_pending_operations_on_unavailable_nodes) {
+ setup_stripe(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
+ configure_enable_operation_cancellation(true); // Test will fail without cancellation enabled
+ addNodesToBucketDB(BucketId(16, 1), "0=3/4/5/t,1=3/4/5/t");
+
+ stripe_handle_message(makeDummyRemoveCommand()); // Remove is for bucket {16, 1}
+ ASSERT_EQ(_sender.getCommands(true), "Remove => 0,Remove => 1");
+
+ // Node 1 is configured away; only node 0 remains. This is expected to be closely followed by
+ // (or--depending on the timing of operations in the cluster--preceded by) a cluster state with
+ // the node marked as down, but the ordering is not guaranteed.
+ auto new_config = make_default_distribution_config(1, 1);
+ simulate_distribution_config_change(std::move(new_config));
+
+ auto node_0_reply = make_remove_reply_with_bucket_remap(*_sender.command(0));
+ auto node_1_reply = make_remove_reply_with_bucket_remap(*_sender.command(1));
+
+ _stripe->handleReply(std::move(node_0_reply));
+ _stripe->handleReply(std::move(node_1_reply));
+
+ // Only node 0 should be present in the DB
+ EXPECT_EQ("BucketId(0x4000000000000001) : " // Original bucket
+ "node(idx=0,crc=0x3,docs=4/4,bytes=5/5,trusted=true,active=false,ready=false)",
+ dumpBucket(BucketId(16, 1)));
+ EXPECT_EQ("BucketId(0x4400000000010001) : " // Remapped bucket
+ "node(idx=0,crc=0x1234,docs=2/2,bytes=300/300,trusted=true,active=false,ready=false)",
+ dumpBucket(BucketId(17, 0x10001)));
+}
+
+void DistributorStripeTest::set_up_for_bucket_ownership_cancellation(uint32_t superbucket_idx) {
+ setup_stripe(Redundancy(1), NodeCount(10), "version:1 distributor:2 storage:2");
+ configure_stripe_with([](auto& builder) {
+ builder.enableConditionProbing = true;
+ builder.enableOperationCancellation = true;
+ });
+
+ NodeSupportedFeatures features;
+ features.document_condition_probe = true;
+ set_node_supported_features(0, features);
+ set_node_supported_features(1, features);
+
+ // Note: replicas are intentionally out of sync to trigger a write-repair.
+ addNodesToBucketDB(BucketId(16, superbucket_idx), "0=3/4/5,1=4/5/6");
+}
+
+namespace {
+
+std::shared_ptr<api::RemoveCommand> make_conditional_remove_request(uint32_t superbucket_idx) {
+ auto client_remove = std::make_shared<api::RemoveCommand>(
+ makeDocumentBucket(document::BucketId(0)),
+ document::DocumentId(vespalib::make_string("id:foo:testdoctype1:n=%u:foo", superbucket_idx)),
+ api::Timestamp(0));
+ client_remove->setCondition(documentapi::TestAndSetCondition("foo.bar==baz"));
+ return client_remove;
+}
+
+}
+
+void DistributorStripeTest::do_test_cancelled_pending_op_with_bucket_ownership_change(bool clear_pending_state) {
+ constexpr uint32_t superbucket_idx = 3;
+ const BucketId bucket_id(16, superbucket_idx);
+ set_up_for_bucket_ownership_cancellation(superbucket_idx);
+ // To actually check if cancellation is happening, we need to trigger a code path that
+ // is only covered by cancellation and not the legacy "check buckets at DB insert time"
+ // logic. The latter would give a false negative.
+ stripe_handle_message(make_conditional_remove_request(superbucket_idx));
+ ASSERT_EQ(_sender.getCommands(true), "Get => 0,Get => 1"); // Condition probes, thunder cats go!
+
+ simulate_cluster_state_transition("version:2 distributor:10 storage:10", clear_pending_state);
+ ASSERT_FALSE(distributor_owns_bucket_in_current_and_pending_states(bucket_id));
+ EXPECT_EQ("NONEXISTING", dumpBucket(bucket_id)); // Should have been pruned
+
+ _stripe->handleReply(make_get_reply(0, 100, false, true));
+ _stripe->handleReply(make_get_reply(1, 100, false, true));
+
+ // Condition probe was successful, but operation is cancelled and shall not continue.
+ ASSERT_EQ(_sender.getCommands(true, false, 2), "");
+ EXPECT_EQ("RemoveReply(BucketId(0x0000000000000000), "
+ "id:foo:testdoctype1:n=3:foo, timestamp 1, not found) "
+ "ReturnCode(ABORTED, Failed during write repair condition probe step. Reason: "
+ "Operation has been cancelled (likely due to a cluster state change))",
+ _sender.getLastReply());
+ EXPECT_EQ("NONEXISTING", dumpBucket(bucket_id)); // And definitely no resurrection
+}
+
+TEST_F(DistributorStripeTest, bucket_ownership_change_cancels_pending_operations_for_non_owned_buckets_pending_case) {
+ do_test_cancelled_pending_op_with_bucket_ownership_change(false);
+}
+
+TEST_F(DistributorStripeTest, bucket_ownership_change_cancels_pending_operations_for_non_owned_buckets_not_pending_case) {
+ do_test_cancelled_pending_op_with_bucket_ownership_change(true);
+}
+
+void DistributorStripeTest::do_test_not_cancelled_pending_op_without_bucket_ownership_change(bool clear_pending_state) {
+ constexpr uint32_t superbucket_idx = 14;
+ const BucketId bucket_id(16, superbucket_idx);
+ set_up_for_bucket_ownership_cancellation(superbucket_idx);
+
+ stripe_handle_message(make_conditional_remove_request(superbucket_idx));
+ ASSERT_EQ(_sender.getCommands(true), "Get => 0,Get => 1");
+
+ simulate_cluster_state_transition("version:2 distributor:10 storage:10", clear_pending_state);
+ ASSERT_TRUE(distributor_owns_bucket_in_current_and_pending_states(bucket_id));
+ EXPECT_EQ("BucketId(0x400000000000000e) : "
+ "node(idx=0,crc=0x3,docs=4/4,bytes=5/5,trusted=false,active=false,ready=false), "
+ "node(idx=1,crc=0x4,docs=5/5,bytes=6/6,trusted=false,active=false,ready=false)",
+ dumpBucket(bucket_id)); // Should _not_ have been pruned
+
+ _stripe->handleReply(make_get_reply(0, 100, false, true));
+ _stripe->handleReply(make_get_reply(1, 100, false, true));
+
+ // Operation can proceed as planned as it has not been cancelled
+ ASSERT_EQ(_sender.getCommands(true, false, 2), "Remove => 0,Remove => 1");
+}
+
+TEST_F(DistributorStripeTest, bucket_ownership_change_does_not_cancel_pending_operations_for_owned_buckets_pending_case) {
+ do_test_not_cancelled_pending_op_without_bucket_ownership_change(false);
+}
+
+TEST_F(DistributorStripeTest, bucket_ownership_change_does_not_cancel_pending_operations_for_owned_buckets_not_pending_case) {
+ do_test_not_cancelled_pending_op_without_bucket_ownership_change(true);
+}
+
+// TODO we do not have good handling of bucket ownership changes combined with
+// distribution config changes... Hard to remove all such edge cases unless we
+// make state+config change an atomic operation initiated by the cluster controller
+// (hint: we should do this).
+
+
}
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.cpp b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
index 5babde49380..ba6c4cb4ac4 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.cpp
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
@@ -72,7 +72,7 @@ DistributorStripeTestUtil::setup_stripe(int redundancy, int node_count, const li
// explicitly (which is what happens in "real life"), that is what would
// take place.
// The inverse case of this can be explicitly accomplished by calling
- // triggerDistributionChange().
+ // trigger_distribution_change().
// This isn't pretty, folks, but it avoids breaking the world for now,
// as many tests have implicit assumptions about this being the behavior.
auto new_configs = BucketSpaceDistributionConfigs::from_default_distribution(std::move(distribution));
@@ -93,10 +93,31 @@ void
DistributorStripeTestUtil::trigger_distribution_change(lib::Distribution::SP distr)
{
_node->getComponentRegister().setDistribution(distr);
- auto new_config = BucketSpaceDistributionConfigs::from_default_distribution(distr);
+ auto new_config = BucketSpaceDistributionConfigs::from_default_distribution(std::move(distr));
_stripe->update_distribution_config(new_config);
}
+void
+DistributorStripeTestUtil::simulate_distribution_config_change(std::shared_ptr<lib::Distribution> new_config)
+{
+ trigger_distribution_change(std::move(new_config));
+ _stripe->notifyDistributionChangeEnabled();
+ for (auto& space : _stripe->getBucketSpaceRepo()) {
+ auto cur_state = current_cluster_state_bundle().getDerivedClusterState(space.first); // no change in state itself
+ _stripe->remove_superfluous_buckets(space.first, *cur_state, true);
+ }
+}
+
+std::shared_ptr<lib::Distribution>
+DistributorStripeTestUtil::make_default_distribution_config(uint16_t redundancy, uint16_t node_count)
+{
+ lib::Distribution::DistributionConfigBuilder config(lib::Distribution::getDefaultDistributionConfig(redundancy, node_count).get());
+ config.redundancy = redundancy;
+ config.initialRedundancy = redundancy;
+ config.ensurePrimaryPersisted = true;
+ return std::make_shared<lib::Distribution>(config);
+}
+
std::shared_ptr<DistributorConfiguration>
DistributorStripeTestUtil::make_config() const
{
@@ -416,6 +437,11 @@ DistributorStripeTestUtil::getDistributorBucketSpace() {
return getBucketSpaceRepo().get(makeBucketSpace());
}
+const DistributorBucketSpace&
+DistributorStripeTestUtil::getDistributorBucketSpace() const {
+ return getBucketSpaceRepo().get(makeBucketSpace());
+}
+
BucketDatabase&
DistributorStripeTestUtil::getBucketDatabase() {
return getDistributorBucketSpace().getBucketDatabase();
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.h b/storage/src/tests/distributor/distributor_stripe_test_util.h
index 272301bf4a6..2892cec6fcf 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.h
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.h
@@ -138,6 +138,7 @@ public:
// TODO explicit notion of bucket spaces for tests
DistributorBucketSpace& getDistributorBucketSpace();
+ const DistributorBucketSpace& getDistributorBucketSpace() const;
BucketDatabase& getBucketDatabase(); // Implicit default space only
BucketDatabase& getBucketDatabase(document::BucketSpace space);
const BucketDatabase& getBucketDatabase() const; // Implicit default space only
@@ -175,6 +176,8 @@ public:
void set_redundancy(uint32_t redundancy);
void trigger_distribution_change(std::shared_ptr<lib::Distribution> distr);
+ void simulate_distribution_config_change(std::shared_ptr<lib::Distribution> new_config);
+ static std::shared_ptr<lib::Distribution> make_default_distribution_config(uint16_t redundancy, uint16_t node_count);
using ConfigBuilder = vespa::config::content::core::StorDistributormanagerConfigBuilder;
diff --git a/storage/src/tests/distributor/joinbuckettest.cpp b/storage/src/tests/distributor/joinbuckettest.cpp
index 570fe24679e..ae389ecd6c2 100644
--- a/storage/src/tests/distributor/joinbuckettest.cpp
+++ b/storage/src/tests/distributor/joinbuckettest.cpp
@@ -109,4 +109,32 @@ TEST_F(JoinOperationTest, send_sparse_joins_to_nodes_without_both_source_buckets
ASSERT_NO_FATAL_FAILURE(checkSourceBucketsAndSendReply(op, 1, {{33, 1}, {33, 1}}));
}
+TEST_F(JoinOperationTest, cancelled_node_does_not_update_bucket_db) {
+ auto cfg = make_config();
+ cfg->setJoinCount(100);
+ cfg->setJoinSize(1000);
+ configure_stripe(cfg);
+
+ addNodesToBucketDB(document::BucketId(33, 1), "0=250/50/300");
+ addNodesToBucketDB(document::BucketId(33, 0x100000001), "0=300/40/200");
+ enable_cluster_state("distributor:1 storage:1");
+
+ JoinOperation op(dummy_cluster_context,
+ BucketAndNodes(makeDocumentBucket(document::BucketId(32, 0)), toVector<uint16_t>(0)),
+ {document::BucketId(33, 1), document::BucketId(33, 0x100000001)});
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender);
+
+ op.cancel(_sender, CancelScope::of_node_subset({0}));
+
+ checkSourceBucketsAndSendReply(op, 0, {{33, 1}, {33, 0x100000001}});
+
+ // DB is not touched, so source buckets remain unchanged and the target bucket is not created
+ EXPECT_TRUE(getBucket(document::BucketId(33, 0x100000001)).valid());
+ EXPECT_TRUE(getBucket(document::BucketId(33, 1)).valid());
+ EXPECT_FALSE(getBucket(document::BucketId(32, 0)).valid());
+ EXPECT_FALSE(op.ok());
+}
+
}
diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp
index 512c092d8ae..12280980998 100644
--- a/storage/src/tests/distributor/mergeoperationtest.cpp
+++ b/storage/src/tests/distributor/mergeoperationtest.cpp
@@ -548,8 +548,7 @@ TEST_F(MergeOperationTest, merge_operation_is_not_blocked_by_request_bucket_info
EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer));
}
-TEST_F(MergeOperationTest, on_blocked_updates_metrics)
-{
+TEST_F(MergeOperationTest, on_blocked_updates_metrics) {
auto op = setup_minimal_merge_op();
auto metrics = getIdealStateManager().getMetrics().operations[IdealStateOperation::MERGE_BUCKET];
EXPECT_EQ(0, metrics->blocked.getValue());
@@ -557,8 +556,7 @@ TEST_F(MergeOperationTest, on_blocked_updates_metrics)
EXPECT_EQ(1, metrics->blocked.getValue());
}
-TEST_F(MergeOperationTest, on_throttled_updates_metrics)
-{
+TEST_F(MergeOperationTest, on_throttled_updates_metrics) {
auto op = setup_minimal_merge_op();
auto metrics = getIdealStateManager().getMetrics().operations[IdealStateOperation::MERGE_BUCKET];
EXPECT_EQ(0, metrics->throttled.getValue());
@@ -628,4 +626,22 @@ TEST_F(MergeOperationTest, delete_bucket_priority_is_capped_to_feed_pri_120) {
EXPECT_EQ(int(del_cmd->getPriority()), 120);
}
+TEST_F(MergeOperationTest, no_delete_bucket_ops_sent_if_fully_cancelled) {
+ auto op = setup_simple_merge_op();
+ ASSERT_NO_FATAL_FAILURE(assert_simple_merge_bucket_command());
+ op->cancel(_sender, CancelScope::of_fully_cancelled());
+ sendReply(*op);
+ EXPECT_EQ(_sender.getCommands(true, false, 1), ""); // nothing more
+ EXPECT_FALSE(op->ok());
+}
+
+TEST_F(MergeOperationTest, no_delete_bucket_ops_sent_if_node_subset_cancelled) {
+ auto op = setup_simple_merge_op(); // to nodes 0, 2, 1 (node 1 is source only)
+ ASSERT_NO_FATAL_FAILURE(assert_simple_merge_bucket_command());
+ op->cancel(_sender, CancelScope::of_node_subset({1}));
+ sendReply(*op);
+ EXPECT_EQ(_sender.getCommands(true, false, 1), ""); // nothing more
+ EXPECT_FALSE(op->ok());
+}
+
} // storage::distributor
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index ee87fe84df6..8cab6a3003d 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -202,10 +202,7 @@ TEST_F(PutOperationTest, failed_CreateBucket_removes_replica_from_db_and_sends_R
"node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)",
dumpBucket(operation_context().make_split_bit_constrained_bucket_id(doc->getId())));
- // TODO remove revert concept; does not make sense with Proton (since it's not a multi-version store and
- // therefore does not have anything to revert back to) and is config-disabled by default for this provider.
- ASSERT_EQ("RequestBucketInfoCommand(1 buckets, super bucket BucketId(0x4000000000008f09). ) => 1,"
- "Revert(BucketId(0x4000000000008f09)) => 0",
+ ASSERT_EQ("RequestBucketInfoCommand(1 buckets, super bucket BucketId(0x4000000000008f09). ) => 1",
_sender.getCommands(true, true, 4));
}
@@ -430,80 +427,6 @@ TEST_F(PutOperationTest, multiple_copies_early_return_primary_required_not_done)
ASSERT_EQ(0, _sender.replies().size());
}
-TEST_F(PutOperationTest, do_not_revert_on_failure_after_early_return) {
- setup_stripe(Redundancy(3),NodeCount(4), "storage:4 distributor:1",
- ReturnAfter(2), RequirePrimaryWritten(false));
-
- sendPut(createPut(createDummyDocument("test", "test")));
-
- ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
- "Create bucket => 1,Put => 3,Put => 2,Put => 1",
- _sender.getCommands(true));
-
- for (uint32_t i = 0; i < 3; i++) {
- sendReply(i); // CreateBucket
- }
- for (uint32_t i = 0; i < 2; i++) {
- sendReply(3 + i); // Put
- }
-
- ASSERT_EQ("PutReply(id:test:testdoctype1::test, BucketId(0x0000000000000000), "
- "timestamp 100) ReturnCode(NONE)",
- _sender.getLastReply());
-
- sendReply(5, api::ReturnCode::INTERNAL_FAILURE);
- // Should not be any revert commands sent
- ASSERT_EQ("Create bucket => 3,Create bucket => 2,"
- "Create bucket => 1,Put => 3,Put => 2,Put => 1",
- _sender.getCommands(true));
-}
-
-TEST_F(PutOperationTest, revert_successful_copies_when_one_fails) {
- setup_stripe(3, 4, "storage:4 distributor:1");
-
- createAndSendSampleDocument(TIMEOUT);
-
- ASSERT_EQ("Put => 0,Put => 2,Put => 1", _sender.getCommands(true));
-
- for (uint32_t i = 0; i < 2; i++) {
- sendReply(i);
- }
-
- sendReply(2, api::ReturnCode::INTERNAL_FAILURE);
-
- ASSERT_EQ("PutReply(id:test:testdoctype1::, "
- "BucketId(0x0000000000000000), timestamp 100) "
- "ReturnCode(INTERNAL_FAILURE)",
- _sender.getLastReply(true));
-
- ASSERT_EQ("Revert => 0,Revert => 2", _sender.getCommands(true, false, 3));
-}
-
-TEST_F(PutOperationTest, no_revert_if_revert_disabled) {
- close();
- getDirConfig().getConfig("stor-distributormanager")
- .set("enable_revert", "false");
- SetUp();
- setup_stripe(3, 4, "storage:4 distributor:1");
-
- createAndSendSampleDocument(TIMEOUT);
-
- ASSERT_EQ("Put => 0,Put => 2,Put => 1", _sender.getCommands(true));
-
- for (uint32_t i = 0; i < 2; i++) {
- sendReply(i);
- }
-
- sendReply(2, api::ReturnCode::INTERNAL_FAILURE);
-
- ASSERT_EQ("PutReply(id:test:testdoctype1::, "
- "BucketId(0x0000000000000000), timestamp 100) "
- "ReturnCode(INTERNAL_FAILURE)",
- _sender.getLastReply(true));
-
- ASSERT_EQ("", _sender.getCommands(true, false, 3));
-}
-
TEST_F(PutOperationTest, do_not_send_CreateBucket_if_already_pending) {
setup_stripe(2, 2, "storage:2 distributor:1");
diff --git a/storage/src/tests/distributor/removebucketoperationtest.cpp b/storage/src/tests/distributor/removebucketoperationtest.cpp
index 68d86884036..b9d1d804761 100644
--- a/storage/src/tests/distributor/removebucketoperationtest.cpp
+++ b/storage/src/tests/distributor/removebucketoperationtest.cpp
@@ -24,6 +24,14 @@ struct RemoveBucketOperationTest : Test, DistributorStripeTestUtil {
void TearDown() override {
close();
}
+
+ void reject_with_bucket_info(RemoveBucketOperation& op, size_t msg_index) {
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.command(msg_index);
+ std::shared_ptr<api::StorageReply> reply(msg2->makeReply());
+ dynamic_cast<api::DeleteBucketReply&>(*reply).setBucketInfo(api::BucketInfo(10, 200, 1));
+ reply->setResult(api::ReturnCode::REJECTED);
+ op.receive(_sender, reply);
+ }
};
TEST_F(RemoveBucketOperationTest, simple) {
@@ -36,11 +44,10 @@ TEST_F(RemoveBucketOperationTest, simple) {
RemoveBucketOperation op(dummy_cluster_context,
BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)),
- toVector<uint16_t>(1,2)));
+ toVector<uint16_t>(1, 2)));
op.setIdealStateManager(&getIdealStateManager());
op.start(_sender);
-
ASSERT_EQ("Delete bucket => 1,"
"Delete bucket => 2",
_sender.getCommands(true));
@@ -75,16 +82,11 @@ TEST_F(RemoveBucketOperationTest, bucket_info_mismatch_failure) {
ASSERT_EQ("Delete bucket => 1", _sender.getCommands(true));
ASSERT_EQ(1, _sender.commands().size());
- std::shared_ptr<api::StorageCommand> msg2 = _sender.command(0);
- std::shared_ptr<api::StorageReply> reply(msg2->makeReply().release());
- dynamic_cast<api::DeleteBucketReply&>(*reply).setBucketInfo(
- api::BucketInfo(10, 100, 1));
- reply->setResult(api::ReturnCode::REJECTED);
- op.receive(_sender, reply);
+ reject_with_bucket_info(op, 0);
// RemoveBucketOperation should reinsert bucketinfo into database
ASSERT_EQ("BucketId(0x4000000000000001) : "
- "node(idx=1,crc=0xa,docs=100/100,bytes=1/1,trusted=true,active=false,ready=false)",
+ "node(idx=1,crc=0xa,docs=200/200,bytes=1/1,trusted=true,active=false,ready=false)",
dumpBucket(document::BucketId(16, 1)));
}
@@ -130,4 +132,35 @@ TEST_F(RemoveBucketOperationTest, operation_blocked_when_pending_message_to_targ
EXPECT_FALSE(op.shouldBlockThisOperation(api::MessageType::PUT_ID, 2, 120));
}
+TEST_F(RemoveBucketOperationTest, cancelled_node_does_not_update_bucket_db_upon_rejection) {
+ addNodesToBucketDB(document::BucketId(16, 1),
+ "0=10/100/1/t,"
+ "1=10/100/1/t,"
+ "2=10/100/1/t");
+ set_redundancy(1);
+ enable_cluster_state("distributor:1 storage:3");
+
+ RemoveBucketOperation op(dummy_cluster_context,
+ BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)),
+ toVector<uint16_t>(1,2)));
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender);
+
+ ASSERT_EQ("Delete bucket => 1,"
+ "Delete bucket => 2",
+ _sender.getCommands(true));
+
+ op.cancel(_sender, CancelScope::of_node_subset({1}));
+
+ // Rejections will by default reinsert the bucket into the DB with the bucket info contained
+ // in the reply, but here the node is cancelled and should therefore not be reinserted.
+ reject_with_bucket_info(op, 0);
+ sendReply(op, 1);
+ // Node 1 not reinserted
+ ASSERT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0xa,docs=100/100,bytes=1/1,trusted=true,active=false,ready=false)",
+ dumpBucket(document::BucketId(16, 1)));
+ EXPECT_FALSE(op.ok());
+}
+
} // storage::distributor
diff --git a/storage/src/tests/distributor/removelocationtest.cpp b/storage/src/tests/distributor/removelocationtest.cpp
index b19a448199b..be688e64a8b 100644
--- a/storage/src/tests/distributor/removelocationtest.cpp
+++ b/storage/src/tests/distributor/removelocationtest.cpp
@@ -67,4 +67,6 @@ TEST_F(RemoveLocationOperationTest, simple) {
_sender.getLastReply());
}
+// TODO test cancellation (implicitly covered via operation PersistenceMessageTracker)
+
} // storage::distributor
diff --git a/storage/src/tests/distributor/splitbuckettest.cpp b/storage/src/tests/distributor/splitbuckettest.cpp
index edb392d9532..05a13f67bc9 100644
--- a/storage/src/tests/distributor/splitbuckettest.cpp
+++ b/storage/src/tests/distributor/splitbuckettest.cpp
@@ -48,14 +48,14 @@ SplitOperationTest::SplitOperationTest()
}
namespace {
- api::StorageMessageAddress _Storage0Address(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 0);
+ api::StorageMessageAddress _storage0Address(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 0);
}
TEST_F(SplitOperationTest, simple) {
enable_cluster_state("distributor:1 storage:1");
insertBucketInfo(document::BucketId(16, 1), 0, 0xabc, 1000,
- tooLargeBucketSize, 250);
+ tooLargeBucketSize, true);
SplitOperation op(dummy_cluster_context,
BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)),
@@ -72,10 +72,10 @@ TEST_F(SplitOperationTest, simple) {
std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
ASSERT_EQ(msg->getType(), api::MessageType::SPLITBUCKET);
- EXPECT_EQ(_Storage0Address.toString(),
+ EXPECT_EQ(_storage0Address.toString(),
msg->getAddress()->toString());
- std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply());
auto* sreply = static_cast<api::SplitBucketReply*>(reply.get());
sreply->getSplitInfo().emplace_back(document::BucketId(17, 1),
@@ -142,19 +142,15 @@ TEST_F(SplitOperationTest, multi_node_failure) {
{
std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
ASSERT_EQ(msg->getType(), api::MessageType::SPLITBUCKET);
- EXPECT_EQ(_Storage0Address.toString(),
- msg->getAddress()->toString());
+ EXPECT_EQ(_storage0Address.toString(), msg->getAddress()->toString());
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply());
+ auto& sreply = dynamic_cast<api::SplitBucketReply&>(*reply);
- auto* sreply = static_cast<api::SplitBucketReply*>(msg->makeReply().release());
- sreply->setResult(api::ReturnCode::OK);
+ sreply.setResult(api::ReturnCode::OK);
+ sreply.getSplitInfo().emplace_back(document::BucketId(17, 1), api::BucketInfo(100, 600, 5000000));
+ sreply.getSplitInfo().emplace_back(document::BucketId(17, 0x10001), api::BucketInfo(110, 400, 6000000));
- sreply->getSplitInfo().emplace_back(document::BucketId(17, 1),
- api::BucketInfo(100, 600, 5000000));
-
- sreply->getSplitInfo().emplace_back(document::BucketId(17, 0x10001),
- api::BucketInfo(110, 400, 6000000));
-
- op.receive(_sender, std::shared_ptr<api::StorageReply>(sreply));
+ op.receive(_sender, reply);
}
sendReply(op, 1, api::ReturnCode::NOT_CONNECTED);
@@ -230,7 +226,7 @@ TEST_F(SplitOperationTest, copy_trusted_status_not_carried_over_after_split) {
for (int i = 0; i < 2; ++i) {
std::shared_ptr<api::StorageCommand> msg = _sender.command(i);
ASSERT_EQ(msg->getType(), api::MessageType::SPLITBUCKET);
- std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply());
auto* sreply = static_cast<api::SplitBucketReply*>(reply.get());
// Make sure copies differ so they cannot become implicitly trusted.
@@ -271,7 +267,7 @@ TEST_F(SplitOperationTest, operation_blocked_by_pending_join) {
};
auto joinCmd = std::make_shared<api::JoinBucketsCommand>(makeDocumentBucket(joinTarget));
joinCmd->getSourceBuckets() = joinSources;
- joinCmd->setAddress(_Storage0Address);
+ joinCmd->setAddress(_storage0Address);
pending_message_tracker().insert(joinCmd);
@@ -307,7 +303,7 @@ TEST_F(SplitOperationTest, split_is_blocked_by_locked_bucket) {
enable_cluster_state("distributor:1 storage:2");
document::BucketId source_bucket(16, 1);
- insertBucketInfo(source_bucket, 0, 0xabc, 1000, tooLargeBucketSize, 250);
+ insertBucketInfo(source_bucket, 0, 0xabc, 1000, tooLargeBucketSize, true);
SplitOperation op(dummy_cluster_context, BucketAndNodes(makeDocumentBucket(source_bucket), toVector<uint16_t>(0)),
maxSplitBits, splitCount, splitByteSize);
@@ -318,4 +314,36 @@ TEST_F(SplitOperationTest, split_is_blocked_by_locked_bucket) {
EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
}
+TEST_F(SplitOperationTest, cancelled_node_does_not_update_bucket_db) {
+ enable_cluster_state("distributor:1 storage:1");
+ insertBucketInfo(document::BucketId(16, 1), 0, 0xabc, 1000, tooLargeBucketSize, true);
+
+ SplitOperation op(dummy_cluster_context,
+ BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)), toVector<uint16_t>(0)),
+ maxSplitBits, splitCount, splitByteSize);
+
+ op.setIdealStateManager(&getIdealStateManager());
+ op.start(_sender);
+
+ op.cancel(_sender, CancelScope::of_node_subset({0}));
+
+ {
+ ASSERT_EQ(_sender.commands().size(), 1);
+ std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
+ std::shared_ptr<api::StorageReply> reply(msg->makeReply());
+ auto& sreply = dynamic_cast<api::SplitBucketReply&>(*reply);
+
+ sreply.getSplitInfo().emplace_back(document::BucketId(17, 1), api::BucketInfo(100, 600, 5000000));
+ sreply.getSplitInfo().emplace_back(document::BucketId(17, 0x10001), api::BucketInfo(110, 400, 6000000));
+ op.receive(_sender, reply);
+ }
+
+ // DB is not touched, so source bucket remains (will be removed during actual operation)
+ // while target buckets are not created
+ EXPECT_TRUE(getBucket(document::BucketId(16, 1)).valid());
+ EXPECT_FALSE(getBucket(document::BucketId(17, 0x00001)).valid());
+ EXPECT_FALSE(getBucket(document::BucketId(17, 0x10001)).valid());
+ EXPECT_FALSE(op.ok());
+}
+
} // storage::distributor
diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
index d3af4cd564a..f3aa9c2eb92 100644
--- a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
+++ b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
@@ -1056,7 +1056,32 @@ TEST_F(TopLevelBucketDBUpdaterTest, recheck_node) {
const BucketCopy* copy = entry->getNode(1);
ASSERT_TRUE(copy != nullptr);
- EXPECT_EQ(api::BucketInfo(20,10,12, 50, 60, true, true), copy->getBucketInfo());
+ EXPECT_EQ(api::BucketInfo(20, 10, 12, 50, 60, true, true), copy->getBucketInfo());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, cancelled_pending_recheck_command_does_not_update_db) {
+ ASSERT_NO_FATAL_FAILURE(initialize_nodes_and_buckets(3, 5));
+ _sender.clear();
+
+ auto bucket = makeDocumentBucket(document::BucketId(16, 3));
+ auto& stripe_bucket_db_updater = stripe_of_bucket(bucket.getBucketId()).bucket_db_updater();
+ stripe_bucket_db_updater.recheckBucketInfo(0, bucket);
+
+ ASSERT_EQ(_sender.getCommands(true), "Request bucket info => 0");
+ auto& req = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
+
+ ASSERT_TRUE(stripe_bucket_db_updater.cancel_message_by_id(req.getMsgId()));
+
+ auto reply = std::make_shared<api::RequestBucketInfoReply>(req);
+ reply->getBucketInfo().emplace_back(document::BucketId(16, 3), api::BucketInfo(20, 10, 12, 50, 60, true, true));
+ stripe_bucket_db_updater.onRequestBucketInfoReply(reply);
+
+ BucketDatabase::Entry entry = get_bucket(bucket);
+ ASSERT_TRUE(entry.valid());
+ const BucketCopy* copy = entry->getNode(0);
+ ASSERT_TRUE(copy != nullptr);
+ // Existing bucket info not modified by reply (0xa ... is the initialized test state).
+ EXPECT_EQ(api::BucketInfo(0xa, 1, 1, 1, 1, false, false), copy->getBucketInfo());
}
TEST_F(TopLevelBucketDBUpdaterTest, notify_bucket_change) {
@@ -1217,11 +1242,7 @@ TEST_F(TopLevelBucketDBUpdaterTest, merge_reply) {
document::BucketId bucket_id(16, 1234);
add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.emplace_back(0);
- nodes.emplace_back(1);
- nodes.emplace_back(2);
-
+ std::vector<api::MergeBucketCommand::Node> nodes{{0}, {1}, {2}};
api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
auto reply = std::make_shared<api::MergeBucketReply>(cmd);
@@ -1248,19 +1269,15 @@ TEST_F(TopLevelBucketDBUpdaterTest, merge_reply) {
"node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false), "
"node(idx=2,crc=0x1e,docs=300/300,bytes=3000/3000,trusted=false,active=false,ready=false)",
dump_bucket(bucket_id));
-};
+}
TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down) {
enable_distributor_cluster_state("distributor:1 storage:3");
- std::vector<api::MergeBucketCommand::Node> nodes;
document::BucketId bucket_id(16, 1234);
add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
- for (uint32_t i = 0; i < 3; ++i) {
- nodes.emplace_back(i);
- }
-
+ std::vector<api::MergeBucketCommand::Node> nodes{{0}, {1}, {2}};
api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
auto reply = std::make_shared<api::MergeBucketReply>(cmd);
@@ -1287,19 +1304,15 @@ TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down) {
"node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), "
"node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false)",
dump_bucket(bucket_id));
-};
+}
TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
enable_distributor_cluster_state("distributor:1 storage:3");
- std::vector<api::MergeBucketCommand::Node> nodes;
document::BucketId bucket_id(16, 1234);
add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
- for (uint32_t i = 0; i < 3; ++i) {
- nodes.emplace_back(i);
- }
-
+ std::vector<api::MergeBucketCommand::Node> nodes{{0}, {1}, {2}};
api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
auto reply = std::make_shared<api::MergeBucketReply>(cmd);
@@ -1326,7 +1339,41 @@ TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
"node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), "
"node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false)",
dump_bucket(bucket_id));
-};
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_triggered_bucket_info_request_not_sent_to_cancelled_nodes) {
+    enable_distributor_cluster_state("distributor:1 storage:3");
+    document::BucketId bucket_id(16, 1234);
+    // DB has one bucket with 3 mutually out of sync replicas. Node 0 is explicitly tagged as Active;
+    // this is done to prevent the distributor from scheduling a bucket activation as its first task.
+    add_nodes_to_stripe_bucket_db(bucket_id, "0=10/1/1/t/a,1=20/1/1/t,2=30/1/1/t");
+
+    // Poke at the business end of the stripe until it has scheduled a merge for the inconsistent bucket
+    ASSERT_EQ(_sender.commands().size(), 0u);
+    const int max_tick_tries = 20;
+    for (int i = 0; i <= max_tick_tries; ++i) {
+        stripe_of_bucket(bucket_id).tick();
+        if (!_sender.commands().empty()) {
+            break; // Merge scheduled; stop ticking so no further maintenance ops are started
+        }
+        if (i == max_tick_tries) {
+            FAIL() << "no merge sent after ticking " << max_tick_tries << " times";
+        }
+    }
+    ASSERT_EQ(_sender.getCommands(true), "Merge bucket => 0");
+
+    auto cmd = std::dynamic_pointer_cast<api::MergeBucketCommand>(_sender.commands()[0]);
+    _sender.commands().clear();
+    auto reply = std::make_shared<api::MergeBucketReply>(*cmd);
+
+    auto op = stripe_of_bucket(bucket_id).maintenance_op_from_message_id(cmd->getMsgId());
+    ASSERT_TRUE(op);
+
+    op->cancel(_sender, CancelScope::of_node_subset({0, 2}));
+    stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply);
+    // RequestBucketInfo only sent to node 1
+    ASSERT_EQ(_sender.getCommands(true), "Request bucket info => 1");
+}
TEST_F(TopLevelBucketDBUpdaterTest, flush) {
enable_distributor_cluster_state("distributor:1 storage:3");
@@ -1335,11 +1382,7 @@ TEST_F(TopLevelBucketDBUpdaterTest, flush) {
document::BucketId bucket_id(16, 1234);
add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
- std::vector<api::MergeBucketCommand::Node> nodes;
- for (uint32_t i = 0; i < 3; ++i) {
- nodes.emplace_back(i);
- }
-
+ std::vector<api::MergeBucketCommand::Node> nodes{{0}, {1}, {2}};
api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
auto reply = std::make_shared<api::MergeBucketReply>(cmd);
diff --git a/storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp b/storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp
index 72bb6cf9fb5..61142415e10 100644
--- a/storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp
+++ b/storage/src/tests/storageapi/mbusprot/storageprotocoltest.cpp
@@ -369,20 +369,6 @@ TEST_P(StorageProtocolTest, remove) {
EXPECT_NO_FATAL_FAILURE(assert_bucket_info_reply_fields_propagated(*reply2));
}
-TEST_P(StorageProtocolTest, revert) {
- std::vector<Timestamp> tokens;
- tokens.push_back(59);
- auto cmd = std::make_shared<RevertCommand>(_bucket, tokens);
- auto cmd2 = copyCommand(cmd);
- EXPECT_EQ(_bucket, cmd2->getBucket());
- EXPECT_EQ(tokens, cmd2->getRevertTokens());
-
- auto reply = std::make_shared<RevertReply>(*cmd2);
- set_dummy_bucket_info_reply_fields(*reply);
- auto reply2 = copyReply(reply);
- EXPECT_NO_FATAL_FAILURE(assert_bucket_info_reply_fields_propagated(*reply2));
-}
-
TEST_P(StorageProtocolTest, request_bucket_info) {
{
std::vector<document::BucketId> ids;
diff --git a/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp b/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp
index ae2385a36d8..ec8afaad86d 100644
--- a/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp
+++ b/storage/src/tests/storageserver/changedbucketownershiphandlertest.cpp
@@ -491,12 +491,6 @@ TEST_F(ChangedBucketOwnershipHandlerTest, abort_outdated_remove_command) {
expectChangeAbortsMessage<api::RemoveCommand>(false, getBucketToAllow(), docId, api::Timestamp(1234));
}
-TEST_F(ChangedBucketOwnershipHandlerTest, abort_outdated_revert_command) {
- std::vector<api::Timestamp> timestamps;
- expectChangeAbortsMessage<api::RevertCommand>(true, getBucketToAbort(), timestamps);
- expectChangeAbortsMessage<api::RevertCommand>(false, getBucketToAllow(), timestamps);
-}
-
TEST_F(ChangedBucketOwnershipHandlerTest, ideal_state_abort_updates_metric) {
expectChangeAbortsMessage<api::SplitBucketCommand>(true, getBucketToAbort());
EXPECT_EQ(1, _handler->getMetrics().idealStateOpsAborted.getValue());
diff --git a/storage/src/vespa/storage/common/messagebucket.cpp b/storage/src/vespa/storage/common/messagebucket.cpp
index 286eef39e16..202c4a29fac 100644
--- a/storage/src/vespa/storage/common/messagebucket.cpp
+++ b/storage/src/vespa/storage/common/messagebucket.cpp
@@ -25,8 +25,6 @@ getStorageMessageBucket(const api::StorageMessage& msg)
return static_cast<const api::UpdateCommand&>(msg).getBucket();
case api::MessageType::REMOVE_ID:
return static_cast<const api::RemoveCommand&>(msg).getBucket();
- case api::MessageType::REVERT_ID:
- return static_cast<const api::RevertCommand&>(msg).getBucket();
case api::MessageType::STATBUCKET_ID:
return static_cast<const api::StatBucketCommand&>(msg).getBucket();
case api::MessageType::REMOVELOCATION_ID:
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.cpp b/storage/src/vespa/storage/config/distributorconfiguration.cpp
index d6d21c89b68..ec570820ecd 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.cpp
+++ b/storage/src/vespa/storage/config/distributorconfiguration.cpp
@@ -48,12 +48,12 @@ DistributorConfiguration::DistributorConfiguration(StorageComponent& component)
_use_weak_internal_read_consistency_for_client_gets(false),
_enable_metadata_only_fetch_phase_for_inconsistent_updates(false),
_prioritize_global_bucket_merges(true),
- _enable_revert(true),
_implicitly_clear_priority_on_schedule(false),
_use_unordered_merge_chaining(false),
_inhibit_default_merges_when_global_merges_pending(false),
_enable_two_phase_garbage_collection(false),
_enable_condition_probing(false),
+ _enable_operation_cancellation(false),
_minimumReplicaCountingMode(ReplicaCountingMode::TRUSTED)
{
}
@@ -173,12 +173,12 @@ DistributorConfiguration::configure(const vespa::config::content::core::StorDist
_enable_metadata_only_fetch_phase_for_inconsistent_updates = config.enableMetadataOnlyFetchPhaseForInconsistentUpdates;
_prioritize_global_bucket_merges = config.prioritizeGlobalBucketMerges;
_max_activation_inhibited_out_of_sync_groups = config.maxActivationInhibitedOutOfSyncGroups;
- _enable_revert = config.enableRevert;
_implicitly_clear_priority_on_schedule = config.implicitlyClearBucketPriorityOnSchedule;
_use_unordered_merge_chaining = config.useUnorderedMergeChaining;
_inhibit_default_merges_when_global_merges_pending = config.inhibitDefaultMergesWhenGlobalMergesPending;
_enable_two_phase_garbage_collection = config.enableTwoPhaseGarbageCollection;
_enable_condition_probing = config.enableConditionProbing;
+ _enable_operation_cancellation = config.enableOperationCancellation;
_minimumReplicaCountingMode = config.minimumReplicaCountingMode;
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.h b/storage/src/vespa/storage/config/distributorconfiguration.h
index e3664276518..08d3e2b055f 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.h
+++ b/storage/src/vespa/storage/config/distributorconfiguration.h
@@ -263,9 +263,6 @@ public:
return _max_activation_inhibited_out_of_sync_groups;
}
- bool enable_revert() const noexcept {
- return _enable_revert;
- }
[[nodiscard]] bool implicitly_clear_priority_on_schedule() const noexcept {
return _implicitly_clear_priority_on_schedule;
}
@@ -293,6 +290,12 @@ public:
[[nodiscard]] bool enable_condition_probing() const noexcept {
return _enable_condition_probing;
}
+ void set_enable_operation_cancellation(bool enable) noexcept {
+ _enable_operation_cancellation = enable;
+ }
+ [[nodiscard]] bool enable_operation_cancellation() const noexcept {
+ return _enable_operation_cancellation;
+ }
uint32_t num_distributor_stripes() const noexcept { return _num_distributor_stripes; }
@@ -348,12 +351,12 @@ private:
bool _use_weak_internal_read_consistency_for_client_gets;
bool _enable_metadata_only_fetch_phase_for_inconsistent_updates;
bool _prioritize_global_bucket_merges;
- bool _enable_revert;
bool _implicitly_clear_priority_on_schedule;
bool _use_unordered_merge_chaining;
bool _inhibit_default_merges_when_global_merges_pending;
bool _enable_two_phase_garbage_collection;
bool _enable_condition_probing;
+ bool _enable_operation_cancellation;
DistrConfig::MinimumReplicaCountingMode _minimumReplicaCountingMode;
diff --git a/storage/src/vespa/storage/config/stor-distributormanager.def b/storage/src/vespa/storage/config/stor-distributormanager.def
index 95461eb5dc2..debbe443b31 100644
--- a/storage/src/vespa/storage/config/stor-distributormanager.def
+++ b/storage/src/vespa/storage/config/stor-distributormanager.def
@@ -313,3 +313,8 @@ enable_two_phase_garbage_collection bool default=true
## replicas will trigger an implicit distributed condition probe to resolve the outcome of
## the condition across all divergent replicas.
enable_condition_probing bool default=true
+
+## If true, changes in the cluster where a subset of the nodes become unavailable or buckets
+## change ownership between distributors will trigger an explicit cancellation of all pending
+## requests partially or fully "invalidated" by such a change.
+enable_operation_cancellation bool default=false
diff --git a/storage/src/vespa/storage/distributor/CMakeLists.txt b/storage/src/vespa/storage/distributor/CMakeLists.txt
index c889afcc77c..16a4fb5691f 100644
--- a/storage/src/vespa/storage/distributor/CMakeLists.txt
+++ b/storage/src/vespa/storage/distributor/CMakeLists.txt
@@ -4,6 +4,7 @@ vespa_add_library(storage_distributor OBJECT
activecopy.cpp
blockingoperationstarter.cpp
bucket_db_prune_elision.cpp
+ bucket_ownership_calculator.cpp
bucket_space_distribution_configs.cpp
bucket_space_distribution_context.cpp
bucket_space_state_map.cpp
diff --git a/storage/src/vespa/storage/distributor/bucket_ownership_calculator.cpp b/storage/src/vespa/storage/distributor/bucket_ownership_calculator.cpp
new file mode 100644
index 00000000000..6f94235e548
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/bucket_ownership_calculator.cpp
@@ -0,0 +1,41 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "bucket_ownership_calculator.h"
+#include <vespa/document/bucket/bucket.h>
+#include <vespa/vdslib/distribution/distribution.h>
+#include <vespa/vdslib/state/clusterstate.h>
+
+namespace storage::distributor {
+
+namespace {
+
+uint64_t superbucket_from_id(const document::BucketId& id, uint16_t distribution_bits) noexcept {
+ // The n LSBs of the bucket ID contain the superbucket number. Mask off the rest.
+ return id.getRawId() & ~(UINT64_MAX << distribution_bits);
+}
+
+}
+
+bool
+BucketOwnershipCalculator::this_distributor_owns_bucket(const document::BucketId& bucket_id) const
+{
+ // TODO "no distributors available" case is the same for _all_ buckets; cache once in constructor.
+ // TODO "too few bits used" case can be cheaply checked without needing exception
+ try {
+ const auto bits = _state.getDistributionBitCount();
+ const auto this_superbucket = superbucket_from_id(bucket_id, bits);
+ if (_cached_decision_superbucket == this_superbucket) {
+ return _cached_owned;
+ }
+ uint16_t distributor = _distribution.getIdealDistributorNode(_state, bucket_id, "uim");
+ _cached_decision_superbucket = this_superbucket;
+ _cached_owned = (distributor == _this_node_index);
+ return _cached_owned;
+ } catch (lib::TooFewBucketBitsInUseException&) {
+ // Ignore; implicitly not owned
+ } catch (lib::NoDistributorsAvailableException&) {
+ // Ignore; implicitly not owned
+ }
+ return false;
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/bucket_ownership_calculator.h b/storage/src/vespa/storage/distributor/bucket_ownership_calculator.h
new file mode 100644
index 00000000000..b67bb41e85d
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/bucket_ownership_calculator.h
@@ -0,0 +1,44 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <cstdint>
+
+namespace document { class BucketId; }
+
+namespace storage::lib {
+class ClusterState;
+class Distribution;
+}
+
+namespace storage::distributor {
+
+/**
+ * Calculator for determining if a bucket is owned by the current distributor.
+ * Ideal state calculations are cached and reused for all consecutive sub-buckets
+ * under the same super bucket. The cache is invalidated when a new super bucket
+ * is encountered, so it only provides a benefit when invoked in bucket ID order.
+ *
+ * Not thread safe due to internal caching.
+ */
+class BucketOwnershipCalculator {
+ const lib::ClusterState& _state;
+ const lib::Distribution& _distribution;
+ mutable uint64_t _cached_decision_superbucket;
+ const uint16_t _this_node_index;
+ mutable bool _cached_owned;
+public:
+ BucketOwnershipCalculator(const lib::ClusterState& state,
+ const lib::Distribution& distribution,
+ uint16_t this_node_index) noexcept
+ : _state(state),
+ _distribution(distribution),
+ _cached_decision_superbucket(UINT64_MAX),
+ _this_node_index(this_node_index),
+ _cached_owned(false)
+ {
+ }
+
+ [[nodiscard]] bool this_distributor_owns_bucket(const document::BucketId& bucket_id) const;
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index ad1cce46bea..ac5cb740361 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -16,6 +16,8 @@
#include <vespa/storage/common/node_identity.h>
#include <vespa/storage/common/nodestateupdater.h>
#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
+#include <vespa/storage/distributor/operations/cancel_scope.h>
+#include <vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h>
#include <vespa/storageframework/generic/status/xmlstatusreporter.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/util/memoryusage.h>
@@ -177,6 +179,12 @@ DistributorStripe::handle_or_enqueue_message(const std::shared_ptr<api::StorageM
return true;
}
+std::shared_ptr<Operation>
+DistributorStripe::maintenance_op_from_message_id(uint64_t msg_id) const noexcept
+{
+ return _maintenanceOperationOwner.find_by_id(msg_id);
+}
+
void
DistributorStripe::handleCompletedMerge(const std::shared_ptr<api::MergeBucketReply>& reply)
{
@@ -210,6 +218,8 @@ DistributorStripe::handleReply(const std::shared_ptr<api::StorageReply>& reply)
bucket.getBucketId() != document::BucketId(0) &&
reply->getAddress())
{
+ // Won't be triggered for replies of cancelled ops since they will be missing
+ // from `_pendingMessageTracker` and thus `bucket` will be zero.
recheckBucketInfo(reply->getAddress()->getIndex(), bucket);
}
@@ -271,18 +281,82 @@ DistributorStripe::getClusterStateBundle() const
}
void
-DistributorStripe::enableClusterStateBundle(const lib::ClusterStateBundle& state)
+DistributorStripe::cancel_single_message_by_id_if_found(uint64_t msg_id, const CancelScope& cancel_scope)
{
- lib::ClusterStateBundle oldState = _clusterStateBundle;
- _clusterStateBundle = state;
- propagateClusterStates();
+ // In descending order of likelihood:
+ if (_operationOwner.try_cancel_by_id(msg_id, cancel_scope)) {
+ return;
+ }
+ if (_maintenanceOperationOwner.try_cancel_by_id(msg_id, cancel_scope)) {
+ return;
+ }
+ (void)_bucketDBUpdater.cancel_message_by_id(msg_id);
+}
- const auto& baseline_state = *state.getBaselineClusterState();
- enterRecoveryMode();
+void
+DistributorStripe::handle_node_down_edge_with_cancellations(uint16_t node_index, std::span<const uint64_t> msg_ids)
+{
+ auto cancel_scope = CancelScope::of_node_subset({node_index});
+ for (const auto msg_id : msg_ids) {
+ cancel_single_message_by_id_if_found(msg_id, cancel_scope);
+ }
+}
+
+void
+DistributorStripe::cancel_ops_for_buckets_no_longer_owned(document::BucketSpace bucket_space,
+ const lib::ClusterState& new_state)
+{
+ // Note: we explicitly do not simply reuse the set of buckets removed from the bucket database
+ // when deciding which operations to cancel. This is because that would depend on every candidate
+ // bucket to cancel already being present in the DB, which is hard to guarantee always holds.
+ const auto& distribution = _bucketSpaceRepo->get(bucket_space).getDistribution();
+ BucketOwnershipCalculator ownership_calc(new_state, distribution, getDistributorIndex());
+
+ auto bucket_not_owned_in_new_state = [&](const document::Bucket& bucket) {
+ return !ownership_calc.this_distributor_owns_bucket(bucket.getBucketId());
+ };
+ auto cancel_op_by_msg_id = [&](uint64_t msg_id) {
+ cancel_single_message_by_id_if_found(msg_id, CancelScope::of_fully_cancelled());
+ };
+ _pendingMessageTracker.enumerate_matching_pending_bucket_ops(bucket_not_owned_in_new_state, cancel_op_by_msg_id);
+}
+
+void
+DistributorStripe::cancel_ops_for_unavailable_nodes(const lib::ClusterStateBundle& old_state_bundle,
+ const lib::ClusterStateBundle& new_state_bundle)
+{
+ // TODO we should probably only consider a node as unavailable if it is unavailable in
+ // _all_ bucket spaces. Consider: implicit maintenance mode for global merges (although
+ // that _should_ only be triggered by the CC when the node was already down...).
+ const auto& baseline_state = *new_state_bundle.getBaselineClusterState();
+ const uint16_t old_node_count = old_state_bundle.getBaselineClusterState()->getNodeCount(lib::NodeType::STORAGE);
+ const uint16_t new_node_count = baseline_state.getNodeCount(lib::NodeType::STORAGE);
+ const auto& distribution = _bucketSpaceRepo->get(document::FixedBucketSpaces::default_space()).getDistribution();
+ for (uint16_t i = 0; i < std::max(old_node_count, new_node_count); ++i) {
+ // Handle both the case where a node may be gone from the cluster state and from the config.
+ // These are not atomic, so one may happen before the other.
+ const auto& node_state = baseline_state.getNodeState(lib::Node(lib::NodeType::STORAGE, i)).getState();
+ const auto* node_group = distribution.getNodeGraph().getGroupForNode(i);
+ if (!node_state.oneOf(storage_node_up_states()) || !node_group) {
+ // Note: this also clears _non-maintenance_ operations from the pending message tracker, but
+ // the client operation mapping (_operationOwner) is _not_ cleared, so replies from the
+ // unavailable node(s) will still be processed as expected.
+ std::vector<uint64_t> msg_ids = _pendingMessageTracker.clearMessagesForNode(i);
+ LOG(debug, "Node %u is unavailable, cancelling %zu pending operations", i, msg_ids.size());
+ handle_node_down_edge_with_cancellations(i, msg_ids);
+ }
+ }
+}
+
+// TODO remove once cancellation support has proven itself worthy of prime time
+void
+DistributorStripe::legacy_erase_ops_for_unavailable_nodes(const lib::ClusterStateBundle& old_state_bundle,
+ const lib::ClusterStateBundle& new_state_bundle)
+{
+ const auto& baseline_state = *new_state_bundle.getBaselineClusterState();
// Clear all active messages on nodes that are down.
- // TODO this should also be done on nodes that are no longer part of the config!
- const uint16_t old_node_count = oldState.getBaselineClusterState()->getNodeCount(lib::NodeType::STORAGE);
+ const uint16_t old_node_count = old_state_bundle.getBaselineClusterState()->getNodeCount(lib::NodeType::STORAGE);
const uint16_t new_node_count = baseline_state.getNodeCount(lib::NodeType::STORAGE);
for (uint16_t i = 0; i < std::max(old_node_count, new_node_count); ++i) {
const auto& node_state = baseline_state.getNodeState(lib::Node(lib::NodeType::STORAGE, i)).getState();
@@ -291,12 +365,28 @@ DistributorStripe::enableClusterStateBundle(const lib::ClusterStateBundle& state
LOG(debug, "Node %u is down, clearing %zu pending maintenance operations", i, msgIds.size());
for (const auto & msgId : msgIds) {
- _maintenanceOperationOwner.erase(msgId);
+ (void)_maintenanceOperationOwner.erase(msgId);
}
}
}
}
+void
+DistributorStripe::enableClusterStateBundle(const lib::ClusterStateBundle& state)
+{
+ lib::ClusterStateBundle old_state = _clusterStateBundle;
+ _clusterStateBundle = state;
+
+ propagateClusterStates();
+ enterRecoveryMode();
+
+ if (_total_config->enable_operation_cancellation()) {
+ cancel_ops_for_unavailable_nodes(old_state, state);
+ } else {
+ legacy_erase_ops_for_unavailable_nodes(old_state, state);
+ }
+}
+
OperationRoutingSnapshot DistributorStripe::read_snapshot_for_bucket(const document::Bucket& bucket) const {
return _bucketDBUpdater.read_snapshot_for_bucket(bucket);
}
@@ -308,6 +398,10 @@ DistributorStripe::notifyDistributionChangeEnabled()
// Trigger a re-scan of bucket database, just like we do when a new cluster
// state has been enabled.
enterRecoveryMode();
+
+ if (_total_config->enable_operation_cancellation()) {
+ cancel_ops_for_unavailable_nodes(_clusterStateBundle, _clusterStateBundle);
+ }
}
void
@@ -850,6 +944,9 @@ DistributorStripe::remove_superfluous_buckets(document::BucketSpace bucket_space
const lib::ClusterState& new_state,
bool is_distribution_change)
{
+ if (_total_config->enable_operation_cancellation()) {
+ cancel_ops_for_buckets_no_longer_owned(bucket_space, new_state);
+ }
return bucket_db_updater().remove_superfluous_buckets(bucket_space, new_state, is_distribution_change);
}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index 338a6c72125..566e6ed454a 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -26,6 +26,7 @@
#include <atomic>
#include <mutex>
#include <queue>
+#include <span>
#include <unordered_map>
namespace storage {
@@ -135,6 +136,8 @@ public:
const lib::ClusterStateBundle& getClusterStateBundle() const override;
+ std::shared_ptr<Operation> maintenance_op_from_message_id(uint64_t msg_id) const noexcept override;
+
/**
* Called by bucket db updater after a merge has finished, and all the
* request bucket info operations have been performed as well. Passes the
@@ -251,6 +254,16 @@ private:
void enterRecoveryMode();
void leaveRecoveryMode();
+ void cancel_single_message_by_id_if_found(uint64_t msg_id, const CancelScope& cancel_scope);
+ void handle_node_down_edge_with_cancellations(uint16_t node_index, std::span<const uint64_t> msg_ids);
+ void cancel_ops_for_buckets_no_longer_owned(document::BucketSpace bucket_space, const lib::ClusterState& new_state);
+ // Note: old and new state bundles may be the same if this is called for distribution config changes
+ void cancel_ops_for_unavailable_nodes(const lib::ClusterStateBundle& old_state_bundle,
+ const lib::ClusterStateBundle& new_state_bundle);
+
+ void legacy_erase_ops_for_unavailable_nodes(const lib::ClusterStateBundle& old_state_bundle,
+ const lib::ClusterStateBundle& new_state_bundle);
+
// Tries to generate an operation from the given message. Returns true
// if we either returned an operation, or the message was otherwise handled
// (for instance, wrong distribution).
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
index dfed59499c6..14888de961e 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
@@ -18,6 +18,7 @@ namespace storage::distributor {
class DistributorMetricSet;
class NodeSupportedFeaturesRepo;
class PendingMessageTracker;
+class Operation;
/**
* TODO STRIPE add class comment.
@@ -27,7 +28,7 @@ class DistributorStripeInterface : public DistributorStripeMessageSender
public:
virtual DistributorMetricSet& getMetrics() = 0;
virtual void enableClusterStateBundle(const lib::ClusterStateBundle& state) = 0;
- virtual const lib::ClusterState* pendingClusterStateOrNull(const document::BucketSpace&) const = 0;
+ [[nodiscard]] virtual const lib::ClusterState* pendingClusterStateOrNull(const document::BucketSpace&) const = 0;
virtual void notifyDistributionChangeEnabled() = 0;
/**
@@ -57,7 +58,9 @@ public:
/**
* Returns true if the node is currently initializing.
*/
- virtual bool initializing() const = 0;
+ [[nodiscard]] virtual bool initializing() const = 0;
+
+ [[nodiscard]] virtual std::shared_ptr<Operation> maintenance_op_from_message_id(uint64_t msg_id) const noexcept = 0;
virtual void handleCompletedMerge(const std::shared_ptr<api::MergeBucketReply>&) = 0;
virtual const DistributorConfiguration& getConfig() const = 0;
virtual ChainedMessageSender& getMessageSender() = 0;
diff --git a/storage/src/vespa/storage/distributor/messagetracker.h b/storage/src/vespa/storage/distributor/messagetracker.h
index a0234f425a0..92cc921d91c 100644
--- a/storage/src/vespa/storage/distributor/messagetracker.h
+++ b/storage/src/vespa/storage/distributor/messagetracker.h
@@ -25,7 +25,7 @@ public:
uint16_t _target;
};
- MessageTracker(const ClusterContext& cluster_context);
+ explicit MessageTracker(const ClusterContext& cluster_context);
MessageTracker(MessageTracker&&) noexcept = default;
MessageTracker& operator=(MessageTracker&&) noexcept = delete;
MessageTracker(const MessageTracker &) = delete;
diff --git a/storage/src/vespa/storage/distributor/operationowner.cpp b/storage/src/vespa/storage/distributor/operationowner.cpp
index c92544c8cb5..16bbc36e4bc 100644
--- a/storage/src/vespa/storage/distributor/operationowner.cpp
+++ b/storage/src/vespa/storage/distributor/operationowner.cpp
@@ -70,10 +70,27 @@ OperationOwner::onClose()
}
}
-void
+std::shared_ptr<Operation>
+OperationOwner::find_by_id(api::StorageMessage::Id msg_id) const noexcept
+{
+ return _sentMessageMap.find_by_id_or_empty(msg_id);
+}
+
+bool
+OperationOwner::try_cancel_by_id(api::StorageMessage::Id id, const CancelScope& cancel_scope)
+{
+ auto* op = _sentMessageMap.find_by_id_or_nullptr(id);
+ if (!op) {
+ return false;
+ }
+ op->cancel(_sender, cancel_scope);
+ return true;
+}
+
+std::shared_ptr<Operation>
OperationOwner::erase(api::StorageMessage::Id msgId)
{
- (void)_sentMessageMap.pop(msgId);
+ return _sentMessageMap.pop(msgId);
}
}
diff --git a/storage/src/vespa/storage/distributor/operationowner.h b/storage/src/vespa/storage/distributor/operationowner.h
index 009bb5b80aa..828d776f1a6 100644
--- a/storage/src/vespa/storage/distributor/operationowner.h
+++ b/storage/src/vespa/storage/distributor/operationowner.h
@@ -9,6 +9,7 @@ namespace storage::framework { struct Clock; }
namespace storage::distributor {
+class CancelScope;
class Operation;
/**
@@ -87,10 +88,19 @@ public:
bool start(const std::shared_ptr<Operation>& operation, Priority priority) override;
/**
- If the given message exists, create a reply and pass it to the
- appropriate callback.
+ * If the given message exists, remove it from the internal operation mapping.
+ *
+ * Returns the operation the message belonged to, if any.
*/
- void erase(api::StorageMessage::Id msgId);
+ [[nodiscard]] std::shared_ptr<Operation> erase(api::StorageMessage::Id msgId);
+
+ /**
+ * Returns a strong ref to the pending operation with the given msg_id if it exists.
+ * Otherwise returns an empty shared_ptr.
+ */
+ [[nodiscard]] std::shared_ptr<Operation> find_by_id(api::StorageMessage::Id msg_id) const noexcept;
+
+ [[nodiscard]] bool try_cancel_by_id(api::StorageMessage::Id msg_id, const CancelScope& cancel_scope);
[[nodiscard]] DistributorStripeMessageSender& sender() noexcept { return _sender; }
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
index bd7f3709575..03be507f467 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
@@ -178,6 +178,8 @@ void CheckCondition::handle_internal_get_operation_reply(std::shared_ptr<api::St
if (_bucket_space.has_pending_cluster_state()) {
state_version_now = _bucket_space.get_pending_cluster_state().getVersion();
}
+ // TODO disable these explicit (and possibly costly) checks when cancellation is enabled,
+ // as cancellation shall cover a superset of the cases that can be detected here.
if ((state_version_now != _cluster_state_version_at_creation_time)
&& (replica_set_changed_after_get_operation()
|| distributor_no_longer_owns_bucket()))
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index c7f858de608..e4defda2bb0 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -28,8 +28,7 @@ PutOperation::PutOperation(const DistributorNodeContext& node_ctx,
PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencing_handle)
: SequencedOperation(std::move(sequencing_handle)),
- _tracker_instance(metric, std::make_shared<api::PutReply>(*msg), node_ctx, op_ctx, msg->getTimestamp()),
- _tracker(_tracker_instance),
+ _tracker(metric, std::make_shared<api::PutReply>(*msg), node_ctx, op_ctx, _cancel_scope),
_msg(std::move(msg)),
_doc_id_bucket_id(document::BucketIdFactory{}.getBucketId(_msg->getDocumentId())),
_node_ctx(node_ctx),
@@ -253,7 +252,6 @@ PutOperation::on_cancel(DistributorStripeMessageSender& sender, const CancelScop
if (_check_condition) {
_check_condition->cancel(sender, cancel_scope);
}
- _tracker.cancel(cancel_scope);
}
bool
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.h b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
index 8b8e3e15375..4d26ffda61e 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
@@ -39,8 +39,7 @@ public:
void onClose(DistributorStripeMessageSender& sender) override;
private:
- PersistenceMessageTrackerImpl _tracker_instance;
- PersistenceMessageTracker& _tracker;
+ PersistenceMessageTracker _tracker;
std::shared_ptr<api::PutCommand> _msg;
document::BucketId _doc_id_bucket_id;
const DistributorNodeContext& _node_ctx;
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
index 5f52a8208fc..07112add6e3 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
@@ -23,12 +23,7 @@ RemoveLocationOperation::RemoveLocationOperation(
std::shared_ptr<api::RemoveLocationCommand> msg,
PersistenceOperationMetricSet& metric)
: Operation(),
- _trackerInstance(metric,
- std::make_shared<api::RemoveLocationReply>(*msg),
- node_ctx,
- op_ctx,
- 0),
- _tracker(_trackerInstance),
+ _tracker(metric, std::make_shared<api::RemoveLocationReply>(*msg), node_ctx, op_ctx, _cancel_scope),
_msg(std::move(msg)),
_node_ctx(node_ctx),
_parser(parser),
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h
index d177676ff03..1ac4af0997a 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.h
@@ -10,8 +10,7 @@ namespace storage::distributor {
class DistributorBucketSpace;
-class RemoveLocationOperation : public Operation
-{
+class RemoveLocationOperation : public Operation {
public:
RemoveLocationOperation(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
@@ -32,14 +31,11 @@ public:
void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply>&) override;
void onClose(DistributorStripeMessageSender& sender) override;
private:
- PersistenceMessageTrackerImpl _trackerInstance;
- PersistenceMessageTracker& _tracker;
-
+ PersistenceMessageTracker _tracker;
std::shared_ptr<api::RemoveLocationCommand> _msg;
-
- const DistributorNodeContext& _node_ctx;
- const DocumentSelectionParser& _parser;
- DistributorBucketSpace &_bucketSpace;
+ const DistributorNodeContext& _node_ctx;
+ const DocumentSelectionParser& _parser;
+ DistributorBucketSpace& _bucketSpace;
};
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
index be43aac3d9e..7b38f8ca21e 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
@@ -19,10 +19,7 @@ RemoveOperation::RemoveOperation(const DistributorNodeContext& node_ctx,
PersistenceOperationMetricSet& condition_probe_metrics,
SequencingHandle sequencingHandle)
: SequencedOperation(std::move(sequencingHandle)),
- _tracker_instance(metric,
- std::make_shared<api::RemoveReply>(*msg),
- node_ctx, op_ctx, msg->getTimestamp()),
- _tracker(_tracker_instance),
+ _tracker(metric, std::make_shared<api::RemoveReply>(*msg), node_ctx, op_ctx, _cancel_scope),
_msg(std::move(msg)),
_doc_id_bucket_id(document::BucketIdFactory{}.getBucketId(_msg->getDocumentId())),
_node_ctx(node_ctx),
@@ -168,7 +165,6 @@ RemoveOperation::on_cancel(DistributorStripeMessageSender& sender, const CancelS
if (_check_condition) {
_check_condition->cancel(sender, cancel_scope);
}
- _tracker.cancel(cancel_scope);
}
bool RemoveOperation::has_condition() const noexcept {
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
index 221def81fdc..772047b96ca 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
@@ -32,8 +32,7 @@ public:
void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) override;
private:
- PersistenceMessageTrackerImpl _tracker_instance;
- PersistenceMessageTracker& _tracker;
+ PersistenceMessageTracker _tracker;
std::shared_ptr<api::RemoveCommand> _msg;
document::BucketId _doc_id_bucket_id;
const DistributorNodeContext& _node_ctx;
diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
index 60bddebbb89..6c80a192ab3 100644
--- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
@@ -25,9 +25,7 @@ UpdateOperation::UpdateOperation(const DistributorNodeContext& node_ctx,
std::vector<BucketDatabase::Entry> entries,
UpdateMetricSet& metric)
: Operation(),
- _trackerInstance(metric, std::make_shared<api::UpdateReply>(*msg),
- node_ctx, op_ctx, msg->getTimestamp()),
- _tracker(_trackerInstance),
+ _tracker(metric, std::make_shared<api::UpdateReply>(*msg), node_ctx, op_ctx, _cancel_scope),
_msg(msg),
_entries(std::move(entries)),
_new_timestamp(_msg->getTimestamp()),
@@ -207,13 +205,6 @@ UpdateOperation::onClose(DistributorStripeMessageSender& sender)
_tracker.fail(sender, api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down"));
}
-void
-UpdateOperation::on_cancel(DistributorStripeMessageSender&, const CancelScope& cancel_scope)
-{
- _tracker.cancel(cancel_scope);
-}
-
-
// The backend behavior of "create-if-missing" updates is to return the timestamp of the
// _new_ update operation if the document was created from scratch. The two-phase update
// operation logic auto-detects unexpected inconsistencies and tries to reconcile
diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.h b/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
index 7d2131d426d..750e50aeae5 100644
--- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
@@ -31,25 +31,22 @@ public:
std::string getStatus() const override { return ""; };
void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override;
void onClose(DistributorStripeMessageSender& sender) override;
- void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) override;
std::pair<document::BucketId, uint16_t> getNewestTimestampLocation() const {
return _newestTimestampLocation;
}
private:
- PersistenceMessageTrackerImpl _trackerInstance;
- PersistenceMessageTracker& _tracker;
- std::shared_ptr<api::UpdateCommand> _msg;
- std::vector<BucketDatabase::Entry> _entries;
- const api::Timestamp _new_timestamp;
- const bool _is_auto_create_update;
-
- const DistributorNodeContext& _node_ctx;
- DistributorStripeOperationContext& _op_ctx;
- DistributorBucketSpace &_bucketSpace;
+ PersistenceMessageTracker _tracker;
+ std::shared_ptr<api::UpdateCommand> _msg;
+ std::vector<BucketDatabase::Entry> _entries;
+ const api::Timestamp _new_timestamp;
+ const bool _is_auto_create_update;
+ const DistributorNodeContext& _node_ctx;
+ DistributorStripeOperationContext& _op_ctx;
+ DistributorBucketSpace& _bucketSpace;
std::pair<document::BucketId, uint16_t> _newestTimestampLocation;
- api::BucketInfo _infoAtSendTime; // Should be same across all replicas
+ api::BucketInfo _infoAtSendTime; // Should be same across all replicas
bool anyStorageNodesAvailable() const;
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
index bf64fa2eb82..e384163f421 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
@@ -23,7 +23,6 @@ GarbageCollectionOperation::GarbageCollectionOperation(const ClusterContext& clu
_cluster_state_version_at_phase1_start_time(0),
_remove_candidates(),
_replica_info(),
- _cancel_scope(),
_max_documents_removed(0),
_is_done(false)
{}
@@ -150,10 +149,6 @@ GarbageCollectionOperation::onReceive(DistributorStripeMessageSender& sender,
}
}
-void GarbageCollectionOperation::on_cancel(DistributorStripeMessageSender&, const CancelScope& cancel_scope) {
- _cancel_scope.merge(cancel_scope);
-}
-
void GarbageCollectionOperation::update_replica_response_info_from_reply(uint16_t from_node, const api::RemoveLocationReply& reply) {
_replica_info.emplace_back(_manager->operation_context().generate_unique_timestamp(),
from_node, reply.getBucketInfo());
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
index 97efbe694de..d5c6d655857 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
@@ -7,7 +7,6 @@
#include <vespa/storage/bucketdb/bucketcopy.h>
#include <vespa/storage/distributor/messagetracker.h>
#include <vespa/storage/distributor/operation_sequencer.h>
-#include <vespa/storage/distributor/operations/cancel_scope.h>
#include <vespa/vespalib/stllike/hash_map.h>
#include <vector>
@@ -23,7 +22,6 @@ public:
void onStart(DistributorStripeMessageSender& sender) override;
void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
- void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) override;
const char* getName() const noexcept override { return "garbagecollection"; };
Type getType() const noexcept override { return GARBAGE_COLLECTION; }
bool shouldBlockThisOperation(uint32_t, uint16_t, uint8_t) const override;
@@ -56,7 +54,6 @@ private:
RemoveCandidates _remove_candidates;
std::vector<SequencingHandle> _gc_write_locks;
std::vector<BucketCopy> _replica_info;
- CancelScope _cancel_scope;
uint32_t _max_documents_removed;
bool _is_done;
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
index a69b7739e07..09082e718e0 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
@@ -28,6 +28,7 @@ IdealStateOperation::IdealStateOperation(const BucketAndNodes& bucketAndNodes)
: _manager(nullptr),
_bucketSpace(nullptr),
_bucketAndNodes(bucketAndNodes),
+ _detailedReason(),
_priority(255),
_ok(true)
{
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
index 6c52bdb738d..ba4a2f95686 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
@@ -2,6 +2,7 @@
#pragma once
#include <vespa/storage/distributor/maintenance/maintenanceoperation.h>
+#include <vespa/storage/distributor/operations/cancel_scope.h>
#include <vespa/storageapi/messageapi/storagemessage.h>
#include <vespa/storageapi/messageapi/storagereply.h>
#include <vespa/storageapi/messageapi/maintenancecommand.h>
@@ -110,7 +111,7 @@ public:
using Vector = std::vector<SP>;
using Map = std::map<document::BucketId, SP>;
- IdealStateOperation(const BucketAndNodes& bucketAndNodes);
+ explicit IdealStateOperation(const BucketAndNodes& bucketAndNodes);
~IdealStateOperation() override;
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
index 616c4962dca..6153306861c 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
@@ -93,14 +93,18 @@ JoinOperation::enqueueJoinMessagePerTargetNode(
void
JoinOperation::onReceive(DistributorStripeMessageSender&, const api::StorageReply::SP& msg)
{
- auto& rep = static_cast<api::JoinBucketsReply&>(*msg);
+ auto& rep = dynamic_cast<api::JoinBucketsReply&>(*msg);
uint16_t node = _tracker.handleReply(rep);
if (node == 0xffff) {
LOG(debug, "Ignored reply since node was max uint16_t for unknown reasons");
return;
}
- if (rep.getResult().success()) {
+ _ok = rep.getResult().success();
+ if (_cancel_scope.node_is_cancelled(node)) {
+ LOG(debug, "Join operation for %s has been cancelled", getBucketId().toString().c_str());
+ _ok = false;
+ } else if (rep.getResult().success()) {
const std::vector<document::BucketId>& sourceBuckets(
rep.getSourceBuckets());
for (auto bucket : sourceBuckets) {
@@ -133,7 +137,6 @@ JoinOperation::onReceive(DistributorStripeMessageSender&, const api::StorageRepl
LOG(debug, "Join failed for %s with non-critical failure: %s",
getBucketId().toString().c_str(), rep.getResult().toString().c_str());
}
- _ok = rep.getResult().success();
LOG(debug, "Bucket %s join finished", getBucketId().toString().c_str());
if (_tracker.finished()) {
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
index 9469403daae..0a11a8233aa 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
@@ -177,7 +177,7 @@ MergeOperation::sourceOnlyCopyChangedDuringMerge(
const BucketDatabase::Entry& currentState) const
{
assert(currentState.valid());
- for (const auto & mnode : _mnodes) {
+ for (const auto& mnode : _mnodes) {
const BucketCopy* copyBefore(_infoBefore.getNode(mnode.index));
if (!copyBefore) {
continue;
@@ -205,7 +205,7 @@ MergeOperation::deleteSourceOnlyNodes(
{
assert(currentState.valid());
std::vector<uint16_t> sourceOnlyNodes;
- for (const auto & mnode : _mnodes) {
+ for (const auto& mnode : _mnodes) {
const uint16_t nodeIndex = mnode.index;
const BucketCopy* copy = currentState->getNode(nodeIndex);
if (!copy) {
@@ -272,7 +272,14 @@ MergeOperation::onReceive(DistributorStripeMessageSender& sender, const std::sha
api::ReturnCode result = reply.getResult();
_ok = result.success();
- if (_ok) {
+ // We avoid replica deletion entirely if _any_ aspect of the merge has been cancelled.
+ // It is, for instance, possible that a node that was previously considered source-only
+ // now is part of the #redundancy ideal copies because another node became unavailable.
+ // Leave it up to the maintenance state checkers to figure this out.
+ if (_cancel_scope.is_cancelled()) {
+ LOG(debug, "Merge operation for %s has been cancelled", getBucketId().toString().c_str());
+ _ok = false;
+ } else if (_ok) {
BucketDatabase::Entry entry(_bucketSpace->getBucketDatabase().get(getBucketId()));
if (!entry.valid()) {
LOG(debug, "Bucket %s no longer exists after merge", getBucketId().toString().c_str());
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
index 41767f0e3af..2184739f82c 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
@@ -59,13 +59,14 @@ RemoveBucketOperation::onReceiveInternal(const std::shared_ptr<api::StorageReply
{
auto* rep = dynamic_cast<api::DeleteBucketReply*>(msg.get());
- uint16_t node = _tracker.handleReply(*rep);
+ const uint16_t node = _tracker.handleReply(*rep);
- LOG(debug, "Got DeleteBucket reply for %s from node %u",
- getBucketId().toString().c_str(),
- node);
+ LOG(debug, "Got DeleteBucket reply for %s from node %u", getBucketId().toString().c_str(), node);
- if (rep->getResult().failed()) {
+ if (_cancel_scope.node_is_cancelled(node)) {
+ LOG(debug, "DeleteBucket operation for %s has been cancelled", getBucketId().toString().c_str());
+ _ok = false;
+ } else if (rep->getResult().failed()) {
if (rep->getResult().getResult() == api::ReturnCode::REJECTED
&& rep->getBucketInfo().valid())
{
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
index 531f7f64b68..5ddf082a544 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
@@ -69,13 +69,17 @@ void
SetBucketStateOperation::onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply>& reply)
{
- auto& rep = static_cast<api::SetBucketStateReply&>(*reply);
+ auto& rep = dynamic_cast<api::SetBucketStateReply&>(*reply);
const uint16_t node = _tracker.handleReply(rep);
LOG(debug, "Got %s from node %u", reply->toString(true).c_str(), node);
bool deactivate = false;
- if (reply->getResult().success()) {
+
+ if (_cancel_scope.node_is_cancelled(node)) {
+ LOG(debug, "SetBucketState for %s has been cancelled", rep.getBucketId().toString().c_str());
+ _ok = false;
+ } else if (reply->getResult().success()) {
BucketDatabase::Entry entry = _bucketSpace->getBucketDatabase().get(rep.getBucketId());
if (entry.valid()) {
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
index d704a42e96b..c894deeecd8 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
@@ -50,7 +50,7 @@ SplitOperation::onStart(DistributorStripeMessageSender& sender)
void
SplitOperation::onReceive(DistributorStripeMessageSender&, const api::StorageReply::SP& msg)
{
- auto & rep = static_cast<api::SplitBucketReply&>(*msg);
+ auto& rep = dynamic_cast<api::SplitBucketReply&>(*msg);
uint16_t node = _tracker.handleReply(rep);
@@ -61,7 +61,9 @@ SplitOperation::onReceive(DistributorStripeMessageSender&, const api::StorageRep
std::ostringstream ost;
- if (rep.getResult().success()) {
+ if (_cancel_scope.node_is_cancelled(node)) {
+ _ok = false;
+ } else if (rep.getResult().success()) {
BucketDatabase::Entry entry = _bucketSpace->getBucketDatabase().get(rep.getBucketId());
if (entry.valid()) {
diff --git a/storage/src/vespa/storage/distributor/operations/operation.cpp b/storage/src/vespa/storage/distributor/operations/operation.cpp
index 9f944a94178..f60dc8eecff 100644
--- a/storage/src/vespa/storage/distributor/operations/operation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/operation.cpp
@@ -13,7 +13,7 @@ namespace storage::distributor {
Operation::Operation()
: _startTime(),
- _cancelled(false)
+ _cancel_scope()
{
}
@@ -47,7 +47,7 @@ Operation::copyMessageSettings(const api::StorageCommand& source, api::StorageCo
}
void Operation::cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) {
- _cancelled = true;
+ _cancel_scope.merge(cancel_scope);
on_cancel(sender, cancel_scope);
}
diff --git a/storage/src/vespa/storage/distributor/operations/operation.h b/storage/src/vespa/storage/distributor/operations/operation.h
index 64caacfc642..c742f918c30 100644
--- a/storage/src/vespa/storage/distributor/operations/operation.h
+++ b/storage/src/vespa/storage/distributor/operations/operation.h
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include "cancel_scope.h"
#include <vespa/vdslib/state/nodetype.h>
#include <vespa/storage/distributor/distributormessagesender.h>
#include <vespa/vespalib/util/time.h>
@@ -68,13 +69,15 @@ public:
*/
void cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope);
+ [[nodiscard]] const CancelScope& cancel_scope() const noexcept { return _cancel_scope; }
+
/**
* Whether cancel() has been invoked at least once on this instance. This does not
* distinguish between cancellations caused by ownership transfers and those caused
* by nodes becoming unavailable; Operation implementations that care about this need
- * to implement cancel() themselves and inspect the provided CancelScope.
+ * to inspect cancel_scope() themselves.
*/
- [[nodiscard]] bool is_cancelled() const noexcept { return _cancelled; }
+ [[nodiscard]] bool is_cancelled() const noexcept { return _cancel_scope.is_cancelled(); }
/**
* Returns true if we are blocked to start this operation given
@@ -118,7 +121,7 @@ protected:
static constexpr vespalib::duration MAX_TIMEOUT = 3600s;
vespalib::system_time _startTime;
- bool _cancelled;
+ CancelScope _cancel_scope;
};
}
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
index 498f3a5feab..a0c4d6786f6 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
@@ -13,22 +13,21 @@ LOG_SETUP(".persistencemessagetracker");
namespace storage::distributor {
-PersistenceMessageTrackerImpl::PersistenceMessageTrackerImpl(
+PersistenceMessageTracker::PersistenceMessageTracker(
PersistenceOperationMetricSet& metric,
std::shared_ptr<api::BucketInfoReply> reply,
const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
- api::Timestamp revertTimestamp)
+ CancelScope& cancel_scope)
: MessageTracker(node_ctx),
_remapBucketInfo(),
_bucketInfo(),
_metric(metric),
_reply(std::move(reply)),
_op_ctx(op_ctx),
- _revertTimestamp(revertTimestamp),
_trace(_reply->getTrace().getLevel()),
_requestTimer(node_ctx.clock()),
- _cancel_scope(),
+ _cancel_scope(cancel_scope),
_n_persistence_replies_total(0),
_n_successful_persistence_replies(0),
_priority(_reply->getPriority()),
@@ -36,33 +35,35 @@ PersistenceMessageTrackerImpl::PersistenceMessageTrackerImpl(
{
}
-PersistenceMessageTrackerImpl::~PersistenceMessageTrackerImpl() = default;
+PersistenceMessageTracker::~PersistenceMessageTracker() = default;
-void
-PersistenceMessageTrackerImpl::cancel(const CancelScope& cancel_scope)
-{
- _cancel_scope.merge(cancel_scope);
-}
-
-void
-PersistenceMessageTrackerImpl::prune_cancelled_nodes_if_present(
+PersistenceMessageTracker::PostPruningStatus
+PersistenceMessageTracker::prune_cancelled_nodes_if_present(
BucketInfoMap& bucket_and_replicas,
const CancelScope& cancel_scope)
{
+ bool any_replicas = false;
for (auto& info : bucket_and_replicas) {
info.second = prune_cancelled_nodes(info.second, cancel_scope);
+ any_replicas |= !info.second.empty();
}
+ return (any_replicas ? PostPruningStatus::ReplicasStillPresent
+ : PostPruningStatus::NoReplicasPresent);
}
void
-PersistenceMessageTrackerImpl::updateDB()
+PersistenceMessageTracker::updateDB()
{
if (_cancel_scope.is_cancelled()) {
if (_cancel_scope.fully_cancelled()) {
return; // Fully cancelled ops cannot mutate the DB at all
}
- prune_cancelled_nodes_if_present(_bucketInfo, _cancel_scope);
- prune_cancelled_nodes_if_present(_remapBucketInfo, _cancel_scope);
+ const bool any_infos = still_has_replicas(prune_cancelled_nodes_if_present(_bucketInfo, _cancel_scope));
+ const bool any_remapped = still_has_replicas(prune_cancelled_nodes_if_present(_remapBucketInfo, _cancel_scope));
+ if (!(any_infos || any_remapped)) {
+ LOG(spam, "No usable bucket info left after pruning; returning without updating DB");
+ return;
+ }
}
for (const auto & entry : _bucketInfo) {
@@ -75,7 +76,7 @@ PersistenceMessageTrackerImpl::updateDB()
}
void
-PersistenceMessageTrackerImpl::updateMetrics()
+PersistenceMessageTracker::updateMetrics()
{
const api::ReturnCode& result(_reply->getResult());
_metric.updateFromResult(result);
@@ -83,7 +84,7 @@ PersistenceMessageTrackerImpl::updateMetrics()
}
void
-PersistenceMessageTrackerImpl::fail(MessageSender& sender, const api::ReturnCode& result) {
+PersistenceMessageTracker::fail(MessageSender& sender, const api::ReturnCode& result) {
if (_reply.get()) {
_reply->setResult(result);
updateMetrics();
@@ -94,7 +95,7 @@ PersistenceMessageTrackerImpl::fail(MessageSender& sender, const api::ReturnCode
}
uint16_t
-PersistenceMessageTrackerImpl::receiveReply(MessageSender& sender, api::BucketInfoReply& reply)
+PersistenceMessageTracker::receiveReply(MessageSender& sender, api::BucketInfoReply& reply)
{
uint16_t node = handleReply(reply);
@@ -106,27 +107,7 @@ PersistenceMessageTrackerImpl::receiveReply(MessageSender& sender, api::BucketIn
}
void
-PersistenceMessageTrackerImpl::revert(MessageSender& sender, const std::vector<BucketNodePair>& revertNodes)
-{
- if (_revertTimestamp != 0) {
- // Since we're reverting, all received bucket info is voided.
- _bucketInfo.clear();
-
- std::vector<api::Timestamp> reverts;
- reverts.push_back(_revertTimestamp);
-
- for (const auto & revertNode : revertNodes) {
- auto toRevert = std::make_shared<api::RevertCommand>(revertNode.first, reverts);
- toRevert->setPriority(_priority);
- queueCommand(std::move(toRevert), revertNode.second);
- }
-
- flushQueue(sender);
- }
-}
-
-void
-PersistenceMessageTrackerImpl::queueMessageBatch(std::vector<MessageTracker::ToSend> messages) {
+PersistenceMessageTracker::queueMessageBatch(std::vector<MessageTracker::ToSend> messages) {
_messageBatches.emplace_back();
auto & batch = _messageBatches.back();
batch.reserve(messages.size());
@@ -142,7 +123,7 @@ PersistenceMessageTrackerImpl::queueMessageBatch(std::vector<MessageTracker::ToS
}
bool
-PersistenceMessageTrackerImpl::canSendReplyEarly() const
+PersistenceMessageTracker::canSendReplyEarly() const
{
if (!_reply.get() || !_reply->getResult().success()) {
LOG(spam, "Can't return early because we have already replied or failed");
@@ -181,7 +162,7 @@ PersistenceMessageTrackerImpl::canSendReplyEarly() const
}
void
-PersistenceMessageTrackerImpl::addBucketInfoFromReply(uint16_t node, const api::BucketInfoReply& reply)
+PersistenceMessageTracker::addBucketInfoFromReply(uint16_t node, const api::BucketInfoReply& reply)
{
document::Bucket bucket(reply.getBucket());
const api::BucketInfo& bucketInfo(reply.getBucketInfo());
@@ -198,7 +179,7 @@ PersistenceMessageTrackerImpl::addBucketInfoFromReply(uint16_t node, const api::
}
void
-PersistenceMessageTrackerImpl::logSuccessfulReply(uint16_t node, const api::BucketInfoReply& reply) const
+PersistenceMessageTracker::logSuccessfulReply(uint16_t node, const api::BucketInfoReply& reply) const
{
LOG(spam, "Bucket %s: Received successful reply %s",
reply.getBucketId().toString().c_str(), reply.toString().c_str());
@@ -210,27 +191,20 @@ PersistenceMessageTrackerImpl::logSuccessfulReply(uint16_t node, const api::Buck
}
}
-bool
-PersistenceMessageTrackerImpl::shouldRevert() const
-{
- return _op_ctx.distributor_config().enable_revert()
- && !_revertNodes.empty() && !_success && _reply;
-}
-
-bool PersistenceMessageTrackerImpl::has_majority_successful_replies() const noexcept {
+bool PersistenceMessageTracker::has_majority_successful_replies() const noexcept {
// FIXME this has questionable interaction with early client ACK since we only count
// the number of observed replies rather than the number of total requests sent.
// ... but the early ACK-feature dearly needs a redesign anyway.
return (_n_successful_persistence_replies >= (_n_persistence_replies_total / 2 + 1));
}
-bool PersistenceMessageTrackerImpl::has_minority_test_and_set_failure() const noexcept {
+bool PersistenceMessageTracker::has_minority_test_and_set_failure() const noexcept {
return ((_reply->getResult().getResult() == api::ReturnCode::TEST_AND_SET_CONDITION_FAILED)
&& has_majority_successful_replies());
}
void
-PersistenceMessageTrackerImpl::sendReply(MessageSender& sender)
+PersistenceMessageTracker::sendReply(MessageSender& sender)
{
// If we've observed _partial_ TaS failures but have had a majority of good ACKs,
// treat the reply as successful. This is because the ACKed write(s) will eventually
@@ -247,7 +221,7 @@ PersistenceMessageTrackerImpl::sendReply(MessageSender& sender)
}
void
-PersistenceMessageTrackerImpl::updateFailureResult(const api::BucketInfoReply& reply)
+PersistenceMessageTracker::updateFailureResult(const api::BucketInfoReply& reply)
{
LOG(debug, "Bucket %s: Received failed reply %s with result %s",
reply.getBucketId().toString().c_str(), reply.toString().c_str(), reply.getResult().toString().c_str());
@@ -259,13 +233,13 @@ PersistenceMessageTrackerImpl::updateFailureResult(const api::BucketInfoReply& r
}
bool
-PersistenceMessageTrackerImpl::node_is_effectively_cancelled(uint16_t node) const noexcept
+PersistenceMessageTracker::node_is_effectively_cancelled(uint16_t node) const noexcept
{
return _cancel_scope.node_is_cancelled(node); // Implicitly covers the fully cancelled case
}
void
-PersistenceMessageTrackerImpl::handleCreateBucketReply(api::BucketInfoReply& reply, uint16_t node)
+PersistenceMessageTracker::handleCreateBucketReply(api::BucketInfoReply& reply, uint16_t node)
{
LOG(spam, "Received CreateBucket reply for %s from node %u", reply.getBucketId().toString().c_str(), node);
if (!reply.getResult().success()
@@ -285,7 +259,7 @@ PersistenceMessageTrackerImpl::handleCreateBucketReply(api::BucketInfoReply& rep
}
void
-PersistenceMessageTrackerImpl::handlePersistenceReply(api::BucketInfoReply& reply, uint16_t node)
+PersistenceMessageTracker::handlePersistenceReply(api::BucketInfoReply& reply, uint16_t node)
{
++_n_persistence_replies_total;
if (reply.getBucketInfo().valid()) {
@@ -293,7 +267,6 @@ PersistenceMessageTrackerImpl::handlePersistenceReply(api::BucketInfoReply& repl
}
if (reply.getResult().success()) {
logSuccessfulReply(node, reply);
- _revertNodes.emplace_back(reply.getBucket(), node);
++_n_successful_persistence_replies;
} else if (!hasSentReply()) {
updateFailureResult(reply);
@@ -301,7 +274,7 @@ PersistenceMessageTrackerImpl::handlePersistenceReply(api::BucketInfoReply& repl
}
void
-PersistenceMessageTrackerImpl::transfer_trace_state_to_reply()
+PersistenceMessageTracker::transfer_trace_state_to_reply()
{
if (!_trace.isEmpty()) {
_trace.setStrict(false);
@@ -310,7 +283,7 @@ PersistenceMessageTrackerImpl::transfer_trace_state_to_reply()
}
void
-PersistenceMessageTrackerImpl::updateFromReply(MessageSender& sender, api::BucketInfoReply& reply, uint16_t node)
+PersistenceMessageTracker::updateFromReply(MessageSender& sender, api::BucketInfoReply& reply, uint16_t node)
{
_trace.addChild(reply.steal_trace());
@@ -321,16 +294,11 @@ PersistenceMessageTrackerImpl::updateFromReply(MessageSender& sender, api::Bucke
}
if (finished()) {
- bool doRevert(shouldRevert());
-
updateDB();
if (!hasSentReply()) {
sendReply(sender);
}
- if (doRevert) {
- revert(sender, _revertNodes);
- }
} else if (canSendReplyEarly()) {
LOG(debug, "Sending reply early because initial redundancy has been reached");
sendReply(sender);
@@ -338,7 +306,7 @@ PersistenceMessageTrackerImpl::updateFromReply(MessageSender& sender, api::Bucke
}
void
-PersistenceMessageTrackerImpl::add_trace_tree_to_reply(vespalib::Trace trace)
+PersistenceMessageTracker::add_trace_tree_to_reply(vespalib::Trace trace)
{
_trace.addChild(std::move(trace));
}
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.h b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
index 8c44d70062c..00e97b12a94 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.h
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
@@ -11,51 +11,28 @@
namespace storage::distributor {
-struct PersistenceMessageTracker {
- virtual ~PersistenceMessageTracker() = default;
- using ToSend = MessageTracker::ToSend;
-
- virtual void cancel(const CancelScope& cancel_scope) = 0;
- virtual void fail(MessageSender&, const api::ReturnCode&) = 0;
- virtual void queueMessageBatch(std::vector<ToSend> messages) = 0;
- virtual uint16_t receiveReply(MessageSender&, api::BucketInfoReply&) = 0;
- virtual std::shared_ptr<api::BucketInfoReply>& getReply() = 0;
- virtual void updateFromReply(MessageSender&, api::BucketInfoReply&, uint16_t node) = 0;
- virtual void queueCommand(api::BucketCommand::SP, uint16_t target) = 0;
- virtual void flushQueue(MessageSender&) = 0;
- virtual uint16_t handleReply(api::BucketReply& reply) = 0;
- virtual void add_trace_tree_to_reply(vespalib::Trace trace) = 0;
-};
-
-class PersistenceMessageTrackerImpl final
- : public PersistenceMessageTracker,
- public MessageTracker
-{
+class PersistenceMessageTracker final : public MessageTracker {
public:
- PersistenceMessageTrackerImpl(PersistenceOperationMetricSet& metric,
- std::shared_ptr<api::BucketInfoReply> reply,
- const DistributorNodeContext& node_ctx,
- DistributorStripeOperationContext& op_ctx,
- api::Timestamp revertTimestamp = 0);
- ~PersistenceMessageTrackerImpl() override;
+ using ToSend = MessageTracker::ToSend;
- void cancel(const CancelScope& cancel_scope) override;
+ PersistenceMessageTracker(PersistenceOperationMetricSet& metric,
+ std::shared_ptr<api::BucketInfoReply> reply,
+ const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ CancelScope& cancel_scope);
+ ~PersistenceMessageTracker();
void updateDB();
void updateMetrics();
[[nodiscard]] bool success() const noexcept { return _success; }
- void fail(MessageSender& sender, const api::ReturnCode& result) override;
+ void fail(MessageSender& sender, const api::ReturnCode& result);
/**
Returns the node the reply was from.
*/
- uint16_t receiveReply(MessageSender& sender, api::BucketInfoReply& reply) override;
- void updateFromReply(MessageSender& sender, api::BucketInfoReply& reply, uint16_t node) override;
- std::shared_ptr<api::BucketInfoReply>& getReply() override { return _reply; }
-
- using BucketNodePair = std::pair<document::Bucket, uint16_t>;
-
- void revert(MessageSender& sender, const std::vector<BucketNodePair>& revertNodes);
+ uint16_t receiveReply(MessageSender& sender, api::BucketInfoReply& reply);
+ void updateFromReply(MessageSender& sender, api::BucketInfoReply& reply, uint16_t node);
+ std::shared_ptr<api::BucketInfoReply>& getReply() { return _reply; }
/**
Sends a set of messages that are permissible for early return.
@@ -63,7 +40,9 @@ public:
have at most (messages.size() - initial redundancy) messages left in the
queue and have it's first message be done.
*/
- void queueMessageBatch(std::vector<MessageTracker::ToSend> messages) override;
+ void queueMessageBatch(std::vector<MessageTracker::ToSend> messages);
+
+ void add_trace_tree_to_reply(vespalib::Trace trace);
private:
using MessageBatch = std::vector<uint64_t>;
@@ -75,23 +54,31 @@ private:
PersistenceOperationMetricSet& _metric;
std::shared_ptr<api::BucketInfoReply> _reply;
DistributorStripeOperationContext& _op_ctx;
- api::Timestamp _revertTimestamp;
- std::vector<BucketNodePair> _revertNodes;
mbus::Trace _trace;
framework::MilliSecTimer _requestTimer;
- CancelScope _cancel_scope;
+ CancelScope& _cancel_scope;
uint32_t _n_persistence_replies_total;
uint32_t _n_successful_persistence_replies;
uint8_t _priority;
bool _success;
- static void prune_cancelled_nodes_if_present(BucketInfoMap& bucket_and_replicas,
- const CancelScope& cancel_scope);
+ enum class PostPruningStatus {
+ ReplicasStillPresent,
+ NoReplicasPresent
+ };
+
+ constexpr static bool still_has_replicas(PostPruningStatus status) {
+ return status == PostPruningStatus::ReplicasStillPresent;
+ }
+
+ // Returns ReplicasStillPresent iff `bucket_and_replicas` has at least 1 usable entry after pruning,
+ // otherwise returns NoReplicasPresent
+ [[nodiscard]] static PostPruningStatus prune_cancelled_nodes_if_present(BucketInfoMap& bucket_and_replicas,
+ const CancelScope& cancel_scope);
[[nodiscard]] bool canSendReplyEarly() const;
void addBucketInfoFromReply(uint16_t node, const api::BucketInfoReply& reply);
void logSuccessfulReply(uint16_t node, const api::BucketInfoReply& reply) const;
[[nodiscard]] bool hasSentReply() const noexcept { return !_reply; }
- [[nodiscard]] bool shouldRevert() const;
[[nodiscard]] bool has_majority_successful_replies() const noexcept;
[[nodiscard]] bool has_minority_test_and_set_failure() const noexcept;
void sendReply(MessageSender& sender);
@@ -100,13 +87,6 @@ private:
void handleCreateBucketReply(api::BucketInfoReply& reply, uint16_t node);
void handlePersistenceReply(api::BucketInfoReply& reply, uint16_t node);
void transfer_trace_state_to_reply();
-
- void queueCommand(std::shared_ptr<api::BucketCommand> msg, uint16_t target) override {
- MessageTracker::queueCommand(std::move(msg), target);
- }
- void flushQueue(MessageSender& s) override { MessageTracker::flushQueue(s); }
- uint16_t handleReply(api::BucketReply& r) override { return MessageTracker::handleReply(r); }
- void add_trace_tree_to_reply(vespalib::Trace trace) override;
};
}
diff --git a/storage/src/vespa/storage/distributor/sentmessagemap.cpp b/storage/src/vespa/storage/distributor/sentmessagemap.cpp
index 4b7292c1e81..2ae70f417d1 100644
--- a/storage/src/vespa/storage/distributor/sentmessagemap.cpp
+++ b/storage/src/vespa/storage/distributor/sentmessagemap.cpp
@@ -18,18 +18,30 @@ SentMessageMap::SentMessageMap()
SentMessageMap::~SentMessageMap() = default;
+Operation*
+SentMessageMap::find_by_id_or_nullptr(api::StorageMessage::Id id) const noexcept
+{
+ auto iter = _map.find(id);
+ return ((iter != _map.end()) ? iter->second.get() : nullptr);
+}
+
+std::shared_ptr<Operation>
+SentMessageMap::find_by_id_or_empty(api::StorageMessage::Id id) const noexcept
+{
+ auto iter = _map.find(id);
+ return ((iter != _map.end()) ? iter->second : std::shared_ptr<Operation>());
+}
std::shared_ptr<Operation>
SentMessageMap::pop()
{
auto found = _map.begin();
-
if (found != _map.end()) {
- std::shared_ptr<Operation> retVal = found->second;
+ std::shared_ptr<Operation> op = std::move(found->second);
_map.erase(found);
- return retVal;
+ return op;
} else {
- return std::shared_ptr<Operation>();
+ return {};
}
}
@@ -37,17 +49,15 @@ std::shared_ptr<Operation>
SentMessageMap::pop(api::StorageMessage::Id id)
{
auto found = _map.find(id);
-
if (found != _map.end()) {
LOG(spam, "Found Id %" PRIu64 " in callback map: %p", id, found->second.get());
- std::shared_ptr<Operation> retVal = found->second;
+ std::shared_ptr<Operation> op = std::move(found->second);
_map.erase(found);
- return retVal;
+ return op;
} else {
LOG(spam, "Did not find Id %" PRIu64 " in callback map", id);
-
- return std::shared_ptr<Operation>();
+ return {};
}
}
@@ -55,7 +65,6 @@ void
SentMessageMap::insert(api::StorageMessage::Id id, const std::shared_ptr<Operation> & callback)
{
LOG(spam, "Inserting callback %p for message %" PRIu64 "", callback.get(), id);
-
_map[id] = callback;
}
diff --git a/storage/src/vespa/storage/distributor/sentmessagemap.h b/storage/src/vespa/storage/distributor/sentmessagemap.h
index 951ed6a6877..3ad80f4e55d 100644
--- a/storage/src/vespa/storage/distributor/sentmessagemap.h
+++ b/storage/src/vespa/storage/distributor/sentmessagemap.h
@@ -15,6 +15,11 @@ public:
SentMessageMap();
~SentMessageMap();
+ // Find by message ID, or nullptr if not found
+ [[nodiscard]] Operation* find_by_id_or_nullptr(api::StorageMessage::Id id) const noexcept;
+ // Find by message ID, or empty shared_ptr if not found
+ [[nodiscard]] std::shared_ptr<Operation> find_by_id_or_empty(api::StorageMessage::Id id) const noexcept;
+
[[nodiscard]] std::shared_ptr<Operation> pop(api::StorageMessage::Id id);
[[nodiscard]] std::shared_ptr<Operation> pop();
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
index fd747484ccf..ad8fae9cd74 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
@@ -130,6 +130,7 @@ StripeBucketDBUpdater::sendRequestBucketInfo(
const document::Bucket& bucket,
const std::shared_ptr<MergeReplyGuard>& mergeReplyGuard)
{
+ // TODO assert if cancellation enabled
if (!_op_ctx.storage_node_is_up(bucket.getBucketSpace(), node)) {
return;
}
@@ -342,14 +343,19 @@ StripeBucketDBUpdater::onMergeBucketReply(
const std::shared_ptr<api::MergeBucketReply>& reply)
{
auto replyGuard = std::make_shared<MergeReplyGuard>(_distributor_interface, reply);
+ auto merge_op = _distributor_interface.maintenance_op_from_message_id(reply->getMsgId());
// In case the merge was unsuccessful somehow, or some nodes weren't
// actually merged (source-only nodes?) we request the bucket info of the
// bucket again to make sure it's ok.
for (uint32_t i = 0; i < reply->getNodes().size(); i++) {
- sendRequestBucketInfo(reply->getNodes()[i].index,
- reply->getBucket(),
- replyGuard);
+ const uint16_t node_index = reply->getNodes()[i].index;
+ // We conditionally omit the node instead of conditionally send to it, as many tests do not
+ // wire their merges through the main distributor maintenance operation tracking locking.
+ if (merge_op && merge_op->cancel_scope().node_is_cancelled(node_index)) {
+ continue;
+ }
+ sendRequestBucketInfo(node_index, reply->getBucket(), replyGuard);
}
return true;
@@ -502,8 +508,9 @@ StripeBucketDBUpdater::processSingleBucketInfoReply(const std::shared_ptr<api::R
BucketRequest req = iter->second;
_sentMessages.erase(iter);
- if (!_op_ctx.storage_node_is_up(req.bucket.getBucketSpace(), req.targetNode)) {
- // Ignore replies from nodes that are down.
+ // TODO remove explicit node check in favor of cancellation only
+ if (req.cancelled || !_op_ctx.storage_node_is_up(req.bucket.getBucketSpace(), req.targetNode)) {
+ // Ignore replies from nodes that are cancelled/down.
return true;
}
if (repl->getResult().getResult() != api::ReturnCode::OK) {
@@ -516,6 +523,17 @@ StripeBucketDBUpdater::processSingleBucketInfoReply(const std::shared_ptr<api::R
return true;
}
+bool
+StripeBucketDBUpdater::cancel_message_by_id(uint64_t msg_id)
+{
+ auto iter = _sentMessages.find(msg_id);
+ if (iter == _sentMessages.end()) {
+ return false;
+ }
+ iter->second.cancelled = true;
+ return true;
+}
+
void
StripeBucketDBUpdater::addBucketInfoForNode(const BucketDatabase::Entry& e, uint16_t node,
BucketListMerger::BucketList& existing)
@@ -652,12 +670,10 @@ StripeBucketDBUpdater::MergingNodeRemover::MergingNodeRemover(
_nonOwnedBuckets(),
_removed_buckets(0),
_removed_documents(0),
- _localIndex(localIndex),
_distribution(distribution),
_upStates(upStates),
- _track_non_owned_entries(track_non_owned_entries),
- _cachedDecisionSuperbucket(UINT64_MAX),
- _cachedOwned(false)
+ _ownership_calc(_state, _distribution, localIndex),
+ _track_non_owned_entries(track_non_owned_entries)
{
const uint16_t storage_count = s.getNodeCount(lib::NodeType::STORAGE);
_available_nodes.resize(storage_count);
@@ -678,45 +694,15 @@ StripeBucketDBUpdater::MergingNodeRemover::logRemove(const document::BucketId& b
LOG(spam, "Removing bucket %s: %s", bucketId.toString().c_str(), msg);
}
-namespace {
-
-uint64_t superbucket_from_id(const document::BucketId& id, uint16_t distribution_bits) noexcept {
- // The n LSBs of the bucket ID contain the superbucket number. Mask off the rest.
- return id.getRawId() & ~(UINT64_MAX << distribution_bits);
-}
-
-}
-
bool
StripeBucketDBUpdater::MergingNodeRemover::distributorOwnsBucket(
const document::BucketId& bucketId) const
{
- // TODO "no distributors available" case is the same for _all_ buckets; cache once in constructor.
- // TODO "too few bits used" case can be cheaply checked without needing exception
- try {
- const auto bits = _state.getDistributionBitCount();
- const auto this_superbucket = superbucket_from_id(bucketId, bits);
- if (_cachedDecisionSuperbucket == this_superbucket) {
- if (!_cachedOwned) {
- logRemove(bucketId, "bucket now owned by another distributor (cached)");
- }
- return _cachedOwned;
- }
-
- uint16_t distributor = _distribution.getIdealDistributorNode(_state, bucketId, "uim");
- _cachedDecisionSuperbucket = this_superbucket;
- _cachedOwned = (distributor == _localIndex);
- if (!_cachedOwned) {
- logRemove(bucketId, "bucket now owned by another distributor");
- return false;
- }
- return true;
- } catch (lib::TooFewBucketBitsInUseException& exc) {
- logRemove(bucketId, "using too few distribution bits now");
- } catch (lib::NoDistributorsAvailableException& exc) {
- logRemove(bucketId, "no distributors are available");
+ const bool owns_bucket = _ownership_calc.this_distributor_owns_bucket(bucketId);
+ if (!owns_bucket) {
+ logRemove(bucketId, "bucket now owned by another distributor");
}
- return false;
+ return owns_bucket;
}
void
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
index 2e4ef2a7543..9536c84691d 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include "bucket_ownership_calculator.h"
#include "bucketlistmerger.h"
#include "distributor_stripe_component.h"
#include "distributormessagesender.h"
@@ -49,6 +50,7 @@ public:
bool onMergeBucketReply(const std::shared_ptr<api::MergeBucketReply>& reply) override;
bool onNotifyBucketChange(const std::shared_ptr<api::NotifyBucketChangeCommand>&) override;
void resendDelayedMessages();
+ [[nodiscard]] bool cancel_message_by_id(uint64_t msg_id);
vespalib::string reportXmlStatus(vespalib::xml::XmlOutputStream&, const framework::HttpUrlPath&) const;
vespalib::string getReportContentType(const framework::HttpUrlPath&) const override;
@@ -75,7 +77,8 @@ public:
private:
class MergeReplyGuard {
public:
- MergeReplyGuard(DistributorStripeInterface& distributor_interface, const std::shared_ptr<api::MergeBucketReply>& reply) noexcept
+ MergeReplyGuard(DistributorStripeInterface& distributor_interface,
+ const std::shared_ptr<api::MergeBucketReply>& reply) noexcept
: _distributor_interface(distributor_interface), _reply(reply) {}
~MergeReplyGuard();
@@ -89,22 +92,23 @@ private:
};
struct BucketRequest {
- BucketRequest()
- : targetNode(0), bucket(), timestamp(0) {};
+ BucketRequest() noexcept : targetNode(0), bucket(), timestamp(0), cancelled(false) {}
BucketRequest(uint16_t t, uint64_t currentTime, const document::Bucket& b,
- const std::shared_ptr<MergeReplyGuard>& guard)
+ const std::shared_ptr<MergeReplyGuard>& guard) noexcept
: targetNode(t),
bucket(b),
timestamp(currentTime),
- _mergeReplyGuard(guard) {};
+ _mergeReplyGuard(guard),
+ cancelled(false)
+ {}
void print_xml_tag(vespalib::xml::XmlOutputStream &xos, const vespalib::xml::XmlAttribute &timestampAttribute) const;
uint16_t targetNode;
document::Bucket bucket;
uint64_t timestamp;
-
std::shared_ptr<MergeReplyGuard> _mergeReplyGuard;
+ bool cancelled;
};
struct EnqueuedBucketRecheck {
@@ -148,7 +152,7 @@ private:
static void convertBucketInfoToBucketList(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
uint16_t targetNode, BucketListMerger::BucketList& newList);
void sendRequestBucketInfo(uint16_t node, const document::Bucket& bucket,
-                           const std::shared_ptr<MergeReplyGuard>& mergeReplyGuard );
+ const std::shared_ptr<MergeReplyGuard>& mergeReplyGuard);
static void addBucketInfoForNode(const BucketDatabase::Entry& e, uint16_t node,
BucketListMerger::BucketList& existing);
void clearReadOnlyBucketRepoDatabases();
@@ -218,12 +222,10 @@ private:
std::vector<BucketDatabase::Entry> _nonOwnedBuckets;
size_t _removed_buckets;
size_t _removed_documents;
- uint16_t _localIndex;
const lib::Distribution& _distribution;
const char* _upStates;
+ BucketOwnershipCalculator _ownership_calc;
bool _track_non_owned_entries;
- mutable uint64_t _cachedDecisionSuperbucket;
- mutable bool _cachedOwned;
};
using DistributionContexts = std::unordered_map<document::BucketSpace,
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
index 808f19be3e5..45bcd6a98b5 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
@@ -286,7 +286,6 @@ FileStorHandlerImpl::messageMayBeAborted(const api::StorageMessage& msg)
switch (msg.getType().getId()) {
case api::MessageType::PUT_ID:
case api::MessageType::REMOVE_ID:
- case api::MessageType::REVERT_ID:
case api::MessageType::MERGEBUCKET_ID:
case api::MessageType::GETBUCKETDIFF_ID:
case api::MessageType::APPLYBUCKETDIFF_ID:
@@ -611,7 +610,6 @@ FileStorHandlerImpl::remapMessage(api::StorageMessage& msg, const document::Buck
break;
}
case api::MessageType::STAT_ID:
- case api::MessageType::REVERT_ID:
case api::MessageType::REMOVELOCATION_ID:
case api::MessageType::SETBUCKETSTATE_ID:
{
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
index d2c3cea44b0..777b9a93be6 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestormanager.cpp
@@ -447,16 +447,6 @@ FileStorManager::onRemove(const shared_ptr<api::RemoveCommand>& cmd)
}
bool
-FileStorManager::onRevert(const shared_ptr<api::RevertCommand>& cmd)
-{
- StorBucketDatabase::WrappedEntry entry(mapOperationToBucketAndDisk(*cmd, 0));
- if (entry.exists()) {
- handlePersistenceMessage(cmd);
- }
- return true;
-}
-
-bool
FileStorManager::onRemoveLocation(const std::shared_ptr<api::RemoveLocationCommand>& cmd)
{
StorBucketDatabase::WrappedEntry entry(mapOperationToDisk(*cmd, cmd->getBucket()));
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormanager.h b/storage/src/vespa/storage/persistence/filestorage/filestormanager.h
index 99f61c62cd1..cf004c58820 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestormanager.h
+++ b/storage/src/vespa/storage/persistence/filestorage/filestormanager.h
@@ -151,7 +151,6 @@ private:
bool onUpdate(const std::shared_ptr<api::UpdateCommand>&) override;
bool onGet(const std::shared_ptr<api::GetCommand>&) override;
bool onRemove(const std::shared_ptr<api::RemoveCommand>&) override;
- bool onRevert(const std::shared_ptr<api::RevertCommand>&) override;
bool onStatBucket(const std::shared_ptr<api::StatBucketCommand>&) override;
// Bucket operations
diff --git a/storage/src/vespa/storage/persistence/persistencehandler.cpp b/storage/src/vespa/storage/persistence/persistencehandler.cpp
index 00ab61f2304..f91ff22217d 100644
--- a/storage/src/vespa/storage/persistence/persistencehandler.cpp
+++ b/storage/src/vespa/storage/persistence/persistencehandler.cpp
@@ -65,8 +65,6 @@ PersistenceHandler::handleCommandSplitByType(api::StorageCommand& msg, MessageTr
return _asyncHandler.handleRemove(static_cast<api::RemoveCommand&>(msg), std::move(tracker));
case api::MessageType::UPDATE_ID:
return _asyncHandler.handleUpdate(static_cast<api::UpdateCommand&>(msg), std::move(tracker));
- case api::MessageType::REVERT_ID:
- return _simpleHandler.handleRevert(static_cast<api::RevertCommand&>(msg), std::move(tracker));
case api::MessageType::CREATEBUCKET_ID:
return _asyncHandler.handleCreateBucket(static_cast<api::CreateBucketCommand&>(msg), std::move(tracker));
case api::MessageType::DELETEBUCKET_ID:
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index 203af1e04f3..ad3e30060ad 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -15,8 +15,7 @@ namespace {
{
return (id == api::MessageType::PUT_ID ||
id == api::MessageType::REMOVE_ID ||
- id == api::MessageType::UPDATE_ID ||
- id == api::MessageType::REVERT_ID);
+ id == api::MessageType::UPDATE_ID);
}
bool hasBucketInfo(api::MessageType::Id id)
diff --git a/storage/src/vespa/storage/persistence/simplemessagehandler.cpp b/storage/src/vespa/storage/persistence/simplemessagehandler.cpp
index 1ac0939c21e..f9119076ab7 100644
--- a/storage/src/vespa/storage/persistence/simplemessagehandler.cpp
+++ b/storage/src/vespa/storage/persistence/simplemessagehandler.cpp
@@ -102,18 +102,6 @@ SimpleMessageHandler::handleGet(api::GetCommand& cmd, MessageTracker::UP tracker
}
MessageTracker::UP
-SimpleMessageHandler::handleRevert(api::RevertCommand& cmd, MessageTracker::UP tracker) const
-{
- tracker->setMetric(_env._metrics.revert);
- spi::Bucket b = spi::Bucket(cmd.getBucket());
- const std::vector<api::Timestamp> & tokens = cmd.getRevertTokens();
- for (const api::Timestamp & token : tokens) {
- spi::Result result = _spi.removeEntry(b, spi::Timestamp(token));
- }
- return tracker;
-}
-
-MessageTracker::UP
SimpleMessageHandler::handleGetIter(GetIterCommand& cmd, MessageTracker::UP tracker) const
{
tracker->setMetric(_env._metrics.visit);
diff --git a/storage/src/vespa/storage/persistence/simplemessagehandler.h b/storage/src/vespa/storage/persistence/simplemessagehandler.h
index 49432c1ccb7..deeb7188f65 100644
--- a/storage/src/vespa/storage/persistence/simplemessagehandler.h
+++ b/storage/src/vespa/storage/persistence/simplemessagehandler.h
@@ -24,7 +24,6 @@ public:
spi::PersistenceProvider&,
const document::BucketIdFactory&);
MessageTrackerUP handleGet(api::GetCommand& cmd, MessageTrackerUP tracker) const;
- MessageTrackerUP handleRevert(api::RevertCommand& cmd, MessageTrackerUP tracker) const;
MessageTrackerUP handleCreateIterator(CreateIteratorCommand& cmd, MessageTrackerUP tracker) const;
MessageTrackerUP handleGetIter(GetIterCommand& cmd, MessageTrackerUP tracker) const;
private:
diff --git a/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp b/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
index 3b97ff6c018..63dd6982fea 100644
--- a/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
+++ b/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
@@ -367,7 +367,6 @@ ChangedBucketOwnershipHandler::isMutatingExternalOperation(
case api::MessageType::PUT_ID:
case api::MessageType::REMOVE_ID:
case api::MessageType::UPDATE_ID:
- case api::MessageType::REVERT_ID:
return true;
default:
return false;
diff --git a/storage/src/vespa/storageapi/mbusprot/protocolserialization.cpp b/storage/src/vespa/storageapi/mbusprot/protocolserialization.cpp
index 59ea60e6e0d..1f24d7a03ef 100644
--- a/storage/src/vespa/storageapi/mbusprot/protocolserialization.cpp
+++ b/storage/src/vespa/storageapi/mbusprot/protocolserialization.cpp
@@ -49,12 +49,6 @@ ProtocolSerialization::encode(const api::StorageMessage& msg) const
case api::MessageType::REMOVE_REPLY_ID:
onEncode(buf, static_cast<const api::RemoveReply&>(msg));
break;
- case api::MessageType::REVERT_ID:
- onEncode(buf, static_cast<const api::RevertCommand&>(msg));
- break;
- case api::MessageType::REVERT_REPLY_ID:
- onEncode(buf, static_cast<const api::RevertReply&>(msg));
- break;
case api::MessageType::DELETEBUCKET_ID:
onEncode(buf, static_cast<const api::DeleteBucketCommand&>(msg));
break;
@@ -140,9 +134,8 @@ ProtocolSerialization::encode(const api::StorageMessage& msg) const
onEncode(buf, static_cast<const api::SetBucketStateReply&>(msg));
break;
default:
- LOG(error, "Trying to encode unhandled type %s",
- msg.getType().toString().c_str());
- break;
+ LOG(error, "Trying to encode unhandled type %s", msg.getType().toString().c_str());
+ abort();
}
mbus::Blob retVal(buf.position());
@@ -174,8 +167,6 @@ ProtocolSerialization::decodeCommand(mbus::BlobRef data) const
cmd = onDecodeGetCommand(buf); break;
case api::MessageType::REMOVE_ID:
cmd = onDecodeRemoveCommand(buf); break;
- case api::MessageType::REVERT_ID:
- cmd = onDecodeRevertCommand(buf); break;
case api::MessageType::CREATEBUCKET_ID:
cmd = onDecodeCreateBucketCommand(buf); break;
case api::MessageType::DELETEBUCKET_ID:
@@ -238,8 +229,6 @@ ProtocolSerialization::decodeReply(mbus::BlobRef data, const api::StorageCommand
reply = onDecodeGetReply(cmd, buf); break;
case api::MessageType::REMOVE_REPLY_ID:
reply = onDecodeRemoveReply(cmd, buf); break;
- case api::MessageType::REVERT_REPLY_ID:
- reply = onDecodeRevertReply(cmd, buf); break;
case api::MessageType::CREATEBUCKET_REPLY_ID:
reply = onDecodeCreateBucketReply(cmd, buf); break;
case api::MessageType::DELETEBUCKET_REPLY_ID:
diff --git a/storage/src/vespa/storageapi/mbusprot/protocolserialization.h b/storage/src/vespa/storageapi/mbusprot/protocolserialization.h
index 0daf04c75c6..6ed46e1f770 100644
--- a/storage/src/vespa/storageapi/mbusprot/protocolserialization.h
+++ b/storage/src/vespa/storageapi/mbusprot/protocolserialization.h
@@ -25,8 +25,6 @@ class GetCommand;
class GetReply;
class RemoveCommand;
class RemoveReply;
-class RevertCommand;
-class RevertReply;
class DeleteBucketCommand;
class DeleteBucketReply;
class CreateBucketCommand;
@@ -85,8 +83,6 @@ protected:
virtual void onEncode(GBBuf&, const api::GetReply&) const = 0;
virtual void onEncode(GBBuf&, const api::RemoveCommand&) const = 0;
virtual void onEncode(GBBuf&, const api::RemoveReply&) const = 0;
- virtual void onEncode(GBBuf&, const api::RevertCommand&) const = 0;
- virtual void onEncode(GBBuf&, const api::RevertReply&) const = 0;
virtual void onEncode(GBBuf&, const api::DeleteBucketCommand&) const = 0;
virtual void onEncode(GBBuf&, const api::DeleteBucketReply&) const = 0;
virtual void onEncode(GBBuf&, const api::CreateBucketCommand&) const = 0;
@@ -124,8 +120,6 @@ protected:
virtual SRep::UP onDecodeGetReply(const SCmd&, BBuf&) const = 0;
virtual SCmd::UP onDecodeRemoveCommand(BBuf&) const = 0;
virtual SRep::UP onDecodeRemoveReply(const SCmd&, BBuf&) const = 0;
- virtual SCmd::UP onDecodeRevertCommand(BBuf&) const = 0;
- virtual SRep::UP onDecodeRevertReply(const SCmd&, BBuf&) const = 0;
virtual SCmd::UP onDecodeDeleteBucketCommand(BBuf&) const = 0;
virtual SRep::UP onDecodeDeleteBucketReply(const SCmd&, BBuf&) const = 0;
virtual SCmd::UP onDecodeCreateBucketCommand(BBuf&) const = 0;
diff --git a/storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp b/storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
index 3f1ab1e5fe1..9ccb4c2ffc6 100644
--- a/storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
+++ b/storage/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
@@ -624,42 +624,6 @@ api::StorageReply::UP ProtocolSerialization7::onDecodeGetReply(const SCmd& cmd,
}
// -----------------------------------------------------------------
-// Revert
-// -----------------------------------------------------------------
-
-void ProtocolSerialization7::onEncode(GBBuf& buf, const api::RevertCommand& msg) const {
- encode_bucket_request<protobuf::RevertRequest>(buf, msg, [&](auto& req) {
- auto* tokens = req.mutable_revert_tokens();
- assert(msg.getRevertTokens().size() <= INT_MAX);
- tokens->Reserve(static_cast<int>(msg.getRevertTokens().size()));
- for (auto token : msg.getRevertTokens()) {
- tokens->Add(token);
- }
- });
-}
-
-void ProtocolSerialization7::onEncode(GBBuf& buf, const api::RevertReply& msg) const {
- encode_bucket_info_response<protobuf::RevertResponse>(buf, msg, no_op_encode);
-}
-
-api::StorageCommand::UP ProtocolSerialization7::onDecodeRevertCommand(BBuf& buf) const {
- return decode_bucket_request<protobuf::RevertRequest>(buf, [&](auto& req, auto& bucket) {
- std::vector<api::Timestamp> tokens;
- tokens.reserve(req.revert_tokens_size());
- for (auto token : req.revert_tokens()) {
- tokens.emplace_back(api::Timestamp(token));
- }
- return std::make_unique<api::RevertCommand>(bucket, std::move(tokens));
- });
-}
-
-api::StorageReply::UP ProtocolSerialization7::onDecodeRevertReply(const SCmd& cmd, BBuf& buf) const {
- return decode_bucket_info_response<protobuf::RevertResponse>(buf, [&]([[maybe_unused]] auto& res) {
- return std::make_unique<api::RevertReply>(static_cast<const api::RevertCommand&>(cmd));
- });
-}
-
-// -----------------------------------------------------------------
// RemoveLocation
// -----------------------------------------------------------------
diff --git a/storage/src/vespa/storageapi/mbusprot/protocolserialization7.h b/storage/src/vespa/storageapi/mbusprot/protocolserialization7.h
index a61397c85ac..a11d589af60 100644
--- a/storage/src/vespa/storageapi/mbusprot/protocolserialization7.h
+++ b/storage/src/vespa/storageapi/mbusprot/protocolserialization7.h
@@ -41,12 +41,6 @@ public:
SCmd::UP onDecodeGetCommand(BBuf&) const override;
SRep::UP onDecodeGetReply(const SCmd&, BBuf&) const override;
- // Revert - TODO this is deprecated, no?
- void onEncode(GBBuf&, const api::RevertCommand&) const override;
- void onEncode(GBBuf&, const api::RevertReply&) const override;
- SCmd::UP onDecodeRevertCommand(BBuf&) const override;
- SRep::UP onDecodeRevertReply(const SCmd&, BBuf&) const override;
-
// DeleteBucket
void onEncode(GBBuf&, const api::DeleteBucketCommand&) const override;
void onEncode(GBBuf&, const api::DeleteBucketReply&) const override;
diff --git a/storage/src/vespa/storageapi/mbusprot/storagemessage.h b/storage/src/vespa/storageapi/mbusprot/storagemessage.h
index 4a7a18ce355..f338d1900fb 100644
--- a/storage/src/vespa/storageapi/mbusprot/storagemessage.h
+++ b/storage/src/vespa/storageapi/mbusprot/storagemessage.h
@@ -9,7 +9,7 @@ class StorageMessage {
public:
using UP = std::unique_ptr<StorageMessage>;
- virtual ~StorageMessage() {}
+ virtual ~StorageMessage() = default;
virtual api::StorageMessage::SP getInternalMessage() = 0;
virtual api::StorageMessage::CSP getInternalMessage() const = 0;
diff --git a/storage/src/vespa/storageapi/message/persistence.cpp b/storage/src/vespa/storageapi/message/persistence.cpp
index a8fa9a0bba1..2fafb998991 100644
--- a/storage/src/vespa/storageapi/message/persistence.cpp
+++ b/storage/src/vespa/storageapi/message/persistence.cpp
@@ -17,8 +17,6 @@ IMPLEMENT_COMMAND(GetCommand, GetReply)
IMPLEMENT_REPLY(GetReply)
IMPLEMENT_COMMAND(RemoveCommand, RemoveReply)
IMPLEMENT_REPLY(RemoveReply)
-IMPLEMENT_COMMAND(RevertCommand, RevertReply)
-IMPLEMENT_REPLY(RevertReply)
TestAndSetCommand::TestAndSetCommand(const MessageType & messageType, const document::Bucket &bucket)
: BucketInfoCommand(messageType, bucket)
@@ -309,47 +307,4 @@ RemoveReply::print(std::ostream& out, bool verbose, const std::string& indent) c
}
}
-RevertCommand::RevertCommand(const document::Bucket &bucket, const std::vector<Timestamp>& revertTokens)
- : BucketInfoCommand(MessageType::REVERT, bucket),
- _tokens(revertTokens)
-{
-}
-
-RevertCommand::~RevertCommand() = default;
-
-void
-RevertCommand::print(std::ostream& out, bool verbose, const std::string& indent) const
-{
- out << "Revert(" << getBucketId();
- if (verbose) {
- out << ",";
- for (Timestamp token : _tokens) {
- out << "\n" << indent << " " << token;
- }
- }
- out << ")";
- if (verbose) {
- out << " : ";
- BucketInfoCommand::print(out, verbose, indent);
- }
-}
-
-RevertReply::RevertReply(const RevertCommand& cmd)
- : BucketInfoReply(cmd),
- _tokens(cmd.getRevertTokens())
-{
-}
-
-RevertReply::~RevertReply() = default;
-
-void
-RevertReply::print(std::ostream& out, bool verbose, const std::string& indent) const
-{
- out << "RevertReply(" << getBucketId() << ")";
- if (verbose) {
- out << " : ";
- BucketInfoReply::print(out, verbose, indent);
- }
-}
-
}
diff --git a/storage/src/vespa/storageapi/message/persistence.h b/storage/src/vespa/storageapi/message/persistence.h
index 0607f3792f3..40749e2a02f 100644
--- a/storage/src/vespa/storageapi/message/persistence.h
+++ b/storage/src/vespa/storageapi/message/persistence.h
@@ -308,37 +308,4 @@ public:
DECLARE_STORAGEREPLY(RemoveReply, onRemoveReply)
};
-/**
- * @class RevertCommand
- * @ingroup message
- *
- * @brief Command for reverting a write or remove operation.
- */
-class RevertCommand : public BucketInfoCommand {
- std::vector<Timestamp> _tokens;
-public:
- RevertCommand(const document::Bucket &bucket,
- const std::vector<Timestamp>& revertTokens);
- ~RevertCommand() override;
- const std::vector<Timestamp>& getRevertTokens() const { return _tokens; }
- void print(std::ostream& out, bool verbose, const std::string& indent) const override;
- DECLARE_STORAGECOMMAND(RevertCommand, onRevert)
-};
-
-/**
- * @class RevertReply
- * @ingroup message
- *
- * @brief Reply for a revert command.
- */
-class RevertReply : public BucketInfoReply {
- std::vector<Timestamp> _tokens;
-public:
- explicit RevertReply(const RevertCommand& cmd);
- ~RevertReply() override;
- const std::vector<Timestamp>& getRevertTokens() const { return _tokens; }
- void print(std::ostream& out, bool verbose, const std::string& indent) const override;
- DECLARE_STORAGEREPLY(RevertReply, onRevertReply)
-};
-
}
diff --git a/storage/src/vespa/storageapi/messageapi/messagehandler.h b/storage/src/vespa/storageapi/messageapi/messagehandler.h
index fa362d5380f..fba0c58ecf9 100644
--- a/storage/src/vespa/storageapi/messageapi/messagehandler.h
+++ b/storage/src/vespa/storageapi/messageapi/messagehandler.h
@@ -23,7 +23,6 @@ class GetCommand; // Retrieve document
class PutCommand; // Add document
class UpdateCommand; // Update document
class RemoveCommand; // Remove document
-class RevertCommand; // Revert put/remove operation
class CreateVisitorCommand; // Create a new visitor
class DestroyVisitorCommand; // Destroy a running visitor
@@ -59,7 +58,6 @@ class GetReply;
class PutReply;
class UpdateReply;
class RemoveReply;
-class RevertReply;
class CreateVisitorReply;
class DestroyVisitorReply;
@@ -122,8 +120,6 @@ public:
virtual bool onUpdateReply(const std::shared_ptr<api::UpdateReply>&) { return false; }
virtual bool onRemove(const std::shared_ptr<api::RemoveCommand>&) { return false; }
virtual bool onRemoveReply(const std::shared_ptr<api::RemoveReply>&) { return false; }
- virtual bool onRevert(const std::shared_ptr<api::RevertCommand>&) { return false; }
- virtual bool onRevertReply(const std::shared_ptr<api::RevertReply>&) { return false; }
virtual bool onCreateVisitor(const std::shared_ptr<api::CreateVisitorCommand>&) { return false; }
virtual bool onCreateVisitorReply(const std::shared_ptr<api::CreateVisitorReply>&) { return false; }
diff --git a/storage/src/vespa/storageapi/messageapi/storagemessage.cpp b/storage/src/vespa/storageapi/messageapi/storagemessage.cpp
index f78e8e7a8f5..0007cb3b817 100644
--- a/storage/src/vespa/storageapi/messageapi/storagemessage.cpp
+++ b/storage/src/vespa/storageapi/messageapi/storagemessage.cpp
@@ -44,8 +44,6 @@ const MessageType MessageType::UPDATE("Update", UPDATE_ID);
const MessageType MessageType::UPDATE_REPLY("Update Reply", UPDATE_REPLY_ID, &MessageType::UPDATE);
const MessageType MessageType::REMOVE("Remove", REMOVE_ID);
const MessageType MessageType::REMOVE_REPLY("Remove Reply", REMOVE_REPLY_ID, &MessageType::REMOVE);
-const MessageType MessageType::REVERT("Revert", REVERT_ID);
-const MessageType MessageType::REVERT_REPLY("Revert Reply", REVERT_REPLY_ID, &MessageType::REVERT);
const MessageType MessageType::VISITOR_CREATE("Visitor Create", VISITOR_CREATE_ID);
const MessageType MessageType::VISITOR_CREATE_REPLY("Visitor Create Reply", VISITOR_CREATE_REPLY_ID, &MessageType::VISITOR_CREATE);
const MessageType MessageType::VISITOR_DESTROY("Visitor Destroy", VISITOR_DESTROY_ID);
diff --git a/storage/src/vespa/storageapi/messageapi/storagemessage.h b/storage/src/vespa/storageapi/messageapi/storagemessage.h
index 831e44bdba9..af258125984 100644
--- a/storage/src/vespa/storageapi/messageapi/storagemessage.h
+++ b/storage/src/vespa/storageapi/messageapi/storagemessage.h
@@ -80,8 +80,8 @@ public:
PUT_REPLY_ID = 11,
REMOVE_ID = 12,
REMOVE_REPLY_ID = 13,
- REVERT_ID = 14,
- REVERT_REPLY_ID = 15,
+ // REVERT_ID = 14, unused but reserved
+ // REVERT_REPLY_ID = 15, unused but reserved
STAT_ID = 16,
STAT_REPLY_ID = 17,
VISITOR_CREATE_ID = 18,
@@ -164,8 +164,6 @@ public:
static const MessageType PUT_REPLY;
static const MessageType REMOVE;
static const MessageType REMOVE_REPLY;
- static const MessageType REVERT;
- static const MessageType REVERT_REPLY;
static const MessageType VISITOR_CREATE;
static const MessageType VISITOR_CREATE_REPLY;
static const MessageType VISITOR_DESTROY;
diff --git a/valgrind-suppressions.txt b/valgrind-suppressions.txt
index 71007554439..6e4d4f39545 100644
--- a/valgrind-suppressions.txt
+++ b/valgrind-suppressions.txt
@@ -12,6 +12,15 @@
fun:UnknownInlinedFun
fun:allocate_dtv
fun:_dl_allocate_tls
+ fun:pthread_create@@GLIBC_2.17
+}
+{
+ NPTL keeps a cache of thread stacks, and metadata for thread local storage is not freed for threads in that cache
+ Memcheck:Leak
+ fun:calloc
+ fun:UnknownInlinedFun
+ fun:allocate_dtv
+ fun:_dl_allocate_tls
fun:allocate_stack
fun:pthread_create@@GLIBC_2.17
}
diff --git a/vespa-athenz/pom.xml b/vespa-athenz/pom.xml
index 55fd25f8b99..a9379040133 100644
--- a/vespa-athenz/pom.xml
+++ b/vespa-athenz/pom.xml
@@ -275,52 +275,6 @@
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
- <dependency>
- <groupId>com.google.http-client</groupId>
- <artifactId>google-http-client-apache-v2</artifactId>
- <exclusions>
- <exclusion>
- <groupId>org.apache.httpcomponents</groupId>
- <artifactId>httpcore</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.httpcomponents</groupId>
- <artifactId>httpclient</artifactId>
- </exclusion>
- <exclusion>
- <groupId>com.google.http-client</groupId>
- <artifactId>google-http-client</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>com.google.http-client</groupId>
- <artifactId>google-http-client</artifactId>
- <exclusions>
- <exclusion>
- <groupId>org.apache.httpcomponents</groupId>
- <artifactId>httpcore</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.apache.httpcomponents</groupId>
- <artifactId>httpclient</artifactId>
- </exclusion>
- <exclusion>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>com.google.auth</groupId>
- <artifactId>google-auth-library-oauth2-http</artifactId>
- <exclusions>
- <exclusion>
- <groupId>com.google.guava</groupId>
- <artifactId>guava</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
</dependencies>
<build>
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java
deleted file mode 100644
index bbdc3c2b372..00000000000
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java
+++ /dev/null
@@ -1,180 +0,0 @@
-package com.yahoo.vespa.athenz.gcp;
-
-import com.google.api.client.http.apache.v2.ApacheHttpTransport;
-import com.google.auth.http.HttpTransportFactory;
-import com.google.auth.oauth2.ExternalAccountCredentials;
-import com.yahoo.security.token.TokenDomain;
-import com.yahoo.security.token.TokenGenerator;
-import com.yahoo.slime.Cursor;
-import com.yahoo.slime.Slime;
-import com.yahoo.slime.SlimeUtils;
-import com.yahoo.vespa.athenz.api.AthenzDomain;
-import com.yahoo.vespa.athenz.identity.ServiceIdentityProvider;
-import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
-import org.apache.http.impl.client.HttpClientBuilder;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URLEncoder;
-import java.nio.charset.StandardCharsets;
-import java.util.Objects;
-
-public class GcpCredentials {
- private static final TokenDomain domain = TokenDomain.of("athenz-gcp-oauth2-nonce");
-
- final private InputStream tokenApiStream;
- private final HttpTransportFactory httpTransportFactory;
-
- private GcpCredentials(Builder builder) {
- String clientId = builder.athenzDomain.getName() + ".gcp";
- String audience = String.format("//iam.googleapis.com/projects/%s/locations/global/workloadIdentityPools/%s/providers/%s",
- builder.projectNumber, builder.workloadPoolName, builder.workloadProviderName);
- String serviceUrl = String.format("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/%s@%s.iam.gserviceaccount.com:generateAccessToken",
- builder.serviceAccountName, builder.projectName);
- String scope = URLEncoder.encode(generateIdTokenScope(builder.athenzDomain.getName(), builder.role), StandardCharsets.UTF_8);
- String redirectUri = URLEncoder.encode(generateRedirectUri(clientId, builder.redirectURISuffix), StandardCharsets.UTF_8);
- String tokenUrl = String.format("%s/oauth2/auth?response_type=id_token&client_id=%s&redirect_uri=%s&scope=%s&nonce=%s&keyType=EC&fullArn=true&output=json",
- builder.ztsUrl, clientId, redirectUri, scope, TokenGenerator.generateToken(domain, "", 32).secretTokenString());
-
- tokenApiStream = createTokenAPIStream(audience, serviceUrl, tokenUrl, builder.tokenLifetimeSeconds);
- SSLConnectionSocketFactory sslConnectionSocketFactory = new SSLConnectionSocketFactory(builder.identityProvider.getIdentitySslContext());
- HttpClientBuilder httpClientBuilder = ApacheHttpTransport.newDefaultHttpClientBuilder()
- .setSSLSocketFactory(sslConnectionSocketFactory);
- httpTransportFactory = () -> new ApacheHttpTransport(httpClientBuilder.build());
- }
-
- public ExternalAccountCredentials getCredential() throws IOException {
- return ExternalAccountCredentials.fromStream(tokenApiStream, httpTransportFactory);
- }
-
- private InputStream createTokenAPIStream(final String audience, final String serviceUrl, final String tokenUrl,
- int tokenLifetimeSeconds) {
-
- Slime root = new Slime();
- Cursor c = root.setObject();
-
- c.setString("type", "external_account");
- c.setString("audience", audience);
- c.setString("subject_token_type", "urn:ietf:params:oauth:token-type:jwt");
- c.setString("token_url", "https://sts.googleapis.com/v1/token");
-
- c.setString("service_account_impersonation_url", serviceUrl);
- Cursor sai = c.setObject("service_account_impersonation");
- sai.setLong("token_lifetime_seconds", tokenLifetimeSeconds);
-
- Cursor credentialSource = c.setObject("credential_source");
- credentialSource.setString("url", tokenUrl);
-
- Cursor credentialSourceFormat = credentialSource.setObject("format");
- credentialSourceFormat.setString("type", "json");
- credentialSourceFormat.setString("subject_token_field_name", "id_token");
-
- try {
- return new ByteArrayInputStream(SlimeUtils.toJsonBytes(root));
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
-
- private static String generateIdTokenScope(final String domainName, String roleName) {
- StringBuilder scope = new StringBuilder(256);
- scope.append("openid");
- scope.append(' ').append(domainName).append(":role.").append(roleName);
- return scope.toString();
- }
-
- private static String generateRedirectUri(final String clientId, String uriSuffix) {
- int idx = clientId.lastIndexOf('.');
- if (idx == -1) {
- return "";
- }
- final String dashDomain = clientId.substring(0, idx).replace('.', '-');
- final String service = clientId.substring(idx + 1);
- return "https://" + service + "." + dashDomain + "." + uriSuffix;
- }
-
-
- public static class Builder {
- private String ztsUrl;
- private ServiceIdentityProvider identityProvider;
- private String redirectURISuffix;
- private AthenzDomain athenzDomain;
- private String role;
- private String projectName;
- private String projectNumber;
- private String serviceAccountName;
-
- private int tokenLifetimeSeconds = 3600; // default to 1 hour lifetime
- private String workloadPoolName = "athenz";
- private String workloadProviderName = "athenz";
-
- public GcpCredentials build() {
- Objects.requireNonNull(ztsUrl);
- Objects.requireNonNull(identityProvider);
- Objects.requireNonNull(redirectURISuffix);
- Objects.requireNonNull(athenzDomain);
- Objects.requireNonNull(role);
- Objects.requireNonNull(projectName);
- Objects.requireNonNull(projectNumber);
- Objects.requireNonNull(serviceAccountName);
-
- return new GcpCredentials(this);
- }
-
- public Builder setZtsUrl(String ztsUrl) {
- this.ztsUrl = ztsUrl;
- return this;
- }
-
- public Builder identityProvider(ServiceIdentityProvider provider) {
- this.identityProvider = provider;
- return this;
- }
-
- public Builder redirectURISuffix(String redirectURISuffix) {
- this.redirectURISuffix = redirectURISuffix;
- return this;
- }
-
- public Builder athenzDomain(AthenzDomain athenzDomain) {
- this.athenzDomain = athenzDomain;
- return this;
- }
-
- public Builder role(String gcpRole) {
- this.role = gcpRole;
- return this;
- }
-
- public Builder projectName(String projectName) {
- this.projectName = projectName;
- return this;
- }
-
- public Builder projectNumber(String projectNumber) {
- this.projectNumber = projectNumber;
- return this;
- }
-
- public Builder serviceAccountName(String serviceAccountName) {
- this.serviceAccountName = serviceAccountName;
- return this;
- }
-
- public Builder tokenLifetimeSeconds(int tokenLifetimeSeconds) {
- this.tokenLifetimeSeconds = tokenLifetimeSeconds;
- return this;
- }
-
- public Builder workloadPoolName(String workloadPoolName) {
- this.workloadPoolName = workloadPoolName;
- return this;
- }
-
- public Builder workloadProviderName(String workloadProviderName) {
- this.workloadProviderName = workloadProviderName;
- return this;
- }
- }
-}
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index 1c6a2957317..ca7d1fd8aab 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -22,20 +22,13 @@ com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.15.2
com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.15.2
com.github.luben:zstd-jni:1.5.5-5
com.github.spotbugs:spotbugs-annotations:3.1.9
-com.google.auth:google-auth-library-credentials:1.19.0
-com.google.auth:google-auth-library-oauth2-http:1.19.0
-com.google.auto.value:auto-value-annotations:1.10.1
com.google.code.findbugs:jsr305:3.0.2
-com.google.code.gson:gson:2.10
com.google.errorprone:error_prone_annotations:2.21.1
com.google.guava:failureaccess:1.0.1
com.google.guava:guava:32.1.2-jre
-com.google.http-client:google-http-client:1.43.3
-com.google.http-client:google-http-client-apache-v2:1.43.3
-com.google.http-client:google-http-client-gson:1.42.3
com.google.inject:guice:6.0.0
com.google.j2objc:j2objc-annotations:2.8
-com.google.protobuf:protobuf-java:3.24.2
+com.google.protobuf:protobuf-java:3.24.3
com.ibm.icu:icu4j:73.2
com.microsoft.onnxruntime:onnxruntime:1.15.1
com.sun.activation:javax.activation:1.2.0
@@ -56,7 +49,6 @@ commons-io:commons-io:2.13.0
commons-logging:commons-logging:1.2
io.airlift:airline:0.9
io.dropwizard.metrics:metrics-core:4.2.19
-io.grpc:grpc-context:1.27.2
io.jsonwebtoken:jjwt-api:0.11.5
io.jsonwebtoken:jjwt-impl:0.11.5
io.jsonwebtoken:jjwt-jackson:0.11.5
@@ -71,8 +63,6 @@ io.netty:netty-transport:4.1.97.Final
io.netty:netty-transport-classes-epoll:4.1.97.Final
io.netty:netty-transport-native-epoll:4.1.97.Final
io.netty:netty-transport-native-unix-common:4.1.97.Final
-io.opencensus:opencensus-api:0.31.1
-io.opencensus:opencensus-contrib-http-util:0.31.1
io.prometheus:simpleclient:0.16.0
io.prometheus:simpleclient_common:0.16.0
io.prometheus:simpleclient_tracer_common:0.16.0
@@ -92,7 +82,7 @@ net.openhft:zero-allocation-hashing:0.16
org.antlr:antlr-runtime:3.5.3
org.antlr:antlr4-runtime:4.13.1
org.apache.aries.spifly:org.apache.aries.spifly.dynamic.bundle:1.3.6
-org.apache.commons:commons-compress:1.23.0
+org.apache.commons:commons-compress:1.24.0
org.apache.commons:commons-csv:1.10.0
org.apache.commons:commons-exec:1.3
org.apache.commons:commons-lang3:3.13.0
@@ -216,4 +206,4 @@ org.junit.vintage:junit-vintage-engine:5.8.1
org.mockito:mockito-core:5.5.0
org.mockito:mockito-junit-jupiter:5.5.0
org.objenesis:objenesis:3.3
-org.wiremock:wiremock-standalone:3.0.2
+org.wiremock:wiremock-standalone:3.0.4
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
index e6d5ea48e8f..74266fe2a6e 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
@@ -46,6 +46,7 @@ import com.yahoo.documentapi.VisitorParameters;
import com.yahoo.documentapi.VisitorSession;
import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol;
import com.yahoo.documentapi.messagebus.protocol.PutDocumentMessage;
+import com.yahoo.documentapi.messagebus.protocol.RemoveDocumentMessage;
import com.yahoo.documentapi.metrics.DocumentApiMetrics;
import com.yahoo.documentapi.metrics.DocumentOperationStatus;
import com.yahoo.jdisc.Metric;
@@ -178,6 +179,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private static final String DRY_RUN = "dryRun";
private static final String FROM_TIMESTAMP = "fromTimestamp";
private static final String TO_TIMESTAMP = "toTimestamp";
+ private static final String INCLUDE_REMOVES = "includeRemoves";
private final Clock clock;
private final Duration visitTimeout;
@@ -760,8 +762,31 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
json.writeArrayFieldStart("documents");
}
+ private interface DocumentWriter {
+ void write(ByteArrayOutputStream out) throws IOException;
+ }
+
/** Writes documents to an internal queue, which is flushed regularly. */
void writeDocumentValue(Document document, CompletionHandler completionHandler) throws IOException {
+ writeDocument(myOut -> {
+ try (JsonGenerator myJson = jsonFactory.createGenerator(myOut)) {
+ new JsonWriter(myJson, tensorShortForm(), tensorDirectValues()).write(document);
+ }
+ }, completionHandler);
+ }
+
+ void writeDocumentRemoval(DocumentId id, CompletionHandler completionHandler) throws IOException {
+ writeDocument(myOut -> {
+ try (JsonGenerator myJson = jsonFactory.createGenerator(myOut)) {
+ myJson.writeStartObject();
+ myJson.writeStringField("remove", id.toString());
+ myJson.writeEndObject();
+ }
+ }, completionHandler);
+ }
+
+ /** Writes documents to an internal queue, which is flushed regularly. */
+ void writeDocument(DocumentWriter documentWriter, CompletionHandler completionHandler) throws IOException {
if (completionHandler != null) {
acks.add(completionHandler);
ackDocuments();
@@ -771,9 +796,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
// i.e., the first 128 documents in the queue are not necessarily the ones ack'ed early.
ByteArrayOutputStream myOut = new ByteArrayOutputStream(1);
myOut.write(','); // Prepend rather than append, to avoid double memory copying.
- try (JsonGenerator myJson = jsonFactory.createGenerator(myOut)) {
- new JsonWriter(myJson, tensorShortForm(), tensorDirectValues()).write(document);
- }
+ documentWriter.write(myOut);
docs.add(myOut);
// Flush the first FLUSH_SIZE documents in the queue to the network layer if chunk is filled.
@@ -1173,6 +1196,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
parameters.setFieldSet(getProperty(request, FIELD_SET).orElse(path.documentType().map(type -> type + ":[document]").orElse(DocumentOnly.NAME)));
parameters.setMaxTotalHits(wantedDocumentCount);
parameters.visitInconsistentBuckets(true);
+ getProperty(request, INCLUDE_REMOVES, booleanParser).ifPresent(parameters::setVisitRemoves);
if (streamed) {
StaticThrottlePolicy throttlePolicy = new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1);
concurrency.ifPresent(throttlePolicy::setMaxPendingCount);
@@ -1247,8 +1271,8 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
/** Called at the start of response rendering. */
default void onStart(JsonResponse response, boolean fullyApplied) throws IOException { }
- /** Called for every document received from backend visitors—must call the ack for these to proceed. */
- default void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) { }
+ /** Called for every document or removal received from backend visitors—must call the ack for these to proceed. */
+ default void onDocument(JsonResponse response, Document document, DocumentId removeId, Runnable ack, Consumer<String> onError) { }
/** Called at the end of response rendering, before generic status data is written. Called from a dedicated thread pool. */
default void onEnd(JsonResponse response) throws IOException { }
@@ -1276,7 +1300,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
ResponseHandler handler,
String route, BiFunction<DocumentId, DocumentOperationParameters, Result> operation) {
visit(request, parameters, false, fullyApplied, handler, new VisitCallback() {
- @Override public void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) {
+ @Override public void onDocument(JsonResponse response, Document document, DocumentId removeId, Runnable ack, Consumer<String> onError) {
DocumentOperationParameters operationParameters = parameters().withRoute(route)
.withResponseHandler(operationResponse -> {
outstanding.decrementAndGet();
@@ -1320,18 +1344,22 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
response.writeDocumentsArrayStart();
}
- @Override public void onDocument(JsonResponse response, Document document, Runnable ack, Consumer<String> onError) {
+ @Override public void onDocument(JsonResponse response, Document document, DocumentId removeId, Runnable ack, Consumer<String> onError) {
try {
- if (streamed)
- response.writeDocumentValue(document, new CompletionHandler() {
- @Override public void completed() { ack.run();}
+ if (streamed) {
+ CompletionHandler completion = new CompletionHandler() {
+ @Override public void completed() { ack.run(); }
@Override public void failed(Throwable t) {
ack.run();
onError.accept(t.getMessage());
}
- });
+ };
+ if (document != null) response.writeDocumentValue(document, completion);
+ else response.writeDocumentRemoval(removeId, completion);
+ }
else {
- response.writeDocumentValue(document, null);
+ if (document != null) response.writeDocumentValue(document, null);
+ else response.writeDocumentRemoval(removeId, null);
ack.run();
}
}
@@ -1410,16 +1438,19 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
if (parameters.getRemoteDataHandler() == null) {
parameters.setLocalDataHandler(new VisitorDataHandler() {
@Override public void onMessage(Message m, AckToken token) {
- if (m instanceof PutDocumentMessage)
- callback.onDocument(response,
- ((PutDocumentMessage) m).getDocumentPut().getDocument(),
- () -> ack(token),
- errorMessage -> {
- error.set(errorMessage);
- controller.abort();
- });
- else
- throw new UnsupportedOperationException("Only PutDocumentMessage is supported, but got a " + m.getClass());
+ Document document = null;
+ DocumentId removeId = null;
+ if (m instanceof PutDocumentMessage put) document = put.getDocumentPut().getDocument();
+ else if (parameters.visitRemoves() && m instanceof RemoveDocumentMessage remove) removeId = remove.getDocumentId();
+ else throw new UnsupportedOperationException("Got unsupported message type: " + m.getClass().getName());
+ callback.onDocument(response,
+ document,
+ removeId,
+ () -> ack(token),
+ errorMessage -> {
+ error.set(errorMessage);
+ controller.abort();
+ });
}
});
}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
index e8f42fbecfa..a6aeab61fa2 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
@@ -43,6 +43,7 @@ import com.yahoo.documentapi.VisitorParameters;
import com.yahoo.documentapi.VisitorResponse;
import com.yahoo.documentapi.VisitorSession;
import com.yahoo.documentapi.messagebus.protocol.PutDocumentMessage;
+import com.yahoo.documentapi.messagebus.protocol.RemoveDocumentMessage;
import com.yahoo.jdisc.test.MockMetric;
import com.yahoo.messagebus.StaticThrottlePolicy;
import com.yahoo.messagebus.Trace;
@@ -190,7 +191,7 @@ public class DocumentV1ApiTest {
@Test
public void testResponses() {
RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler);
- List<AckToken> tokens = List.of(new AckToken(null), new AckToken(null), new AckToken(null));
+ List<AckToken> tokens = List.of(new AckToken(null), new AckToken(null), new AckToken(null), new AckToken(null));
// GET at non-existent path returns 404 with available paths
var response = driver.sendRequest("http://localhost/document/v1/not-found");
assertSameJson("""
@@ -227,18 +228,21 @@ public class DocumentV1ApiTest {
assertEquals(9, parameters.getTraceLevel());
assertEquals(1_000_000, parameters.getFromTimestamp());
assertEquals(2_000_000, parameters.getToTimestamp());
+ assertTrue(parameters.visitRemoves());
// Put some documents in the response
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0));
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1));
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc3)), tokens.get(2));
+ parameters.getLocalDataHandler().onMessage(new RemoveDocumentMessage(new DocumentId("id:space:music::t-square-truth")), tokens.get(3));
VisitorStatistics statistics = new VisitorStatistics();
statistics.setBucketsVisited(1);
statistics.setDocumentsVisited(3);
parameters.getControlHandler().onVisitorStatistics(statistics);
parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "timeout is OK");
});
- response = driver.sendRequest("http://localhost/document/v1?cluster=content&bucketSpace=default&wantedDocumentCount=1025&concurrency=123" +
- "&selection=all%20the%20things&fieldSet=[id]&timeout=6&tracelevel=9&fromTimestamp=1000000&toTimestamp=2000000");
+ response = driver.sendRequest("http://localhost/document/v1?cluster=content&bucketSpace=default&wantedDocumentCount=1025" +
+ "&concurrency=123&selection=all%20the%20things&fieldSet=[id]&timeout=6&tracelevel=9" +
+ "&fromTimestamp=1000000&toTimestamp=2000000&includeRemoves=TrUe");
assertSameJson("""
{
"pathId": "/document/v1",
@@ -246,20 +250,23 @@ public class DocumentV1ApiTest {
{
"id": "id:space:music::one",
"fields": {
- "artist": "Tom Waits",\s
- "embedding": { "type": "tensor(x[3])", "values": [1.0,2.0,3.0] }\s
+ "artist": "Tom Waits",
+ "embedding": { "type": "tensor(x[3])", "values": [1.0,2.0,3.0] }
}
},
{
"id": "id:space:music:n=1:two",
"fields": {
- "artist": "Asa-Chan & Jun-Ray",\s
- "embedding": { "type": "tensor(x[3])", "values": [4.0,5.0,6.0] }\s
+ "artist": "Asa-Chan & Jun-Ray",
+ "embedding": { "type": "tensor(x[3])", "values": [4.0,5.0,6.0] }
}
},
{
"id": "id:space:music:g=a:three",
"fields": {}
+ },
+ {
+ "remove": "id:space:music::t-square-truth"
}
],
"documentCount": 3,
@@ -290,6 +297,7 @@ public class DocumentV1ApiTest {
assertEquals(1, parameters.getSliceId());
assertEquals(0, parameters.getFromTimestamp()); // not set; 0 is default
assertEquals(0, parameters.getToTimestamp()); // not set; 0 is default
+ assertFalse(parameters.visitRemoves()); // false by default
// Put some documents in the response
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0));
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1));
diff --git a/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java b/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java
index 0d5320c387a..8e7a7956aed 100644
--- a/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java
+++ b/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyMetrics.java
@@ -2,6 +2,9 @@
package com.yahoo.vespa.curator.stats;
import java.time.Duration;
+import java.util.Collections;
+import java.util.Map;
+import java.util.TreeMap;
import static java.lang.Math.round;
@@ -22,18 +25,20 @@ public class LatencyMetrics {
private final Duration maxActiveLatency;
private final double startHz;
private final double endHz;
+ private final Map<String, Double> loadByThread;
private final double load;
private final int maxLoad;
private final int currentLoad;
public LatencyMetrics(Duration latency, Duration maxLatency, Duration maxActiveLatency,
- double startHz, double endHz,
+ double startHz, double endHz, Map<String, Double> loadByThread,
double load, int maxLoad, int currentLoad) {
this.latency = latency;
this.maxLatency = maxLatency;
this.maxActiveLatency = maxActiveLatency;
this.startHz = startHz;
this.endHz = endHz;
+ this.loadByThread = new TreeMap<>(loadByThread);
this.load = load;
this.maxLoad = maxLoad;
this.currentLoad = currentLoad;
@@ -54,6 +59,13 @@ public class LatencyMetrics {
/** Returns the average number of intervals that ended in the period per second. */
public double endHz() { return roundTo3DecimalPlaces(endHz); }
+ /** Returns the average load of the implied time period, for each thread with non-zero load, with 3 decimal places precision. */
+ public Map<String, Double> loadByThread() {
+ Map<String, Double> result = new TreeMap<>();
+ loadByThread.forEach((name, load) -> result.put(name, roundTo3DecimalPlaces(load)));
+ return Collections.unmodifiableMap(result);
+ }
+
/** The average load of the implied time period, with 3 decimal places precision. */
public double load() { return roundTo3DecimalPlaces(load); }
diff --git a/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyStats.java b/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyStats.java
index 367d5ab2b9f..130c8a6a987 100644
--- a/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyStats.java
+++ b/zkfacade/src/main/java/com/yahoo/vespa/curator/stats/LatencyStats.java
@@ -3,7 +3,9 @@ package com.yahoo.vespa.curator.stats;
import java.time.Duration;
import java.util.Comparator;
+import java.util.HashMap;
import java.util.HashSet;
+import java.util.Map;
import java.util.Optional;
import java.util.function.LongSupplier;
import java.util.logging.Level;
@@ -36,6 +38,7 @@ public class LatencyStats {
private long startOfPeriodNanos;
private long endOfPeriodNanos;
private double cumulativeLoadNanos;
+ private final Map<String, Long> cumulativeLoadNanosByThread = new HashMap<>();
private Duration cumulativeLatency;
private Duration maxLatency;
private int numIntervalsStarted;
@@ -92,6 +95,8 @@ public class LatencyStats {
private static class ActiveIntervalInfo {
private final long startNanos;
+ // Poor man's attempt at collapsing thread names into their pool names, as that is the relevant (task) level here.
+ private final String threadNameTemplate = Thread.currentThread().getName().replaceAll("\\d+", "*");
public ActiveIntervalInfo(long startOfIntervalNanos) { this.startNanos = startOfIntervalNanos; }
public long startOfIntervalNanos() { return startNanos; }
}
@@ -99,6 +104,7 @@ public class LatencyStats {
private void resetForNewPeriod() {
startOfPeriodNanos = endOfPeriodNanos;
cumulativeLoadNanos = 0.0;
+ cumulativeLoadNanosByThread.clear();
cumulativeLatency = Duration.ZERO;
maxLatency = Duration.ZERO;
numIntervalsStarted = 0;
@@ -109,6 +115,11 @@ public class LatencyStats {
private void pushEndOfPeriodToNow() {
long currentNanos = nanoTimeSupplier.getAsLong();
cumulativeLoadNanos += activeIntervals.size() * (currentNanos - endOfPeriodNanos);
+ for (ActiveIntervalInfo activeInterval : activeIntervals) {
+ cumulativeLoadNanosByThread.merge(activeInterval.threadNameTemplate,
+ currentNanos - endOfPeriodNanos,
+ Long::sum);
+ }
endOfPeriodNanos = currentNanos;
}
@@ -146,15 +157,22 @@ public class LatencyStats {
.orElse(maxLatency);
final double startHz, endHz, load;
+ final Map<String, Double> loadByThread = new HashMap<>();
long periodNanos = endOfPeriodNanos - startOfPeriodNanos;
if (periodNanos > 0) {
double periodSeconds = periodNanos / 1_000_000_000.0;
startHz = numIntervalsStarted / periodSeconds;
endHz = numIntervalsEnded / periodSeconds;
load = cumulativeLoadNanos / periodNanos;
+ cumulativeLoadNanosByThread.forEach((name, threadLoad) -> {
+ if (threadLoad > 0) loadByThread.put(name, threadLoad / (double) periodNanos);
+ });
} else {
startHz = endHz = 0.0;
load = activeIntervals.size();
+ for (ActiveIntervalInfo activeInterval : activeIntervals) {
+ loadByThread.put(activeInterval.threadNameTemplate, 1.0);
+ }
}
return new LatencyMetrics(latency,
@@ -162,6 +180,7 @@ public class LatencyStats {
maxActiveLatency,
startHz,
endHz,
+ loadByThread,
load,
maxLoad,
activeIntervals.size());