summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.copr/Makefile7
-rw-r--r--CMakeLists.txt1
-rw-r--r--client/go/cmd/api_key_test.go6
-rw-r--r--client/go/cmd/cert_test.go10
-rw-r--r--client/go/cmd/command_tester.go18
-rw-r--r--client/go/cmd/config_test.go37
-rw-r--r--client/go/cmd/curl.go95
-rw-r--r--client/go/cmd/curl_test.go37
-rw-r--r--client/go/cmd/deploy_test.go5
-rw-r--r--client/go/cmd/document.go30
-rw-r--r--client/go/cmd/document_test.go26
-rw-r--r--client/go/cmd/helpers.go13
-rw-r--r--client/go/cmd/man_test.go2
-rw-r--r--client/go/cmd/root.go17
-rw-r--r--client/go/cmd/version.go123
-rw-r--r--client/go/cmd/version_test.go42
-rw-r--r--client/go/curl/curl.go104
-rw-r--r--client/go/curl/curl_test.go45
-rw-r--r--client/go/version/version.go92
-rw-r--r--client/go/version/version_test.go66
-rw-r--r--client/go/vespa/document.go53
-rw-r--r--client/go/vespa/target.go33
-rw-r--r--client/go/vespa/target_test.go2
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java5
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java7
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java13
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/Distributor.java28
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java19
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java12
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java70
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java20
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java3
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java28
-rw-r--r--configgen/CMakeLists.txt2
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java4
-rw-r--r--container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java2
-rw-r--r--container-disc/pom.xml2
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java16
-rw-r--r--container-search/abi-spec.json49
-rw-r--r--container-search/src/main/java/com/yahoo/search/Query.java78
-rw-r--r--container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java40
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java15
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/ConversionContext.java40
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/FieldDescription.java4
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/FieldType.java5
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/PrimitiveFieldType.java3
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/QueryFieldType.java3
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileFieldType.java5
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/types/TensorFieldType.java20
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java30
-rw-r--r--container-search/src/main/java/com/yahoo/search/yql/ProgramCompileException.java25
-rw-r--r--container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java85
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java3
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java15
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java13
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java12
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserFlagsSerializer.java86
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json31
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiOnPremTest.java58
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java116
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserFlagsSerializerTest.java133
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json3
-rw-r--r--defaults/CMakeLists.txt2
-rwxr-xr-xdist/release-vespa-rpm.sh50
-rw-r--r--dist/vespa.spec2
-rw-r--r--eval/src/apps/tensor_conformance/generate.cpp19
-rw-r--r--eval/src/apps/tensor_conformance/generate.h5
-rw-r--r--eval/src/apps/tensor_conformance/tensor_conformance.cpp9
-rw-r--r--eval/src/tests/eval/inline_operation/inline_operation_test.cpp1
-rw-r--r--eval/src/tests/eval/node_tools/node_tools_test.cpp1
-rw-r--r--eval/src/tests/eval/node_types/node_types_test.cpp1
-rw-r--r--eval/src/vespa/eval/eval/call_nodes.cpp1
-rw-r--r--eval/src/vespa/eval/eval/call_nodes.h1
-rw-r--r--eval/src/vespa/eval/eval/hamming_distance.h13
-rw-r--r--eval/src/vespa/eval/eval/key_gen.cpp1
-rw-r--r--eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp5
-rw-r--r--eval/src/vespa/eval/eval/llvm/llvm_wrapper.h1
-rw-r--r--eval/src/vespa/eval/eval/make_tensor_function.cpp3
-rw-r--r--eval/src/vespa/eval/eval/node_tools.cpp1
-rw-r--r--eval/src/vespa/eval/eval/node_types.cpp1
-rw-r--r--eval/src/vespa/eval/eval/node_visitor.h2
-rw-r--r--eval/src/vespa/eval/eval/operation.cpp3
-rw-r--r--eval/src/vespa/eval/eval/operation.h1
-rw-r--r--eval/src/vespa/eval/eval/test/eval_spec.cpp21
-rw-r--r--eval/src/vespa/eval/eval/test/reference_evaluation.cpp3
-rw-r--r--eval/src/vespa/eval/eval/visit_stuff.cpp1
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java12
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/FlagDefinition.java21
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java36
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java13
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/json/Condition.java24
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java15
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java15
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java15
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java19
-rw-r--r--fnet/src/vespa/fnet/transport_debugger.cpp4
-rw-r--r--fnet/src/vespa/fnet/transport_debugger.h18
-rw-r--r--functions.cmake17
-rw-r--r--linguistics/abi-spec.json11
-rw-r--r--linguistics/src/main/java/com/yahoo/language/Linguistics.java1
-rw-r--r--linguistics/src/main/java/com/yahoo/language/detect/Detection.java1
-rw-r--r--linguistics/src/main/java/com/yahoo/language/detect/DetectionException.java3
-rw-r--r--linguistics/src/main/java/com/yahoo/language/detect/Hint.java5
-rw-r--r--linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java1
-rw-r--r--linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java3
-rw-r--r--linguistics/src/main/java/com/yahoo/language/opennlp/OptimaizeDetector.java10
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/ProcessingException.java1
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/StemList.java4
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/StemMode.java16
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/Stemmer.java2
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/StemmerImpl.java1
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/TokenScript.java2
-rw-r--r--linguistics/src/main/java/com/yahoo/language/sentencepiece/Trie.java2
-rw-r--r--linguistics/src/main/java/com/yahoo/language/simple/SimpleDetector.java9
-rw-r--r--linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java1
-rw-r--r--linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java1
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsException.java14
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java12
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/ServiceHealthGatherer.java14
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java44
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteHealthMetricFetcher.java4
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java6
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java1
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetrieverTest.java4
-rw-r--r--model-integration/pom.xml25
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApi.java51
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java58
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java36
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImplTest.java2
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java178
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java25
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java28
-rw-r--r--parent/pom.xml12
-rw-r--r--searchcore/CMakeLists.txt1
-rw-r--r--searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp68
-rw-r--r--searchcore/src/apps/vespa-redistribute-bm/.gitignore1
-rw-r--r--searchcore/src/apps/vespa-redistribute-bm/CMakeLists.txt8
-rw-r--r--searchcore/src/apps/vespa-redistribute-bm/vespa_redistribute_bm.cpp626
-rw-r--r--searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp18
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp9
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h4
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp5
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h3
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_distribution.cpp55
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_distribution.h8
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.cpp22
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.h4
-rw-r--r--searchcore/src/vespa/searchcore/config/proton.def12
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.cpp10
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.h13
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/address_space_usage.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/attribute/address_space_usage.h2
-rw-r--r--slobrok/CMakeLists.txt1
-rw-r--r--slobrok/src/tests/rpc_mapping_monitor/CMakeLists.txt9
-rw-r--r--slobrok/src/tests/rpc_mapping_monitor/rpc_mapping_monitor_test.cpp224
-rw-r--r--slobrok/src/vespa/slobrok/server/mapping_monitor.h2
-rw-r--r--storage/src/tests/distributor/CMakeLists.txt3
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test_util.cpp5
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test_util.h1
-rw-r--r--storage/src/tests/distributor/distributortestutil.cpp512
-rw-r--r--storage/src/tests/distributor/distributortestutil.h248
-rw-r--r--storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp2893
-rw-r--r--storage/src/tests/distributor/legacy_distributor_test.cpp1326
-rw-r--r--storage/src/tests/distributor/statusreporterdelegatetest.cpp2
-rw-r--r--storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp36
-rw-r--r--storage/src/tests/distributor/top_level_distributor_test_util.cpp17
-rw-r--r--storage/src/tests/distributor/top_level_distributor_test_util.h11
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketinfo.h2
-rw-r--r--storage/src/vespa/storage/distributor/bucket_space_state_map.cpp16
-rw-r--r--storage/src/vespa/storage/distributor/bucket_space_state_map.h4
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.h3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_component.cpp3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_component.h22
-rw-r--r--storage/src/vespa/storage/distributor/distributor_operation_context.h9
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.cpp211
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.h38
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.cpp1
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h12
-rw-r--r--storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h4
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.cpp5
-rw-r--r--storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp29
-rw-r--r--storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h7
-rw-r--r--storage/src/vespa/storage/distributor/pendingclusterstate.cpp37
-rw-r--r--storage/src/vespa/storage/distributor/pendingclusterstate.h17
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp365
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h95
-rw-r--r--storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp37
-rw-r--r--storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h2
-rw-r--r--storage/src/vespa/storage/distributor/top_level_distributor.cpp442
-rw-r--r--storage/src/vespa/storage/distributor/top_level_distributor.h38
-rw-r--r--streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp4
-rw-r--r--streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp16
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java15
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java2
-rw-r--r--vespajlib/abi-spec.json1
-rw-r--r--vespajlib/src/main/java/com/yahoo/lang/MutableBoolean.java6
-rw-r--r--vespajlib/src/main/java/com/yahoo/protect/Process.java14
213 files changed, 3309 insertions, 7291 deletions
diff --git a/.copr/Makefile b/.copr/Makefile
index 5b097ba0ad9..b0322bf29b3 100644
--- a/.copr/Makefile
+++ b/.copr/Makefile
@@ -1,17 +1,14 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
TOP = $(realpath $(dir $(lastword $(MAKEFILE_LIST))))
-# Version
-VESPA_VERSION := $(shell git tag --points-at HEAD | grep -oP "\d+\.\d+\.\d+" | sort -V | tail -1)
-
RPMTOPDIR := $(HOME)/rpmbuild
SOURCEDIR := $(RPMTOPDIR)/SOURCES
SPECDIR := $(RPMTOPDIR)/SPECS
SPECFILE := $(SPECDIR)/vespa-$(VESPA_VERSION).spec
srpm:
- dnf install -y rpmdevtools
- $(TOP)/../dist.sh $(VESPA_VERSION)
+ dnf install -y git rpmdevtools
+ $(TOP)/../dist.sh $$(git tag --points-at HEAD | grep -oP "\d+\.\d+\.\d+" | sort -V | tail -1)
spectool -g -C $(SOURCEDIR) $(SPECFILE)
rpmbuild -bs --define "_topdir $(RPMTOPDIR)" $(SPECFILE)
cp -a $(RPMTOPDIR)/SRPMS/* $(outdir)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3598d259144..7d9968b6329 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -58,7 +58,6 @@ add_subdirectory(config)
add_subdirectory(config-model-fat)
add_subdirectory(configd)
add_subdirectory(configdefinitions)
-add_subdirectory(configgen)
add_subdirectory(configserver)
add_subdirectory(configserver-flags)
add_subdirectory(configutil)
diff --git a/client/go/cmd/api_key_test.go b/client/go/cmd/api_key_test.go
index c00f520aa25..2497568604f 100644
--- a/client/go/cmd/api_key_test.go
+++ b/client/go/cmd/api_key_test.go
@@ -1,4 +1,4 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// Author: mpolden
package cmd
@@ -14,10 +14,10 @@ func TestAPIKey(t *testing.T) {
homeDir := t.TempDir()
keyFile := homeDir + "/.vespa/t1.api-key.pem"
- out := execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, homeDir: homeDir}, t, nil)
+ out, _ := execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, homeDir: homeDir}, t, nil)
assert.True(t, strings.HasPrefix(out, "Success: API private key written to "+keyFile+"\n"))
- out = execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, homeDir: homeDir}, t, nil)
+ out, _ = execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, homeDir: homeDir}, t, nil)
assert.True(t, strings.HasPrefix(out, "Error: File "+keyFile+" already exists\nHint: Use -f to overwrite it\n"))
assert.True(t, strings.Contains(out, "This is your public key"))
}
diff --git a/client/go/cmd/cert_test.go b/client/go/cmd/cert_test.go
index 36abdae1787..d93def2fa70 100644
--- a/client/go/cmd/cert_test.go
+++ b/client/go/cmd/cert_test.go
@@ -1,4 +1,4 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// Author: mpolden
package cmd
@@ -16,7 +16,7 @@ import (
func TestCert(t *testing.T) {
homeDir := t.TempDir()
pkgDir := mockApplicationPackage(t, false)
- out := execute(command{args: []string{"cert", "-a", "t1.a1.i1", pkgDir}, homeDir: homeDir}, t, nil)
+ out, _ := execute(command{args: []string{"cert", "-a", "t1.a1.i1", pkgDir}, homeDir: homeDir}, t, nil)
app, err := vespa.ApplicationFromString("t1.a1.i1")
assert.Nil(t, err)
@@ -28,7 +28,7 @@ func TestCert(t *testing.T) {
assert.Equal(t, fmt.Sprintf("Success: Certificate written to %s\nSuccess: Certificate written to %s\nSuccess: Private key written to %s\n", pkgCertificate, certificate, privateKey), out)
- out = execute(command{args: []string{"cert", "-a", "t1.a1.i1", pkgDir}, homeDir: homeDir}, t, nil)
+ out, _ = execute(command{args: []string{"cert", "-a", "t1.a1.i1", pkgDir}, homeDir: homeDir}, t, nil)
assert.Contains(t, out, fmt.Sprintf("Error: Application package %s already contains a certificate", appDir))
}
@@ -41,13 +41,13 @@ func TestCertCompressedPackage(t *testing.T) {
_, err = os.Create(zipFile)
assert.Nil(t, err)
- out := execute(command{args: []string{"cert", "-a", "t1.a1.i1", pkgDir}, homeDir: homeDir}, t, nil)
+ out, _ := execute(command{args: []string{"cert", "-a", "t1.a1.i1", pkgDir}, homeDir: homeDir}, t, nil)
assert.Contains(t, out, "Error: Cannot add certificate to compressed application package")
err = os.Remove(zipFile)
assert.Nil(t, err)
- out = execute(command{args: []string{"cert", "-f", "-a", "t1.a1.i1", pkgDir}, homeDir: homeDir}, t, nil)
+ out, _ = execute(command{args: []string{"cert", "-f", "-a", "t1.a1.i1", pkgDir}, homeDir: homeDir}, t, nil)
assert.Contains(t, out, "Success: Certificate written to")
assert.Contains(t, out, "Success: Private key written to")
}
diff --git a/client/go/cmd/command_tester.go b/client/go/cmd/command_tester.go
index 3d19e772875..f455ffa9957 100644
--- a/client/go/cmd/command_tester.go
+++ b/client/go/cmd/command_tester.go
@@ -1,4 +1,4 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// A helper for testing commands
// Author: bratseth
@@ -17,7 +17,6 @@ import (
"github.com/spf13/pflag"
"github.com/spf13/viper"
- "github.com/stretchr/testify/assert"
"github.com/vespa-engine/vespa/client/go/util"
)
@@ -27,7 +26,7 @@ type command struct {
moreArgs []string
}
-func execute(cmd command, t *testing.T, client *mockHttpClient) string {
+func execute(cmd command, t *testing.T, client *mockHttpClient) (string, string) {
if client != nil {
util.ActiveHttpClient = client
}
@@ -56,19 +55,20 @@ func execute(cmd command, t *testing.T, client *mockHttpClient) string {
exitFunc = func(code int) {}
// Capture stdout and execute command
- var b bytes.Buffer
- stdout = &b
+ var capturedOut bytes.Buffer
+ var capturedErr bytes.Buffer
+ stdout = &capturedOut
+ stderr = &capturedErr
// Execute command and return output
rootCmd.SetArgs(append(cmd.args, cmd.moreArgs...))
rootCmd.Execute()
- out, err := ioutil.ReadAll(&b)
- assert.Nil(t, err, "No error")
- return string(out)
+ return capturedOut.String(), capturedErr.String()
}
func executeCommand(t *testing.T, client *mockHttpClient, args []string, moreArgs []string) string {
- return execute(command{args: args, moreArgs: moreArgs}, t, client)
+ out, _ := execute(command{args: args, moreArgs: moreArgs}, t, client)
+ return out
}
type mockHttpClient struct {
diff --git a/client/go/cmd/config_test.go b/client/go/cmd/config_test.go
index 70ad5d558e1..cf50f561f0f 100644
--- a/client/go/cmd/config_test.go
+++ b/client/go/cmd/config_test.go
@@ -8,23 +8,28 @@ import (
func TestConfig(t *testing.T) {
homeDir := t.TempDir()
- assert.Equal(t, "invalid option or value: \"foo\": \"bar\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "foo", "bar"}}, t, nil))
- assert.Equal(t, "foo = <unset>\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "foo"}}, t, nil))
- assert.Equal(t, "target = local\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, nil))
- assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "cloud"}}, t, nil))
- assert.Equal(t, "target = cloud\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, nil))
- assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "http://127.0.0.1:8080"}}, t, nil))
- assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "https://127.0.0.1"}}, t, nil))
- assert.Equal(t, "target = https://127.0.0.1\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, nil))
+ assertConfigCommand(t, "invalid option or value: \"foo\": \"bar\"\n", homeDir, "config", "set", "foo", "bar")
+ assertConfigCommand(t, "foo = <unset>\n", homeDir, "config", "get", "foo")
+ assertConfigCommand(t, "target = local\n", homeDir, "config", "get", "target")
+ assertConfigCommand(t, "", homeDir, "config", "set", "target", "cloud")
+ assertConfigCommand(t, "target = cloud\n", homeDir, "config", "get", "target")
+ assertConfigCommand(t, "", homeDir, "config", "set", "target", "http://127.0.0.1:8080")
+ assertConfigCommand(t, "", homeDir, "config", "set", "target", "https://127.0.0.1")
+ assertConfigCommand(t, "target = https://127.0.0.1\n", homeDir, "config", "get", "target")
- assert.Equal(t, "invalid application: \"foo\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "application", "foo"}}, t, nil))
- assert.Equal(t, "application = <unset>\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "application"}}, t, nil))
- assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "application", "t1.a1.i1"}}, t, nil))
- assert.Equal(t, "application = t1.a1.i1\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "application"}}, t, nil))
+ assertConfigCommand(t, "invalid application: \"foo\"\n", homeDir, "config", "set", "application", "foo")
+ assertConfigCommand(t, "application = <unset>\n", homeDir, "config", "get", "application")
+ assertConfigCommand(t, "", homeDir, "config", "set", "application", "t1.a1.i1")
+ assertConfigCommand(t, "application = t1.a1.i1\n", homeDir, "config", "get", "application")
- assert.Equal(t, "application = t1.a1.i1\ncolor = auto\ntarget = https://127.0.0.1\nwait = 0\n", execute(command{homeDir: homeDir, args: []string{"config", "get"}}, t, nil))
+ assertConfigCommand(t, "application = t1.a1.i1\ncolor = auto\ntarget = https://127.0.0.1\nwait = 0\n", homeDir, "config", "get")
- assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "wait", "60"}}, t, nil))
- assert.Equal(t, "wait option must be an integer >= 0, got \"foo\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "wait", "foo"}}, t, nil))
- assert.Equal(t, "wait = 60\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "wait"}}, t, nil))
+ assertConfigCommand(t, "", homeDir, "config", "set", "wait", "60")
+ assertConfigCommand(t, "wait option must be an integer >= 0, got \"foo\"\n", homeDir, "config", "set", "wait", "foo")
+ assertConfigCommand(t, "wait = 60\n", homeDir, "config", "get", "wait")
+}
+
+func assertConfigCommand(t *testing.T, expected, homeDir string, args ...string) {
+ out, _ := execute(command{homeDir: homeDir, args: args}, t, nil)
+ assert.Equal(t, expected, out)
}
diff --git a/client/go/cmd/curl.go b/client/go/cmd/curl.go
index f6b40e10f35..bd9fad1b47e 100644
--- a/client/go/cmd/curl.go
+++ b/client/go/cmd/curl.go
@@ -1,22 +1,19 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package cmd
import (
- "fmt"
"log"
"os"
- "os/exec"
"strings"
- "github.com/kballard/go-shellquote"
"github.com/spf13/cobra"
+ "github.com/vespa-engine/vespa/client/go/curl"
)
var curlDryRun bool
-var curlPath string
func init() {
rootCmd.AddCommand(curlCmd)
- curlCmd.Flags().StringVarP(&curlPath, "path", "p", "", "The path to curl. If this is unset, curl from PATH is used")
curlCmd.Flags().BoolVarP(&curlDryRun, "dry-run", "n", false, "Print the curl command that would be executed")
}
@@ -50,16 +47,20 @@ $ vespa curl -t local -- -v /search/?yql=query
return
}
service := getService("query", 0)
- c := &curl{privateKeyPath: privateKeyFile, certificatePath: certificateFile}
+ url := joinURL(service.BaseURL, args[len(args)-1])
+ rawArgs := args[:len(args)-1]
+ c, err := curl.RawArgs(url, rawArgs...)
+ if err != nil {
+ fatalErr(err)
+ return
+ }
+ c.PrivateKey = privateKeyFile
+ c.Certificate = certificateFile
+
if curlDryRun {
- cmd, err := c.command(service.BaseURL, args...)
- if err != nil {
- fatalErr(err, "Failed to create curl command")
- return
- }
- log.Print(shellquote.Join(cmd.Args...))
+ log.Print(c.String())
} else {
- if err := c.run(service.BaseURL, args...); err != nil {
+ if err := c.Run(os.Stdout, os.Stderr); err != nil {
fatalErr(err, "Failed to run curl")
return
}
@@ -67,72 +68,8 @@ $ vespa curl -t local -- -v /search/?yql=query
},
}
-type curl struct {
- path string
- certificatePath string
- privateKeyPath string
-}
-
-func (c *curl) run(baseURL string, args ...string) error {
- cmd, err := c.command(baseURL, args...)
- if err != nil {
- return err
- }
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if err := cmd.Start(); err != nil {
- return err
- }
- return cmd.Wait()
-}
-
-func (c *curl) command(baseURL string, args ...string) (*exec.Cmd, error) {
- if len(args) == 0 {
- return nil, fmt.Errorf("need at least one argument")
- }
-
- if c.path == "" {
- resolvedPath, err := resolveCurlPath()
- if err != nil {
- return nil, err
- }
- c.path = resolvedPath
- }
-
- path := args[len(args)-1]
- args = args[:len(args)-1]
- if !hasOption("--key", args) && c.privateKeyPath != "" {
- args = append(args, "--key", c.privateKeyPath)
- }
- if !hasOption("--cert", args) && c.certificatePath != "" {
- args = append(args, "--cert", c.certificatePath)
- }
-
+func joinURL(baseURL, path string) string {
baseURL = strings.TrimSuffix(baseURL, "/")
path = strings.TrimPrefix(path, "/")
- args = append(args, baseURL+"/"+path)
-
- return exec.Command(c.path, args...), nil
-}
-
-func hasOption(option string, args []string) bool {
- for _, arg := range args {
- if arg == option {
- return true
- }
- }
- return false
-}
-
-func resolveCurlPath() (string, error) {
- var curlPath string
- var err error
- curlPath, err = exec.LookPath("curl")
- if err != nil {
- curlPath, err = exec.LookPath("curl.exe")
- if err != nil {
- return "", err
- }
- }
- return curlPath, nil
+ return baseURL + "/" + path
}
diff --git a/client/go/cmd/curl_test.go b/client/go/cmd/curl_test.go
index c3163e731ce..340eacd0bd3 100644
--- a/client/go/cmd/curl_test.go
+++ b/client/go/cmd/curl_test.go
@@ -1,9 +1,9 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package cmd
import (
"fmt"
"path/filepath"
- "strings"
"testing"
"github.com/stretchr/testify/assert"
@@ -13,41 +13,10 @@ func TestCurl(t *testing.T) {
homeDir := t.TempDir()
httpClient := &mockHttpClient{}
convergeServices(httpClient)
- out := execute(command{homeDir: homeDir, args: []string{"curl", "-n", "-p", "/usr/bin/curl", "-a", "t1.a1.i1", "--", "-v", "--data-urlencode", "arg=with space", "/search"}}, t, httpClient)
+ out, _ := execute(command{homeDir: homeDir, args: []string{"curl", "-n", "-a", "t1.a1.i1", "--", "-v", "--data-urlencode", "arg=with space", "/search"}}, t, httpClient)
- expected := fmt.Sprintf("/usr/bin/curl -v --data-urlencode 'arg=with space' --key %s --cert %s https://127.0.0.1:8080/search\n",
+ expected := fmt.Sprintf("curl --key %s --cert %s -v --data-urlencode 'arg=with space' https://127.0.0.1:8080/search\n",
filepath.Join(homeDir, ".vespa", "t1.a1.i1", "data-plane-private-key.pem"),
filepath.Join(homeDir, ".vespa", "t1.a1.i1", "data-plane-public-cert.pem"))
assert.Equal(t, expected, out)
}
-
-func TestCurlCommand(t *testing.T) {
- c := &curl{path: "/usr/bin/curl", privateKeyPath: "/tmp/priv-key", certificatePath: "/tmp/cert-key"}
- assertCurl(t, c, "/usr/bin/curl -v --key /tmp/priv-key --cert /tmp/cert-key https://example.com/", "-v", "/")
-
- c = &curl{path: "/usr/bin/curl", privateKeyPath: "/tmp/priv-key", certificatePath: "/tmp/cert-key"}
- assertCurl(t, c, "/usr/bin/curl -v --cert my-cert --key my-key https://example.com/", "-v", "--cert", "my-cert", "--key", "my-key", "/")
-
- c = &curl{path: "/usr/bin/curl2"}
- assertCurl(t, c, "/usr/bin/curl2 -v https://example.com/foo", "-v", "/foo")
-
- c = &curl{path: "/usr/bin/curl"}
- assertCurl(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "-v", "/foo/bar")
-
- c = &curl{path: "/usr/bin/curl"}
- assertCurl(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "-v", "foo/bar")
-
- c = &curl{path: "/usr/bin/curl"}
- assertCurlURL(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "https://example.com/", "-v", "foo/bar")
-}
-
-func assertCurl(t *testing.T, c *curl, expectedOutput string, args ...string) {
- assertCurlURL(t, c, expectedOutput, "https://example.com", args...)
-}
-
-func assertCurlURL(t *testing.T, c *curl, expectedOutput string, url string, args ...string) {
- cmd, err := c.command("https://example.com", args...)
- assert.Nil(t, err)
-
- assert.Equal(t, expectedOutput, strings.Join(cmd.Args, " "))
-}
diff --git a/client/go/cmd/deploy_test.go b/client/go/cmd/deploy_test.go
index f24ba0829f9..443f7e8846f 100644
--- a/client/go/cmd/deploy_test.go
+++ b/client/go/cmd/deploy_test.go
@@ -1,4 +1,4 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// deploy command tests
// Author: bratseth
@@ -130,9 +130,10 @@ func assertActivate(applicationPackage string, arguments []string, t *testing.T)
if err := cfg.WriteSessionID(vespa.DefaultApplication, 42); err != nil {
t.Fatal(err)
}
+ out, _ := execute(command{args: arguments, homeDir: homeDir}, t, client)
assert.Equal(t,
"Success: Activated "+applicationPackage+" with session 42\n",
- execute(command{args: arguments, homeDir: homeDir}, t, client))
+ out)
url := "http://127.0.0.1:19071/application/v2/tenant/default/session/42/active"
assert.Equal(t, url, client.lastRequest.URL.String())
assert.Equal(t, "PUT", client.lastRequest.Method)
diff --git a/client/go/cmd/document.go b/client/go/cmd/document.go
index 78c6596f511..cc5fb948e3b 100644
--- a/client/go/cmd/document.go
+++ b/client/go/cmd/document.go
@@ -1,10 +1,12 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// vespa document command
// author: bratseth
package cmd
import (
+ "io"
+ "io/ioutil"
"log"
"strings"
@@ -13,12 +15,15 @@ import (
"github.com/vespa-engine/vespa/client/go/vespa"
)
+var printCurl bool
+
func init() {
rootCmd.AddCommand(documentCmd)
documentCmd.AddCommand(documentPutCmd)
documentCmd.AddCommand(documentUpdateCmd)
documentCmd.AddCommand(documentRemoveCmd)
documentCmd.AddCommand(documentGetCmd)
+ documentCmd.PersistentFlags().BoolVarP(&printCurl, "verbose", "v", false, "Print the equivalent curl command for the document operation")
}
var documentCmd = &cobra.Command{
@@ -38,7 +43,7 @@ should be used instead of this.`,
DisableAutoGenTag: true,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
- printResult(vespa.Send(args[0], documentService()), false)
+ printResult(vespa.Send(args[0], documentService(), curlOutput()), false)
},
}
@@ -54,9 +59,9 @@ $ vespa document put id:mynamespace:music::a-head-full-of-dreams src/test/resour
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 1 {
- printResult(vespa.Put("", args[0], documentService()), false)
+ printResult(vespa.Put("", args[0], documentService(), curlOutput()), false)
} else {
- printResult(vespa.Put(args[0], args[1], documentService()), false)
+ printResult(vespa.Put(args[0], args[1], documentService(), curlOutput()), false)
}
},
}
@@ -72,9 +77,9 @@ $ vespa document update id:mynamespace:music::a-head-full-of-dreams src/test/res
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 1 {
- printResult(vespa.Update("", args[0], documentService()), false)
+ printResult(vespa.Update("", args[0], documentService(), curlOutput()), false)
} else {
- printResult(vespa.Update(args[0], args[1], documentService()), false)
+ printResult(vespa.Update(args[0], args[1], documentService(), curlOutput()), false)
}
},
}
@@ -90,9 +95,9 @@ $ vespa document remove id:mynamespace:music::a-head-full-of-dreams`,
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
if strings.HasPrefix(args[0], "id:") {
- printResult(vespa.RemoveId(args[0], documentService()), false)
+ printResult(vespa.RemoveId(args[0], documentService(), curlOutput()), false)
} else {
- printResult(vespa.RemoveOperation(args[0], documentService()), false)
+ printResult(vespa.RemoveOperation(args[0], documentService(), curlOutput()), false)
}
},
}
@@ -104,12 +109,19 @@ var documentGetCmd = &cobra.Command{
DisableAutoGenTag: true,
Example: `$ vespa document get id:mynamespace:music::a-head-full-of-dreams`,
Run: func(cmd *cobra.Command, args []string) {
- printResult(vespa.Get(args[0], documentService()), true)
+ printResult(vespa.Get(args[0], documentService(), curlOutput()), true)
},
}
func documentService() *vespa.Service { return getService("document", 0) }
+func curlOutput() io.Writer {
+ if printCurl {
+ return stderr
+ }
+ return ioutil.Discard
+}
+
func printResult(result util.OperationResult, payloadOnlyOnSuccess bool) {
if !result.Success {
log.Print(color.Red("Error: "), result.Message)
diff --git a/client/go/cmd/document_test.go b/client/go/cmd/document_test.go
index c298d5ef285..8aecb538f89 100644
--- a/client/go/cmd/document_test.go
+++ b/client/go/cmd/document_test.go
@@ -1,4 +1,4 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// document command tests
// Author: bratseth
@@ -19,6 +19,11 @@ func TestDocumentSendPut(t *testing.T) {
"put", "POST", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Put.json", t)
}
+func TestDocumentSendPutVerbose(t *testing.T) {
+ assertDocumentSend([]string{"document", "-v", "testdata/A-Head-Full-of-Dreams-Put.json"},
+ "put", "POST", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Put.json", t)
+}
+
func TestDocumentSendUpdate(t *testing.T) {
assertDocumentSend([]string{"document", "testdata/A-Head-Full-of-Dreams-Update.json"},
"update", "PUT", "id:mynamespace:music::a-head-full-of-dreams", "testdata/A-Head-Full-of-Dreams-Update.json", t)
@@ -93,11 +98,22 @@ func TestDocumentGet(t *testing.T) {
func assertDocumentSend(arguments []string, expectedOperation string, expectedMethod string, expectedDocumentId string, expectedPayloadFile string, t *testing.T) {
client := &mockHttpClient{}
documentURL := documentServiceURL(client)
- assert.Equal(t,
- "Success: "+expectedOperation+" "+expectedDocumentId+"\n",
- executeCommand(t, client, arguments, []string{}))
expectedPath, _ := vespa.IdToURLPath(expectedDocumentId)
- assert.Equal(t, documentURL+"/document/v1/"+expectedPath, client.lastRequest.URL.String())
+ expectedURL := documentURL + "/document/v1/" + expectedPath
+ out, errOut := execute(command{args: arguments}, t, client)
+
+ verbose := false
+ for _, a := range arguments {
+ if a == "-v" {
+ verbose = true
+ }
+ }
+ if verbose {
+ expectedCurl := "curl -X " + expectedMethod + " -H 'Content-Type: application/json' --data-binary @" + expectedPayloadFile + " " + expectedURL + "\n"
+ assert.Equal(t, expectedCurl, errOut)
+ }
+ assert.Equal(t, "Success: "+expectedOperation+" "+expectedDocumentId+"\n", out)
+ assert.Equal(t, expectedURL, client.lastRequest.URL.String())
assert.Equal(t, "application/json", client.lastRequest.Header.Get("Content-Type"))
assert.Equal(t, expectedMethod, client.lastRequest.Method)
diff --git a/client/go/cmd/helpers.go b/client/go/cmd/helpers.go
index 3493a4b32a8..f29a842aed2 100644
--- a/client/go/cmd/helpers.go
+++ b/client/go/cmd/helpers.go
@@ -1,4 +1,4 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// Helpers used by multiple sub-commands.
// Author: mpolden
@@ -147,7 +147,16 @@ func getTarget() vespa.Target {
if err != nil {
fatalErrHint(err, "Deployment to cloud requires a certificate. Try 'vespa cert'")
}
- return vespa.CloudTarget(deployment, kp, apiKey, vespa.LogOptions{Writer: stdout, Level: vespa.LogLevel(logLevelArg)})
+ return vespa.CloudTarget(deployment, apiKey,
+ vespa.TLSOptions{
+ KeyPair: kp,
+ CertificateFile: certificateFile,
+ PrivateKeyFile: privateKeyFile,
+ },
+ vespa.LogOptions{
+ Writer: stdout,
+ Level: vespa.LogLevel(logLevelArg),
+ })
}
fatalErrHint(fmt.Errorf("Invalid target: %s", targetType), "Valid targets are 'local', 'cloud' or an URL")
return nil
diff --git a/client/go/cmd/man_test.go b/client/go/cmd/man_test.go
index 59efc64b8de..f7c33c8b3a1 100644
--- a/client/go/cmd/man_test.go
+++ b/client/go/cmd/man_test.go
@@ -11,7 +11,7 @@ import (
func TestMan(t *testing.T) {
tmpDir := t.TempDir()
- out := execute(command{args: []string{"man", tmpDir}}, t, nil)
+ out, _ := execute(command{args: []string{"man", tmpDir}}, t, nil)
assert.Equal(t, fmt.Sprintf("Success: Man pages written to %s\n", tmpDir), out)
assert.True(t, util.PathExists(filepath.Join(tmpDir, "vespa.1")))
}
diff --git a/client/go/cmd/root.go b/client/go/cmd/root.go
index 4202035af92..cd8427c3ac6 100644
--- a/client/go/cmd/root.go
+++ b/client/go/cmd/root.go
@@ -1,4 +1,4 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// Root Cobra command: vespa
// author: bratseth
@@ -17,7 +17,6 @@ import (
var (
// TODO: add timeout flag
- // TODO: add flag to show http request made
rootCmd = &cobra.Command{
Use: "vespa command-name",
Short: "The command-line tool for Vespa.ai",
@@ -40,6 +39,7 @@ Vespa documentation: https://docs.vespa.ai`,
color = aurora.NewAurora(false)
stdout = colorable.NewColorableStdout()
+ stderr = colorable.NewColorableStderr()
)
const (
@@ -49,6 +49,14 @@ const (
colorFlag = "color"
)
+func isTerminal() bool {
+ file, ok := stdout.(*os.File)
+ if ok {
+ return isatty.IsTerminal(file.Fd())
+ }
+ return false
+}
+
func configureOutput() {
log.SetFlags(0) // No timestamps
log.SetOutput(stdout)
@@ -65,10 +73,7 @@ func configureOutput() {
colorize := false
switch colorValue {
case "auto":
- file, ok := stdout.(*os.File)
- if ok {
- colorize = isatty.IsTerminal(file.Fd())
- }
+ colorize = isTerminal()
case "always":
colorize = true
case "never":
diff --git a/client/go/cmd/version.go b/client/go/cmd/version.go
index 05820f4e34b..749d17a41d9 100644
--- a/client/go/cmd/version.go
+++ b/client/go/cmd/version.go
@@ -1,23 +1,144 @@
package cmd
import (
+ "encoding/json"
"log"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
"runtime"
+ "sort"
+ "strings"
+ "time"
"github.com/spf13/cobra"
"github.com/vespa-engine/vespa/client/go/build"
+ "github.com/vespa-engine/vespa/client/go/util"
+ "github.com/vespa-engine/vespa/client/go/version"
)
+var skipVersionCheck bool
+
+var sp subprocess = &execSubprocess{}
+
+type subprocess interface {
+ pathOf(name string) (string, error)
+ outputOf(name string, args ...string) ([]byte, error)
+ isTerminal() bool
+}
+
+type execSubprocess struct{}
+
+func (c *execSubprocess) pathOf(name string) (string, error) { return exec.LookPath(name) }
+func (c *execSubprocess) isTerminal() bool { return isTerminal() }
+func (c *execSubprocess) outputOf(name string, args ...string) ([]byte, error) {
+ return exec.Command(name, args...).Output()
+}
+
func init() {
rootCmd.AddCommand(versionCmd)
+ versionCmd.Flags().BoolVarP(&skipVersionCheck, "no-check", "n", false, "Do not check if a new version is available")
}
var versionCmd = &cobra.Command{
Use: "version",
- Short: "Show version number",
+ Short: "Show current version and check for updates",
DisableAutoGenTag: true,
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
log.Printf("vespa version %s compiled with %v on %v/%v", build.Version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+ if !skipVersionCheck && sp.isTerminal() {
+ if err := checkVersion(); err != nil {
+ fatalErr(err)
+ }
+ }
},
}
+
+func checkVersion() error {
+ current, err := version.Parse(build.Version)
+ if err != nil {
+ return err
+ }
+ latest, err := latestRelease()
+ if err != nil {
+ return err
+ }
+ if !current.Less(latest.Version) {
+ return nil
+ }
+ usingHomebrew := usingHomebrew()
+ if usingHomebrew && latest.isRecent() {
+ return nil // Allow some time for new release to appear in Homebrew repo
+ }
+ log.Printf("\nNew release available: %s", color.Green(latest.Version))
+ log.Printf("https://github.com/vespa-engine/vespa/releases/tag/v%s", latest.Version)
+ if usingHomebrew {
+ log.Printf("\nUpgrade by running:\n%s", color.Cyan("brew update && brew upgrade vespa-cli"))
+ }
+ return nil
+}
+
+func latestRelease() (release, error) {
+ req, err := http.NewRequest("GET", "https://api.github.com/repos/vespa-engine/vespa/releases", nil)
+ if err != nil {
+ return release{}, err
+ }
+ response, err := util.HttpDo(req, time.Minute, "GitHub")
+ if err != nil {
+ return release{}, err
+ }
+ defer response.Body.Close()
+
+ var ghReleases []githubRelease
+ dec := json.NewDecoder(response.Body)
+ if err := dec.Decode(&ghReleases); err != nil {
+ return release{}, err
+ }
+ if len(ghReleases) == 0 {
+ return release{}, nil // No releases found
+ }
+
+ var releases []release
+ for _, r := range ghReleases {
+ v, err := version.Parse(r.TagName)
+ if err != nil {
+ return release{}, err
+ }
+ publishedAt, err := time.Parse(time.RFC3339, r.PublishedAt)
+ if err != nil {
+ return release{}, err
+ }
+ releases = append(releases, release{Version: v, PublishedAt: publishedAt})
+ }
+ sort.Slice(releases, func(i, j int) bool { return releases[i].Version.Less(releases[j].Version) })
+ return releases[len(releases)-1], nil
+}
+
+func usingHomebrew() bool {
+ selfPath, err := sp.pathOf("vespa")
+ if err != nil {
+ return false
+ }
+ brewPrefix, err := sp.outputOf("brew", "--prefix")
+ if err != nil {
+ return false
+ }
+ brewBin := filepath.Join(strings.TrimSpace(string(brewPrefix)), "bin") + string(os.PathSeparator)
+ return strings.HasPrefix(selfPath, brewBin)
+}
+
+type githubRelease struct {
+ TagName string `json:"tag_name"`
+ PublishedAt string `json:"published_at"`
+}
+
+type release struct {
+ Version version.Version
+ PublishedAt time.Time
+}
+
+func (r release) isRecent() bool {
+ return time.Now().Before(r.PublishedAt.Add(time.Hour * 24))
+}
diff --git a/client/go/cmd/version_test.go b/client/go/cmd/version_test.go
index fc977c47938..9eeaaaa4692 100644
--- a/client/go/cmd/version_test.go
+++ b/client/go/cmd/version_test.go
@@ -1,11 +1,51 @@
package cmd
import (
+ "fmt"
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/vespa-engine/vespa/client/go/util"
)
func TestVersion(t *testing.T) {
- assert.Contains(t, execute(command{args: []string{"version"}}, t, nil), "vespa version 0.0.0-devel compiled with")
+ c := &mockHttpClient{}
+ c.NextResponse(200, `[{"tag_name": "v1.2.3", "published_at": "2021-09-10T12:00:00Z"}]`)
+ util.ActiveHttpClient = c
+
+ sp = &mockSubprocess{}
+ out, _ := execute(command{args: []string{"version"}}, t, nil)
+ assert.Contains(t, out, "vespa version 0.0.0-devel compiled with")
+ assert.Contains(t, out, "New release available: 1.2.3\nhttps://github.com/vespa-engine/vespa/releases/tag/v1.2.3")
+}
+
+func TestVersionCheckHomebrew(t *testing.T) {
+ c := &mockHttpClient{}
+ c.NextResponse(200, `[{"tag_name": "v1.2.3", "published_at": "2021-09-10T12:00:00Z"}]`)
+ util.ActiveHttpClient = c
+
+ sp = &mockSubprocess{programPath: "/usr/local/bin/vespa", output: "/usr/local"}
+ out, _ := execute(command{args: []string{"version"}}, t, nil)
+ assert.Contains(t, out, "vespa version 0.0.0-devel compiled with")
+ assert.Contains(t, out, "New release available: 1.2.3\n"+
+ "https://github.com/vespa-engine/vespa/releases/tag/v1.2.3\n"+
+ "\nUpgrade by running:\nbrew update && brew upgrade vespa-cli\n")
+}
+
+type mockSubprocess struct {
+ programPath string
+ output string
}
+
+func (c *mockSubprocess) pathOf(name string) (string, error) {
+ if c.programPath == "" {
+ return "", fmt.Errorf("no program path set in this mock")
+ }
+ return c.programPath, nil
+}
+
+func (c *mockSubprocess) outputOf(name string, args ...string) ([]byte, error) {
+ return []byte(c.output), nil
+}
+
+func (c *mockSubprocess) isTerminal() bool { return true }
diff --git a/client/go/curl/curl.go b/client/go/curl/curl.go
new file mode 100644
index 00000000000..44c3a0ad2a9
--- /dev/null
+++ b/client/go/curl/curl.go
@@ -0,0 +1,104 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package curl
+
+import (
+ "io"
+ "net/url"
+ "os/exec"
+ "runtime"
+
+ "github.com/kballard/go-shellquote"
+)
+
+type header struct {
+ key string
+ value string
+}
+
+type Command struct {
+ Path string
+ Method string
+ PrivateKey string
+ Certificate string
+ BodyFile string
+ url *url.URL
+ headers []header
+ rawArgs []string
+}
+
+func (c *Command) Args() []string {
+ var args []string
+ if c.PrivateKey != "" {
+ args = append(args, "--key", c.PrivateKey)
+ }
+ if c.Certificate != "" {
+ args = append(args, "--cert", c.Certificate)
+ }
+ if c.Method != "" {
+ args = append(args, "-X", c.Method)
+ }
+ for _, header := range c.headers {
+ args = append(args, "-H", header.key+": "+header.value)
+ }
+ if c.BodyFile != "" {
+ args = append(args, "--data-binary", "@"+c.BodyFile)
+ }
+ args = append(args, c.rawArgs...)
+ args = append(args, c.url.String())
+ return args
+}
+
+func (c *Command) String() string {
+ args := []string{c.Path}
+ args = append(args, c.Args()...)
+ return shellquote.Join(args...)
+}
+
+func (c *Command) Header(key, value string) {
+ c.headers = append(c.headers, header{key: key, value: value})
+}
+
+func (c *Command) Param(key, value string) {
+ query := c.url.Query()
+ query.Set(key, value)
+ c.url.RawQuery = query.Encode()
+}
+
+func (c *Command) Run(stdout, stderr io.Writer) error {
+ cmd := exec.Command(c.Path, c.Args()...)
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+ return cmd.Wait()
+}
+
+func Post(url string) (*Command, error) { return curl("POST", url) }
+
+func Get(url string) (*Command, error) { return curl("", url) }
+
+func RawArgs(url string, args ...string) (*Command, error) {
+ c, err := curl("", url)
+ if err != nil {
+ return nil, err
+ }
+ c.rawArgs = args
+ return c, nil
+}
+
+func curl(method, rawurl string) (*Command, error) {
+ path := "curl"
+ if runtime.GOOS == "windows" {
+ path = "curl.exe"
+ }
+ realURL, err := url.Parse(rawurl)
+ if err != nil {
+ return nil, err
+ }
+ return &Command{
+ Path: path,
+ Method: method,
+ url: realURL,
+ }, nil
+}
diff --git a/client/go/curl/curl_test.go b/client/go/curl/curl_test.go
new file mode 100644
index 00000000000..90bf274f7a2
--- /dev/null
+++ b/client/go/curl/curl_test.go
@@ -0,0 +1,45 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package curl
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPost(t *testing.T) {
+ c, err := Post("https://example.com")
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.PrivateKey = "key.pem"
+ c.Certificate = "cert.pem"
+ c.BodyFile = "file.json"
+ c.Header("Content-Type", "application/json")
+
+ assert.Equal(t, "curl --key key.pem --cert cert.pem -X POST -H 'Content-Type: application/json' --data-binary @file.json https://example.com", c.String())
+}
+
+func TestGet(t *testing.T) {
+ c, err := Get("https://example.com")
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.PrivateKey = "key.pem"
+ c.Certificate = "cert.pem"
+ c.Param("yql", "select * from sources * where title contains 'foo';")
+ c.Param("hits", "5")
+
+ assert.Equal(t, `curl --key key.pem --cert cert.pem https://example.com\?hits=5\&yql=select+%2A+from+sources+%2A+where+title+contains+%27foo%27%3B`, c.String())
+}
+
+func TestRawArgs(t *testing.T) {
+ c, err := RawArgs("https://example.com/search", "-v", "-m", "10", "-H", "foo: bar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.PrivateKey = "key.pem"
+ c.Certificate = "cert.pem"
+
+ assert.Equal(t, `curl --key key.pem --cert cert.pem -v -m 10 -H 'foo: bar' https://example.com/search`, c.String())
+}
diff --git a/client/go/version/version.go b/client/go/version/version.go
new file mode 100644
index 00000000000..27b7da1d0f5
--- /dev/null
+++ b/client/go/version/version.go
@@ -0,0 +1,92 @@
+package version
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Version represents a semantic version number.
+type Version struct {
+ Major int
+ Minor int
+ Patch int
+ Label string
+}
+
+func (v Version) String() string {
+ var sb strings.Builder
+ sb.WriteString(strconv.Itoa(v.Major))
+ sb.WriteRune('.')
+ sb.WriteString(strconv.Itoa(v.Minor))
+ sb.WriteRune('.')
+ sb.WriteString(strconv.Itoa(v.Patch))
+ if v.Label != "" {
+ sb.WriteRune('-')
+ sb.WriteString(v.Label)
+ }
+ return sb.String()
+}
+
+// Compare returns a positive integer if v1 is greater than v2, a negative integer if v1 is less than v2 and zero if they
+// are equal.
+func (v1 Version) Compare(v2 Version) int {
+ result := v1.Major - v2.Major
+ if result != 0 {
+ return result
+ }
+ result = v1.Minor - v2.Minor
+ if result != 0 {
+ return result
+ }
+ result = v1.Patch - v2.Patch
+ if result != 0 {
+ return result
+ }
+ // Version without label always sorts first
+ if v1.Label == "" && v2.Label != "" {
+ return 1
+ }
+ if v1.Label != "" && v2.Label == "" {
+ return -1
+ }
+ if v1.Label > v2.Label {
+ return 1
+ }
+ if v1.Label < v2.Label {
+ return -1
+ }
+ return 0
+}
+
+// Less returns true if v1 is lower than v2.
+func (v1 Version) Less(v2 Version) bool { return v1.Compare(v2) < 0 }
+
+// Parse parses a semantic version number from string s.
+func Parse(s string) (Version, error) {
+ if len(s) > 0 && s[0] == 'v' {
+ s = s[1:] // Trim v prefix
+ }
+ parts := strings.Split(s, ".")
+ if len(parts) != 3 {
+ return Version{}, fmt.Errorf("invalid version number: %s", s)
+ }
+ major, err := strconv.Atoi(parts[0])
+ if err != nil {
+ return Version{}, fmt.Errorf("invalid major version: %s", parts[0])
+ }
+ minor, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return Version{}, fmt.Errorf("invalid minor version: %s", parts[1])
+ }
+ parts2 := strings.SplitN(parts[2], "-", 2)
+ patch, err := strconv.Atoi(parts2[0])
+ if err != nil {
+ return Version{}, fmt.Errorf("invalid patch version: %s", parts[2])
+ }
+ v := Version{Major: major, Minor: minor, Patch: patch}
+ if len(parts2) > 1 {
+ v.Label = parts2[1]
+ }
+ return v, nil
+}
diff --git a/client/go/version/version_test.go b/client/go/version/version_test.go
new file mode 100644
index 00000000000..3602715cca8
--- /dev/null
+++ b/client/go/version/version_test.go
@@ -0,0 +1,66 @@
+package version
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParse(t *testing.T) {
+ _, err := Parse("foo")
+ assert.NotNil(t, err)
+
+ v, err := Parse("1.2.3")
+ assert.Nil(t, err)
+ assert.Equal(t, "1.2.3", v.String())
+
+ v, err = Parse("v4.5.6")
+ assert.Nil(t, err)
+ assert.Equal(t, "4.5.6", v.String())
+
+ v, err = Parse("1.2.3-foo")
+ assert.Nil(t, err)
+ assert.Equal(t, "1.2.3-foo", v.String())
+}
+
+func TestCompare(t *testing.T) {
+ assertComparison(t, "1.2.3", '>', "1.0.0")
+ assertComparison(t, "1.0.0", '<', "1.2.3")
+
+ assertComparison(t, "1.2.3", '=', "1.2.3")
+ assertComparison(t, "1.2.3", '>', "1.2.0")
+ assertComparison(t, "1.2.0", '<', "1.2.3")
+ assertComparison(t, "1.2.3", '>', "1.2.0")
+ assertComparison(t, "1.0.3", '<', "1.2.3")
+
+ assertComparison(t, "1.2.3", '>', "1.1.4")
+ assertComparison(t, "1.1.4", '<', "1.2.3")
+
+ assertComparison(t, "2.0.0", '>', "1.2.3")
+ assertComparison(t, "1.2.3", '<', "2.0.0")
+
+ assertComparison(t, "1.2.3-alpha1", '<', "1.2.3")
+ assertComparison(t, "1.2.3", '>', "1.2.3-alpha1")
+ assertComparison(t, "1.2.3-alpha1", '=', "1.2.3-alpha1")
+ assertComparison(t, "1.2.3-alpha1", '<', "1.2.3-alpha2")
+ assertComparison(t, "1.2.3-alpha2", '>', "1.2.3-alpha1")
+}
+
+func assertComparison(t *testing.T, s1 string, cmp rune, s2 string) {
+ v1, err := Parse(s1)
+ assert.Nil(t, err)
+ v2, err := Parse(s2)
+ assert.Nil(t, err)
+ result := v1.Compare(v2)
+ switch cmp {
+ case '<':
+ assert.True(t, result < 0, fmt.Sprintf("%s is less than %s", v1, v2))
+ case '>':
+ assert.True(t, result > 0, fmt.Sprintf("%s is greater than %s", v1, v2))
+ case '=':
+ assert.True(t, result == 0, fmt.Sprintf("%s is equal to %s", v1, v2))
+ default:
+	t.Fatalf("invalid comparator: %c", cmp)
+ }
+}
diff --git a/client/go/vespa/document.go b/client/go/vespa/document.go
index 7b750b86728..cfac1930199 100644
--- a/client/go/vespa/document.go
+++ b/client/go/vespa/document.go
@@ -1,4 +1,4 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
// vespa document API client
// Author: bratseth
@@ -7,34 +7,36 @@ package vespa
import (
"bytes"
"encoding/json"
+ "io"
"io/ioutil"
"net/http"
"net/url"
"os"
"time"
+ "github.com/vespa-engine/vespa/client/go/curl"
"github.com/vespa-engine/vespa/client/go/util"
)
// Sends the operation given in the file
-func Send(jsonFile string, service *Service) util.OperationResult {
- return sendOperation("", jsonFile, service, anyOperation)
+func Send(jsonFile string, service *Service, curlOutput io.Writer) util.OperationResult {
+ return sendOperation("", jsonFile, service, anyOperation, curlOutput)
}
-func Put(documentId string, jsonFile string, service *Service) util.OperationResult {
- return sendOperation(documentId, jsonFile, service, putOperation)
+func Put(documentId string, jsonFile string, service *Service, curlOutput io.Writer) util.OperationResult {
+ return sendOperation(documentId, jsonFile, service, putOperation, curlOutput)
}
-func Update(documentId string, jsonFile string, service *Service) util.OperationResult {
- return sendOperation(documentId, jsonFile, service, updateOperation)
+func Update(documentId string, jsonFile string, service *Service, curlOutput io.Writer) util.OperationResult {
+ return sendOperation(documentId, jsonFile, service, updateOperation, curlOutput)
}
-func RemoveId(documentId string, service *Service) util.OperationResult {
- return sendOperation(documentId, "", service, removeOperation)
+func RemoveId(documentId string, service *Service, curlOutput io.Writer) util.OperationResult {
+ return sendOperation(documentId, "", service, removeOperation, curlOutput)
}
-func RemoveOperation(jsonFile string, service *Service) util.OperationResult {
- return sendOperation("", jsonFile, service, removeOperation)
+func RemoveOperation(jsonFile string, service *Service, curlOutput io.Writer) util.OperationResult {
+ return sendOperation("", jsonFile, service, removeOperation, curlOutput)
}
const (
@@ -44,7 +46,7 @@ const (
removeOperation string = "remove"
)
-func sendOperation(documentId string, jsonFile string, service *Service, operation string) util.OperationResult {
+func sendOperation(documentId string, jsonFile string, service *Service, operation string, curlOutput io.Writer) util.OperationResult {
header := http.Header{}
header.Add("Content-Type", "application/json")
@@ -93,7 +95,7 @@ func sendOperation(documentId string, jsonFile string, service *Service, operati
Header: header,
Body: ioutil.NopCloser(bytes.NewReader(documentData)),
}
- response, err := service.Do(request, time.Second*60)
+ response, err := serviceDo(service, request, jsonFile, curlOutput)
if response == nil {
return util.Failure("Request failed: " + err.Error())
}
@@ -132,7 +134,28 @@ func operationToHTTPMethod(operation string) string {
panic("Unexpected document operation ''" + operation + "'")
}
-func Get(documentId string, service *Service) util.OperationResult {
+func serviceDo(service *Service, request *http.Request, filename string, curlOutput io.Writer) (*http.Response, error) {
+ cmd, err := curl.RawArgs(request.URL.String())
+ if err != nil {
+ return nil, err
+ }
+ cmd.Method = request.Method
+ for k, vs := range request.Header {
+ for _, v := range vs {
+ cmd.Header(k, v)
+ }
+ }
+ cmd.BodyFile = filename
+ cmd.Certificate = service.TLSOptions.CertificateFile
+ cmd.PrivateKey = service.TLSOptions.PrivateKeyFile
+ out := cmd.String() + "\n"
+ if _, err := io.WriteString(curlOutput, out); err != nil {
+ return nil, err
+ }
+ return service.Do(request, time.Second*60)
+}
+
+func Get(documentId string, service *Service, curlOutput io.Writer) util.OperationResult {
documentPath, documentPathError := IdToURLPath(documentId)
if documentPathError != nil {
return util.Failure("Invalid document id '" + documentId + "': " + documentPathError.Error())
@@ -147,7 +170,7 @@ func Get(documentId string, service *Service) util.OperationResult {
URL: url,
Method: "GET",
}
- response, err := service.Do(request, time.Second*60)
+ response, err := serviceDo(service, request, "", curlOutput)
if response == nil {
return util.Failure("Request failed: " + err.Error())
}
diff --git a/client/go/vespa/target.go b/client/go/vespa/target.go
index 065471d2a1e..69dc876c1c8 100644
--- a/client/go/vespa/target.go
+++ b/client/go/vespa/target.go
@@ -31,9 +31,9 @@ const (
// Service represents a Vespa service.
type Service struct {
- BaseURL string
- Name string
- certificate tls.Certificate
+ BaseURL string
+ Name string
+ TLSOptions TLSOptions
}
// Target represents a Vespa platform, running named Vespa services.
@@ -48,6 +48,13 @@ type Target interface {
DiscoverServices(timeout time.Duration, runID int64) error
}
+// TLSOptions configures the certificate to use for service requests.
+type TLSOptions struct {
+ KeyPair tls.Certificate
+ CertificateFile string
+ PrivateKeyFile string
+}
+
// LogOptions configures the log output to produce when waiting for services.
type LogOptions struct {
Writer io.Writer
@@ -61,8 +68,8 @@ type customTarget struct {
// Do sends request to this service. Any required authentication happens automatically.
func (s *Service) Do(request *http.Request, timeout time.Duration) (*http.Response, error) {
- if s.certificate.Certificate != nil {
- util.ActiveHttpClient.UseCertificate(s.certificate)
+ if s.TLSOptions.KeyPair.Certificate != nil {
+ util.ActiveHttpClient.UseCertificate(s.TLSOptions.KeyPair)
}
return util.HttpDo(request, timeout, s.Description())
}
@@ -83,7 +90,7 @@ func (s *Service) Wait(timeout time.Duration) (int, error) {
return 0, err
}
okFunc := func(status int, response []byte) (bool, error) { return status/100 == 2, nil }
- return wait(okFunc, func() *http.Request { return req }, &s.certificate, timeout)
+ return wait(okFunc, func() *http.Request { return req }, &s.TLSOptions.KeyPair, timeout)
}
func (s *Service) Description() string {
@@ -167,8 +174,8 @@ type cloudTarget struct {
cloudAPI string
targetType string
deployment Deployment
- keyPair tls.Certificate
apiKey []byte
+ tlsOptions TLSOptions
logOptions LogOptions
queryURL string
@@ -185,12 +192,12 @@ func (t *cloudTarget) Service(name string) (*Service, error) {
if t.queryURL == "" {
return nil, fmt.Errorf("service %s not discovered", name)
}
- return &Service{Name: name, BaseURL: t.queryURL, certificate: t.keyPair}, nil
+ return &Service{Name: name, BaseURL: t.queryURL, TLSOptions: t.tlsOptions}, nil
case documentService:
if t.documentURL == "" {
return nil, fmt.Errorf("service %s not discovered", name)
}
- return &Service{Name: name, BaseURL: t.documentURL, certificate: t.keyPair}, nil
+ return &Service{Name: name, BaseURL: t.documentURL, TLSOptions: t.tlsOptions}, nil
}
return nil, fmt.Errorf("unknown service: %s", name)
}
@@ -245,7 +252,7 @@ func (t *cloudTarget) waitForRun(signer *RequestSigner, runID int64, timeout tim
}
return true, nil
}
- _, err = wait(jobSuccessFunc, requestFunc, &t.keyPair, timeout)
+ _, err = wait(jobSuccessFunc, requestFunc, &t.tlsOptions.KeyPair, timeout)
return err
}
@@ -298,7 +305,7 @@ func (t *cloudTarget) discoverEndpoints(signer *RequestSigner, timeout time.Dura
endpointURL = resp.Endpoints[0].URL
return true, nil
}
- if _, err = wait(endpointFunc, func() *http.Request { return req }, &t.keyPair, timeout); err != nil {
+ if _, err = wait(endpointFunc, func() *http.Request { return req }, &t.tlsOptions.KeyPair, timeout); err != nil {
return err
}
if endpointURL == "" {
@@ -320,13 +327,13 @@ func CustomTarget(baseURL string) Target {
}
// CloudTarget creates a Target for the Vespa Cloud platform.
-func CloudTarget(deployment Deployment, keyPair tls.Certificate, apiKey []byte, logOptions LogOptions) Target {
+func CloudTarget(deployment Deployment, apiKey []byte, tlsOptions TLSOptions, logOptions LogOptions) Target {
return &cloudTarget{
cloudAPI: defaultCloudAPI,
targetType: cloudTargetType,
deployment: deployment,
- keyPair: keyPair,
apiKey: apiKey,
+ tlsOptions: tlsOptions,
logOptions: logOptions,
}
}
diff --git a/client/go/vespa/target_test.go b/client/go/vespa/target_test.go
index b524f73c5d3..31f145f0db3 100644
--- a/client/go/vespa/target_test.go
+++ b/client/go/vespa/target_test.go
@@ -106,8 +106,8 @@ func TestCloudTargetWait(t *testing.T) {
Application: ApplicationID{Tenant: "t1", Application: "a1", Instance: "i1"},
Zone: ZoneID{Environment: "dev", Region: "us-north-1"},
},
- x509KeyPair,
apiKey,
+ TLSOptions{KeyPair: x509KeyPair},
LogOptions{Writer: &logWriter})
if ct, ok := target.(*cloudTarget); ok {
ct.cloudAPI = srv.URL
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 9e417776c73..9ee36831d6a 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -10,7 +10,6 @@ import com.yahoo.config.provision.AthenzDomain;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.HostName;
-import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.Zone;
import java.io.File;
@@ -70,7 +69,6 @@ public interface ModelContext {
* - Remove all flag data files from hosted-feature-flag repository
*/
interface FeatureFlags {
- @ModelFeatureFlag(owners = {"jonmv"}, removeAfter = "7.457") default Optional<NodeResources> dedicatedClusterControllerFlavor() { return Optional.of(new NodeResources(0.25, 1, 10, 0.3, NodeResources.DiskSpeed.any, NodeResources.StorageType.any)); }
@ModelFeatureFlag(owners = {"baldersheim"}, comment = "Revisit in May or June 2021") default double defaultTermwiseLimit() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"vekterli"}) default boolean useThreePhaseUpdates() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}, comment = "Select sequencer type use while feeding") default String feedSequencerType() { throw new UnsupportedOperationException("TODO specify default value"); }
@@ -93,10 +91,9 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"bjorncs", "tokle"}) default List<String> allowedAthenzProxyIdentities() { return List.of(); }
@ModelFeatureFlag(owners = {"vekterli"}) default int maxActivationInhibitedOutOfSyncGroups() { return 0; }
@ModelFeatureFlag(owners = {"hmusum"}) default String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return ""; }
- @ModelFeatureFlag(owners = {"tokle", "bjorncs"}, removeAfter = "7.450") default boolean enableCustomAclMapping() { return true; }
@ModelFeatureFlag(owners = {"geirst", "vekterli"}) default int numDistributorStripes() { return 0; }
@ModelFeatureFlag(owners = {"arnej"}) default boolean requireConnectivityCheck() { return true; }
- @ModelFeatureFlag(owners = {"hmusum"}) default boolean throwIfResourceLimitsSpecified() { return true; }
+ @ModelFeatureFlag(owners = {"hmusum"}, removeAfter = "7.470") default boolean throwIfResourceLimitsSpecified() { return true; }
@ModelFeatureFlag(owners = {"hmusum"}) default double resourceLimitDisk() { return 0.8; }
@ModelFeatureFlag(owners = {"hmusum"}) default double resourceLimitMemory() { return 0.8; }
@ModelFeatureFlag(owners = {"geirst", "vekterli"}) default double minNodeRatioPerGroup() { return 0.0; }
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
index 461b9109dfa..46b785ccf42 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
@@ -20,7 +20,6 @@ import com.yahoo.vespa.model.AbstractService;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -101,6 +100,12 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
remaining.forEach((name, rank) -> {
if (areDependenciesReady(rank, rankProfileRegistry)) ready.add(rank);
});
+ if (ready.isEmpty() && ! deployProperties.featureFlags().enforceRankProfileInheritance()) {
+ // Dirty fallback to allow incorrect rankprofile inheritance to pass for now.
+ // We then handle them one by one.
+ // TODO remove ASAP
+ ready.add(remaining.values().iterator().next());
+ }
processRankProfiles(ready, queryProfiles, importedModels, search, attributeFields, deployProperties, executor);
ready.forEach(rank -> remaining.remove(rank.getName()));
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
index dc5e6c9baee..dff47515957 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
@@ -1,7 +1,6 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.content;
-import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import com.yahoo.vespa.model.content.cluster.DomResourceLimitsBuilder;
@@ -37,20 +36,18 @@ public class ClusterResourceLimits {
private final boolean enableFeedBlockInDistributor;
private final boolean hostedVespa;
- private final boolean throwIfSpecified;
- private final DeployLogger deployLogger;
private final double resourceLimitDisk;
private final double resourceLimitMemory;
private ResourceLimits.Builder ctrlBuilder = new ResourceLimits.Builder();
private ResourceLimits.Builder nodeBuilder = new ResourceLimits.Builder();
- public Builder(boolean enableFeedBlockInDistributor, boolean hostedVespa, boolean throwIfSpecified,
- DeployLogger deployLogger, double resourceLimitDisk, double resourceLimitMemory) {
+ public Builder(boolean enableFeedBlockInDistributor,
+ boolean hostedVespa,
+ double resourceLimitDisk,
+ double resourceLimitMemory) {
this.enableFeedBlockInDistributor = enableFeedBlockInDistributor;
this.hostedVespa = hostedVespa;
- this.throwIfSpecified = throwIfSpecified;
- this.deployLogger = deployLogger;
this.resourceLimitDisk = resourceLimitDisk;
this.resourceLimitMemory = resourceLimitMemory;
verifyLimits(resourceLimitDisk, resourceLimitMemory);
@@ -67,7 +64,7 @@ public class ClusterResourceLimits {
private ResourceLimits.Builder createBuilder(ModelElement element) {
return element == null
? new ResourceLimits.Builder()
- : DomResourceLimitsBuilder.createBuilder(element, hostedVespa, throwIfSpecified, deployLogger);
+ : DomResourceLimitsBuilder.createBuilder(element, hostedVespa);
}
public void setClusterControllerBuilder(ResourceLimits.Builder builder) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Distributor.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Distributor.java
index f640af71a59..f84e5c6c3a7 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/Distributor.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Distributor.java
@@ -18,7 +18,6 @@ import org.w3c.dom.Element;
public class Distributor extends ContentNode implements StorDistributormanagerConfig.Producer {
PersistenceEngine provider;
- private final int numDistributorStripesFlag;
public static class Builder extends VespaDomBuilder.DomConfigProducerBuilder<Distributor> {
ModelElement clusterXml;
@@ -41,7 +40,6 @@ public class Distributor extends ContentNode implements StorDistributormanagerCo
StorageNode.rootFolder + parent.getClusterName() + "/distributor/" + distributionKey, distributionKey);
this.provider = provider;
- this.numDistributorStripesFlag = properties.featureFlags().numDistributorStripes();
if (distributorBasePort != null) {
setBasePort(distributorBasePort);
@@ -49,23 +47,21 @@ public class Distributor extends ContentNode implements StorDistributormanagerCo
}
private int tuneNumDistributorStripes() {
- if (numDistributorStripesFlag == -1) {
- if (getHostResource() != null) {
- int cores = (int)getHostResource().realResources().vcpu();
- // This should match the calculation used when node flavor is not available:
- // storage/src/vespa/storage/common/bucket_stripe_utils.cpp
- if (cores <= 16) {
- return 1;
- } else if (cores <= 64) {
- return 2;
- } else {
- return 4;
- }
- } else {
+ if (getHostResource() != null &&
+ !getHostResource().realResources().isUnspecified()) {
+ int cores = (int)getHostResource().realResources().vcpu();
+ // This should match the calculation used when node flavor is not available:
+ // storage/src/vespa/storage/common/bucket_stripe_utils.cpp
+ if (cores <= 16) {
return 1;
+ } else if (cores <= 64) {
+ return 2;
+ } else {
+ return 4;
}
+ } else {
+ return 0;
}
- return numDistributorStripesFlag;
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index 1f443da51db..a746dc36540 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -123,8 +123,6 @@ public class ContentCluster extends AbstractConfigProducer<AbstractConfigProduce
boolean enableFeedBlockInDistributor = deployState.getProperties().featureFlags().enableFeedBlockInDistributor();
var resourceLimits = new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
stateIsHosted(deployState),
- deployState.featureFlags().throwIfResourceLimitsSpecified(),
- deployState.getDeployLogger(),
deployState.featureFlags().resourceLimitDisk(),
deployState.featureFlags().resourceLimitMemory())
.build(contentElement);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
index 32b0f5b6477..cb417c2d559 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
@@ -1,12 +1,9 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.content.cluster;
-import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import com.yahoo.vespa.model.content.ResourceLimits;
-import java.util.logging.Level;
-
/**
* Builder for feed block resource limits.
*
@@ -14,23 +11,13 @@ import java.util.logging.Level;
*/
public class DomResourceLimitsBuilder {
- public static ResourceLimits.Builder createBuilder(ModelElement contentXml,
- boolean hostedVespa,
- boolean throwIfSpecified,
- DeployLogger deployLogger) {
+ public static ResourceLimits.Builder createBuilder(ModelElement contentXml, boolean hostedVespa) {
ResourceLimits.Builder builder = new ResourceLimits.Builder();
ModelElement resourceLimits = contentXml.child("resource-limits");
if (resourceLimits == null) { return builder; }
- if (hostedVespa) {
- String message = "Element '" + resourceLimits + "' is not allowed to be set";
- if (throwIfSpecified) throw new IllegalArgumentException(message);
-
-
- deployLogger.logApplicationPackage(Level.WARNING, message);
- // TODO: return (default values will then be used). Cannot be done now as an app needs current behavior
- //return builder;
- }
+ if (hostedVespa)
+ throw new IllegalArgumentException("Element '" + resourceLimits + "' is not allowed to be set");
if (resourceLimits.child("disk") != null) {
builder.setDiskLimit(resourceLimits.childAsDouble("disk"));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
index 7689fffe440..de5eaa2278e 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
@@ -45,7 +45,7 @@ public class FileSender implements Serializable {
public static void send(FileReference fileReference, Collection<? extends AbstractService> services) {
if (services.isEmpty()) {
throw new IllegalStateException("No service instances. Probably a standalone cluster setting up <nodes> " +
- "using 'count' instead of <node> tags.");
+ "using 'count' instead of <node> tags.");
}
for (AbstractService service : services) {
@@ -57,8 +57,7 @@ public class FileSender implements Serializable {
/**
* Sends all user configured files for a producer to all given services.
*/
- public <PRODUCER extends AbstractConfigProducer<?>>
- void sendUserConfiguredFiles(PRODUCER producer) {
+ public <PRODUCER extends AbstractConfigProducer<?>> void sendUserConfiguredFiles(PRODUCER producer) {
if (services.isEmpty())
return;
@@ -69,7 +68,7 @@ public class FileSender implements Serializable {
try {
sendUserConfiguredFiles(builder, sentFiles, key);
} catch (IllegalArgumentException e) {
- throw new IllegalArgumentException("Unable to send files for " + key, e);
+ throw new IllegalArgumentException("Unable to send file specified in " + key, e);
}
}
}
@@ -78,7 +77,8 @@ public class FileSender implements Serializable {
ConfigDefinition configDefinition = builder.getConfigDefinition();
if (configDefinition == null) {
// TODO: throw new IllegalArgumentException("Not able to find config definition for " + builder);
- logger.logApplicationPackage(Level.FINE, "Not able to find config definition for " + key + ". Will not send files for this config");
+ logger.logApplicationPackage(Level.FINE, "Not able to find config definition for " + key +
+ ". Will not send files for this config");
return;
}
// Inspect fields at this level
@@ -133,7 +133,7 @@ public class FileSender implements Serializable {
for (String name : entries.keySet()) {
ConfigPayloadBuilder fileEntry = builder.getObject(name);
if (fileEntry.getValue() == null) {
- throw new IllegalArgumentException("Unable to send file for field '" + name + "'. Invalid config value " + fileEntry.getValue());
+ throw new IllegalArgumentException("Unable to send file for field '" + name + "': Invalid config value " + fileEntry.getValue());
}
sendFileEntry(fileEntry, sentFiles);
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
index c0b1a64bace..f7aed7099d4 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
@@ -1,14 +1,10 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.content;
-import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.ModelContext;
-import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.deploy.TestProperties;
-import com.yahoo.searchdefinition.derived.TestableDeployLogger;
import com.yahoo.text.XML;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
-import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
@@ -28,7 +24,6 @@ public class ClusterResourceLimitsTest {
private static class Fixture {
private final boolean enableFeedBlockInDistributor;
private final boolean hostedVespa;
- private final boolean throwIfSpecified;
private final ResourceLimits.Builder ctrlBuilder = new ResourceLimits.Builder();
private final ResourceLimits.Builder nodeBuilder = new ResourceLimits.Builder();
@@ -37,13 +32,12 @@ public class ClusterResourceLimitsTest {
}
public Fixture(boolean enableFeedBlockInDistributor) {
- this(enableFeedBlockInDistributor, false, false);
+ this(enableFeedBlockInDistributor, false);
}
- public Fixture(boolean enableFeedBlockInDistributor, boolean hostedVespa, boolean throwIfSpecified) {
+ public Fixture(boolean enableFeedBlockInDistributor, boolean hostedVespa) {
this.enableFeedBlockInDistributor = enableFeedBlockInDistributor;
this.hostedVespa = hostedVespa;
- this.throwIfSpecified = throwIfSpecified;
}
public Fixture ctrlDisk(double limit) {
@@ -66,8 +60,6 @@ public class ClusterResourceLimitsTest {
ModelContext.FeatureFlags featureFlags = new TestProperties();
var builder = new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
hostedVespa,
- throwIfSpecified,
- new BaseDeployLogger(),
featureFlags.resourceLimitDisk(),
featureFlags.resourceLimitMemory());
builder.setClusterControllerBuilder(ctrlBuilder);
@@ -144,48 +136,18 @@ public class ClusterResourceLimitsTest {
}
@Test
- @Ignore // TODO: Remove hosted_limits_are_used_if_app_is_allowed_to_set_limits and enable this when code is fixed to do so
- public void hosted_log_when_resource_limits_are_specified() {
- TestableDeployLogger logger = new TestableDeployLogger();
-
- var limits = hostedBuildAndLogIfSpecified(logger);
- assertEquals(1, logger.warnings.size());
- assertEquals("Element 'resource-limits' is not allowed to be set", logger.warnings.get(0));
-
- // Verify that default limits are used
- assertLimits(0.8, 0.8, limits.getClusterControllerLimits());
- assertLimits(0.9, 0.9, limits.getContentNodeLimits());
- }
-
- @Test
public void hosted_exception_is_thrown_when_resource_limits_are_specified() {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(containsString("Element 'resource-limits' is not allowed to be set"));
- hostedBuildAndThrowIfSpecified();
- }
-
- @Test
- // TODO: Remove this and enable hosted_log_when_resource_limits_are_specified when code is fixed to do so
- public void hosted_limits_are_used_if_app_is_allowed_to_set_limits() {
- TestableDeployLogger logger = new TestableDeployLogger();
-
- var limits = hostedBuildAndLogIfSpecified(logger);
- assertEquals(1, logger.warnings.size());
- assertEquals("Element 'resource-limits' is not allowed to be set", logger.warnings.get(0));
-
- // Verify that limits in XML are used
- assertLimits(0.8, 0.92, limits.getClusterControllerLimits());
- assertLimits(0.9, 0.96, limits.getContentNodeLimits());
+ hostedBuild();
}
@Test
public void hosted_limits_from_feature_flag_are_used() {
- TestableDeployLogger logger = new TestableDeployLogger();
-
TestProperties featureFlags = new TestProperties();
featureFlags.setResourceLimitDisk(0.85);
featureFlags.setResourceLimitMemory(0.90);
- var limits = hostedBuild(false, logger, featureFlags, false);
+ var limits = hostedBuild(featureFlags, false);
// Verify that limits from feature flags are used
assertLimits(0.85, 0.90, limits.getClusterControllerLimits());
@@ -194,35 +156,23 @@ public class ClusterResourceLimitsTest {
@Test
public void exception_is_thrown_when_resource_limits_are_out_of_range() {
- TestableDeployLogger logger = new TestableDeployLogger();
-
TestProperties featureFlags = new TestProperties();
featureFlags.setResourceLimitDisk(1.1);
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(containsString("Resource limit for disk is set to illegal value 1.1, but must be in the range [0.0, 1.0]"));
- hostedBuild(false, logger, featureFlags, false);
+ hostedBuild(featureFlags, false);
featureFlags = new TestProperties();
featureFlags.setResourceLimitDisk(-0.1);
expectedException.expectMessage(containsString("Resource limit for disk is set to illegal value -0.1, but must be in the range [0.0, 1.0]"));
- hostedBuild(false, logger, featureFlags, false);
- }
-
- private void hostedBuildAndThrowIfSpecified() {
- hostedBuild(true, new TestableDeployLogger(), new TestProperties(), true);
- }
-
- private ClusterResourceLimits hostedBuildAndLogIfSpecified(DeployLogger deployLogger) {
- return hostedBuild(false, deployLogger);
+ hostedBuild(featureFlags, false);
}
- private ClusterResourceLimits hostedBuild(boolean throwIfSpecified, DeployLogger deployLogger) {
- return hostedBuild(throwIfSpecified, deployLogger, new TestProperties(), true);
+ private ClusterResourceLimits hostedBuild() {
+ return hostedBuild(new TestProperties(), true);
}
- private ClusterResourceLimits hostedBuild(boolean throwIfSpecified,
- DeployLogger deployLogger,
- ModelContext.FeatureFlags featureFlags,
+ private ClusterResourceLimits hostedBuild(ModelContext.FeatureFlags featureFlags,
boolean limitsInXml) {
Document clusterXml = XML.getDocument("<cluster id=\"test\">" +
" <tuning>\n" +
@@ -237,8 +187,6 @@ public class ClusterResourceLimitsTest {
ClusterResourceLimits.Builder builder = new ClusterResourceLimits.Builder(true,
true,
- throwIfSpecified,
- deployLogger,
featureFlags.resourceLimitDisk(),
featureFlags.resourceLimitMemory());
return builder.build(new ModelElement((limitsInXml ? clusterXml : noLimitsXml).getDocumentElement()));
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
index c7c02c581f9..0b686db6801 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
@@ -1057,30 +1057,22 @@ public class ContentClusterTest extends ContentBaseTest {
assertEquals(2, resolveMaxInhibitedGroupsConfigWithFeatureFlag(2));
}
- private int resolveNumDistributorStripesConfigWithFeatureFlag(TestProperties props, Optional<Flavor> flavor) throws Exception {
- var cc = createOneNodeCluster(props, flavor);
+ private int resolveNumDistributorStripesConfig(Optional<Flavor> flavor) throws Exception {
+ var cc = createOneNodeCluster(new TestProperties(), flavor);
var builder = new StorDistributormanagerConfig.Builder();
cc.getDistributorNodes().getChildren().get("0").getConfig(builder);
return (new StorDistributormanagerConfig(builder)).num_distributor_stripes();
}
- private int resolveNumDistributorStripesConfigWithFeatureFlag(int numStripes) throws Exception {
- return resolveNumDistributorStripesConfigWithFeatureFlag(new TestProperties().setNumDistributorStripes(numStripes), Optional.empty());
- }
-
private int resolveTunedNumDistributorStripesConfig(int numCpuCores) throws Exception {
var flavor = new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().name("test").minCpuCores(numCpuCores)));
- return resolveNumDistributorStripesConfigWithFeatureFlag(new TestProperties().setNumDistributorStripes(-1),
- Optional.of(flavor));
+ return resolveNumDistributorStripesConfig(Optional.of(flavor));
}
@Test
- public void num_distributor_stripes_config_controlled_by_properties() throws Exception {
- assertEquals(0, resolveNumDistributorStripesConfigWithFeatureFlag(new TestProperties(), Optional.empty()));
- assertEquals(0, resolveNumDistributorStripesConfigWithFeatureFlag(0));
- assertEquals(1, resolveNumDistributorStripesConfigWithFeatureFlag(1));
- assertEquals(1, resolveNumDistributorStripesConfigWithFeatureFlag(-1));
- assertEquals(4, resolveNumDistributorStripesConfigWithFeatureFlag(4));
+ public void num_distributor_stripes_config_defaults_to_zero() throws Exception {
+ // This triggers tuning when starting the distributor process, based on CPU core sampling on the node.
+ assertEquals(0, resolveNumDistributorStripesConfig(Optional.empty()));
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
index cc1b96dc588..e5b757e1514 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.model.content;
import com.yahoo.config.model.api.ModelContext;
-import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.config.model.test.MockRoot;
@@ -28,8 +27,6 @@ public class FleetControllerClusterTest {
clusterElement,
new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
false,
- false,
- new BaseDeployLogger(),
featureFlags.resourceLimitDisk(),
featureFlags.resourceLimitMemory())
.build(clusterElement).getClusterControllerLimits())
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
index 446ddf0560b..e6c9bc2175b 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.config.protocol;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
-import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.jrt.DataValue;
import com.yahoo.jrt.Request;
import com.yahoo.jrt.StringValue;
@@ -11,7 +10,7 @@ import com.yahoo.jrt.Value;
import com.yahoo.text.Utf8Array;
import com.yahoo.vespa.config.ConfigKey;
import com.yahoo.vespa.config.ErrorCode;
-import com.yahoo.vespa.config.util.ConfigUtils;
+import com.yahoo.vespa.config.PayloadChecksums;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -26,8 +25,7 @@ import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64;
* The V3 config protocol implemented on the server side. The V3 protocol uses 2 fields:
*
* * A metadata field containing json data describing config generation, md5 and compression info
- * * A data field containing compressed or uncompressed json config payload. This field can be empty if the payload
- * has not changed since last request, triggering an optimization at the client where the previous payload is used instead.
+ * * A data field containing compressed or uncompressed json config payload
*
* The implementation of addOkResponse is optimized for doing as little copying of payload data as possible, ensuring
* that we get a lower memory footprint.
@@ -74,8 +72,6 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest {
@Override
public void addOkResponse(Payload payload, long generation, boolean applyOnRestart, PayloadChecksums payloadChecksums) {
this.applyOnRestart = applyOnRestart;
- boolean changedConfig = !payloadChecksums.equals(getRequestConfigChecksums());
- boolean changedConfigAndNewGeneration = changedConfig && ConfigUtils.isGenerationNewer(generation, getRequestGeneration());
Payload responsePayload = payload.withCompression(getCompressionType());
ByteArrayOutputStream byteArrayOutputStream = new NoCopyByteArrayOutputStream(4096);
try {
@@ -93,10 +89,6 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest {
throw new RuntimeException("Payload is null for ' " + this + ", not able to create response");
}
CompressionInfo compressionInfo = responsePayload.getCompressionInfo();
- // If payload is not being sent, we must adjust compression info to avoid client confusion.
- if (!changedConfigAndNewGeneration) {
- compressionInfo = CompressionInfo.create(compressionInfo.getCompressionType(), 0);
- }
compressionInfo.serialize(jsonGenerator);
jsonGenerator.writeEndObject();
@@ -106,17 +98,13 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest {
throw new IllegalArgumentException("Could not add OK response for " + this);
}
request.returnValues().add(createResponseValue(byteArrayOutputStream));
- if (changedConfigAndNewGeneration) {
- ByteBuffer buf = responsePayload.getData().wrap();
- if (buf.hasArray() && buf.remaining() == buf.array().length) {
- request.returnValues().add(new DataValue(buf.array()));
- } else {
- byte [] dst = new byte[buf.remaining()];
- buf.get(dst);
- request.returnValues().add(new DataValue(dst));
- }
+ ByteBuffer buf = responsePayload.getData().wrap();
+ if (buf.hasArray() && buf.remaining() == buf.array().length) {
+ request.returnValues().add(new DataValue(buf.array()));
} else {
- request.returnValues().add(new DataValue(new byte[0]));
+ byte[] dst = new byte[buf.remaining()];
+ buf.get(dst);
+ request.returnValues().add(new DataValue(dst));
}
}
diff --git a/configgen/CMakeLists.txt b/configgen/CMakeLists.txt
deleted file mode 100644
index 107037f8008..00000000000
--- a/configgen/CMakeLists.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install_java_artifact(configgen)
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java b/configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java
index c2539b53b28..cc234ea51d7 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java
@@ -98,8 +98,12 @@ public class CppClassBuilder implements ClassBuilder {
String newHeader = headerWriter.toString();
String newBody = bodyWriter.toString();
- File headerFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "h"));
- File bodyFile = new File(rootDir, relativePathUnderRoot + "/" + getFileName(root, "cpp"));
+ String prefix = "";
+ if (relativePathUnderRoot != null) {
+ prefix = relativePathUnderRoot + "/";
+ }
+ File headerFile = new File(rootDir, prefix + getFileName(root, "h"));
+ File bodyFile = new File(rootDir, prefix + getFileName(root, "cpp"));
writeFile(headerFile, newHeader);
writeFile(bodyFile, newBody);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index 8aebd42b53c..89987891c61 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -183,7 +183,6 @@ public class ModelContextImpl implements ModelContext {
private final int maxConcurrentMergesPerContentNode;
private final int maxMergeQueueSize;
private final int largeRankExpressionLimit;
- private final boolean throwIfResourceLimitsSpecified;
private final double resourceLimitDisk;
private final double resourceLimitMemory;
private final double minNodeRatioPerGroup;
@@ -211,7 +210,6 @@ public class ModelContextImpl implements ModelContext {
this.requireConnectivityCheck = flagValue(source, appId, Flags.REQUIRE_CONNECTIVITY_CHECK);
this.maxConcurrentMergesPerContentNode = flagValue(source, appId, Flags.MAX_CONCURRENT_MERGES_PER_NODE);
this.maxMergeQueueSize = flagValue(source, appId, Flags.MAX_MERGE_QUEUE_SIZE);
- this.throwIfResourceLimitsSpecified = flagValue(source, appId, Flags.THROW_EXCEPTION_IF_RESOURCE_LIMITS_SPECIFIED);
this.resourceLimitDisk = flagValue(source, appId, PermanentFlags.RESOURCE_LIMIT_DISK);
this.resourceLimitMemory = flagValue(source, appId, PermanentFlags.RESOURCE_LIMIT_MEMORY);
this.minNodeRatioPerGroup = flagValue(source, appId, Flags.MIN_NODE_RATIO_PER_GROUP);
@@ -241,7 +239,7 @@ public class ModelContextImpl implements ModelContext {
@Override public boolean requireConnectivityCheck() { return requireConnectivityCheck; }
@Override public int maxConcurrentMergesPerNode() { return maxConcurrentMergesPerContentNode; }
@Override public int maxMergeQueueSize() { return maxMergeQueueSize; }
- @Override public boolean throwIfResourceLimitsSpecified() { return throwIfResourceLimitsSpecified; }
+ @Override public boolean throwIfResourceLimitsSpecified() { return true; }
@Override public double resourceLimitDisk() { return resourceLimitDisk; }
@Override public double resourceLimitMemory() { return resourceLimitMemory; }
@Override public double minNodeRatioPerGroup() { return minNodeRatioPerGroup; }
diff --git a/container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java b/container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java
index 087be0f17c5..0c4709e4a2c 100644
--- a/container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java
+++ b/container-core/src/main/java/com/yahoo/container/core/config/testutil/HandlersConfigurerTestWrapper.java
@@ -17,6 +17,7 @@ import com.yahoo.jdisc.Metric;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.test.MockMetric;
import com.yahoo.language.Linguistics;
+import com.yahoo.language.process.Encoder;
import com.yahoo.language.simple.SimpleLinguistics;
import java.io.File;
@@ -140,6 +141,7 @@ public class HandlersConfigurerTestWrapper {
protected void configure() {
// Needed by e.g. SearchHandler
bind(Linguistics.class).to(SimpleLinguistics.class).in(Scopes.SINGLETON);
+ bind(Encoder.class).to(Encoder.FailingEncoder.class).in(Scopes.SINGLETON);
bind(ContainerThreadPool.class).to(SimpleContainerThreadpool.class);
bind(Metric.class).to(MockMetric.class);
}
diff --git a/container-disc/pom.xml b/container-disc/pom.xml
index 5debf6c9c02..9b6ccd93a41 100644
--- a/container-disc/pom.xml
+++ b/container-disc/pom.xml
@@ -167,7 +167,6 @@
<buildLegacyVespaPlatformBundle>true</buildLegacyVespaPlatformBundle>
<discPreInstallBundle>
<!-- Vespa bundles -->
- configgen.jar,
config-bundle-jar-with-dependencies.jar,
configdefinitions-jar-with-dependencies.jar,
container-search-and-docproc-jar-with-dependencies.jar,
@@ -178,7 +177,6 @@
vespaclient-container-plugin-jar-with-dependencies.jar,
vespa-athenz-jar-with-dependencies.jar,
security-utils-jar-with-dependencies.jar,
- defaults-jar-with-dependencies.jar,
zkfacade-jar-with-dependencies.jar,
zookeeper-server-jar-with-dependencies.jar,
<!-- Apache http client repackaged as bundle -->
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java b/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
index 9fe3728dc2c..853224a5b91 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
@@ -40,8 +40,10 @@ import com.yahoo.log.LogSetup;
import com.yahoo.messagebus.network.rpc.SlobrokConfigSubscriber;
import com.yahoo.net.HostName;
import com.yahoo.vespa.config.ConfigKey;
+import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.yolean.Exceptions;
+import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.IdentityHashMap;
@@ -400,9 +402,17 @@ public final class ConfiguredApplication implements Application {
shutdownDeadlineExecutor = new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("Shutdown deadline timer"));
shutdownDeadlineExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
long delayMillis = 50 * 1000;
- shutdownDeadlineExecutor.schedule(() -> com.yahoo.protect.Process.logAndDie(
- "Timed out waiting for application shutdown. Please check that all your request handlers " +
- "drain their request content channels.", true), delayMillis, TimeUnit.MILLISECONDS);
+ shutdownDeadlineExecutor.schedule(() -> {
+ String heapDumpName = Defaults.getDefaults().underVespaHome("var/crash/java_pid.") + ProcessHandle.current().pid() + ".hprof";
+ try {
+ com.yahoo.protect.Process.dumpHeap(heapDumpName, true);
+ } catch (IOException e) {
+ log.log(Level.WARNING, "Failed writing heap dump:", e);
+ }
+ com.yahoo.protect.Process.logAndDie(
+ "Timed out waiting for application shutdown. Please check that all your request handlers " +
+ "drain their request content channels.", true);
+ }, delayMillis, TimeUnit.MILLISECONDS);
}
private static void addHandlerBindings(ContainerBuilder builder,
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index b577660c1b9..7016eff3185 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -1786,6 +1786,27 @@
],
"fields": []
},
+ "com.yahoo.search.Query$Builder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.search.Query$Builder setRequest(java.lang.String)",
+ "public com.yahoo.search.Query$Builder setRequest(com.yahoo.container.jdisc.HttpRequest)",
+ "public com.yahoo.container.jdisc.HttpRequest getRequest()",
+ "public com.yahoo.search.Query$Builder setRequestMap(java.util.Map)",
+ "public java.util.Map getRequestMap()",
+ "public com.yahoo.search.Query$Builder setQueryProfile(com.yahoo.search.query.profile.compiled.CompiledQueryProfile)",
+ "public com.yahoo.search.query.profile.compiled.CompiledQueryProfile getQueryProfile()",
+ "public com.yahoo.search.Query$Builder setEncoder(com.yahoo.language.process.Encoder)",
+ "public com.yahoo.language.process.Encoder getEncoder()",
+ "public com.yahoo.search.Query build()"
+ ],
+ "fields": []
+ },
"com.yahoo.search.Query$Type": {
"superClass": "java.lang.Enum",
"interfaces": [],
@@ -4237,6 +4258,7 @@
"public"
],
"methods": [
+ "public void <init>(com.yahoo.statistics.Statistics, com.yahoo.jdisc.Metric, com.yahoo.container.handler.threadpool.ContainerThreadPool, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.container.core.ContainerHttpConfig, com.yahoo.language.process.Encoder, com.yahoo.search.searchchain.ExecutionFactory)",
"public void <init>(com.yahoo.statistics.Statistics, com.yahoo.jdisc.Metric, com.yahoo.container.handler.threadpool.ContainerThreadPool, com.yahoo.container.logging.AccessLog, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.container.core.ContainerHttpConfig, com.yahoo.search.searchchain.ExecutionFactory)",
"public void <init>(com.yahoo.statistics.Statistics, com.yahoo.jdisc.Metric, java.util.concurrent.Executor, com.yahoo.container.logging.AccessLog, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.container.core.ContainerHttpConfig, com.yahoo.search.searchchain.ExecutionFactory)",
"public void <init>(com.yahoo.statistics.Statistics, com.yahoo.jdisc.Metric, java.util.concurrent.Executor, com.yahoo.container.logging.AccessLog, com.yahoo.search.query.profile.config.QueryProfilesConfig, com.yahoo.container.core.ContainerHttpConfig, com.yahoo.search.searchchain.ExecutionFactory)",
@@ -5863,6 +5885,7 @@
],
"methods": [
"public void <init>(com.yahoo.search.query.profile.compiled.CompiledQueryProfile)",
+ "public void <init>(com.yahoo.search.query.profile.compiled.CompiledQueryProfile, com.yahoo.language.process.Encoder)",
"public com.yahoo.search.query.profile.compiled.CompiledQueryProfile getQueryProfile()",
"public java.lang.Object get(com.yahoo.processing.request.CompoundName, java.util.Map, com.yahoo.processing.request.Properties)",
"public void set(com.yahoo.processing.request.CompoundName, java.lang.Object, java.util.Map)",
@@ -6229,6 +6252,18 @@
],
"fields": []
},
+ "com.yahoo.search.query.profile.types.ConversionContext": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.language.process.Encoder, java.util.Map)",
+ "public static com.yahoo.search.query.profile.types.ConversionContext empty()"
+ ],
+ "fields": []
+ },
"com.yahoo.search.query.profile.types.FieldDescription": {
"superClass": "java.lang.Object",
"interfaces": [
@@ -6276,7 +6311,7 @@
"public abstract java.lang.String toString()",
"public abstract java.lang.String toInstanceDescription()",
"public abstract java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.QueryProfileRegistry)",
- "public abstract java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry)",
+ "public abstract java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.types.ConversionContext)",
"public com.yahoo.tensor.TensorType asTensorType()",
"public static com.yahoo.search.query.profile.types.FieldType fromString(java.lang.String, com.yahoo.search.query.profile.types.QueryProfileTypeRegistry)",
"public static boolean isLegalFieldValue(java.lang.Object)"
@@ -6303,7 +6338,7 @@
"public java.lang.String stringValue()",
"public java.lang.String toString()",
"public java.lang.String toInstanceDescription()",
- "public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry)",
+ "public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.types.ConversionContext)",
"public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.QueryProfileRegistry)",
"public int hashCode()",
"public boolean equals(java.lang.Object)"
@@ -6323,7 +6358,7 @@
"public java.lang.String toString()",
"public java.lang.String toInstanceDescription()",
"public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.QueryProfileRegistry)",
- "public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry)"
+ "public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.types.ConversionContext)"
],
"fields": []
},
@@ -6342,11 +6377,11 @@
"public java.lang.String stringValue()",
"public java.lang.String toString()",
"public java.lang.String toInstanceDescription()",
- "public com.yahoo.search.query.profile.compiled.CompiledQueryProfile convertFrom(java.lang.Object, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry)",
+ "public com.yahoo.search.query.profile.compiled.CompiledQueryProfile convertFrom(java.lang.Object, com.yahoo.search.query.profile.types.ConversionContext)",
"public com.yahoo.search.query.profile.QueryProfile convertFrom(java.lang.Object, com.yahoo.search.query.profile.QueryProfileRegistry)",
"public int hashCode()",
"public boolean equals(java.lang.Object)",
- "public bridge synthetic java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry)",
+ "public bridge synthetic java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.types.ConversionContext)",
"public bridge synthetic java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.QueryProfileRegistry)"
],
"fields": []
@@ -6419,7 +6454,7 @@
"public java.lang.String toString()",
"public java.lang.String toInstanceDescription()",
"public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.QueryProfileRegistry)",
- "public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry)",
+ "public java.lang.Object convertFrom(java.lang.Object, com.yahoo.search.query.profile.types.ConversionContext)",
"public static com.yahoo.search.query.profile.types.TensorFieldType fromTypeString(java.lang.String)"
],
"fields": []
@@ -6496,7 +6531,7 @@
"public"
],
"methods": [
- "public void <init>(com.yahoo.search.Query, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry)",
+ "public void <init>(com.yahoo.search.Query, com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry, com.yahoo.language.process.Encoder)",
"public void setParentQuery(com.yahoo.search.Query)",
"public java.lang.Object get(com.yahoo.processing.request.CompoundName, java.util.Map, com.yahoo.processing.request.Properties)",
"public void set(com.yahoo.processing.request.CompoundName, java.lang.Object, java.util.Map)",
diff --git a/container-search/src/main/java/com/yahoo/search/Query.java b/container-search/src/main/java/com/yahoo/search/Query.java
index 8c3a30a5a4d..06b71599103 100644
--- a/container-search/src/main/java/com/yahoo/search/Query.java
+++ b/container-search/src/main/java/com/yahoo/search/Query.java
@@ -7,6 +7,7 @@ import com.yahoo.collections.Tuple2;
import com.yahoo.component.Version;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.fs4.MapEncoder;
+import com.yahoo.language.process.Encoder;
import com.yahoo.prelude.fastsearch.DocumentDatabase;
import com.yahoo.prelude.query.Highlight;
import com.yahoo.prelude.query.textualrepresentation.TextualQueryRepresentation;
@@ -333,20 +334,32 @@ public class Query extends com.yahoo.processing.Request implements Cloneable {
public Query(HttpRequest request, Map<String, String> requestMap, CompiledQueryProfile queryProfile) {
super(new QueryPropertyAliases(propertyAliases));
this.httpRequest = request;
- init(requestMap, queryProfile);
+ init(requestMap, queryProfile, Encoder.throwsOnUse);
}
- private void init(Map<String, String> requestMap, CompiledQueryProfile queryProfile) {
+ // TODO: Deprecate most constructors above here
+
+ private Query(Builder builder) {
+ this(builder.getRequest(), builder.getRequestMap(), builder.getQueryProfile(), builder.getEncoder());
+ }
+
+ private Query(HttpRequest request, Map<String, String> requestMap, CompiledQueryProfile queryProfile, Encoder encoder) {
+ super(new QueryPropertyAliases(propertyAliases));
+ this.httpRequest = request;
+ init(requestMap, queryProfile, encoder);
+ }
+
+ private void init(Map<String, String> requestMap, CompiledQueryProfile queryProfile, Encoder encoder) {
startTime = httpRequest.getJDiscRequest().creationTime(TimeUnit.MILLISECONDS);
if (queryProfile != null) {
// Move all request parameters to the query profile just to validate that the parameter settings are legal
- Properties queryProfileProperties = new QueryProfileProperties(queryProfile);
+ Properties queryProfileProperties = new QueryProfileProperties(queryProfile, encoder);
properties().chain(queryProfileProperties);
// TODO: Just checking legality rather than actually setting would be faster
setPropertiesFromRequestMap(requestMap, properties(), true); // Adds errors to the query for illegal set attempts
// Create the full chain
- properties().chain(new QueryProperties(this, queryProfile.getRegistry())).
+ properties().chain(new QueryProperties(this, queryProfile.getRegistry(), encoder)).
chain(new ModelObjectMap()).
chain(new RequestContextProperties(requestMap)).
chain(queryProfileProperties).
@@ -365,7 +378,7 @@ public class Query extends com.yahoo.processing.Request implements Cloneable {
}
else { // bypass these complications if there is no query profile to get values from and validate against
properties().
- chain(new QueryProperties(this, CompiledQueryProfileRegistry.empty)).
+ chain(new QueryProperties(this, CompiledQueryProfileRegistry.empty, encoder)).
chain(new PropertyMap()).
chain(new DefaultProperties());
setPropertiesFromRequestMap(requestMap, properties(), false);
@@ -1112,4 +1125,59 @@ public class Query extends com.yahoo.processing.Request implements Cloneable {
getRanking().prepare();
}
+ public static class Builder {
+
+ private HttpRequest request = null;
+ private Map<String, String> requestMap = null;
+ private CompiledQueryProfile queryProfile = null;
+ private Encoder encoder = Encoder.throwsOnUse;
+
+ public Builder setRequest(String query) {
+ request = HttpRequest.createTestRequest(query, com.yahoo.jdisc.http.HttpRequest.Method.GET);
+ return this;
+ }
+
+ public Builder setRequest(HttpRequest request) {
+ this.request = request;
+ return this;
+ }
+
+ public HttpRequest getRequest() {
+ if (request == null)
+ return HttpRequest.createTestRequest("", com.yahoo.jdisc.http.HttpRequest.Method.GET);
+ return request;
+ }
+
+    /** Sets the request map to use explicitly. If not set, the request map will be getRequest().propertyMap() */
+ public Builder setRequestMap(Map<String, String> requestMap) {
+ this.requestMap = requestMap;
+ return this;
+ }
+
+ public Map<String, String> getRequestMap() {
+ if (requestMap == null)
+ return getRequest().propertyMap();
+ return requestMap;
+ }
+
+ public Builder setQueryProfile(CompiledQueryProfile queryProfile) {
+ this.queryProfile = queryProfile;
+ return this;
+ }
+
+ /** Returns the query profile of this query, or null if none. */
+ public CompiledQueryProfile getQueryProfile() { return queryProfile; }
+
+ public Builder setEncoder(Encoder encoder) {
+ this.encoder = encoder;
+ return this;
+ }
+
+ public Encoder getEncoder() { return encoder; }
+
+        /** Creates a new query from this builder. No properties are required to be set before calling this. */
+ public Query build() { return new Query(this); }
+
+ }
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
index 9f67603f62b..d1e57a30206 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
@@ -23,6 +23,7 @@ import com.yahoo.io.IOUtils;
import com.yahoo.jdisc.Metric;
import com.yahoo.jdisc.Request;
import com.yahoo.language.Linguistics;
+import com.yahoo.language.process.Encoder;
import com.yahoo.net.HostName;
import com.yahoo.net.UriTools;
import com.yahoo.prelude.query.parser.ParseException;
@@ -105,6 +106,8 @@ public class SearchHandler extends LoggingRequestHandler {
private final String selfHostname = HostName.getLocalhost();
+ private final Encoder encoder;
+
private final ExecutionFactory executionFactory;
private final AtomicLong numRequestsLeftToTrace;
@@ -129,6 +132,22 @@ public class SearchHandler extends LoggingRequestHandler {
public SearchHandler(Statistics statistics,
Metric metric,
ContainerThreadPool threadpool,
+ CompiledQueryProfileRegistry queryProfileRegistry,
+ ContainerHttpConfig config,
+ Encoder encoder,
+ ExecutionFactory executionFactory) {
+ this(statistics, metric, threadpool.executor(), queryProfileRegistry, encoder, executionFactory,
+ config.numQueriesToTraceOnDebugAfterConstruction(),
+ config.hostResponseHeaderKey().equals("") ? Optional.empty() : Optional.of(config.hostResponseHeaderKey()));
+ }
+
+ /**
+ * @deprecated Use the @Inject annotated constructor instead.
+ */
+ @Deprecated // Vespa 8
+ public SearchHandler(Statistics statistics,
+ Metric metric,
+ ContainerThreadPool threadpool,
AccessLog ignored,
CompiledQueryProfileRegistry queryProfileRegistry,
ContainerHttpConfig config,
@@ -136,6 +155,10 @@ public class SearchHandler extends LoggingRequestHandler {
this(statistics, metric, threadpool.executor(), ignored, queryProfileRegistry, config, executionFactory);
}
+ /**
+ * @deprecated Use the @Inject annotated constructor instead.
+ */
+ @Deprecated // Vespa 8
public SearchHandler(Statistics statistics,
Metric metric,
Executor executor,
@@ -147,6 +170,7 @@ public class SearchHandler extends LoggingRequestHandler {
metric,
executor,
queryProfileRegistry,
+ Encoder.throwsOnUse,
executionFactory,
containerHttpConfig.numQueriesToTraceOnDebugAfterConstruction(),
containerHttpConfig.hostResponseHeaderKey().equals("") ?
@@ -168,12 +192,17 @@ public class SearchHandler extends LoggingRequestHandler {
metric,
executor,
QueryProfileConfigurer.createFromConfig(queryProfileConfig).compile(),
+ Encoder.throwsOnUse,
executionFactory,
containerHttpConfig.numQueriesToTraceOnDebugAfterConstruction(),
containerHttpConfig.hostResponseHeaderKey().equals("") ?
Optional.empty() : Optional.of( containerHttpConfig.hostResponseHeaderKey()));
}
+ /**
+ * @deprecated Use the @Inject annotated constructor instead.
+ */
+ @Deprecated // Vespa 8
public SearchHandler(Statistics statistics,
Metric metric,
Executor executor,
@@ -181,19 +210,22 @@ public class SearchHandler extends LoggingRequestHandler {
CompiledQueryProfileRegistry queryProfileRegistry,
ExecutionFactory executionFactory,
Optional<String> hostResponseHeaderKey) {
- this(statistics, metric, executor, queryProfileRegistry, executionFactory, 0, hostResponseHeaderKey);
+ this(statistics, metric, executor, queryProfileRegistry, Encoder.throwsOnUse,
+ executionFactory, 0, hostResponseHeaderKey);
}
private SearchHandler(Statistics statistics,
Metric metric,
Executor executor,
CompiledQueryProfileRegistry queryProfileRegistry,
+ Encoder encoder,
ExecutionFactory executionFactory,
long numQueriesToTraceOnDebugAfterStartup,
Optional<String> hostResponseHeaderKey) {
super(executor, metric, true);
log.log(Level.FINE, () -> "SearchHandler.init " + System.identityHashCode(this));
this.queryProfileRegistry = queryProfileRegistry;
+ this.encoder = encoder;
this.executionFactory = executionFactory;
this.maxThreads = examineExecutor(executor);
@@ -297,7 +329,11 @@ public class SearchHandler extends LoggingRequestHandler {
String queryProfileName = requestMap.getOrDefault("queryProfile", null);
CompiledQueryProfile queryProfile = queryProfileRegistry.findQueryProfile(queryProfileName);
- Query query = new Query(request, requestMap, queryProfile);
+ Query query = new Query.Builder().setRequest(request)
+ .setRequestMap(requestMap)
+ .setQueryProfile(queryProfile)
+ .setEncoder(encoder)
+ .build();
boolean benchmarking = VespaHeaders.benchmarkOutput(request);
boolean benchmarkCoverage = VespaHeaders.benchmarkCoverage(benchmarking, request.getJDiscRequest().headers());
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java b/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
index 34fe376150d..e555000272d 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
@@ -2,14 +2,15 @@
package com.yahoo.search.query.profile;
import com.yahoo.collections.Pair;
+import com.yahoo.language.process.Encoder;
import com.yahoo.processing.IllegalInputException;
import com.yahoo.processing.request.CompoundName;
import com.yahoo.processing.request.properties.PropertyMap;
import com.yahoo.protect.Validator;
-import com.yahoo.search.Query;
import com.yahoo.search.query.Properties;
import com.yahoo.search.query.profile.compiled.CompiledQueryProfile;
import com.yahoo.search.query.profile.compiled.DimensionalValue;
+import com.yahoo.search.query.profile.types.ConversionContext;
import com.yahoo.search.query.profile.types.FieldDescription;
import com.yahoo.search.query.profile.types.QueryProfileFieldType;
import com.yahoo.search.query.profile.types.QueryProfileType;
@@ -29,6 +30,7 @@ import java.util.Map;
public class QueryProfileProperties extends Properties {
private final CompiledQueryProfile profile;
+ private final Encoder encoder;
// Note: The priority order is: values has precedence over references
@@ -42,10 +44,15 @@ public class QueryProfileProperties extends Properties {
*/
private List<Pair<CompoundName, CompiledQueryProfile>> references = null;
- /** Creates an instance from a profile, throws an exception if the given profile is null */
public QueryProfileProperties(CompiledQueryProfile profile) {
+ this(profile, Encoder.throwsOnUse);
+ }
+
+ /** Creates an instance from a profile, throws an exception if the given profile is null */
+ public QueryProfileProperties(CompiledQueryProfile profile, Encoder encoder) {
Validator.ensureNotNull("The profile wrapped by this cannot be null", profile);
this.profile = profile;
+ this.encoder = encoder;
}
/** Returns the query profile backing this, or null if none */
@@ -114,7 +121,9 @@ public class QueryProfileProperties extends Properties {
if (fieldDescription != null) {
if (i == name.size() - 1) { // at the end of the path, check the assignment type
- value = fieldDescription.getType().convertFrom(value, profile.getRegistry());
+ value = fieldDescription.getType().convertFrom(value, new ConversionContext(profile.getRegistry(),
+ encoder,
+ context));
if (value == null)
throw new IllegalInputException("'" + value + "' is not a " +
fieldDescription.getType().toInstanceDescription());
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/ConversionContext.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/ConversionContext.java
new file mode 100644
index 00000000000..4aa95741b06
--- /dev/null
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/ConversionContext.java
@@ -0,0 +1,40 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.search.query.profile.types;
+
+import com.yahoo.language.Language;
+import com.yahoo.language.process.Encoder;
+import com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry;
+
+import java.util.Map;
+
+/**
+ * @author bratseth
+ */
+public class ConversionContext {
+
+ private final CompiledQueryProfileRegistry registry;
+ private final Encoder encoder;
+ private final Language language;
+
+ public ConversionContext(CompiledQueryProfileRegistry registry, Encoder encoder, Map<String, String> context) {
+ this.registry = registry;
+ this.encoder = encoder;
+ this.language = context.containsKey("language") ? Language.fromLanguageTag(context.get("language"))
+ : Language.UNKNOWN;
+ }
+
+ /** Returns the profile registry, or null if none */
+ CompiledQueryProfileRegistry getRegistry() {return registry;}
+
+ /** Returns the configured encoder, never null */
+ Encoder getEncoder() { return encoder; }
+
+ /** Returns the language, which is never null but may be UNKNOWN */
+ Language getLanguage() { return language; }
+
+ /** Returns an empty context */
+ public static ConversionContext empty() {
+ return new ConversionContext(null, Encoder.throwsOnUse, Map.of());
+ }
+
+}
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/FieldDescription.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/FieldDescription.java
index daab5f6a378..7f8836ef2c1 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/types/FieldDescription.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/FieldDescription.java
@@ -33,7 +33,7 @@ public class FieldDescription implements Comparable<FieldDescription> {
}
public FieldDescription(String name, String type) {
- this(name,FieldType.fromString(type,null));
+ this(name,FieldType.fromString(type, null));
}
public FieldDescription(String name, FieldType type, boolean mandatory) {
@@ -60,7 +60,7 @@ public class FieldDescription implements Comparable<FieldDescription> {
* @param overridable whether this can be overridden when first set in a profile. Default: true
*/
public FieldDescription(String name, String typeString, String aliases, boolean mandatory, boolean overridable) {
- this(name,FieldType.fromString(typeString,null),aliases,mandatory,overridable);
+ this(name,FieldType.fromString(typeString, null), aliases, mandatory, overridable);
}
public FieldDescription(String name, FieldType type, boolean mandatory, boolean overridable) {
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/FieldType.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/FieldType.java
index 3bfd33668e6..511b64c7b6e 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/types/FieldType.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/FieldType.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.query.profile.types;
+import com.yahoo.language.process.Encoder;
import com.yahoo.search.query.profile.QueryProfile;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry;
@@ -41,7 +42,7 @@ public abstract class FieldType {
public abstract Object convertFrom(Object o, QueryProfileRegistry registry);
/** Converts the given type to an instance of this type, if possible. Returns null if not possible. */
- public abstract Object convertFrom(Object o, CompiledQueryProfileRegistry registry);
+ public abstract Object convertFrom(Object o, ConversionContext context);
/**
* Returns this type as a tensor type: The true tensor type is this is a tensor field an an empty type -
@@ -77,7 +78,7 @@ public abstract class FieldType {
if ("query-profile".equals(typeString))
return genericQueryProfileType;
if (typeString.startsWith("query-profile:"))
- return QueryProfileFieldType.fromString(typeString.substring("query-profile:".length()),registry);
+ return QueryProfileFieldType.fromString(typeString.substring("query-profile:".length()), registry);
throw new IllegalArgumentException("Unknown type '" + typeString + "'");
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/PrimitiveFieldType.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/PrimitiveFieldType.java
index 1e904e4f970..b1a9820c6fa 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/types/PrimitiveFieldType.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/PrimitiveFieldType.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.query.profile.types;
+import com.yahoo.language.process.Encoder;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry;
@@ -37,7 +38,7 @@ public class PrimitiveFieldType extends FieldType {
}
@Override
- public Object convertFrom(Object object, CompiledQueryProfileRegistry registry) {
+ public Object convertFrom(Object object, ConversionContext context) {
return convertFrom(object, (QueryProfileRegistry)null);
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryFieldType.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryFieldType.java
index 1797a2bd59f..09c1a4d0cc0 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryFieldType.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryFieldType.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.query.profile.types;
+import com.yahoo.language.process.Encoder;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry;
import com.yahoo.search.yql.YqlQuery;
@@ -32,7 +33,7 @@ public class QueryFieldType extends FieldType {
}
@Override
- public Object convertFrom(Object o, CompiledQueryProfileRegistry registry) {
+ public Object convertFrom(Object o, ConversionContext context) {
return convertFrom(o, (QueryProfileRegistry)null);
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileFieldType.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileFieldType.java
index fda2d27e682..6958318bee4 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileFieldType.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/QueryProfileFieldType.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.query.profile.types;
+import com.yahoo.language.process.Encoder;
import com.yahoo.search.query.profile.QueryProfile;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.search.query.profile.compiled.CompiledQueryProfile;
@@ -57,11 +58,11 @@ public class QueryProfileFieldType extends FieldType {
}
@Override
- public CompiledQueryProfile convertFrom(Object object, CompiledQueryProfileRegistry registry) {
+ public CompiledQueryProfile convertFrom(Object object, ConversionContext context) {
String profileId = object.toString();
if (profileId.startsWith("ref:"))
profileId = profileId.substring("ref:".length());
- CompiledQueryProfile profile = registry.getComponent(profileId);
+ CompiledQueryProfile profile = context.getRegistry().getComponent(profileId);
if (profile == null) return null;
if (type != null && ! type.equals(profile.getType())) return null;
return profile;
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/types/TensorFieldType.java b/container-search/src/main/java/com/yahoo/search/query/profile/types/TensorFieldType.java
index 9699a72cb31..34a9f8d41c3 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/types/TensorFieldType.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/types/TensorFieldType.java
@@ -1,6 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.query.profile.types;
+import com.yahoo.language.Language;
+import com.yahoo.language.process.Encoder;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry;
import com.yahoo.tensor.Tensor;
@@ -38,14 +40,26 @@ public class TensorFieldType extends FieldType {
@Override
public Object convertFrom(Object o, QueryProfileRegistry registry) {
+ return convertFrom(o, ConversionContext.empty());
+ }
+
+ @Override
+ public Object convertFrom(Object o, ConversionContext context) {
+ return convertFrom(o, context.getEncoder(), context.getLanguage());
+ }
+
+ private Object convertFrom(Object o, Encoder encoder, Language language) {
if (o instanceof Tensor) return o;
+ if (o instanceof String && ((String)o).startsWith("encode(")) return encode((String)o, encoder, language);
if (o instanceof String) return Tensor.from(type, (String)o);
return null;
}
- @Override
- public Object convertFrom(Object o, CompiledQueryProfileRegistry registry) {
- return convertFrom(o, (QueryProfileRegistry)null);
+ private Tensor encode(String s, Encoder encoder, Language language) {
+ if ( ! s.endsWith(")"))
+ throw new IllegalArgumentException("Expected any string enclosed in encode(), but the argument does not end by ')'");
+ String text = s.substring("encode(".length(), s.length() - 1);
+ return encoder.encode(text, language, type);
}
public static TensorFieldType fromTypeString(String s) {
diff --git a/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java b/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
index 4c65e8003e5..02648f84066 100644
--- a/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
+++ b/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.query.properties;
+import com.yahoo.language.process.Encoder;
import com.yahoo.processing.IllegalInputException;
import com.yahoo.processing.request.CompoundName;
import com.yahoo.search.Query;
@@ -11,6 +12,7 @@ import com.yahoo.search.query.Properties;
import com.yahoo.search.query.Ranking;
import com.yahoo.search.query.Select;
import com.yahoo.search.query.profile.compiled.CompiledQueryProfileRegistry;
+import com.yahoo.search.query.profile.types.ConversionContext;
import com.yahoo.search.query.profile.types.FieldDescription;
import com.yahoo.search.query.profile.types.QueryProfileType;
import com.yahoo.search.query.ranking.Diversity;
@@ -32,10 +34,12 @@ public class QueryProperties extends Properties {
private Query query;
private final CompiledQueryProfileRegistry profileRegistry;
+ private final Encoder encoder;
- public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry) {
+ public QueryProperties(Query query, CompiledQueryProfileRegistry profileRegistry, Encoder encoder) {
this.query = query;
this.profileRegistry = profileRegistry;
+ this.encoder = encoder;
}
public void setParentQuery(Query query) {
@@ -256,9 +260,15 @@ public class QueryProperties extends Properties {
else if (key.size() > 2) {
String restKey = key.rest().rest().toString();
if (key.get(1).equals(Ranking.FEATURES))
- setRankingFeature(query, restKey, toSpecifiedType(restKey, value, profileRegistry.getTypeRegistry().getComponent("features")));
+ setRankingFeature(query, restKey, toSpecifiedType(restKey,
+ value,
+ profileRegistry.getTypeRegistry().getComponent("features"),
+ context));
else if (key.get(1).equals(Ranking.PROPERTIES))
- ranking.getProperties().put(restKey, toSpecifiedType(restKey, value, profileRegistry.getTypeRegistry().getComponent("properties")));
+ ranking.getProperties().put(restKey, toSpecifiedType(restKey,
+ value,
+ profileRegistry.getTypeRegistry().getComponent("properties"),
+ context));
else
throwIllegalParameter(key.rest().toString(), Ranking.RANKING);
}
@@ -294,9 +304,15 @@ public class QueryProperties extends Properties {
}
}
else if (key.first().equals("rankfeature") || key.first().equals("featureoverride") ) { // featureoverride is deprecated
- setRankingFeature(query, key.rest().toString(), toSpecifiedType(key.rest().toString(), value, profileRegistry.getTypeRegistry().getComponent("features")));
+ setRankingFeature(query, key.rest().toString(), toSpecifiedType(key.rest().toString(),
+ value,
+ profileRegistry.getTypeRegistry().getComponent("features"),
+ context));
} else if (key.first().equals("rankproperty")) {
- query.getRanking().getProperties().put(key.rest().toString(), toSpecifiedType(key.rest().toString(), value, profileRegistry.getTypeRegistry().getComponent("properties")));
+ query.getRanking().getProperties().put(key.rest().toString(), toSpecifiedType(key.rest().toString(),
+ value,
+ profileRegistry.getTypeRegistry().getComponent("properties"),
+ context));
} else if (key.size()==1) {
if (key.equals(Query.HITS))
query.setHits(asInteger(value,10));
@@ -359,12 +375,12 @@ public class QueryProperties extends Properties {
}
}
- private Object toSpecifiedType(String key, Object value, QueryProfileType type) {
+ private Object toSpecifiedType(String key, Object value, QueryProfileType type, Map<String,String> context) {
if ( ! ( value instanceof String)) return value; // already typed
if (type == null) return value; // no type info -> keep as string
FieldDescription field = type.getField(key);
if (field == null) return value; // ditto
- return field.getType().convertFrom(value, profileRegistry);
+ return field.getType().convertFrom(value, new ConversionContext(profileRegistry, encoder, context));
}
private void throwIllegalParameter(String key,String namespace) {
diff --git a/container-search/src/main/java/com/yahoo/search/yql/ProgramCompileException.java b/container-search/src/main/java/com/yahoo/search/yql/ProgramCompileException.java
index 46dfb780e2d..32ab9d682e3 100644
--- a/container-search/src/main/java/com/yahoo/search/yql/ProgramCompileException.java
+++ b/container-search/src/main/java/com/yahoo/search/yql/ProgramCompileException.java
@@ -1,7 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.search.yql;
-class ProgramCompileException extends RuntimeException {
+import com.yahoo.processing.IllegalInputException;
+
+class ProgramCompileException extends IllegalInputException {
private Location sourceLocation;
@@ -9,27 +11,6 @@ class ProgramCompileException extends RuntimeException {
super(message);
}
- public ProgramCompileException(String message, Object... args) {
- super(formatMessage(message, args));
- }
-
- private static String formatMessage(String message, Object... args) {
- return args == null ? message : String.format(message, args);
- }
-
- public ProgramCompileException(String message, Throwable cause) {
- super(message, cause);
- }
-
- public ProgramCompileException(Throwable cause) {
- super(cause);
- }
-
- public ProgramCompileException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
- super(message, cause, enableSuppression, writableStackTrace);
- }
-
-
public ProgramCompileException(Location sourceLocation, String message, Object... args) {
super(String.format("%s %s", sourceLocation != null ? sourceLocation : "", args == null ? message : String.format(message, args)));
this.sourceLocation = sourceLocation;
diff --git a/container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java b/container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java
index 39ba607b741..45f53a1cdb9 100644
--- a/container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/query/profile/types/test/QueryProfileTypeTestCase.java
@@ -3,7 +3,10 @@ package com.yahoo.search.query.profile.types.test;
import com.yahoo.component.ComponentId;
import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.language.Language;
+import com.yahoo.language.process.Encoder;
import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorType;
import com.yahoo.yolean.Exceptions;
import com.yahoo.search.Query;
import com.yahoo.processing.request.CompoundName;
@@ -21,6 +24,8 @@ import org.junit.Test;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
@@ -80,6 +85,7 @@ public class QueryProfileTypeTestCase {
type.addField(new FieldDescription("ranking.features.query(myTensor1)", FieldType.fromString("tensor(a{},b{})", registry)), registry);
type.addField(new FieldDescription("ranking.features.query(myTensor2)", FieldType.fromString("tensor(x[2],y[2])", registry)), registry);
type.addField(new FieldDescription("ranking.features.query(myTensor3)", FieldType.fromString("tensor<float>(x{})",registry)), registry);
+ type.addField(new FieldDescription("ranking.features.query(myTensor4)", FieldType.fromString("tensor<float>(x[5])",registry)), registry);
type.addField(new FieldDescription("myQuery", FieldType.fromString("query", registry)), registry);
type.addField(new FieldDescription("myQueryProfile", FieldType.fromString("query-profile", registry),"qp"), registry);
}
@@ -400,15 +406,15 @@ public class QueryProfileTypeTestCase {
}
@Test
- public void testTensorRankFeatureInRequest() throws UnsupportedEncodingException {
+ public void testTensorRankFeatureInRequest() {
QueryProfile profile = new QueryProfile("test");
profile.setType(testtype);
registry.register(profile);
CompiledQueryProfileRegistry cRegistry = registry.compile();
String tensorString = "{{a:a1, b:b1}:1.0, {a:a2, b:b1}:2.0}}";
- Query query = new Query(HttpRequest.createTestRequest("?" + encode("ranking.features.query(myTensor1)") +
- "=" + encode(tensorString),
+ Query query = new Query(HttpRequest.createTestRequest("?" + urlEncode("ranking.features.query(myTensor1)") +
+ "=" + urlEncode(tensorString),
com.yahoo.jdisc.http.HttpRequest.Method.GET),
cRegistry.getComponent("test"));
assertEquals(0, query.errors().size());
@@ -418,15 +424,15 @@ public class QueryProfileTypeTestCase {
// Expected to work exactly as testTensorRankFeatureInRequest
@Test
- public void testTensorRankFeatureInRequestWithInheritedQueryProfileType() throws UnsupportedEncodingException {
+ public void testTensorRankFeatureInRequestWithInheritedQueryProfileType() {
QueryProfile profile = new QueryProfile("test");
profile.setType(emptyInheritingTesttype);
registry.register(profile);
CompiledQueryProfileRegistry cRegistry = registry.compile();
String tensorString = "{{a:a1, b:b1}:1.0, {a:a2, b:b1}:2.0}}";
- Query query = new Query(HttpRequest.createTestRequest("?" + encode("ranking.features.query(myTensor1)") +
- "=" + encode(tensorString),
+ Query query = new Query(HttpRequest.createTestRequest("?" + urlEncode("ranking.features.query(myTensor1)") +
+ "=" + urlEncode(tensorString),
com.yahoo.jdisc.http.HttpRequest.Method.GET),
cRegistry.getComponent("test"));
assertEquals(0, query.errors().size());
@@ -434,8 +440,41 @@ public class QueryProfileTypeTestCase {
assertEquals(Tensor.from(tensorString), query.getRanking().getFeatures().getTensor("query(myTensor1)").get());
}
- private String encode(String s) throws UnsupportedEncodingException {
- return URLEncoder.encode(s, "utf8");
+ @Test
+ public void testUnencodedTensorRankFeatureInRequest() {
+ QueryProfile profile = new QueryProfile("test");
+ profile.setType(testtype);
+ registry.register(profile);
+
+ CompiledQueryProfileRegistry cRegistry = registry.compile();
+ String textToEncode = "text to encode as tensor";
+ Tensor expectedTensor = Tensor.from("tensor<float>(x[5]):[3,7,4,0,0]]");
+ Query query1 = new Query.Builder().setRequest(HttpRequest.createTestRequest("?" + urlEncode("ranking.features.query(myTensor4)") +
+ "=" + urlEncode("encode(" + textToEncode + ")"),
+ com.yahoo.jdisc.http.HttpRequest.Method.GET))
+ .setQueryProfile(cRegistry.getComponent("test"))
+ .setEncoder(new MockEncoder(textToEncode, Language.UNKNOWN, expectedTensor))
+ .build();
+ assertEquals(0, query1.errors().size());
+ assertEquals(expectedTensor, query1.properties().get("ranking.features.query(myTensor4)"));
+ assertEquals(expectedTensor, query1.getRanking().getFeatures().getTensor("query(myTensor4)").get());
+
+ // Explicit language
+ Query query2 = new Query.Builder().setRequest(HttpRequest.createTestRequest("?" + urlEncode("ranking.features.query(myTensor4)") +
+ "=" + urlEncode("encode(" + textToEncode + ")") +
+ "&language=en",
+ com.yahoo.jdisc.http.HttpRequest.Method.GET))
+ .setQueryProfile(cRegistry.getComponent("test"))
+ .setEncoder(new MockEncoder(textToEncode, Language.ENGLISH, expectedTensor))
+ .build();
+ assertEquals(0, query2.errors().size());
+ assertEquals(expectedTensor, query2.properties().get("ranking.features.query(myTensor4)"));
+ assertEquals(expectedTensor, query2.getRanking().getFeatures().getTensor("query(myTensor4)").get());
+
+ }
+
+ private String urlEncode(String s) {
+ return URLEncoder.encode(s, StandardCharsets.UTF_8);
}
@Test
@@ -684,4 +723,34 @@ public class QueryProfileTypeTestCase {
}
}
+ private static final class MockEncoder implements Encoder {
+
+ private final String expectedText;
+ private final Language expectedLanguage;
+ private final Tensor tensorToReturn;
+
+ public MockEncoder(String expectedText,
+ Language expectedLanguage,
+ Tensor tensorToReturn) {
+ this.expectedText = expectedText;
+ this.expectedLanguage = expectedLanguage;
+ this.tensorToReturn = tensorToReturn;
+ }
+
+ @Override
+ public List<Integer> encode(String text, Language language) {
+ fail("Unexpected call");
+ return null;
+ }
+
+ @Override
+ public Tensor encode(String text, Language language, TensorType tensorType) {
+ assertEquals(expectedText, text);
+ assertEquals(expectedLanguage, language);
+ assertEquals(tensorToReturn.type(), tensorType);
+ return tensorToReturn;
+ }
+
+ }
+
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java
index 3391965dc67..617e87c55a9 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java
@@ -12,6 +12,7 @@ import java.time.Instant;
import java.util.Collection;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.stream.Collectors;
public class AthenzAccessControlService implements AccessControlService {
@@ -37,7 +38,7 @@ public class AthenzAccessControlService implements AccessControlService {
}
Map<AthenzUser, String> users = zmsClient.listPendingRoleApprovals(dataPlaneAccessRole);
if (users.containsKey(user)) {
- zmsClient.approvePendingRoleMembership(dataPlaneAccessRole, user, expiry);
+ zmsClient.approvePendingRoleMembership(dataPlaneAccessRole, user, expiry, Optional.empty());
return true;
}
return false;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
index 4f45e4370a7..42a5e2b42be 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
@@ -192,7 +192,7 @@ public class ZmsClientMock implements ZmsClient {
}
@Override
- public void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry) {
+ public void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry, Optional<String> reason) {
}
@Override
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
index 2dbf910b49e..6dd5e7f53e0 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
@@ -180,8 +180,11 @@ public class DeploymentTrigger {
public List<JobId> forceTrigger(ApplicationId applicationId, JobType jobType, String user, boolean requireTests) {
Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId));
Instance instance = application.require(applicationId.instance());
- DeploymentStatus status = jobs.deploymentStatus(application);
JobId job = new JobId(instance.id(), jobType);
+ if (job.type().environment().isManuallyDeployed())
+ return forceTriggerManualJob(job);
+
+ DeploymentStatus status = jobs.deploymentStatus(application);
Versions versions = Versions.from(instance.change(), application, status.deploymentFor(job), controller.readSystemVersion());
Map<JobId, List<Versions>> jobs = status.testJobs(Map.of(job, versions));
if (jobs.isEmpty() || ! requireTests)
@@ -192,6 +195,16 @@ public class DeploymentTrigger {
return List.copyOf(jobs.keySet());
}
+ private List<JobId> forceTriggerManualJob(JobId job) {
+ Run last = jobs.last(job).orElseThrow(() -> new IllegalArgumentException(job + " has never been run"));
+ Versions target = new Versions(controller.readSystemVersion(),
+ last.versions().targetApplication(),
+ Optional.of(last.versions().targetPlatform()),
+ Optional.of(last.versions().targetApplication()));
+ jobs.start(job.application(), job.type(), target, true);
+ return List.of(job);
+ }
+
/** Retrigger job. If the job is already running, it will be canceled, and retrigger enqueued. */
public Optional<JobId> reTriggerOrAddToQueue(DeploymentId deployment) {
JobType jobType = JobType.from(controller.system(), deployment.zoneId())
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
index cd7ce8c3fa6..98fd0342ecd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
@@ -24,8 +24,10 @@ import java.time.temporal.ChronoUnit;
import java.util.HashSet;
import java.util.Optional;
import java.util.OptionalInt;
+import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
/**
* Updates refreshed endpoint certificates and triggers redeployment, and deletes unused certificates.
@@ -60,6 +62,7 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer {
deployRefreshedCertificates();
updateRefreshedCertificates();
deleteUnusedCertificates();
+ reportUnmanagedCertificates();
} catch (Exception e) {
log.log(LogLevel.ERROR, "Exception caught while maintaining endpoint certificates", e);
return 0.0;
@@ -134,6 +137,16 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer {
});
}
+ private void reportUnmanagedCertificates() {
+ Set<String> managedRequestIds = curator.readAllEndpointCertificateMetadata().values().stream().map(EndpointCertificateMetadata::requestId).collect(Collectors.toSet());
+
+ for (EndpointCertificateMetadata cameoCertificateMetadata : endpointCertificateProvider.listCertificates()) {
+ if (!managedRequestIds.contains(cameoCertificateMetadata.requestId())) {
+ log.info("Certificate metadata exists with provider but is not managed by controller: " + cameoCertificateMetadata.requestId() + ", " + cameoCertificateMetadata.issuer() + ", " + cameoCertificateMetadata.requestedDnsSans());
+ }
+ }
+ }
+
private Lock lock(ApplicationId applicationId) {
return curator.lock(TenantAndApplicationId.from(applicationId));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
index 044b7b76d1e..157f57b3bea 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
@@ -19,6 +19,7 @@ import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeStream;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.text.Text;
+import com.yahoo.vespa.configserver.flags.FlagsDb;
import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
@@ -69,15 +70,17 @@ public class UserApiHandler extends LoggingRequestHandler {
private final UserManagement users;
private final Controller controller;
+ private final FlagsDb flagsDb;
private final BooleanFlag enable_public_signup_flow;
private final IntFlag maxTrialTenants;
private final BooleanFlag enabledHorizonDashboard;
@Inject
- public UserApiHandler(Context parentCtx, UserManagement users, Controller controller, FlagSource flagSource) {
+ public UserApiHandler(Context parentCtx, UserManagement users, Controller controller, FlagSource flagSource, FlagsDb flagsDb) {
super(parentCtx);
this.users = users;
this.controller = controller;
+ this.flagsDb = flagsDb;
this.enable_public_signup_flow = PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.bindTo(flagSource);
this.maxTrialTenants = PermanentFlags.MAX_TRIAL_TENANTS.bindTo(flagSource);
this.enabledHorizonDashboard = Flags.ENABLED_HORIZON_DASHBOARD.bindTo(flagSource);
@@ -170,6 +173,7 @@ public class UserApiHandler extends LoggingRequestHandler {
root.setBool("isPublic", controller.system().isPublic());
root.setBool("isCd", controller.system().isCd());
+ // TODO (freva): Remove after users have migrated to use 'flags'
root.setBool(enable_public_signup_flow.id().toString(),
enable_public_signup_flow.with(FetchVector.Dimension.CONSOLE_USER_EMAIL, user.email()).value());
root.setBool("hasTrialCapacity", hasTrialCapacity());
@@ -197,6 +201,8 @@ public class UserApiHandler extends LoggingRequestHandler {
operatorRoles.forEach(role -> operator.addString(role.definition().name()));
}
+ UserFlagsSerializer.toSlime(root, flagsDb.getAllFlagData(), tenantRolesByTenantName.keySet(), !operatorRoles.isEmpty(), user.email());
+
return new SlimeJsonResponse(slime);
}
@@ -249,7 +255,7 @@ public class UserApiHandler extends LoggingRequestHandler {
});
}
- private void toSlime(Cursor userObject, User user) {
+ private static void toSlime(Cursor userObject, User user) {
if (user.name() != null) userObject.setString("name", user.name());
userObject.setString("email", user.email());
if (user.nickname() != null) userObject.setString("nickname", user.nickname());
@@ -376,7 +382,7 @@ public class UserApiHandler extends LoggingRequestHandler {
return Exceptions.uncheck(() -> SlimeUtils.jsonToSlime(IOUtils.readBytes(request.getData(), 1 << 10)).get());
}
- private <Type> Type require(String name, Function<Inspector, Type> mapper, Inspector object) {
+ private static <Type> Type require(String name, Function<Inspector, Type> mapper, Inspector object) {
if ( ! object.field(name).valid()) throw new IllegalArgumentException("Missing field '" + name + "'.");
return mapper.apply(object.field(name));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserFlagsSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserFlagsSerializer.java
new file mode 100644
index 00000000000..44d537883f9
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserFlagsSerializer.java
@@ -0,0 +1,86 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.restapi.user;
+
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.lang.MutableBoolean;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.flags.FetchVector;
+import com.yahoo.vespa.flags.FlagDefinition;
+import com.yahoo.vespa.flags.FlagId;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.RawFlag;
+import com.yahoo.vespa.flags.UnboundFlag;
+import com.yahoo.vespa.flags.json.Condition;
+import com.yahoo.vespa.flags.json.FlagData;
+import com.yahoo.vespa.flags.json.Rule;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * @author freva
+ */
+public class UserFlagsSerializer {
+ static void toSlime(Cursor cursor, Map<FlagId, FlagData> rawFlagData,
+ Set<TenantName> authorizedForTenantNames, boolean isOperator, String userEmail) {
+ FetchVector resolveVector = FetchVector.fromMap(Map.of(FetchVector.Dimension.CONSOLE_USER_EMAIL, userEmail));
+ List<FlagData> filteredFlagData = Flags.getAllFlags().stream()
+ // Only include flags that have CONSOLE_USER_EMAIL dimension, this should be replaced with more explicit
+ // 'target' annotation if/when that is added to flag definition
+ .filter(fd -> fd.getDimensions().contains(FetchVector.Dimension.CONSOLE_USER_EMAIL))
+ .map(FlagDefinition::getUnboundFlag)
+ .map(flag -> filteredFlagData(flag, Optional.ofNullable(rawFlagData.get(flag.id())), authorizedForTenantNames, isOperator, resolveVector))
+ .collect(Collectors.toUnmodifiableList());
+
+ byte[] bytes = FlagData.serializeListToUtf8Json(filteredFlagData);
+ SlimeUtils.copyObject(SlimeUtils.jsonToSlime(bytes).get(), cursor);
+ }
+
+ private static <T> FlagData filteredFlagData(UnboundFlag<T, ?, ?> definition, Optional<FlagData> original,
+ Set<TenantName> authorizedForTenantNames, boolean isOperator, FetchVector resolveVector) {
+ MutableBoolean encounteredEmpty = new MutableBoolean(false);
+ Optional<RawFlag> defaultValue = Optional.of(definition.serializer().serialize(definition.defaultValue()));
+ // Include the original rules from flag DB and the default value from code if there is no default rule in DB
+ List<Rule> rules = Stream.concat(original.stream().flatMap(fd -> fd.rules().stream()), Stream.of(new Rule(defaultValue)))
+ // Exclude rules that do not match the resolveVector
+ .filter(rule -> rule.partialMatch(resolveVector))
+ // Re-create each rule with value explicitly set, either from DB or default from code and
+ // a filtered set of conditions
+ .map(rule -> new Rule(rule.getValueToApply().or(() -> defaultValue),
+ rule.conditions().stream()
+ .flatMap(condition -> filteredCondition(condition, authorizedForTenantNames, isOperator, resolveVector).stream())
+ .collect(Collectors.toUnmodifiableList())))
+ // We can stop as soon as we hit the first rule that has no conditions
+ .takeWhile(rule -> !encounteredEmpty.getAndSet(rule.conditions().isEmpty()))
+ .collect(Collectors.toUnmodifiableList());
+
+ return new FlagData(definition.id(), new FetchVector(), rules);
+ }
+
+ private static Optional<Condition> filteredCondition(Condition condition, Set<TenantName> authorizedForTenantNames,
+ boolean isOperator, FetchVector resolveVector) {
+ // If the condition is one of the conditions that we resolve on the server, e.g. email, we do not need to
+ // propagate it back to the user
+ if (resolveVector.hasDimension(condition.dimension())) return Optional.empty();
+
+ // For the other dimensions, filter the values down to an allowed subset
+ switch (condition.dimension()) {
+ case TENANT_ID: return valueSubset(condition, tenant -> isOperator || authorizedForTenantNames.contains(TenantName.from(tenant)));
+ case APPLICATION_ID: return valueSubset(condition, appId -> isOperator || authorizedForTenantNames.stream().anyMatch(tenant -> appId.startsWith(tenant.value() + ":")));
+ default: throw new IllegalArgumentException("Dimension " + condition.dimension() + " is not supported for user flags");
+ }
+ }
+
+ private static Optional<Condition> valueSubset(Condition condition, Predicate<String> predicate) {
+ Condition.CreateParams createParams = condition.toCreateParams();
+ return Optional.of(createParams
+ .withValues(createParams.values().stream().filter(predicate).collect(Collectors.toUnmodifiableList()))
+ .createAs(condition.type()));
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
index 10f143a8e96..1d844859c37 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
@@ -26,7 +26,6 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Optional;
-import java.util.function.Consumer;
import java.util.function.Supplier;
import java.util.regex.Pattern;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 66c3f7bba16..fb860063696 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -271,6 +271,12 @@ public class ApplicationApiTest extends ControllerContainerTest {
"{\"message\":\"Deployment started in run 1 of dev-us-east-1 for tenant1.application1.instance1. This may take about 15 minutes the first time.\",\"run\":1}");
app1.runJob(JobType.devUsEast1);
+ // POST (deploy) a job to restart a manual deployment to dev
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1", POST)
+ .userIdentity(USER_ID),
+ "{\"message\":\"Triggered dev-us-east-1 for tenant1.application1.instance1\"}");
+ app1.runJob(JobType.devUsEast1);
+
// GET dev application package
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/job/dev-us-east-1/package", GET)
.userIdentity(USER_ID),
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json
index 7ebc2d24fe9..736e1fe082c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json
@@ -4,6 +4,37 @@
"jobName": "dev-us-east-1",
"runs": [
{
+ "id": 2,
+ "url": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/job/run/2",
+ "start": "(ignore)",
+ "end": "(ignore)",
+ "status": "success",
+ "versions": {
+ "targetPlatform": "6.1.0",
+ "targetApplication": {
+ "build": 1
+ },
+ "sourcePlatform":"6.1.0",
+ "sourceApplication": {
+ "build": 1
+ }
+ },
+ "steps": [
+ {
+ "name": "deployReal",
+ "status": "succeeded"
+ },
+ {
+ "name": "installReal",
+ "status": "succeeded"
+ },
+ {
+ "name": "copyVespaLogs",
+ "status": "succeeded"
+ }
+ ]
+ },
+ {
"id": 1,
"url": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/instance/instance1/job/run/1",
"start": "(ignore)",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiOnPremTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiOnPremTest.java
index acd481030e2..c884eae8afc 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiOnPremTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiOnPremTest.java
@@ -5,6 +5,8 @@ import com.yahoo.application.container.handler.Request;
import com.yahoo.vespa.athenz.api.AthenzDomain;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.utils.AthenzIdentities;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.user.User;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
@@ -25,40 +27,42 @@ public class UserApiOnPremTest extends ControllerContainerTest {
@Test
public void userMetadataOnPremTest() {
- ContainerTester tester = new ContainerTester(container, responseFiles);
- ControllerTester controller = new ControllerTester(tester);
- User user = new User("dev@domail", "Joe Developer", "dev", null);
+ try (Flags.Replacer ignored = Flags.clearFlagsForTesting(PermanentFlags.MAX_TRIAL_TENANTS.id(), PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id())) {
+ ContainerTester tester = new ContainerTester(container, responseFiles);
+ ControllerTester controller = new ControllerTester(tester);
+ User user = new User("dev@domail", "Joe Developer", "dev", null);
- controller.createTenant("tenant1", "domain1", 1L);
- controller.createApplication("tenant1", "app1", "default");
- controller.createApplication("tenant1", "app2", "default");
- controller.createApplication("tenant1", "app2", "myinstance");
- controller.createApplication("tenant1", "app3");
+ controller.createTenant("tenant1", "domain1", 1L);
+ controller.createApplication("tenant1", "app1", "default");
+ controller.createApplication("tenant1", "app2", "default");
+ controller.createApplication("tenant1", "app2", "myinstance");
+ controller.createApplication("tenant1", "app3");
- controller.createTenant("tenant2", "domain2", 2L);
- controller.createApplication("tenant2", "app2", "test");
+ controller.createTenant("tenant2", "domain2", 2L);
+ controller.createApplication("tenant2", "app2", "test");
- controller.createTenant("tenant3", "domain3", 3L);
- controller.createApplication("tenant3", "app1");
+ controller.createTenant("tenant3", "domain3", 3L);
+ controller.createApplication("tenant3", "app1");
- controller.createTenant("sandbox", "domain4", 4L);
- controller.createApplication("sandbox", "app1", "default");
- controller.createApplication("sandbox", "app2", "default");
- controller.createApplication("sandbox", "app2", "dev");
+ controller.createTenant("sandbox", "domain4", 4L);
+ controller.createApplication("sandbox", "app1", "default");
+ controller.createApplication("sandbox", "app2", "default");
+ controller.createApplication("sandbox", "app2", "dev");
- AthenzIdentity operator = AthenzIdentities.from("vespa.alice");
- controller.athenzDb().addHostedOperator(operator);
- AthenzIdentity tenantAdmin = AthenzIdentities.from("domain1.bob");
- Stream.of("domain1", "domain2", "domain4")
- .map(AthenzDomain::new)
- .map(controller.athenzDb()::getOrCreateDomain)
- .forEach(d -> d.admin(AthenzIdentities.from("domain1.bob")));
+ AthenzIdentity operator = AthenzIdentities.from("vespa.alice");
+ controller.athenzDb().addHostedOperator(operator);
+ AthenzIdentity tenantAdmin = AthenzIdentities.from("domain1.bob");
+ Stream.of("domain1", "domain2", "domain4")
+ .map(AthenzDomain::new)
+ .map(controller.athenzDb()::getOrCreateDomain)
+ .forEach(d -> d.admin(AthenzIdentities.from("domain1.bob")));
- tester.assertResponse(createUserRequest(user, operator),
- new File("user-without-applications.json"));
+ tester.assertResponse(createUserRequest(user, operator),
+ new File("user-without-applications.json"));
- tester.assertResponse(createUserRequest(user, tenantAdmin),
- new File("user-with-applications-athenz.json"));
+ tester.assertResponse(createUserRequest(user, tenantAdmin),
+ new File("user-with-applications-athenz.json"));
+ }
}
private Request createUserRequest(User user, AthenzIdentity identity) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java
index 03f1d75a50b..9198369a3ad 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.controller.restapi.user;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.controller.ControllerTester;
@@ -205,65 +206,68 @@ public class UserApiTest extends ControllerContainerCloudTest {
@Test
public void userMetadataTest() {
- ContainerTester tester = new ContainerTester(container, responseFiles);
- ((InMemoryFlagSource) tester.controller().flagSource())
- .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
- ControllerTester controller = new ControllerTester(tester);
- Set<Role> operator = Set.of(Role.hostedOperator(), Role.hostedSupporter(), Role.hostedAccountant());
- User user = new User("dev@domail", "Joe Developer", "dev", null);
-
- tester.assertResponse(request("/user/v1/user")
- .roles(operator)
- .user(user),
- new File("user-without-applications.json"));
-
- controller.createTenant("tenant1", Tenant.Type.cloud);
- controller.createApplication("tenant1", "app1", "default");
- controller.createApplication("tenant1", "app2", "default");
- controller.createApplication("tenant1", "app2", "myinstance");
- controller.createApplication("tenant1", "app3");
-
- controller.createTenant("tenant2", Tenant.Type.cloud);
- controller.createApplication("tenant2", "app2", "test");
-
- controller.createTenant("tenant3", Tenant.Type.cloud);
- controller.createApplication("tenant3", "app1");
-
- controller.createTenant("sandbox", Tenant.Type.cloud);
- controller.createApplication("sandbox", "app1", "default");
- controller.createApplication("sandbox", "app2", "default");
- controller.createApplication("sandbox", "app2", "dev");
-
- // Should still be empty because none of the roles explicitly refer to any of the applications
- tester.assertResponse(request("/user/v1/user")
- .roles(operator)
- .user(user),
- new File("user-without-applications.json"));
-
- // Empty applications because tenant dummy does not exist
- tester.assertResponse(request("/user/v1/user")
- .roles(Set.of(Role.administrator(TenantName.from("tenant1")),
- Role.developer(TenantName.from("tenant2")),
- Role.developer(TenantName.from("sandbox")),
- Role.reader(TenantName.from("sandbox"))))
- .user(user),
- new File("user-with-applications-cloud.json"));
+ try (Flags.Replacer ignored = Flags.clearFlagsForTesting(PermanentFlags.MAX_TRIAL_TENANTS.id(), PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id())) {
+ ContainerTester tester = new ContainerTester(container, responseFiles);
+ ((InMemoryFlagSource) tester.controller().flagSource())
+ .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
+ ControllerTester controller = new ControllerTester(tester);
+ Set<Role> operator = Set.of(Role.hostedOperator(), Role.hostedSupporter(), Role.hostedAccountant());
+ User user = new User("dev@domail", "Joe Developer", "dev", null);
+
+ tester.assertResponse(request("/user/v1/user")
+ .roles(operator)
+ .user(user),
+ new File("user-without-applications.json"));
+
+ controller.createTenant("tenant1", Tenant.Type.cloud);
+ controller.createApplication("tenant1", "app1", "default");
+ controller.createApplication("tenant1", "app2", "default");
+ controller.createApplication("tenant1", "app2", "myinstance");
+ controller.createApplication("tenant1", "app3");
+
+ controller.createTenant("tenant2", Tenant.Type.cloud);
+ controller.createApplication("tenant2", "app2", "test");
+
+ controller.createTenant("tenant3", Tenant.Type.cloud);
+ controller.createApplication("tenant3", "app1");
+
+ controller.createTenant("sandbox", Tenant.Type.cloud);
+ controller.createApplication("sandbox", "app1", "default");
+ controller.createApplication("sandbox", "app2", "default");
+ controller.createApplication("sandbox", "app2", "dev");
+
+ // Should still be empty because none of the roles explicitly refer to any of the applications
+ tester.assertResponse(request("/user/v1/user")
+ .roles(operator)
+ .user(user),
+ new File("user-without-applications.json"));
+
+ // Empty applications because tenant dummy does not exist
+ tester.assertResponse(request("/user/v1/user")
+ .roles(Set.of(Role.administrator(TenantName.from("tenant1")),
+ Role.developer(TenantName.from("tenant2")),
+ Role.developer(TenantName.from("sandbox")),
+ Role.reader(TenantName.from("sandbox"))))
+ .user(user),
+ new File("user-with-applications-cloud.json"));
+ }
}
@Test
public void maxTrialTenants() {
- ContainerTester tester = new ContainerTester(container, responseFiles);
- ((InMemoryFlagSource) tester.controller().flagSource())
- .withIntFlag(PermanentFlags.MAX_TRIAL_TENANTS.id(), 1)
- .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
- ControllerTester controller = new ControllerTester(tester);
- Set<Role> operator = Set.of(Role.hostedOperator(), Role.hostedSupporter(), Role.hostedAccountant());
- User user = new User("dev@domail", "Joe Developer", "dev", null);
-
- controller.createTenant("tenant1", Tenant.Type.cloud);
-
- tester.assertResponse(
- request("/user/v1/user").user(user),
- new File("user-without-trial-capacity-cloud.json"));
+ try (Flags.Replacer ignored = Flags.clearFlagsForTesting(PermanentFlags.MAX_TRIAL_TENANTS.id(), PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id())) {
+ ContainerTester tester = new ContainerTester(container, responseFiles);
+ ((InMemoryFlagSource) tester.controller().flagSource())
+ .withIntFlag(PermanentFlags.MAX_TRIAL_TENANTS.id(), 1)
+ .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
+ ControllerTester controller = new ControllerTester(tester);
+ User user = new User("dev@domail", "Joe Developer", "dev", null);
+
+ controller.createTenant("tenant1", Tenant.Type.cloud);
+
+ tester.assertResponse(
+ request("/user/v1/user").user(user),
+ new File("user-without-trial-capacity-cloud.json"));
+ }
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserFlagsSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserFlagsSerializerTest.java
new file mode 100644
index 00000000000..8625628b74e
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/UserFlagsSerializerTest.java
@@ -0,0 +1,133 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.restapi.user;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.test.json.JsonTestHelper;
+import com.yahoo.vespa.flags.FetchVector;
+import com.yahoo.vespa.flags.FlagId;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.JsonNodeRawFlag;
+import com.yahoo.vespa.flags.json.Condition;
+import com.yahoo.vespa.flags.json.FlagData;
+import com.yahoo.vespa.flags.json.Rule;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.yahoo.vespa.flags.FetchVector.Dimension.APPLICATION_ID;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.CONSOLE_USER_EMAIL;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID;
+
+/**
+ * @author freva
+ */
+public class UserFlagsSerializerTest {
+
+ @Test
+ public void user_flag_test() throws IOException {
+ String email1 = "alice@domain.tld";
+ String email2 = "bob@domain.tld";
+
+ try (Flags.Replacer ignored = Flags.clearFlagsForTesting()) {
+ Flags.defineStringFlag("string-id", "default value", List.of("owner"), "1970-01-01", "2100-01-01", "desc", "mod", CONSOLE_USER_EMAIL);
+ Flags.defineIntFlag("int-id", 123, List.of("owner"), "1970-01-01", "2100-01-01", "desc", "mod", CONSOLE_USER_EMAIL, TENANT_ID, APPLICATION_ID);
+ Flags.defineDoubleFlag("double-id", 3.14d, List.of("owner"), "1970-01-01", "2100-01-01", "desc", "mod");
+ Flags.defineListFlag("list-id", List.of("a"), String.class, List.of("owner"), "1970-01-01", "2100-01-01", "desc", "mod", CONSOLE_USER_EMAIL);
+ Flags.defineJacksonFlag("jackson-id", new ExampleJacksonClass(123, "abc"), ExampleJacksonClass.class,
+ List.of("owner"), "1970-01-01", "2100-01-01", "desc", "mod", CONSOLE_USER_EMAIL, TENANT_ID);
+
+ Map<FlagId, FlagData> flagData = Stream.of(
+ flagData("string-id", rule("\"value1\"", condition(CONSOLE_USER_EMAIL, Condition.Type.WHITELIST, email1))),
+ flagData("int-id", rule("456")),
+ flagData("list-id",
+ rule("[\"value1\"]", condition(CONSOLE_USER_EMAIL, Condition.Type.WHITELIST, email1), condition(APPLICATION_ID, Condition.Type.BLACKLIST, "tenant1:video:default", "tenant1:video:default", "tenant2:music:default")),
+ rule("[\"value2\"]", condition(CONSOLE_USER_EMAIL, Condition.Type.WHITELIST, email2)),
+ rule("[\"value1\",\"value3\"]", condition(APPLICATION_ID, Condition.Type.BLACKLIST, "tenant1:video:default", "tenant1:video:default", "tenant2:music:default"))),
+ flagData("jackson-id", rule("{\"integer\":456,\"string\":\"xyz\"}", condition(CONSOLE_USER_EMAIL, Condition.Type.WHITELIST, email1), condition(TENANT_ID, Condition.Type.WHITELIST, "tenant1", "tenant3")))
+ ).collect(Collectors.toMap(FlagData::id, fd -> fd));
+
+ // double-id is not here as it does not have CONSOLE_USER_EMAIL dimension
+ assertUserFlags("{\"flags\":[" +
+ "{\"id\":\"int-id\",\"rules\":[{\"value\":456}]}," + // Default from DB
+ "{\"id\":\"jackson-id\",\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"tenant\"}],\"value\":{\"integer\":456,\"string\":\"xyz\"}},{\"value\":{\"integer\":123,\"string\":\"abc\"}}]}," + // Resolved for email
+ // Resolved for email, but conditions are empty since this user is not authorized for any tenants
+ "{\"id\":\"list-id\",\"rules\":[{\"conditions\":[{\"type\":\"blacklist\",\"dimension\":\"application\"}],\"value\":[\"value1\"]},{\"conditions\":[{\"type\":\"blacklist\",\"dimension\":\"application\"}],\"value\":[\"value1\",\"value3\"]},{\"value\":[\"a\"]}]}," +
+ "{\"id\":\"string-id\",\"rules\":[{\"value\":\"value1\"}]}]}", // resolved for email
+ flagData, Set.of(), false, email1);
+
+ // Same as the first one, but user is authorized for tenant1
+ assertUserFlags("{\"flags\":[" +
+ "{\"id\":\"int-id\",\"rules\":[{\"value\":456}]}," + // Default from DB
+ "{\"id\":\"jackson-id\",\"rules\":[{\"conditions\":[{\"type\":\"whitelist\",\"dimension\":\"tenant\",\"values\":[\"tenant1\"]}],\"value\":{\"integer\":456,\"string\":\"xyz\"}},{\"value\":{\"integer\":123,\"string\":\"abc\"}}]}," + // Resolved for email
+ // Resolved for email, but conditions have filtered out tenant2
+ "{\"id\":\"list-id\",\"rules\":[{\"conditions\":[{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"tenant1:video:default\",\"tenant1:video:default\"]}],\"value\":[\"value1\"]},{\"conditions\":[{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"tenant1:video:default\",\"tenant1:video:default\"]}],\"value\":[\"value1\",\"value3\"]},{\"value\":[\"a\"]}]}," +
+ "{\"id\":\"string-id\",\"rules\":[{\"value\":\"value1\"}]}]}", // resolved for email
+ flagData, Set.of("tenant1"), false, email1);
+
+ // As operator no conditions are filtered, but the email precondition is applied
+ assertUserFlags("{\"flags\":[" +
+ "{\"id\":\"int-id\",\"rules\":[{\"value\":456}]}," + // Default from DB
+ "{\"id\":\"jackson-id\",\"rules\":[{\"value\":{\"integer\":123,\"string\":\"abc\"}}]}," + // Default from code, no DB values match
+ // Includes last value from DB which is not conditioned on email and the default from code
+ "{\"id\":\"list-id\",\"rules\":[{\"conditions\":[{\"type\":\"blacklist\",\"dimension\":\"application\",\"values\":[\"tenant1:video:default\",\"tenant1:video:default\",\"tenant2:music:default\"]}],\"value\":[\"value1\",\"value3\"]},{\"value\":[\"a\"]}]}," +
+ "{\"id\":\"string-id\",\"rules\":[{\"value\":\"default value\"}]}]}", // Default from code
+ flagData, Set.of(), true, "operator@domain.tld");
+ }
+ }
+
+ private static FlagData flagData(String id, Rule... rules) {
+ return new FlagData(new FlagId(id), new FetchVector(), rules);
+ }
+
+ private static Rule rule(String data, Condition... conditions) {
+ return new Rule(Optional.ofNullable(data).map(JsonNodeRawFlag::fromJson), conditions);
+ }
+
+ private static Condition condition(FetchVector.Dimension dimension, Condition.Type type, String... values) {
+ return new Condition.CreateParams(dimension).withValues(values).createAs(type);
+ }
+
+ private static void assertUserFlags(String expected, Map<FlagId, FlagData> rawFlagData,
+ Set<String> authorizedForTenantNames, boolean isOperator, String userEmail) throws IOException {
+ Slime slime = new Slime();
+ UserFlagsSerializer.toSlime(slime.setObject(), rawFlagData, authorizedForTenantNames.stream().map(TenantName::from).collect(Collectors.toSet()), isOperator, userEmail);
+ JsonTestHelper.assertJsonEquals(expected,
+ new String(SlimeUtils.toJsonBytes(slime), StandardCharsets.UTF_8));
+ }
+
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ private static class ExampleJacksonClass {
+ @JsonProperty("integer") public final int integer;
+ @JsonProperty("string") public final String string;
+ private ExampleJacksonClass(@JsonProperty("integer") int integer, @JsonProperty("string") String string) {
+ this.integer = integer;
+ this.string = string;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ExampleJacksonClass that = (ExampleJacksonClass) o;
+ return integer == that.integer &&
+ Objects.equals(string, that.string);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(integer, string);
+ }
+ }
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
index 5d3a38334ad..0a416600b2c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
@@ -31,5 +31,6 @@
"reader"
]
}
- }
+ },
+ "flags": [{"id":"enable-public-signup-flow","rules":[{"value":false}]}]
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
index e883993cb53..4e179ad83c5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
@@ -29,5 +29,6 @@
],
"enabled-horizon-dashboard":false
}
- }
+ },
+ "flags": [{"id":"enable-public-signup-flow","rules":[{"value":false}]}]
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
index 3bf999b490b..7eb445140e7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
@@ -14,5 +14,6 @@
"hostedOperator",
"hostedSupporter",
"hostedAccountant"
- ]
+ ],
+ "flags": [{"id":"enable-public-signup-flow","rules":[{"value":false}]}]
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
index 27242424579..3c1edab8cfc 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
@@ -9,5 +9,6 @@
"nickname": "dev",
"verified":false
},
- "tenants": {}
+ "tenants": {},
+ "flags": [{"id":"enable-public-signup-flow","rules":[{"value":false}]}]
} \ No newline at end of file
diff --git a/defaults/CMakeLists.txt b/defaults/CMakeLists.txt
index c42e5402688..ed0ab1e6fb0 100644
--- a/defaults/CMakeLists.txt
+++ b/defaults/CMakeLists.txt
@@ -7,4 +7,4 @@ vespa_define_module(
src/apps/printdefault
)
-install_fat_java_artifact(defaults)
+# No separate java artifact is installed (part of config-bundle)
diff --git a/dist/release-vespa-rpm.sh b/dist/release-vespa-rpm.sh
index b217affe8fd..c975e10dd1a 100755
--- a/dist/release-vespa-rpm.sh
+++ b/dist/release-vespa-rpm.sh
@@ -1,62 +1,38 @@
#!/bin/bash
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
set -e
-if [ $# -ne 2 ]; then
+if [[ $# -ne 2 ]]; then
echo "Usage: $0 <version> <git ref>"
exit 1
fi
+if [[ -z $COPR_WEBHOOK ]]; then
+ echo "This script requires the COPR_WEBHOOK environment variable to be set."
+ exit 1
+fi
+
readonly VERSION=$1
readonly GITREF=$2
-readonly DIST_DIR="dist"
-readonly SPECFILE="${DIST_DIR}/vespa.spec"
-readonly TITO_DIR="${DIST_DIR}/.tito"
-readonly RPM_BRANCH="rpmbuild"
+readonly RELEASE_TAG="v$VERSION"
readonly CURRENT_BRANCH=$(git branch | grep "^\*" | cut -d' ' -f2)
# Make sure we are up to date
git checkout master
git pull --rebase
-# Update the VERSION file on master to be the next releasable version
-echo "$VERSION" | awk -F. '{print $1"."($2+1)".0"}' > VERSION
-git commit -am "Updating VERSION file to next releasable minor version."
-for i in 1 2 3; do
- if git push; then
- break;
- fi
- git pull --rebase
-done
-
# Create a proper release tag
-git tag -a "v$VERSION" -m "Release $VERSION" $GITREF
-git push origin "v$VERSION"
-
-# Delete existing branch if exists and create new one
-git push --delete origin $RPM_BRANCH &> /dev/null || true
-git branch -D $RPM_BRANCH &> /dev/null || true
-git checkout -b $RPM_BRANCH $GITREF
-
-# Tito expects spec file and .tito directory to be on root
-git mv $TITO_DIR .
-git mv $SPECFILE .
-
-# Hide pom.xml to avoid tito doing anything to our pom.xml files
-mv pom.xml pom.xml.hide
-
-# Run tito to update spec file and tag
-tito tag --use-version=$VERSION --no-auto-changelog
-# Push changes and tag to branc
-git push -u origin --follow-tags $RPM_BRANCH
+git tag -a "$RELEASE_TAG" -m "Release version $VERSION" $GITREF
+git push origin "$RELEASE_TAG"
# Trig the build on Copr
curl -X POST \
-H "Content-type: application/json" \
-H "X-GitHub-Event: create" \
- -d '{ "ref": "rpmbuild", "ref_type": "branch", "repository": { "clone_url": "https://github.com/vespa-engine/vespa.git" } }' \
- https://copr.fedorainfracloud.org/webhooks/github/8037/d1dd5867-b493-4647-a888-0c887e6087b3/
+ -d "{ \"ref\": \"$RELEASE_TAG\", \"ref_type\": \"tag\", \"repository\": { \"clone_url\": \"https://github.com/vespa-engine/vespa.git\" } }" \
+ "$COPR_WEBHOOK"
git reset --hard HEAD
git checkout $CURRENT_BRANCH
diff --git a/dist/vespa.spec b/dist/vespa.spec
index aa3d64401d3..e976d710fb5 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -791,7 +791,6 @@ fi
%{_prefix}/lib/jars/bcprov-jdk15on-*.jar
%{_prefix}/lib/jars/config-bundle-jar-with-dependencies.jar
%{_prefix}/lib/jars/configdefinitions-jar-with-dependencies.jar
-%{_prefix}/lib/jars/configgen.jar
%{_prefix}/lib/jars/config-model-api-jar-with-dependencies.jar
%{_prefix}/lib/jars/config-model-jar-with-dependencies.jar
%{_prefix}/lib/jars/config-provisioning-jar-with-dependencies.jar
@@ -799,7 +798,6 @@ fi
%{_prefix}/lib/jars/container-disc-jar-with-dependencies.jar
%{_prefix}/lib/jars/container-search-and-docproc-jar-with-dependencies.jar
%{_prefix}/lib/jars/container-search-gui-jar-with-dependencies.jar
-%{_prefix}/lib/jars/defaults-jar-with-dependencies.jar
%{_prefix}/lib/jars/docprocs-jar-with-dependencies.jar
%{_prefix}/lib/jars/flags-jar-with-dependencies.jar
%{_prefix}/lib/jars/hk2-*.jar
diff --git a/eval/src/apps/tensor_conformance/generate.cpp b/eval/src/apps/tensor_conformance/generate.cpp
index 8a596ad38d4..d0767b45224 100644
--- a/eval/src/apps/tensor_conformance/generate.cpp
+++ b/eval/src/apps/tensor_conformance/generate.cpp
@@ -14,6 +14,19 @@ using vespalib::make_string_short::fmt;
namespace {
+struct IgnoreJava : TestBuilder {
+ TestBuilder &dst;
+ IgnoreJava(TestBuilder &dst_in) : TestBuilder(dst_in.full), dst(dst_in) {}
+ void add(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &inputs,
+ const std::set<vespalib::string> &ignore) override
+ {
+ auto my_ignore = ignore;
+ my_ignore.insert("vespajlib");
+ dst.add(expression, inputs, my_ignore);
+ }
+};
+
//-----------------------------------------------------------------------------
const std::vector<vespalib::string> basic_layouts = {
@@ -273,6 +286,9 @@ void generate_join(TestBuilder &dst) {
generate_op2_join("min(a,b)", Div16(N()), dst);
generate_op2_join("max(a,b)", Div16(N()), dst);
generate_op2_join("bit(a,b)", Seq({-128, -43, -1, 0, 85, 127}), Seq({0, 1, 2, 3, 4, 5, 6, 7}), dst);
+ // TODO: add ignored Java test when it can be ignored
+ // IgnoreJava ignore_java(dst);
+ // generate_op2_join("hamming(a,b)", Seq({-128, -43, -1, 0, 85, 127}), ignore_java); // TODO: require java
// inverted lambda
generate_join_expr("join(a,b,f(a,b)(b-a))", Div16(N()), dst);
// custom lambda
@@ -331,6 +347,9 @@ void generate_merge(TestBuilder &dst) {
generate_op2_merge("min(a,b)", Div16(N()), dst);
generate_op2_merge("max(a,b)", Div16(N()), dst);
generate_op2_merge("bit(a,b)", Seq({-128, -43, -1, 0, 85, 127}), Seq({0, 1, 2, 3, 4, 5, 6, 7}), dst);
+ // TODO: add ignored Java test when it can be ignored
+ // IgnoreJava ignore_java(dst);
+ // generate_op2_merge("hamming(a,b)", Seq({-128, -43, -1, 0, 85, 127}), ignore_java); // TODO: require java
// inverted lambda
generate_merge_expr("merge(a,b,f(a,b)(b-a))", Div16(N()), dst);
// custom lambda
diff --git a/eval/src/apps/tensor_conformance/generate.h b/eval/src/apps/tensor_conformance/generate.h
index e9482b9015c..9aa90ae9a7a 100644
--- a/eval/src/apps/tensor_conformance/generate.h
+++ b/eval/src/apps/tensor_conformance/generate.h
@@ -18,11 +18,6 @@ struct TestBuilder {
{
add(expression, inputs, {});
}
- void add_ignore_java(const vespalib::string &expression,
- const std::map<vespalib::string,TensorSpec> &inputs)
- {
- add(expression, inputs, {"vespajlib"});
- }
virtual ~TestBuilder() {}
};
diff --git a/eval/src/apps/tensor_conformance/tensor_conformance.cpp b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
index e6bbb1f8a41..6c28b1e652e 100644
--- a/eval/src/apps/tensor_conformance/tensor_conformance.cpp
+++ b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
@@ -167,6 +167,15 @@ void print_test(const Inspector &test, OutputWriter &dst) {
}
auto result = eval_expr(test, prod_factory);
dst.printf("result: %s\n", result.to_string().c_str());
+ auto ignore = extract_fields(test["ignore"]);
+ if (!ignore.empty()) {
+ dst.printf("ignore:");
+ for (const auto &impl: ignore) {
+ REQUIRE(test["ignore"][impl].asBool());
+ dst.printf(" %s", impl.c_str());
+ }
+ dst.printf("\n");
+ }
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/tests/eval/inline_operation/inline_operation_test.cpp b/eval/src/tests/eval/inline_operation/inline_operation_test.cpp
index ae5f503b680..8e765708574 100644
--- a/eval/src/tests/eval/inline_operation/inline_operation_test.cpp
+++ b/eval/src/tests/eval/inline_operation/inline_operation_test.cpp
@@ -116,6 +116,7 @@ TEST(InlineOperationTest, op2_lambdas_are_recognized) {
EXPECT_EQ(as_op2("min(a,b)"), &Min::f);
EXPECT_EQ(as_op2("max(a,b)"), &Max::f);
EXPECT_EQ(as_op2("bit(a,b)"), &Bit::f);
+ EXPECT_EQ(as_op2("hamming(a,b)"), &Hamming::f);
}
TEST(InlineOperationTest, op2_lambdas_are_recognized_with_different_parameter_names) {
diff --git a/eval/src/tests/eval/node_tools/node_tools_test.cpp b/eval/src/tests/eval/node_tools/node_tools_test.cpp
index e8296c01d73..b95ea2d4b14 100644
--- a/eval/src/tests/eval/node_tools/node_tools_test.cpp
+++ b/eval/src/tests/eval/node_tools/node_tools_test.cpp
@@ -101,6 +101,7 @@ TEST("require that call node types can be copied") {
TEST_DO(verify_copy("elu(a)"));
TEST_DO(verify_copy("erf(a)"));
TEST_DO(verify_copy("bit(a,b)"));
+ TEST_DO(verify_copy("hamming(a,b)"));
}
TEST("require that tensor node types can NOT be copied (yet)") {
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index b2373f0d8f5..5b860f0e1b3 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -219,6 +219,7 @@ TEST("require that various operations resolve appropriate type") {
TEST_DO(verify_op1("elu(%s)")); // Elu
TEST_DO(verify_op1("erf(%s)")); // Erf
TEST_DO(verify_op2("bit(%s,%s)")); // Bit
+ TEST_DO(verify_op2("hamming(%s,%s)")); // Hamming
}
TEST("require that map resolves correct type") {
diff --git a/eval/src/vespa/eval/eval/call_nodes.cpp b/eval/src/vespa/eval/eval/call_nodes.cpp
index 798583cf89a..95dbecdd153 100644
--- a/eval/src/vespa/eval/eval/call_nodes.cpp
+++ b/eval/src/vespa/eval/eval/call_nodes.cpp
@@ -44,6 +44,7 @@ CallRepo::CallRepo() : _map() {
add(nodes::Elu());
add(nodes::Erf());
add(nodes::Bit());
+ add(nodes::Hamming());
}
} // namespace vespalib::eval::nodes
diff --git a/eval/src/vespa/eval/eval/call_nodes.h b/eval/src/vespa/eval/eval/call_nodes.h
index 945aba69596..47fc5d6eccd 100644
--- a/eval/src/vespa/eval/eval/call_nodes.h
+++ b/eval/src/vespa/eval/eval/call_nodes.h
@@ -140,6 +140,7 @@ struct Sigmoid : CallHelper<Sigmoid> { Sigmoid() : Helper("sigmoid", 1) {} };
struct Elu : CallHelper<Elu> { Elu() : Helper("elu", 1) {} };
struct Erf : CallHelper<Erf> { Erf() : Helper("erf", 1) {} };
struct Bit : CallHelper<Bit> { Bit() : Helper("bit", 2) {} };
+struct Hamming : CallHelper<Hamming> { Hamming() : Helper("hamming", 2) {} };
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/hamming_distance.h b/eval/src/vespa/eval/eval/hamming_distance.h
new file mode 100644
index 00000000000..3419de3569f
--- /dev/null
+++ b/eval/src/vespa/eval/eval/hamming_distance.h
@@ -0,0 +1,15 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <cstdint>
+
+namespace vespalib::eval {
+
+inline double hamming_distance(double a, double b) {
+ uint8_t x = (uint8_t) a;
+ uint8_t y = (uint8_t) b;
+ return __builtin_popcount(x ^ y);
+}
+
+}
diff --git a/eval/src/vespa/eval/eval/key_gen.cpp b/eval/src/vespa/eval/eval/key_gen.cpp
index a40a8887119..cbbce61402c 100644
--- a/eval/src/vespa/eval/eval/key_gen.cpp
+++ b/eval/src/vespa/eval/eval/key_gen.cpp
@@ -88,6 +88,7 @@ struct KeyGen : public NodeVisitor, public NodeTraverser {
void visit(const Elu &) override { add_byte(61); }
void visit(const Erf &) override { add_byte(62); }
void visit(const Bit &) override { add_byte(63); }
+ void visit(const Hamming &) override { add_byte(64); }
// traverse
bool open(const Node &node) override { node.accept(*this); return true; }
diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
index 3e4f4fe8257..a101745dca0 100644
--- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
+++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
@@ -5,6 +5,7 @@
#include <vespa/eval/eval/node_visitor.h>
#include <vespa/eval/eval/node_traverser.h>
#include <vespa/eval/eval/extract_bit.h>
+#include <vespa/eval/eval/hamming_distance.h>
#include <llvm/IR/Verifier.h>
#include <llvm/Support/TargetSelect.h>
#include <llvm/IR/IRBuilder.h>
@@ -31,6 +32,7 @@ double vespalib_eval_relu(double a) { return std::max(a, 0.0); }
double vespalib_eval_sigmoid(double a) { return 1.0 / (1.0 + std::exp(-1.0 * a)); }
double vespalib_eval_elu(double a) { return (a < 0) ? std::exp(a) - 1.0 : a; }
double vespalib_eval_bit(double a, double b) { return vespalib::eval::extract_bit(a, b); }
+double vespalib_eval_hamming(double a, double b) { return vespalib::eval::hamming_distance(a, b); }
using vespalib::eval::gbdt::Forest;
using resolve_function = double (*)(void *ctx, size_t idx);
@@ -651,6 +653,9 @@ struct FunctionBuilder : public NodeVisitor, public NodeTraverser {
void visit(const Bit &) override {
make_call_2("vespalib_eval_bit");
}
+ void visit(const Hamming &) override {
+ make_call_2("vespalib_eval_hamming");
+ }
};
FunctionBuilder::~FunctionBuilder() { }
diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
index e04b477750d..727954d59e9 100644
--- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
+++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
@@ -20,6 +20,7 @@ extern "C" {
double vespalib_eval_sigmoid(double a);
double vespalib_eval_elu(double a);
double vespalib_eval_bit(double a, double b);
+ double vespalib_eval_hamming(double a, double b);
};
namespace vespalib::eval {
diff --git a/eval/src/vespa/eval/eval/make_tensor_function.cpp b/eval/src/vespa/eval/eval/make_tensor_function.cpp
index 498be2a738b..7746676f86b 100644
--- a/eval/src/vespa/eval/eval/make_tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/make_tensor_function.cpp
@@ -360,6 +360,9 @@ struct TensorFunctionBuilder : public NodeVisitor, public NodeTraverser {
void visit(const Bit &node) override {
make_join(node, operation::Bit::f);
}
+ void visit(const Hamming &node) override {
+ make_join(node, operation::Hamming::f);
+ }
//-------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/node_tools.cpp b/eval/src/vespa/eval/eval/node_tools.cpp
index fa2d16a2271..48ee1a90b67 100644
--- a/eval/src/vespa/eval/eval/node_tools.cpp
+++ b/eval/src/vespa/eval/eval/node_tools.cpp
@@ -183,6 +183,7 @@ struct CopyNode : NodeTraverser, NodeVisitor {
void visit(const Elu &node) override { copy_call(node); }
void visit(const Erf &node) override { copy_call(node); }
void visit(const Bit &node) override { copy_call(node); }
+ void visit(const Hamming &node) override { copy_call(node); }
// traverse nodes
bool open(const Node &) override { return !error; }
diff --git a/eval/src/vespa/eval/eval/node_types.cpp b/eval/src/vespa/eval/eval/node_types.cpp
index 8622fd734f1..2cb6e637201 100644
--- a/eval/src/vespa/eval/eval/node_types.cpp
+++ b/eval/src/vespa/eval/eval/node_types.cpp
@@ -279,6 +279,7 @@ struct TypeResolver : public NodeVisitor, public NodeTraverser {
void visit(const Elu &node) override { resolve_op1(node); }
void visit(const Erf &node) override { resolve_op1(node); }
void visit(const Bit &node) override { resolve_op2(node); }
+ void visit(const Hamming &node) override { resolve_op2(node); }
//-------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/node_visitor.h b/eval/src/vespa/eval/eval/node_visitor.h
index 475bbf5405c..b581a94f7ee 100644
--- a/eval/src/vespa/eval/eval/node_visitor.h
+++ b/eval/src/vespa/eval/eval/node_visitor.h
@@ -86,6 +86,7 @@ struct NodeVisitor {
virtual void visit(const nodes::Elu &) = 0;
virtual void visit(const nodes::Erf &) = 0;
virtual void visit(const nodes::Bit &) = 0;
+ virtual void visit(const nodes::Hamming &) = 0;
virtual ~NodeVisitor() {}
};
@@ -156,6 +157,7 @@ struct EmptyNodeVisitor : NodeVisitor {
void visit(const nodes::Elu &) override {}
void visit(const nodes::Erf &) override {}
void visit(const nodes::Bit &) override {}
+ void visit(const nodes::Hamming &) override {}
};
} // namespace vespalib::eval
diff --git a/eval/src/vespa/eval/eval/operation.cpp b/eval/src/vespa/eval/eval/operation.cpp
index a82a79e6bc4..ddd188d250f 100644
--- a/eval/src/vespa/eval/eval/operation.cpp
+++ b/eval/src/vespa/eval/eval/operation.cpp
@@ -4,6 +4,7 @@
#include "function.h"
#include "key_gen.h"
#include "extract_bit.h"
+#include "hamming_distance.h"
#include <vespa/vespalib/util/approx.h>
#include <algorithm>
@@ -52,6 +53,7 @@ double Sigmoid::f(double a) { return 1.0 / (1.0 + std::exp(-1.0 * a)); }
double Elu::f(double a) { return (a < 0) ? std::exp(a) - 1 : a; }
double Erf::f(double a) { return std::erf(a); }
double Bit::f(double a, double b) { return extract_bit(a, b); }
+double Hamming::f(double a, double b) { return hamming_distance(a, b); }
//-----------------------------------------------------------------------------
double Inv::f(double a) { return (1.0 / a); }
double Square::f(double a) { return (a * a); }
@@ -146,6 +148,7 @@ std::map<vespalib::string,op2_t> make_op2_map() {
add_op2(map, "min(a,b)", Min::f);
add_op2(map, "max(a,b)", Max::f);
add_op2(map, "bit(a,b)", Bit::f);
+ add_op2(map, "hamming(a,b)", Hamming::f);
return map;
}
diff --git a/eval/src/vespa/eval/eval/operation.h b/eval/src/vespa/eval/eval/operation.h
index 438b510b714..e2a524f318c 100644
--- a/eval/src/vespa/eval/eval/operation.h
+++ b/eval/src/vespa/eval/eval/operation.h
@@ -50,6 +50,7 @@ struct Sigmoid { static double f(double a); };
struct Elu { static double f(double a); };
struct Erf { static double f(double a); };
struct Bit { static double f(double a, double b); };
+struct Hamming { static double f(double a, double b); };
//-----------------------------------------------------------------------------
struct Inv { static double f(double a); };
struct Square { static double f(double a); };
diff --git a/eval/src/vespa/eval/eval/test/eval_spec.cpp b/eval/src/vespa/eval/eval/test/eval_spec.cpp
index 5d51a1d23b5..03b3af84fc9 100644
--- a/eval/src/vespa/eval/eval/test/eval_spec.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_spec.cpp
@@ -8,6 +8,24 @@
namespace vespalib::eval::test {
+namespace {
+
+double byte(const vespalib::string &bits) {
+ int8_t res = 0;
+ assert(bits.size() == 8);
+ for (const auto &c: bits) {
+ if (c == '1') {
+ res = (res << 1) | 1;
+ } else {
+ assert(c == '0');
+ res = (res << 1);
+ }
+ }
+ return res;
+}
+
+} // <unnamed>
+
constexpr double my_nan = std::numeric_limits<double>::quiet_NaN();
constexpr double my_inf = std::numeric_limits<double>::infinity();
@@ -169,6 +187,9 @@ EvalSpec::add_function_call_cases() {
.add_case({85, 3}, 0.0).add_case({85, 2}, 1.0).add_case({85, 1}, 0.0).add_case({85, 0}, 1.0)
.add_case({127, 7}, 0.0).add_case({127, 6}, 1.0).add_case({127, 5}, 1.0).add_case({127, 4}, 1.0)
.add_case({127, 3}, 1.0).add_case({127, 2}, 1.0).add_case({127, 1}, 1.0).add_case({127, 0}, 1.0);
+ add_expression({"a", "b"}, "hamming(a,b)")
+ .add_case({0, 0}, 0.0).add_case({-1, -1}, 0.0).add_case({-1, 0}, 8.0).add_case({0, -1}, 8.0)
+ .add_case({byte("11001100"), byte("10101010")}, 4.0).add_case({byte("11001100"), byte("11110000")}, 4.0);
}
void
diff --git a/eval/src/vespa/eval/eval/test/reference_evaluation.cpp b/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
index 58e4b91f6d9..def3f64c1a1 100644
--- a/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
+++ b/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
@@ -338,6 +338,9 @@ struct EvalNode : public NodeVisitor {
void visit(const Bit &node) override {
eval_join(node.get_child(0), node.get_child(1), operation::Bit::f);
}
+ void visit(const Hamming &node) override {
+ eval_join(node.get_child(0), node.get_child(1), operation::Hamming::f);
+ }
};
TensorSpec eval_node(const Node &node, const std::vector<TensorSpec> &params) {
diff --git a/eval/src/vespa/eval/eval/visit_stuff.cpp b/eval/src/vespa/eval/eval/visit_stuff.cpp
index 786562d823f..1d684e1c340 100644
--- a/eval/src/vespa/eval/eval/visit_stuff.cpp
+++ b/eval/src/vespa/eval/eval/visit_stuff.cpp
@@ -60,6 +60,7 @@ vespalib::string name_of(join_fun_t fun) {
if (fun == operation::Min::f) return "min";
if (fun == operation::Max::f) return "max";
if (fun == operation::Bit::f) return "bit";
+ if (fun == operation::Hamming::f) return "hamming";
return "[other join function]";
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java b/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java
index ede7bd6a109..5b3b2a94beb 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java
@@ -4,9 +4,7 @@ package com.yahoo.vespa.flags;
import com.yahoo.vespa.flags.json.DimensionHelper;
import javax.annotation.concurrent.Immutable;
-import java.util.Collections;
import java.util.EnumMap;
-import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
@@ -72,15 +70,15 @@ public class FetchVector {
private final Map<Dimension, String> map;
public FetchVector() {
- this.map = Collections.emptyMap();
+ this.map = Map.of();
}
public static FetchVector fromMap(Map<Dimension, String> map) {
- return new FetchVector(new HashMap<>(map));
+ return new FetchVector(map);
}
private FetchVector(Map<Dimension, String> map) {
- this.map = Collections.unmodifiableMap(map);
+ this.map = Map.copyOf(map);
}
public Optional<String> getValue(Dimension dimension) {
@@ -93,6 +91,10 @@ public class FetchVector {
public boolean isEmpty() { return map.isEmpty(); }
+ public boolean hasDimension(FetchVector.Dimension dimension) {
+ return map.containsKey(dimension);
+ }
+
/**
* Returns a new FetchVector, identical to {@code this} except for its value in {@code dimension}.
* Dimension is removed if the value is null.
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/FlagDefinition.java b/flags/src/main/java/com/yahoo/vespa/flags/FlagDefinition.java
index d01ca64cb9f..7ddbd85a904 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/FlagDefinition.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/FlagDefinition.java
@@ -3,9 +3,9 @@ package com.yahoo.vespa.flags;
import javax.annotation.concurrent.Immutable;
import java.time.Instant;
-import java.util.Arrays;
-import java.util.Collections;
+import java.util.EnumSet;
import java.util.List;
+import java.util.Set;
/**
* @author hakonhall
@@ -28,14 +28,14 @@ public class FlagDefinition {
String description,
String modificationEffect,
FetchVector.Dimension... dimensions) {
- validate(owners, createdAt, expiresAt);
this.unboundFlag = unboundFlag;
this.owners = owners;
this.createdAt = createdAt;
this.expiresAt = expiresAt;
this.description = description;
this.modificationEffect = modificationEffect;
- this.dimensions = Collections.unmodifiableList(Arrays.asList(dimensions));
+ this.dimensions = List.of(dimensions);
+ validate(owners, createdAt, expiresAt, this.dimensions);
}
public UnboundFlag<?, ?, ?> getUnboundFlag() {
@@ -60,13 +60,14 @@ public class FlagDefinition {
public Instant getExpiresAt() { return expiresAt; }
- private static void validate(List<String> owners, Instant createdAt, Instant expiresAt) {
+ private static void validate(List<String> owners, Instant createdAt, Instant expiresAt, List<FetchVector.Dimension> dimensions) {
if (expiresAt.isBefore(createdAt)) {
throw new IllegalArgumentException(
String.format(
"Flag cannot expire before its creation date (createdAt='%s', expiresAt='%s')",
createdAt, expiresAt));
}
+
if (owners == PermanentFlags.OWNERS) {
if (!createdAt.equals(PermanentFlags.CREATED_AT) || !expiresAt.equals(PermanentFlags.EXPIRES_AT)) {
throw new IllegalArgumentException("Invalid creation or expiration date for permanent flag");
@@ -74,5 +75,15 @@ public class FlagDefinition {
} else if (owners.isEmpty()) {
throw new IllegalArgumentException("Owner(s) must be specified");
}
+
+ if (dimensions.contains(FetchVector.Dimension.CONSOLE_USER_EMAIL)) {
+ Set<FetchVector.Dimension> disallowedCombinations = EnumSet.allOf(FetchVector.Dimension.class);
+ disallowedCombinations.remove(FetchVector.Dimension.CONSOLE_USER_EMAIL);
+ disallowedCombinations.remove(FetchVector.Dimension.APPLICATION_ID);
+ disallowedCombinations.remove(FetchVector.Dimension.TENANT_ID);
+ disallowedCombinations.retainAll(dimensions);
+ if (!disallowedCombinations.isEmpty())
+ throw new IllegalArgumentException("Dimension " + FetchVector.Dimension.CONSOLE_USER_EMAIL + " cannot be combined with " + disallowedCombinations);
+ }
}
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 7c9ee425d8f..d4157b659ae 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -13,6 +13,7 @@ import java.util.Optional;
import java.util.TreeMap;
import static com.yahoo.vespa.flags.FetchVector.Dimension.APPLICATION_ID;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.CONSOLE_USER_EMAIL;
import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME;
import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.VESPA_VERSION;
@@ -79,7 +80,7 @@ public class Flags {
ZONE_ID, APPLICATION_ID);
public static final UnboundBooleanFlag ENFORCE_RANK_PROFILE_INHERITANCE = defineFeatureFlag(
- "enforce-rank-profile-inheritance", false,
+ "enforce-rank-profile-inheritance", true,
List.of("baldersheim"), "2021-09-07", "2021-10-01",
"Should we enforce verification of rank-profile inheritance.",
"Takes effect at redeployment",
@@ -134,13 +135,6 @@ public class Flags {
"Number of threads used for speeding up building of models.",
"Takes effect on first (re)start of config server");
- public static final UnboundBooleanFlag GROUP_PERMANENT_SUSPENSION = defineFeatureFlag(
- "group-permanent-suspension", true,
- List.of("hakonhall"), "2021-09-11", "2021-11-11",
- "Allow all content nodes in a hierarchical group to suspend at the same time when" +
- "permanently suspending a host.",
- "Takes effect on the next permanent suspension request to the Orchestrator.");
-
public static final UnboundBooleanFlag ENCRYPT_DIRTY_DISK = defineFeatureFlag(
"encrypt-dirty-disk", false,
List.of("hakonhall"), "2021-05-14", "2021-10-05",
@@ -154,13 +148,19 @@ public class Flags {
"Takes effect on next host-admin tick.");
public static final UnboundBooleanFlag NEW_SPARE_DISKS = defineFeatureFlag(
- "new-spare-disks", false,
+ "new-spare-disks", true,
List.of("hakonhall"), "2021-09-08", "2021-11-08",
"Use a new algorithm to calculate the spare disks of a host.",
"Takes effect on first run of DiskTask, typically after host-admin restart/upgrade.");
+ public static final UnboundBooleanFlag LOCAL_SUSPEND = defineFeatureFlag(
+ "local-suspend", true,
+ List.of("hakonhall"), "2021-09-21", "2021-10-21",
+ "Whether the cfghost host admin should suspend against only the local cfg (true and legacy) or all.",
+ "Takes effect immediately.");
+
public static final UnboundBooleanFlag USE_UNKNOWN_SERVICE_STATUS = defineFeatureFlag(
- "use-unknown-service-status", false,
+ "use-unknown-service-status", true,
List.of("hakonhall"), "2021-09-13", "2021-10-13",
"Whether to use the UNKNOWN ServiceStatus for services that have not yet been probed by service monitor.",
"Takes effect on first (re)start of config server.");
@@ -234,13 +234,6 @@ public class Flags {
"Takes effect on next restart",
ZONE_ID, APPLICATION_ID);
- public static final UnboundBooleanFlag THROW_EXCEPTION_IF_RESOURCE_LIMITS_SPECIFIED = defineFeatureFlag(
- "throw-exception-if-resource-limits-specified", true,
- List.of("hmusum"), "2021-06-07", "2021-10-01",
- "Whether to throw an exception in hosted Vespa if the application specifies resource limits in services.xml",
- "Takes effect on next deployment through controller",
- APPLICATION_ID);
-
public static final UnboundListFlag<String> DEFER_APPLICATION_ENCRYPTION = defineListFlag(
"defer-application-encryption", List.of(), String.class,
List.of("mpolden", "hakonhall"), "2021-06-23", "2021-10-01",
@@ -286,7 +279,7 @@ public class Flags {
List.of("olaa"), "2021-09-13", "2021-12-31",
"Enable Horizon dashboard",
"Takes effect immediately",
- TENANT_ID
+ TENANT_ID, CONSOLE_USER_EMAIL
);
public static final UnboundBooleanFlag ENABLE_ONPREM_TENANT_S3_ARCHIVE = defineFeatureFlag(
@@ -424,8 +417,8 @@ public class Flags {
*
* <p>NOT thread-safe. Tests using this cannot run in parallel.
*/
- public static Replacer clearFlagsForTesting() {
- return new Replacer();
+ public static Replacer clearFlagsForTesting(FlagId... flagsToKeep) {
+ return new Replacer(flagsToKeep);
}
public static class Replacer implements AutoCloseable {
@@ -433,10 +426,11 @@ public class Flags {
private final TreeMap<FlagId, FlagDefinition> savedFlags;
- private Replacer() {
+ private Replacer(FlagId... flagsToKeep) {
verifyAndSetFlagsCleared(true);
this.savedFlags = Flags.flags;
Flags.flags = new TreeMap<>();
+ List.of(flagsToKeep).forEach(id -> Flags.flags.put(id, savedFlags.get(id)));
}
@Override
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index 9c06fe5fa7d..cdce5b03fea 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -169,6 +169,13 @@ public class PermanentFlags {
APPLICATION_ID
);
+ public static final UnboundStringFlag ENDPOINT_CERTIFICATE_ALGORITHM = defineStringFlag(
+ "endpoint-certificate-algorithm", "",
+ // Acceptable values are: "rsa_2048", "rsa_4096", "ecdsa_p256"
+ "Selects algorithm used for an applications endpoint certificate, or use provider default if blank",
+ "Takes effect when a new endpoint certificate is requested (first deployment of new application/instance)",
+ APPLICATION_ID);
+
public static final UnboundDoubleFlag RESOURCE_LIMIT_DISK = defineDoubleFlag(
"resource-limit-disk", 0.8,
"Resource limit (between 0.0 and 1.0) for disk used by cluster controller for when to block feed",
@@ -190,6 +197,12 @@ public class PermanentFlags {
APPLICATION_ID, HOSTNAME
);
+ public static final UnboundStringFlag CONFIG_PROXY_JVM_ARGS = defineStringFlag(
+ "config-proxy-jvm-args", "",
+ "Sets jvm args for config proxy (added at the end of startup command, will override existing ones)",
+ "Takes effect on restart of Docker container",
+ ZONE_ID, APPLICATION_ID);
+
private PermanentFlags() {}
private static UnboundBooleanFlag defineFeatureFlag(
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/Condition.java b/flags/src/main/java/com/yahoo/vespa/flags/json/Condition.java
index 46961fbd8cc..f73e0033773 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/Condition.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/Condition.java
@@ -52,6 +52,16 @@ public interface Condition extends Predicate<FetchVector> {
public FetchVector.Dimension dimension() { return dimension; }
public List<String> values() { return values; }
public Optional<String> predicate() { return predicate; }
+
+ public Condition createAs(Condition.Type type) {
+ switch (type) {
+ case WHITELIST: return WhitelistCondition.create(this);
+ case BLACKLIST: return BlacklistCondition.create(this);
+ case RELATIONAL: return RelationalCondition.create(this);
+ }
+
+ throw new IllegalArgumentException("Unknown type '" + type + "'");
+ }
}
static Condition fromWire(WireCondition wireCondition) {
@@ -70,14 +80,14 @@ public interface Condition extends Predicate<FetchVector> {
params.withPredicate(wireCondition.predicate);
}
- switch (type) {
- case WHITELIST: return WhitelistCondition.create(params);
- case BLACKLIST: return BlacklistCondition.create(params);
- case RELATIONAL: return RelationalCondition.create(params);
- }
-
- throw new IllegalArgumentException("Unknown type '" + type + "'");
+ return params.createAs(type);
}
+ Condition.Type type();
+
+ FetchVector.Dimension dimension();
+
+ CreateParams toCreateParams();
+
WireCondition toWire();
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java b/flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java
index c4079380a8c..eea61eb71ef 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java
@@ -14,9 +14,6 @@ import com.yahoo.vespa.flags.json.wire.WireRule;
import javax.annotation.concurrent.Immutable;
import java.io.InputStream;
import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
@@ -35,16 +32,16 @@ public class FlagData {
private final FetchVector defaultFetchVector;
public FlagData(FlagId id) {
- this(id, new FetchVector(), Collections.emptyList());
+ this(id, new FetchVector(), List.of());
}
public FlagData(FlagId id, FetchVector defaultFetchVector, Rule... rules) {
- this(id, defaultFetchVector, Arrays.asList(rules));
+ this(id, defaultFetchVector, List.of(rules));
}
public FlagData(FlagId id, FetchVector defaultFetchVector, List<Rule> rules) {
this.id = id;
- this.rules = Collections.unmodifiableList(new ArrayList<>(rules));
+ this.rules = List.copyOf(rules);
this.defaultFetchVector = defaultFetchVector;
}
@@ -52,6 +49,10 @@ public class FlagData {
return id;
}
+ public List<Rule> rules() {
+ return rules;
+ }
+
public boolean isEmpty() { return rules.isEmpty() && defaultFetchVector.isEmpty(); }
public Optional<RawFlag> resolve(FetchVector fetchVector) {
@@ -136,7 +137,7 @@ public class FlagData {
}
private static List<Rule> rulesFromWire(List<WireRule> wireRules) {
- if (wireRules == null) return Collections.emptyList();
+ if (wireRules == null) return List.of();
return wireRules.stream().map(Rule::fromWire).collect(Collectors.toList());
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java b/flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java
index c2c76529833..136857bea5f 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java
@@ -27,6 +27,21 @@ public abstract class ListCondition implements Condition {
}
@Override
+ public Type type() {
+ return type;
+ }
+
+ @Override
+ public FetchVector.Dimension dimension() {
+ return dimension;
+ }
+
+ @Override
+ public CreateParams toCreateParams() {
+ return new CreateParams(dimension).withValues(values);
+ }
+
+ @Override
public boolean test(FetchVector fetchVector) {
boolean listContainsValue = fetchVector.getValue(dimension).map(values::contains).orElse(false);
return isWhitelist == listContainsValue;
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
index db2f0a3a197..4ed3e49029f 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
@@ -48,6 +48,21 @@ public class RelationalCondition implements Condition {
}
@Override
+ public Type type() {
+ return Type.RELATIONAL;
+ }
+
+ @Override
+ public FetchVector.Dimension dimension() {
+ return dimension;
+ }
+
+ @Override
+ public CreateParams toCreateParams() {
+ return new CreateParams(dimension).withPredicate(relationalPredicate.toWire());
+ }
+
+ @Override
public boolean test(FetchVector fetchVector) {
return fetchVector.getValue(dimension).map(predicate::test).orElse(false);
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java b/flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java
index b7d60889419..0d50f1e283f 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java
@@ -6,7 +6,6 @@ import com.yahoo.vespa.flags.JsonNodeRawFlag;
import com.yahoo.vespa.flags.RawFlag;
import com.yahoo.vespa.flags.json.wire.WireRule;
-import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
@@ -20,18 +19,32 @@ public class Rule {
private final Optional<RawFlag> valueToApply;
public Rule(Optional<RawFlag> valueToApply, Condition... andConditions) {
- this(valueToApply, Arrays.asList(andConditions));
+ this(valueToApply, List.of(andConditions));
}
public Rule(Optional<RawFlag> valueToApply, List<Condition> andConditions) {
- this.andConditions = andConditions;
+ this.andConditions = List.copyOf(andConditions);
this.valueToApply = valueToApply;
}
+ public List<Condition> conditions() {
+ return andConditions;
+ }
+
+ /** Returns true if all the conditions satisfy the given fetch vector */
public boolean match(FetchVector fetchVector) {
return andConditions.stream().allMatch(condition -> condition.test(fetchVector));
}
+ /**
+ * Returns true if all the conditions on dimensions set in the fetch vector are satisfied.
+ * Conditions on dimensions not specified in the given fetch vector are ignored.
+ */
+ public boolean partialMatch(FetchVector fetchVector) {
+ return andConditions.stream()
+ .allMatch(condition -> !fetchVector.hasDimension(condition.dimension()) || condition.test(fetchVector));
+ }
+
public Optional<RawFlag> getValueToApply() {
return valueToApply;
}
diff --git a/fnet/src/vespa/fnet/transport_debugger.cpp b/fnet/src/vespa/fnet/transport_debugger.cpp
index 1878b921171..32179aba254 100644
--- a/fnet/src/vespa/fnet/transport_debugger.cpp
+++ b/fnet/src/vespa/fnet/transport_debugger.cpp
@@ -50,10 +50,10 @@ TransportDebugger::attach(std::initializer_list<std::reference_wrapper<FNET_Tran
}
void
-TransportDebugger::step()
+TransportDebugger::step(vespalib::duration time_passed)
{
REQUIRE(_meet);
- _time += 5ms; // pretend 5ms passes between each event loop iteration
+ _time += time_passed; // pretend time passes between each event loop iteration
REQUIRE(_meet->rendezvous(true)); // release transport threads
REQUIRE(_meet->rendezvous(true)); // capture transport threads
}
diff --git a/fnet/src/vespa/fnet/transport_debugger.h b/fnet/src/vespa/fnet/transport_debugger.h
index ed3738bb9fe..30b0c4dcb1c 100644
--- a/fnet/src/vespa/fnet/transport_debugger.h
+++ b/fnet/src/vespa/fnet/transport_debugger.h
@@ -21,10 +21,10 @@ namespace fnet {
* is used to start controlling event loop execution. While attached,
* calling the step function will run each transport thread event loop
* exactly once (in parallel), wait for pending dns resolving, wait
- * for pending tls handshake work and advance the current time with
- * 5ms (making sure 'time passes' and 'stuff happens' at a reasonable
- * relative rate). It is important to call detach to release the
- * transports before trying to shut them down.
+ * for pending tls handshake work and advance the current time (the
+ * default 5ms will make sure 'time passes' and 'stuff happens' at a
+ * reasonable relative rate). It is important to call detach to
+ * release the transports before trying to shut them down.
*
* Note that both server and client should be controlled by the same
* debugger when testing rpc. Using external services will result in
@@ -53,7 +53,15 @@ public:
return TimeTools::make_debug(vespalib::duration::zero(), [this]() noexcept { return time(); });
}
void attach(std::initializer_list<std::reference_wrapper<FNET_Transport> > list);
- void step();
+ void step(vespalib::duration time_passed = 5ms);
+ template <typename Pred>
+ bool step_until(Pred pred, vespalib::duration time_limit = 120s) {
+ auto start = time();
+ while (!pred() && ((time() - start) < time_limit)) {
+ step();
+ }
+ return pred();
+ }
void detach();
};
diff --git a/functions.cmake b/functions.cmake
index fe59cc3aaa9..3d192552be5 100644
--- a/functions.cmake
+++ b/functions.cmake
@@ -125,17 +125,13 @@ function(vespa_generate_config TARGET RELATIVE_CONFIG_DEF_PATH)
get_filename_component(CONFIG_NAME ${RELATIVE_CONFIG_DEF_PATH} NAME_WE)
endif()
- # configgen.jar takes the parent dir of the destination dir and the destination dirname as separate parameters
- # so it can produce the correct include statements within the generated .cpp-file (silent cry)
# Make config path an absolute_path
set(CONFIG_DEF_PATH ${CMAKE_CURRENT_LIST_DIR}/${RELATIVE_CONFIG_DEF_PATH})
- # Config destination is the
+ # Config destination is the current source directory (or parallel in build tree)
+ # configgen.jar takes the destination dirname as a property parameter
set(CONFIG_DEST_DIR ${CMAKE_CURRENT_BINARY_DIR})
- # Get parent of destination directory
- set(CONFIG_DEST_PARENT_DIR ${CONFIG_DEST_DIR}/..)
-
# Get destination dirname
get_filename_component(CONFIG_DEST_DIRNAME ${CMAKE_CURRENT_BINARY_DIR} NAME)
@@ -144,8 +140,8 @@ function(vespa_generate_config TARGET RELATIVE_CONFIG_DEF_PATH)
add_custom_command(
OUTPUT ${CONFIG_H_PATH} ${CONFIG_CPP_PATH}
- COMMAND java -Dconfig.spec=${CONFIG_DEF_PATH} -Dconfig.dest=${CONFIG_DEST_PARENT_DIR} -Dconfig.lang=cpp -Dconfig.subdir=${CONFIG_DEST_DIRNAME} -Dconfig.dumpTree=false -Xms64m -Xmx64m -jar ${PROJECT_SOURCE_DIR}/configgen/target/configgen.jar
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..
+ COMMAND java -Dconfig.spec=${CONFIG_DEF_PATH} -Dconfig.dest=${CONFIG_DEST_DIR} -Dconfig.lang=cpp -Dconfig.dumpTree=false -Xms64m -Xmx64m -jar ${PROJECT_SOURCE_DIR}/configgen/target/configgen.jar
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Generating cpp config for ${CONFIG_NAME} in ${CMAKE_CURRENT_SOURCE_DIR}"
MAIN_DEPENDENCY ${CONFIG_DEF_PATH}
)
@@ -157,11 +153,6 @@ function(vespa_generate_config TARGET RELATIVE_CONFIG_DEF_PATH)
# Add generated to sources for target
target_sources(${TARGET} PRIVATE ${CONFIG_H_PATH} ${CONFIG_CPP_PATH})
- # Needed to be able to do a #include <CONFIG_DEST_DIRNAME/config-<name>.h> for this target
- # This is used within the generated config-<name>.cpp
- # TODO: Should modify configgen to use #include <vespa/<modulename>/config-<name>.h> instead
- target_include_directories(${TARGET} PRIVATE ${CONFIG_DEST_PARENT_DIR})
-
# Needed to be able to do a #include <config-<name>.h> for this target
# This is used within some unit tests
target_include_directories(${TARGET} PRIVATE ${CONFIG_DEST_DIR})
diff --git a/linguistics/abi-spec.json b/linguistics/abi-spec.json
index dc7450678c5..dbf4842ea1a 100644
--- a/linguistics/abi-spec.json
+++ b/linguistics/abi-spec.json
@@ -918,16 +918,5 @@
"public java.lang.String normalize(java.lang.String)"
],
"fields": []
- },
- "com.yahoo.language.sentencepiece.Trie": {
- "superClass": "java.lang.Object",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>()"
- ],
- "fields": []
}
} \ No newline at end of file
diff --git a/linguistics/src/main/java/com/yahoo/language/Linguistics.java b/linguistics/src/main/java/com/yahoo/language/Linguistics.java
index 64ef8762be8..8af0fcd42cb 100644
--- a/linguistics/src/main/java/com/yahoo/language/Linguistics.java
+++ b/linguistics/src/main/java/com/yahoo/language/Linguistics.java
@@ -88,4 +88,5 @@ public interface Linguistics {
/** Check if another instance is equivalent to this one */
boolean equals(Linguistics other);
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/detect/Detection.java b/linguistics/src/main/java/com/yahoo/language/detect/Detection.java
index 4b816335154..127777db4d2 100644
--- a/linguistics/src/main/java/com/yahoo/language/detect/Detection.java
+++ b/linguistics/src/main/java/com/yahoo/language/detect/Detection.java
@@ -44,4 +44,5 @@ public class Detection {
public boolean isLocal() {
return local;
}
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/detect/DetectionException.java b/linguistics/src/main/java/com/yahoo/language/detect/DetectionException.java
index a43dc0cb537..5fceabefae3 100644
--- a/linguistics/src/main/java/com/yahoo/language/detect/DetectionException.java
+++ b/linguistics/src/main/java/com/yahoo/language/detect/DetectionException.java
@@ -4,11 +4,12 @@ package com.yahoo.language.detect;
/**
* Exception that is thrown when detection fails.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
*/
public final class DetectionException extends RuntimeException {
public DetectionException(String str) {
super(str);
}
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/detect/Hint.java b/linguistics/src/main/java/com/yahoo/language/detect/Hint.java
index 50291c922e8..b6bf4403cf3 100644
--- a/linguistics/src/main/java/com/yahoo/language/detect/Hint.java
+++ b/linguistics/src/main/java/com/yahoo/language/detect/Hint.java
@@ -2,9 +2,9 @@
package com.yahoo.language.detect;
/**
- * <p>A hint that can be given to a {@link Detector}.</p>
+ * A hint that can be given to a {@link Detector}.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
*/
public class Hint {
@@ -35,4 +35,5 @@ public class Hint {
public static Hint newInstance(String market, String country) {
return new Hint(market, country);
}
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java
index 64888dba183..0edd48f5ee3 100644
--- a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java
+++ b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java
@@ -49,4 +49,5 @@ public class OpenNlpLinguistics extends SimpleLinguistics {
@Override
public boolean equals(Linguistics other) { return (other instanceof OpenNlpLinguistics); }
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
index 73518876c3f..603905bead8 100644
--- a/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
+++ b/linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpTokenizer.java
@@ -19,7 +19,6 @@ import opennlp.tools.stemmer.Stemmer;
import opennlp.tools.stemmer.snowball.SnowballStemmer;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
/**
@@ -52,7 +51,7 @@ public class OpenNlpTokenizer implements Tokenizer {
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
- if (input.isEmpty()) return Collections.emptyList();
+ if (input.isEmpty()) return List.of();
Stemmer stemmer = stemmerFor(language, stemMode);
if (stemmer == null) return simpleTokenizer.tokenize(input, language, stemMode, removeAccents);
diff --git a/linguistics/src/main/java/com/yahoo/language/opennlp/OptimaizeDetector.java b/linguistics/src/main/java/com/yahoo/language/opennlp/OptimaizeDetector.java
index bf07c91ba44..9bf1281e015 100644
--- a/linguistics/src/main/java/com/yahoo/language/opennlp/OptimaizeDetector.java
+++ b/linguistics/src/main/java/com/yahoo/language/opennlp/OptimaizeDetector.java
@@ -32,10 +32,10 @@ import java.util.logging.Level;
*/
public class OptimaizeDetector implements Detector {
- static private Object initGuard = new Object();
- static private TextObjectFactory textObjectFactory = null;
- static private LanguageDetector languageDetector = null;
- static private final Logger log = Logger.getLogger(OptimaizeDetector.class.getName());
+ private static final Object initGuard = new Object();
+ private static TextObjectFactory textObjectFactory = null;
+ private static LanguageDetector languageDetector = null;
+ private static final Logger log = Logger.getLogger(OptimaizeDetector.class.getName());
static private void initOptimaize() {
synchronized (initGuard) {
@@ -60,7 +60,7 @@ public class OptimaizeDetector implements Detector {
}
}
- private SimpleDetector simpleDetector = new SimpleDetector();
+ private final SimpleDetector simpleDetector = new SimpleDetector();
public OptimaizeDetector() {
initOptimaize();
diff --git a/linguistics/src/main/java/com/yahoo/language/process/ProcessingException.java b/linguistics/src/main/java/com/yahoo/language/process/ProcessingException.java
index 752992f5a26..99576240635 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/ProcessingException.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/ProcessingException.java
@@ -15,4 +15,5 @@ public class ProcessingException extends RuntimeException {
public ProcessingException(String message, Throwable cause) {
super(message, cause);
}
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/process/StemList.java b/linguistics/src/main/java/com/yahoo/language/process/StemList.java
index a38a2e51cb6..d5451e7660d 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/StemList.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/StemList.java
@@ -3,6 +3,7 @@ package com.yahoo.language.process;
import java.util.AbstractList;
import java.util.ArrayList;
+import java.util.List;
/**
* A list of strings which does not allow for duplicate elements.
@@ -10,7 +11,8 @@ import java.util.ArrayList;
* @author steinar
*/
public class StemList extends AbstractList<String> {
- private final ArrayList<String> stems;
+
+ private final List<String> stems;
public StemList() {
this(new String[0]);
diff --git a/linguistics/src/main/java/com/yahoo/language/process/StemMode.java b/linguistics/src/main/java/com/yahoo/language/process/StemMode.java
index 628f6910c9e..4adb5de62da 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/StemMode.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/StemMode.java
@@ -10,16 +10,10 @@ package com.yahoo.language.process;
*/
public enum StemMode {
- NONE(0),
- DEFAULT(1),
- ALL(2),
- SHORTEST(4),
- BEST(5);
-
- private final int value;
-
- StemMode(int value) {
- this.value = value;
- }
+ NONE,
+ DEFAULT,
+ ALL,
+ SHORTEST,
+ BEST;
}
diff --git a/linguistics/src/main/java/com/yahoo/language/process/Stemmer.java b/linguistics/src/main/java/com/yahoo/language/process/Stemmer.java
index a2d0d0a84c9..1c6180c1f59 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/Stemmer.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/Stemmer.java
@@ -18,7 +18,7 @@ public interface Stemmer {
* @param input the string to stem.
* @param mode the stemming mode
* @param language the language to use for stemming
- * @return list of possible stems. Empty if none.
+ * @return a list of possible stems. Empty if none.
* @throws ProcessingException thrown if there is an exception stemming this input
*/
List<StemList> stem(String input, StemMode mode, Language language);
diff --git a/linguistics/src/main/java/com/yahoo/language/process/StemmerImpl.java b/linguistics/src/main/java/com/yahoo/language/process/StemmerImpl.java
index f401ddaba99..dd830570e88 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/StemmerImpl.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/StemmerImpl.java
@@ -43,4 +43,5 @@ public class StemmerImpl implements Stemmer {
}
}
}
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/process/TokenScript.java b/linguistics/src/main/java/com/yahoo/language/process/TokenScript.java
index efe4073d97e..ff87b9b128b 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/TokenScript.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/TokenScript.java
@@ -5,7 +5,7 @@ package com.yahoo.language.process;
* List of token scripts (e.g. latin, japanese, chinese, etc.) which may warrant different
* linguistics treatment.
*
- * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias Mølster Lidal</a>
+ * @author Mathias Mølster Lidal
*/
public enum TokenScript {
diff --git a/linguistics/src/main/java/com/yahoo/language/sentencepiece/Trie.java b/linguistics/src/main/java/com/yahoo/language/sentencepiece/Trie.java
index 9abed89e7a2..8e7c2db2ed3 100644
--- a/linguistics/src/main/java/com/yahoo/language/sentencepiece/Trie.java
+++ b/linguistics/src/main/java/com/yahoo/language/sentencepiece/Trie.java
@@ -9,7 +9,7 @@ import java.util.Map;
*
* @author bratseth
*/
-public class Trie {
+class Trie {
final Node root = new Node();
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleDetector.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleDetector.java
index 3de0eb3e997..e15c6257414 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleDetector.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleDetector.java
@@ -70,15 +70,6 @@ public class SimpleDetector implements Detector {
block == Character.UnicodeBlock.HANGUL_COMPATIBILITY_JAMO) {
return Language.KOREAN;
}
- // katakana phonetic extensions.
- if (0x31f0 <= c && c <= 0x31ff) {
- // See http://www.unicode.org/charts/PDF/U31F0.pdf
- // This is a special case because This range of character
- // codes is classified as unasigned in
- // Character.UnicodeBlock. But clearly it is assigned as
- // per above.
- return Language.JAPANESE;
- }
if (0x31f0 <= c && c <= 0x31ff || // these are standard character blocks for japanese characters.
block == Character.UnicodeBlock.HIRAGANA ||
block == Character.UnicodeBlock.KATAKANA ||
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
index 026bc8add25..b319c343510 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleLinguistics.java
@@ -72,4 +72,5 @@ public class SimpleLinguistics implements Linguistics {
@Override
public boolean equals(Linguistics other) { return (other instanceof SimpleLinguistics); }
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java
index d7eb8a72ed8..b5c11b13c67 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleTokenType.java
@@ -65,4 +65,5 @@ public class SimpleTokenType {
}
throw new UnsupportedOperationException(String.valueOf(Character.getType(codePoint)));
}
+
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsException.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsException.java
deleted file mode 100644
index bdfbc65882f..00000000000
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsException.java
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-package ai.vespa.metricsproxy.http.application;
-
-/**
- * @author gjoranv
- */
-class ApplicationMetricsException extends RuntimeException {
-
- ApplicationMetricsException(String message, Throwable cause) {
- super(message, cause);
- }
-
-}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java
index 87a0d7d8ad9..2603b9025c2 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetriever.java
@@ -63,7 +63,6 @@ public class ApplicationMetricsRetriever extends AbstractComponent implements Ru
taskTimeout = timeout(clients.size());
stopped = false;
consumerSet = new HashSet<>();
- consumerSet.add(defaultMetricsConsumerId);
httpClient.start();
pollThread = new Thread(this, "metrics-poller");
pollThread.setDaemon(true);
@@ -104,17 +103,17 @@ public class ApplicationMetricsRetriever extends AbstractComponent implements Ru
pollThread.notifyAll();
}
try {
+ pollThread.join();
+ } catch (InterruptedException e) {}
+ try {
httpClient.close();
} catch (IOException e) {
log.warning("Failed closing httpclient: " + e);
}
- try {
- pollThread.join();
- } catch (InterruptedException e) {}
super.deconstruct();
}
- public Map<Node, List<MetricsPacket>> getMetrics() {
+ Map<Node, List<MetricsPacket>> getMetrics() {
return getMetrics(defaultMetricsConsumerId);
}
@@ -141,7 +140,8 @@ public class ApplicationMetricsRetriever extends AbstractComponent implements Ru
}
long before = pollCount;
pollThread.notifyAll();
- while (pollCount == before) {
+ while (pollCount <= before + 1) {
+ pollThread.notifyAll();
pollThread.wait();
}
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/ServiceHealthGatherer.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/ServiceHealthGatherer.java
index 4a6eeae474a..f9106b60ab2 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/ServiceHealthGatherer.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/ServiceHealthGatherer.java
@@ -1,6 +1,7 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.metricsproxy.node;
+import ai.vespa.metricsproxy.metric.HealthMetric;
import ai.vespa.metricsproxy.metric.model.ConsumerId;
import ai.vespa.metricsproxy.metric.model.DimensionId;
import ai.vespa.metricsproxy.metric.model.MetricsPacket;
@@ -19,15 +20,16 @@ public class ServiceHealthGatherer {
protected static List<MetricsPacket.Builder> gatherServiceHealthMetrics(VespaServices vespaServices) {
return vespaServices.getVespaServices()
.stream()
- .map(service ->
- new MetricsPacket.Builder(service.getMonitoringName())
+ .map(service -> {
+            HealthMetric health = service.getHealth();
+            return new MetricsPacket.Builder(service.getMonitoringName())
                .timestamp(Instant.now().getEpochSecond())
-                .statusMessage(service.getHealth().getStatus().status)
-                .statusCode(service.getHealth().getStatus().code)
+                .statusMessage(health.getStatus().status)
+                .statusCode(health.getStatus().code)
.putDimension(DimensionId.toDimensionId("instance"), service.getInstanceName())
.putDimension(DimensionId.toDimensionId("metrictype"), "health")
- .addConsumers(Set.of(ConsumerId.toConsumerId("Vespa")))
- )
+ .addConsumers(Set.of(ConsumerId.toConsumerId("Vespa")));
+ })
.collect(Collectors.toList());
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
index d07a52f42bd..3d834106ebc 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
@@ -24,7 +24,9 @@ import java.util.logging.Logger;
public class ConfigSentinelClient extends AbstractComponent {
private final static Logger log = Logger.getLogger(ConfigSentinelClient.class.getName());
+ private static final Spec SPEC = new Spec("localhost", 19097);
private final Supervisor supervisor;
+ private Target connection = null;
@Inject
public ConfigSentinelClient() {
@@ -33,6 +35,12 @@ public class ConfigSentinelClient extends AbstractComponent {
@Override
public void deconstruct() {
+ synchronized (this) {
+ if (connection != null) {
+ connection.close();
+ connection = null;
+ }
+ }
supervisor.transport().shutdown().join();
super.deconstruct();
}
@@ -126,7 +134,7 @@ public class ConfigSentinelClient extends AbstractComponent {
}
for (int i = 1; i < parts.length; i++) {
- String keyValue[] = parts[i].split("=");
+ String [] keyValue = parts[i].split("=");
String key = keyValue[0];
String value = keyValue[1];
@@ -155,26 +163,24 @@ public class ConfigSentinelClient extends AbstractComponent {
String sentinelLs() {
String servicelist = "";
- int rpcPort = 19097;
- Spec spec = new Spec("localhost", rpcPort);
- Target connection = supervisor.connect(spec);
- try {
- if (connection.isValid()) {
- Request req = new Request("sentinel.ls");
- connection.invokeSync(req, 5.0);
- if (req.errorCode() == ErrorCode.NONE &&
- req.checkReturnTypes("s"))
- {
- servicelist = req.returnValues().get(0).asString();
- } else {
- log.log(Level.WARNING, "Bad answer to RPC request: " + req.errorMessage());
- }
+ synchronized (this) {
+ if (connection == null || ! connection.isValid()) {
+ connection = supervisor.connect(SPEC);
+ }
+ }
+ if (connection.isValid()) {
+ Request req = new Request("sentinel.ls");
+ connection.invokeSync(req, 5.0);
+ if (req.errorCode() == ErrorCode.NONE &&
+ req.checkReturnTypes("s"))
+ {
+ servicelist = req.returnValues().get(0).asString();
} else {
- log.log(Level.WARNING, "Could not connect to sentinel at: "+spec);
+ log.log(Level.WARNING, "Bad answer to RPC request: " + req.errorMessage());
}
- return servicelist;
- } finally {
- connection.close();
+ } else {
+ log.log(Level.WARNING, "Could not connect to sentinel at: " + SPEC);
}
+ return servicelist;
}
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteHealthMetricFetcher.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteHealthMetricFetcher.java
index 09087c32914..59db14670aa 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteHealthMetricFetcher.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteHealthMetricFetcher.java
@@ -32,8 +32,8 @@ public class RemoteHealthMetricFetcher extends HttpMetricFetcher {
* Connect to remote service over http and fetch metrics
*/
public HealthMetric getHealth(int fetchCount) {
- try {
- return createHealthMetrics(getJson(), fetchCount);
+ try (InputStream stream = getJson()) {
+ return createHealthMetrics(stream, fetchCount);
} catch (IOException | InterruptedException | ExecutionException e) {
logMessageNoResponse(errMsgNoResponse(e), fetchCount);
byte [] empty = {'{','}'};
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java
index 8acaa0fb58e..3ee1e05c263 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/RemoteMetricsFetcher.java
@@ -1,8 +1,6 @@
// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.metricsproxy.service;
-import ai.vespa.metricsproxy.metric.Metrics;
-
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.ExecutionException;
@@ -24,8 +22,8 @@ public class RemoteMetricsFetcher extends HttpMetricFetcher {
* Connect to remote service over http and fetch metrics
*/
public void getMetrics(MetricsParser.Consumer consumer, int fetchCount) {
- try {
- createMetrics(getJson(), consumer, fetchCount);
+ try (InputStream stream = getJson()) {
+ createMetrics(stream, consumer, fetchCount);
} catch (IOException | InterruptedException | ExecutionException e) {
}
}
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java
index 5f1c045781c..d568e83c9ad 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsHandlerTest.java
@@ -85,6 +85,7 @@ public class ApplicationMetricsHandlerTest {
ApplicationMetricsHandler handler = new ApplicationMetricsHandler(Executors.newSingleThreadExecutor(),
applicationMetricsRetriever,
getMetricsConsumers());
+ applicationMetricsRetriever.getMetrics(defaultMetricsConsumerId);
applicationMetricsRetriever.getMetrics(ConsumerId.toConsumerId(CUSTOM_CONSUMER));
applicationMetricsRetriever.startPollAndWait();
testDriver = new RequestHandlerTestDriver(handler);
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetrieverTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetrieverTest.java
index 09cc355d292..c98b962f671 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetrieverTest.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/application/ApplicationMetricsRetrieverTest.java
@@ -53,6 +53,7 @@ public class ApplicationMetricsRetrieverTest {
.willReturn(aResponse().withBody(RESPONSE)));
ApplicationMetricsRetriever retriever = new ApplicationMetricsRetriever(config);
+ retriever.getMetrics();
retriever.startPollAndWait();
var metricsByNode = retriever.getMetrics();
assertEquals(1, metricsByNode.size());
@@ -71,6 +72,7 @@ public class ApplicationMetricsRetrieverTest {
.willReturn(aResponse().withBody(RESPONSE)));
ApplicationMetricsRetriever retriever = new ApplicationMetricsRetriever(config);
+ retriever.getMetrics();
retriever.startPollAndWait();
var metricsByNode = retriever.getMetrics();
assertEquals(2, metricsByNode.size());
@@ -100,6 +102,7 @@ public class ApplicationMetricsRetrieverTest {
.willReturn(aResponse().withBody(RESPONSE)));
ApplicationMetricsRetriever retriever = new ApplicationMetricsRetriever(config);
+ retriever.getMetrics();
retriever.startPollAndWait();
var metricsByNode = retriever.getMetrics();
assertEquals(2, metricsByNode.size());
@@ -134,6 +137,7 @@ public class ApplicationMetricsRetrieverTest {
.withFixedDelay(10)));
ApplicationMetricsRetriever retriever = new ApplicationMetricsRetriever(config);
+ retriever.getMetrics();
retriever.setTaskTimeout(Duration.ofMillis(1));
retriever.startPollAndWait();
assertTrue(retriever.getMetrics().get(node).isEmpty());
diff --git a/model-integration/pom.xml b/model-integration/pom.xml
index dc3154c5c41..62014ef174a 100644
--- a/model-integration/pom.xml
+++ b/model-integration/pom.xml
@@ -106,29 +106,4 @@
</plugins>
</build>
- <profiles>
- <!-- Exclude TF JNI when building for rhel6, which needs a special, natively installed variant -->
- <profile>
- <id>rhel6</id>
- <activation>
- <property>
- <name>target.env</name>
- <value>rhel6</value>
- </property>
- </activation>
- <dependencies>
- <dependency>
- <groupId>org.tensorflow</groupId>
- <artifactId>tensorflow</artifactId>
- <exclusions>
- <exclusion>
- <groupId>org.tensorflow</groupId>
- <artifactId>libtensorflow_jni</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- </dependencies>
- </profile>
- </profiles>
-
</project>
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApi.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApi.java
index 90768facf34..d8d58aee8c2 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApi.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApi.java
@@ -1,6 +1,7 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.configserver;
+import java.net.URI;
import java.time.Duration;
import java.util.Optional;
@@ -10,41 +11,65 @@ import java.util.Optional;
* @author freva
*/
public interface ConfigServerApi extends AutoCloseable {
- class Params {
- private Optional<Duration> connectionTimeout;
+
+ /**
+ * The result of sending a request to a config server results in a jackson response or exception. If a response
+ * is returned, an instance of this interface is conferred to discard the result and try the next config server,
+ * unless it was the last attempt.
+ *
+ * @param <T> the type of the returned jackson response
+ */
+ interface RetryPolicy<T> {
+ boolean tryNextConfigServer(URI configServerEndpoint, T response);
+ }
+
+ class Params<T> {
+ private Optional<Duration> connectionTimeout = Optional.empty();
+
+ private RetryPolicy<T> retryPolicy = (configServerEndpoint, response) -> false;
+
+ public Params() {}
/** Set the socket connect and read timeouts. */
- public Params setConnectionTimeout(Duration connectionTimeout) {
+ public Params<T> setConnectionTimeout(Duration connectionTimeout) {
this.connectionTimeout = Optional.of(connectionTimeout);
return this;
}
public Optional<Duration> getConnectionTimeout() { return connectionTimeout; }
+
+ /** Set the retry policy to use against the config servers. */
+ public Params<T> setRetryPolicy(RetryPolicy<T> retryPolicy) {
+ this.retryPolicy = retryPolicy;
+ return this;
+ }
+
+ public RetryPolicy<T> getRetryPolicy() { return retryPolicy; }
}
- <T> T get(String path, Class<T> wantedReturnType, Params params);
+ <T> T get(String path, Class<T> wantedReturnType, Params<T> params);
default <T> T get(String path, Class<T> wantedReturnType) {
- return get(path, wantedReturnType, null);
+ return get(path, wantedReturnType, new Params<>());
}
- <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType, Params params);
+ <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType, Params<T> params);
default <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) {
- return post(path, bodyJsonPojo, wantedReturnType, null);
+ return post(path, bodyJsonPojo, wantedReturnType, new Params<>());
}
- <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType, Params params);
+ <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType, Params<T> params);
default <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) {
- return put(path, bodyJsonPojo, wantedReturnType, null);
+ return put(path, bodyJsonPojo, wantedReturnType, new Params<>());
}
- <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType, Params params);
+ <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType, Params<T> params);
default <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) {
- return patch(path, bodyJsonPojo, wantedReturnType, null);
+ return patch(path, bodyJsonPojo, wantedReturnType, new Params<>());
}
- <T> T delete(String path, Class<T> wantedReturnType, Params params);
+ <T> T delete(String path, Class<T> wantedReturnType, Params<T> params);
default <T> T delete(String path, Class<T> wantedReturnType) {
- return delete(path, wantedReturnType, null);
+ return delete(path, wantedReturnType, new Params<>());
}
/** Close the underlying HTTP client and any threads this class might have started. */
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
index 67dcb6744ce..c41528c64ec 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java
@@ -106,8 +106,10 @@ public class ConfigServerApiImpl implements ConfigServerApi {
HttpUriRequest createRequest(URI configServerUri) throws JsonProcessingException, UnsupportedEncodingException;
}
- private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) {
+ private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType, Params<T> params) {
+ T lastResult = null;
Exception lastException = null;
+
for (URI configServer : configServers) {
var request = Exceptions.uncheck(() -> requestFactory.createRequest(configServer));
try (CloseableHttpResponse response = client.execute(request)) {
@@ -115,15 +117,26 @@ public class ConfigServerApiImpl implements ConfigServerApi {
HttpException.handleStatusCode(response.getStatusLine().getStatusCode(),
request.getMethod() + " " + request.getURI() +
" failed with response '" + responseBody + "'");
+
+ T result;
try {
- return mapper.readValue(responseBody, wantedReturnType);
+ result = mapper.readValue(responseBody, wantedReturnType);
} catch (IOException e) {
throw new UncheckedIOException("Failed parse response from config server", e);
}
+
+ if (params.getRetryPolicy().tryNextConfigServer(configServer, result)) {
+ lastResult = result;
+ lastException = null;
+ } else {
+ return result;
+ }
} catch (HttpException e) {
if (!e.isRetryable()) throw e;
+ lastResult = null;
lastException = e;
} catch (Exception e) {
+ lastResult = null;
lastException = e;
if (configServers.size() == 1) break;
@@ -136,6 +149,11 @@ public class ConfigServerApiImpl implements ConfigServerApi {
}
}
+ if (lastResult != null) {
+            logger.warning("Giving up after trying all config servers; returning last result: " + lastResult);
+ return lastResult;
+ }
+
String prefix = configServers.size() == 1 ?
"Request against " + configServers.get(0) + " failed: " :
"All requests against the config servers (" + configServers + ") failed, last as follows: ";
@@ -143,8 +161,8 @@ public class ConfigServerApiImpl implements ConfigServerApi {
}
@Override
- public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType, Params paramsOrNull) {
- Optional<RequestConfig> requestConfigOverride = getRequestConfigOverride(paramsOrNull);
+ public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType, Params<T> params) {
+ Optional<RequestConfig> requestConfigOverride = getRequestConfigOverride(params);
return tryAllConfigServers(configServer -> {
HttpPut put = new HttpPut(configServer.resolve(path));
requestConfigOverride.ifPresent(put::setConfig);
@@ -153,51 +171,51 @@ public class ConfigServerApiImpl implements ConfigServerApi {
put.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo.get())));
}
return put;
- }, wantedReturnType);
+ }, wantedReturnType, params);
}
@Override
- public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType, Params paramsOrNull) {
- Optional<RequestConfig> requestConfigOverride = getRequestConfigOverride(paramsOrNull);
+ public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType, Params<T> params) {
+ Optional<RequestConfig> requestConfigOverride = getRequestConfigOverride(params);
return tryAllConfigServers(configServer -> {
HttpPatch patch = new HttpPatch(configServer.resolve(path));
requestConfigOverride.ifPresent(patch::setConfig);
setContentTypeToApplicationJson(patch);
patch.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo)));
return patch;
- }, wantedReturnType);
+ }, wantedReturnType, params);
}
@Override
- public <T> T delete(String path, Class<T> wantedReturnType, Params paramsOrNull) {
- Optional<RequestConfig> requestConfigOverride = getRequestConfigOverride(paramsOrNull);
+ public <T> T delete(String path, Class<T> wantedReturnType, Params<T> params) {
+ Optional<RequestConfig> requestConfigOverride = getRequestConfigOverride(params);
return tryAllConfigServers(configServer -> {
HttpDelete delete = new HttpDelete(configServer.resolve(path));
requestConfigOverride.ifPresent(delete::setConfig);
return delete;
- }, wantedReturnType);
+ }, wantedReturnType, params);
}
@Override
- public <T> T get(String path, Class<T> wantedReturnType, Params paramsOrNull) {
- Optional<RequestConfig> requestConfig = getRequestConfigOverride(paramsOrNull);
+ public <T> T get(String path, Class<T> wantedReturnType, Params<T> params) {
+ Optional<RequestConfig> requestConfig = getRequestConfigOverride(params);
return tryAllConfigServers(configServer -> {
HttpGet get = new HttpGet(configServer.resolve(path));
requestConfig.ifPresent(get::setConfig);
return get;
- }, wantedReturnType);
+ }, wantedReturnType, params);
}
@Override
- public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType, Params paramsOrNull) {
- Optional<RequestConfig> requestConfigOverride = getRequestConfigOverride(paramsOrNull);
+ public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType, Params<T> params) {
+ Optional<RequestConfig> requestConfigOverride = getRequestConfigOverride(params);
return tryAllConfigServers(configServer -> {
HttpPost post = new HttpPost(configServer.resolve(path));
requestConfigOverride.ifPresent(post::setConfig);
setContentTypeToApplicationJson(post);
post.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo)));
return post;
- }, wantedReturnType);
+ }, wantedReturnType, params);
}
@Override
@@ -235,12 +253,12 @@ public class ConfigServerApiImpl implements ConfigServerApi {
.build();
}
- private static Optional<RequestConfig> getRequestConfigOverride(Params paramsOrNull) {
- if (paramsOrNull == null) return Optional.empty();
+ private static <T> Optional<RequestConfig> getRequestConfigOverride(Params<T> params) {
+ if (params.getConnectionTimeout().isEmpty()) return Optional.empty();
RequestConfig.Builder builder = RequestConfig.copy(DEFAULT_REQUEST_CONFIG);
- paramsOrNull.getConnectionTimeout().ifPresent(connectionTimeout -> {
+ params.getConnectionTimeout().ifPresent(connectionTimeout -> {
builder.setConnectTimeout((int) connectionTimeout.toMillis());
builder.setSocketTimeout((int) connectionTimeout.toMillis());
});
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
index a3cc7042c47..8b74dd35f96 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java
@@ -6,11 +6,14 @@ import com.yahoo.vespa.hosted.node.admin.configserver.ConnectionException;
import com.yahoo.vespa.hosted.node.admin.configserver.HttpException;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException;
import com.yahoo.vespa.orchestrator.restapi.wire.BatchOperationResult;
+import com.yahoo.vespa.orchestrator.restapi.wire.HostStateChangeDenialReason;
import com.yahoo.vespa.orchestrator.restapi.wire.UpdateHostResponse;
+import java.net.URI;
import java.time.Duration;
import java.util.List;
import java.util.Optional;
+import java.util.logging.Logger;
/**
* @author stiankri
@@ -18,6 +21,8 @@ import java.util.Optional;
* @author dybis
*/
public class OrchestratorImpl implements Orchestrator {
+ private static final Logger logger = Logger.getLogger(OrchestratorImpl.class.getName());
+
// The server-side Orchestrator has an internal timeout of 10s.
//
// Note: A 409 has been observed to be returned after 33s in a case possibly involving
@@ -44,7 +49,10 @@ public class OrchestratorImpl implements Orchestrator {
public void suspend(final String hostName) {
UpdateHostResponse response;
try {
- var params = new ConfigServerApi.Params().setConnectionTimeout(CONNECTION_TIMEOUT);
+ var params = new ConfigServerApi
+ .Params<UpdateHostResponse>()
+ .setConnectionTimeout(CONNECTION_TIMEOUT)
+ .setRetryPolicy(createRetryPolicyForSuspend());
response = configServerApi.put(getSuspendPath(hostName), Optional.empty(), UpdateHostResponse.class, params);
} catch (HttpException.NotFoundException n) {
throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found");
@@ -61,11 +69,35 @@ public class OrchestratorImpl implements Orchestrator {
});
}
+ private static ConfigServerApi.RetryPolicy<UpdateHostResponse> createRetryPolicyForSuspend() {
+ return new ConfigServerApi.RetryPolicy<UpdateHostResponse>() {
+ @Override
+ public boolean tryNextConfigServer(URI configServerEndpoint, UpdateHostResponse response) {
+ HostStateChangeDenialReason reason = response.reason();
+ if (reason == null) {
+ return false;
+ }
+
+ // The config server has likely just bootstrapped, so try the next.
+ if ("unknown-service-status".equals(reason.constraintName())) {
+                    // Warn for now, until this feature has proven to work well
+ logger.warning("Config server at [" + configServerEndpoint +
+ "] failed with transient error (will try next): " +
+ reason.message());
+
+ return true;
+ }
+
+ return false;
+ }
+ };
+ }
+
@Override
public void suspend(String parentHostName, List<String> hostNames) {
final BatchOperationResult batchOperationResult;
try {
- var params = new ConfigServerApi.Params().setConnectionTimeout(CONNECTION_TIMEOUT);
+ var params = new ConfigServerApi.Params<BatchOperationResult>().setConnectionTimeout(CONNECTION_TIMEOUT);
String hostnames = String.join("&hostname=", hostNames);
String url = String.format("%s/%s?hostname=%s", ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
parentHostName, hostnames);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java
index 66cc5e64b67..47ee9ba3d20 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java
@@ -109,7 +109,9 @@ public class VespaServiceDumperImpl implements VespaServiceDumper {
for (String artifactType : artifactTypes) {
ArtifactProducer producer = artifactProducers.get(artifactType);
if (producer == null) {
- handleFailure(context, request, startedAt, "No artifact producer exists for '" + artifactType + "'");
+ String supportedValues = String.join(",", artifactProducers.keySet());
+ handleFailure(context, request, startedAt, "No artifact producer exists for '" + artifactType + "'. " +
+                        "The following values are allowed: " + supportedValues);
return;
}
context.log(log, "Producing artifact of type '" + artifactType + "'");
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImplTest.java
index a11fdc903e7..bccf34e87ab 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImplTest.java
@@ -116,7 +116,7 @@ public class ConfigServerApiImplTest {
public void testBasicSuccessWithCustomTimeouts() {
mockReturnCode = TIMEOUT_RETURN_CODE;
- var params = new ConfigServerApi.Params();
+ var params = new ConfigServerApi.Params<TestPojo>();
params.setConnectionTimeout(Duration.ofSeconds(3));
try {
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
index d183e863500..208e12690ff 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java
@@ -4,9 +4,7 @@ package com.yahoo.vespa.orchestrator.policy;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.applicationmodel.ClusterId;
import com.yahoo.vespa.applicationmodel.ServiceType;
-import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.orchestrator.model.ClusterApi;
import com.yahoo.vespa.orchestrator.model.VespaModelUtil;
@@ -17,12 +15,9 @@ import static com.yahoo.vespa.orchestrator.policy.HostedVespaPolicy.ENOUGH_SERVI
public class HostedVespaClusterPolicy implements ClusterPolicy {
- private final BooleanFlag groupSuspensionInPermanentSuspendFlag;
private final Zone zone;
public HostedVespaClusterPolicy(FlagSource flagSource, Zone zone) {
- // Note that the "group" in this flag refers to hierarchical groups of a content cluster.
- this.groupSuspensionInPermanentSuspendFlag = Flags.GROUP_PERMANENT_SUSPENSION.bindTo(flagSource);
this.zone = zone;
}
@@ -32,7 +27,7 @@ public class HostedVespaClusterPolicy implements ClusterPolicy {
return SuspensionReasons.nothingNoteworthy();
}
- int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, true).asPercentage();
+ int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi).asPercentage();
if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) {
return SuspensionReasons.nothingNoteworthy();
}
@@ -63,9 +58,7 @@ public class HostedVespaClusterPolicy implements ClusterPolicy {
return;
}
- boolean enableContentGroupSuspension = groupSuspensionInPermanentSuspendFlag.value();
-
- int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi, enableContentGroupSuspension)
+ int percentageOfServicesAllowedToBeDown = getConcurrentSuspensionLimit(clusterApi)
.asPercentage();
if (clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown() <= percentageOfServicesAllowedToBeDown) {
return;
@@ -81,116 +74,85 @@ public class HostedVespaClusterPolicy implements ClusterPolicy {
}
// Non-private for testing purposes
- ConcurrentSuspensionLimitForCluster getConcurrentSuspensionLimit(ClusterApi clusterApi, boolean enableContentGroupSuspension) {
- if (enableContentGroupSuspension) {
- // Possible service clusters on a node as of 2021-01-22:
- //
- // CLUSTER ID SERVICE TYPE HEALTH ASSOCIATION
- // 1 CCN-controllers container-clustercontrollers Slobrok 1, 3, or 6 in content cluster
- // 2 CCN distributor Slobrok content cluster
- // 3 CCN storagenode Slobrok content cluster
- // 4 CCN searchnode Slobrok content cluster
- // 5 CCN transactionlogserver not checked content cluster
- // 6 JCCN container Slobrok jdisc container cluster
- // 7 admin slobrok not checked 1-3 in jdisc container cluster
- // 8 metrics metricsproxy-container Slobrok application
- // 9 admin logd not checked application
- // 10 admin config-sentinel not checked application
- // 11 admin configproxy not checked application
- // 12 admin logforwarder not checked application
- // 13 controller controller state/v1 controllers
- // 14 zone-config-servers configserver state/v1 config servers
- // 15 controller-host hostadmin state/v1 controller hosts
- // 16 configserver-host hostadmin state/v1 config server hosts
- // 17 tenant-host hostadmin state/v1 tenant hosts
- // 18 proxy-host hostadmin state/v1 proxy hosts
- //
- // CCN refers to the content cluster's name, as specified in services.xml.
- // JCCN refers to the jdisc container cluster's name, as specified in services.xml.
- //
- // For instance a content node will have 2-5 and 8-12 and possibly 1, while a combined
- // cluster node may have all 1-12.
- //
- // The services on a node can be categorized into these main types, ref association column above:
- // A content
- // B container
- // C tenant host
- // D config server
- // E config server host
- // F controller
- // G controller host
- // H proxy (same as B)
- // I proxy host
-
- if (clusterApi.serviceType().equals(ServiceType.CLUSTER_CONTROLLER)) {
- return ConcurrentSuspensionLimitForCluster.ONE_NODE;
- }
-
- if (Set.of(ServiceType.STORAGE, ServiceType.SEARCH, ServiceType.DISTRIBUTOR, ServiceType.TRANSACTION_LOG_SERVER)
- .contains(clusterApi.serviceType())) {
- // Delegate to the cluster controller
- return ConcurrentSuspensionLimitForCluster.ALL_NODES;
- }
-
- if (clusterApi.serviceType().equals(ServiceType.CONTAINER)) {
- return ConcurrentSuspensionLimitForCluster.TEN_PERCENT;
- }
-
- if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) {
- if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) {
- return ConcurrentSuspensionLimitForCluster.ONE_NODE;
- }
-
- return ConcurrentSuspensionLimitForCluster.ALL_NODES;
- } else if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) {
- return ConcurrentSuspensionLimitForCluster.ALL_NODES;
- }
-
- if (Set.of(ServiceType.CONFIG_SERVER, ServiceType.CONTROLLER).contains(clusterApi.serviceType())) {
- return ConcurrentSuspensionLimitForCluster.ONE_NODE;
- }
-
- if (clusterApi.serviceType().equals(ServiceType.HOST_ADMIN)) {
- if (Set.of(ClusterId.CONFIG_SERVER_HOST, ClusterId.CONTROLLER_HOST).contains(clusterApi.clusterId())) {
- return ConcurrentSuspensionLimitForCluster.ONE_NODE;
- }
+ ConcurrentSuspensionLimitForCluster getConcurrentSuspensionLimit(ClusterApi clusterApi) {
+ // Possible service clusters on a node as of 2021-01-22:
+ //
+ // CLUSTER ID SERVICE TYPE HEALTH ASSOCIATION
+ // 1 CCN-controllers container-clustercontrollers Slobrok 1, 3, or 6 in content cluster
+ // 2 CCN distributor Slobrok content cluster
+ // 3 CCN storagenode Slobrok content cluster
+ // 4 CCN searchnode Slobrok content cluster
+ // 5 CCN transactionlogserver not checked content cluster
+ // 6 JCCN container Slobrok jdisc container cluster
+ // 7 admin slobrok not checked 1-3 in jdisc container cluster
+ // 8 metrics metricsproxy-container Slobrok application
+ // 9 admin logd not checked application
+ // 10 admin config-sentinel not checked application
+ // 11 admin configproxy not checked application
+ // 12 admin logforwarder not checked application
+ // 13 controller controller state/v1 controllers
+ // 14 zone-config-servers configserver state/v1 config servers
+ // 15 controller-host hostadmin state/v1 controller hosts
+ // 16 configserver-host hostadmin state/v1 config server hosts
+ // 17 tenant-host hostadmin state/v1 tenant hosts
+ // 18 proxy-host hostadmin state/v1 proxy hosts
+ //
+ // CCN refers to the content cluster's name, as specified in services.xml.
+ // JCCN refers to the jdisc container cluster's name, as specified in services.xml.
+ //
+ // For instance a content node will have 2-5 and 8-12 and possibly 1, while a combined
+ // cluster node may have all 1-12.
+ //
+ // The services on a node can be categorized into these main types, ref association column above:
+ // A content
+ // B container
+ // C tenant host
+ // D config server
+ // E config server host
+ // F controller
+ // G controller host
+ // H proxy (same as B)
+ // I proxy host
+
+ if (clusterApi.serviceType().equals(ServiceType.CLUSTER_CONTROLLER)) {
+ return ConcurrentSuspensionLimitForCluster.ONE_NODE;
+ }
- return zone.system().isCd()
- ? ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT
- : ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
- }
+ if (Set.of(ServiceType.STORAGE, ServiceType.SEARCH, ServiceType.DISTRIBUTOR, ServiceType.TRANSACTION_LOG_SERVER)
+ .contains(clusterApi.serviceType())) {
+ // Delegate to the cluster controller
+ return ConcurrentSuspensionLimitForCluster.ALL_NODES;
+ }
- // The above should cover all cases, but if not we'll return a reasonable default:
+ if (clusterApi.serviceType().equals(ServiceType.CONTAINER)) {
return ConcurrentSuspensionLimitForCluster.TEN_PERCENT;
- } else {
- // TODO: Remove this legacy branch
- if (clusterApi.isStorageCluster()) {
- return ConcurrentSuspensionLimitForCluster.ONE_NODE;
- }
+ }
- if (ServiceType.CLUSTER_CONTROLLER.equals(clusterApi.serviceType())) {
+ if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) {
+ if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) {
return ConcurrentSuspensionLimitForCluster.ONE_NODE;
}
- if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) {
- return ConcurrentSuspensionLimitForCluster.ALL_NODES;
- }
-
- if (VespaModelUtil.ADMIN_CLUSTER_ID.equals(clusterApi.clusterId())) {
- if (ServiceType.SLOBROK.equals(clusterApi.serviceType())) {
- return ConcurrentSuspensionLimitForCluster.ONE_NODE;
- }
+ return ConcurrentSuspensionLimitForCluster.ALL_NODES;
+ } else if (ServiceType.METRICS_PROXY.equals(clusterApi.serviceType())) {
+ return ConcurrentSuspensionLimitForCluster.ALL_NODES;
+ }
- return ConcurrentSuspensionLimitForCluster.ALL_NODES;
- }
+ if (Set.of(ServiceType.CONFIG_SERVER, ServiceType.CONTROLLER).contains(clusterApi.serviceType())) {
+ return ConcurrentSuspensionLimitForCluster.ONE_NODE;
+ }
- if (clusterApi.getApplication().applicationId().equals(VespaModelUtil.TENANT_HOST_APPLICATION_ID)) {
- return zone.system().isCd()
- ? ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT
- : ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
+ if (clusterApi.serviceType().equals(ServiceType.HOST_ADMIN)) {
+ if (Set.of(ClusterId.CONFIG_SERVER_HOST, ClusterId.CONTROLLER_HOST).contains(clusterApi.clusterId())) {
+ return ConcurrentSuspensionLimitForCluster.ONE_NODE;
}
- return ConcurrentSuspensionLimitForCluster.TEN_PERCENT;
+ return zone.system().isCd()
+ ? ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT
+ : ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT;
}
+
+ // The above should cover all cases, but if not we'll return a reasonable default:
+ return ConcurrentSuspensionLimitForCluster.TEN_PERCENT;
}
}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java
index 1e29f0ca5de..da8591c6631 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/model/ClusterApiImplTest.java
@@ -10,7 +10,6 @@ import com.yahoo.vespa.applicationmodel.ServiceInstance;
import com.yahoo.vespa.applicationmodel.ServiceStatus;
import com.yahoo.vespa.applicationmodel.ServiceStatusInfo;
import com.yahoo.vespa.applicationmodel.ServiceType;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.orchestrator.OrchestratorUtil;
import com.yahoo.vespa.orchestrator.policy.ClusterParams;
@@ -182,18 +181,6 @@ public class ClusterApiImplTest {
fail();
} catch (HostStateChangeDeniedException e) {
assertThat(e.getMessage(),
- containsString("Changing the state of cfg1 would violate enough-services-up: " +
- "Suspension of service with type 'configserver' not allowed: 33% are suspended already. " +
- "Services down on resumed hosts: [1 missing config server]."));
- }
-
- flagSource.withBooleanFlag(Flags.GROUP_PERMANENT_SUSPENSION.id(), true);
-
- try {
- policy.verifyGroupGoingDownIsFine(clusterApi);
- fail();
- } catch (HostStateChangeDeniedException e) {
- assertThat(e.getMessage(),
containsString("Suspension of service with type 'configserver' not allowed: 33% are suspended already. " +
"Services down on resumed hosts: [1 missing config server]."));
}
@@ -214,18 +201,6 @@ public class ClusterApiImplTest {
fail();
} catch (HostStateChangeDeniedException e) {
assertThat(e.getMessage(),
- containsString("Changing the state of cfg1 would violate enough-services-up: " +
- "Suspension of service with type 'hostadmin' not allowed: 33% are suspended already. " +
- "Services down on resumed hosts: [1 missing config server host]."));
- }
-
- flagSource.withBooleanFlag(Flags.GROUP_PERMANENT_SUSPENSION.id(), true);
-
- try {
- policy.verifyGroupGoingDownIsFine(clusterApi);
- fail();
- } catch (HostStateChangeDeniedException e) {
- assertThat(e.getMessage(),
containsString("Suspension of service with type 'hostadmin' not allowed: 33% are suspended already. " +
"Services down on resumed hosts: [1 missing config server host]."));
}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
index 0c3da1656bc..303dabebba8 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java
@@ -63,7 +63,7 @@ public class HostedVespaClusterPolicyTest {
when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
when(clusterApi.serviceType()).thenReturn(ServiceType.SLOBROK);
assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
- policy.getConcurrentSuspensionLimit(clusterApi, false));
+ policy.getConcurrentSuspensionLimit(clusterApi));
}
@Test
@@ -71,46 +71,38 @@ public class HostedVespaClusterPolicyTest {
when(clusterApi.clusterId()).thenReturn(VespaModelUtil.ADMIN_CLUSTER_ID);
when(clusterApi.serviceType()).thenReturn(new ServiceType("non-slobrok-service-type"));
assertEquals(ConcurrentSuspensionLimitForCluster.ALL_NODES,
- policy.getConcurrentSuspensionLimit(clusterApi, false));
+ policy.getConcurrentSuspensionLimit(clusterApi));
}
@Test
public void testStorageSuspensionLimit() {
when(clusterApi.serviceType()).thenReturn(ServiceType.STORAGE);
when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
- when(clusterApi.isStorageCluster()).thenReturn(true);
assertEquals(ConcurrentSuspensionLimitForCluster.ALL_NODES,
- policy.getConcurrentSuspensionLimit(clusterApi, true));
- }
-
- @Test
- public void testStorageSuspensionLimit_legacy() {
- when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
- when(clusterApi.isStorageCluster()).thenReturn(true);
- assertEquals(ConcurrentSuspensionLimitForCluster.ONE_NODE,
- policy.getConcurrentSuspensionLimit(clusterApi, false));
+ policy.getConcurrentSuspensionLimit(clusterApi));
}
@Test
public void testTenantHostSuspensionLimit() {
when(applicationApi.applicationId()).thenReturn(VespaModelUtil.TENANT_HOST_APPLICATION_ID);
- when(clusterApi.isStorageCluster()).thenReturn(false);
+ when(clusterApi.clusterId()).thenReturn(ClusterId.TENANT_HOST);
+ when(clusterApi.serviceType()).thenReturn(ServiceType.HOST_ADMIN);
assertEquals(ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT,
- policy.getConcurrentSuspensionLimit(clusterApi, false));
+ policy.getConcurrentSuspensionLimit(clusterApi));
when(zone.system()).thenReturn(SystemName.cd);
assertEquals(ConcurrentSuspensionLimitForCluster.FIFTY_PERCENT,
- policy.getConcurrentSuspensionLimit(clusterApi, false));
+ policy.getConcurrentSuspensionLimit(clusterApi));
}
@Test
public void testDefaultSuspensionLimit() {
when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id"));
- when(clusterApi.isStorageCluster()).thenReturn(false);
+ when(clusterApi.serviceType()).thenReturn(new ServiceType("some-service-type"));
assertEquals(ConcurrentSuspensionLimitForCluster.TEN_PERCENT,
- policy.getConcurrentSuspensionLimit(clusterApi, false));
+ policy.getConcurrentSuspensionLimit(clusterApi));
}
@Test
@@ -141,7 +133,7 @@ public class HostedVespaClusterPolicyTest {
when(clusterApi.noServicesOutsideGroupIsDown()).thenReturn(noServicesOutsideGroupIsDown);
when(clusterApi.reasonsForNoServicesInGroupIsUp()).thenReturn(noServicesInGroupIsUp);
when(clusterApi.percentageOfServicesDownIfGroupIsAllowedToBeDown()).thenReturn(20);
- doReturn(ConcurrentSuspensionLimitForCluster.TEN_PERCENT).when(policy).getConcurrentSuspensionLimit(clusterApi, false);
+ doReturn(ConcurrentSuspensionLimitForCluster.TEN_PERCENT).when(policy).getConcurrentSuspensionLimit(clusterApi);
when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c"));
when(clusterApi.serviceType()).thenReturn(new ServiceType("service-type"));
diff --git a/parent/pom.xml b/parent/pom.xml
index ca61f6707e7..fa04c167d0e 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -543,18 +543,6 @@
<version>${prometheus.client.version}</version>
</dependency>
<dependency>
- <!-- TODO: Try to remove, as this overlaps with javax.activation. -->
- <groupId>jakarta.activation</groupId>
- <artifactId>jakarta.activation-api</artifactId>
- <version>1.2.1</version>
- </dependency>
- <dependency>
- <!-- TODO: Try to remove, as this conflicts with javax.xml.bind:jaxb-api -->
- <groupId>jakarta.xml.bind</groupId>
- <artifactId>jakarta.xml.bind-api</artifactId>
- <version>2.3.2</version>
- </dependency>
- <dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
diff --git a/searchcore/CMakeLists.txt b/searchcore/CMakeLists.txt
index c76f35bd9ff..c0353747e80 100644
--- a/searchcore/CMakeLists.txt
+++ b/searchcore/CMakeLists.txt
@@ -50,6 +50,7 @@ vespa_define_module(
src/apps/vespa-feed-bm
src/apps/vespa-gen-testdocs
src/apps/vespa-proton-cmd
+ src/apps/vespa-redistribute-bm
src/apps/vespa-transactionlog-inspect
TESTS
diff --git a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
index 7f9ae442dd3..3dac66685f9 100644
--- a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
+++ b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
@@ -51,8 +51,6 @@ using search::bmcluster::BmNodeStatsReporter;
using search::bmcluster::BmRange;
using search::bmcluster::BucketSelector;
using search::index::DummyFileHeaderContext;
-using storage::spi::PersistenceProvider;
-using vespalib::makeLambdaTask;
namespace {
@@ -114,71 +112,73 @@ BMParams::check() const
}
-struct PersistenceProviderFixture {
+class Benchmark {
+ BMParams _params;
std::shared_ptr<const DocumenttypesConfig> _document_types;
std::shared_ptr<const DocumentTypeRepo> _repo;
- std::unique_ptr<BmCluster> _bm_cluster;
+ std::unique_ptr<BmCluster> _cluster;
BmFeed _feed;
- explicit PersistenceProviderFixture(const BMParams& params);
- ~PersistenceProviderFixture();
+ void benchmark_feed(BmFeeder& feeder, int64_t& time_bias, const std::vector<vespalib::nbostream>& serialized_feed, uint32_t passes, const vespalib::string &op_name);
+public:
+ explicit Benchmark(const BMParams& params);
+ ~Benchmark();
+ void run();
};
-PersistenceProviderFixture::PersistenceProviderFixture(const BMParams& params)
- : _document_types(make_document_types()),
+Benchmark::Benchmark(const BMParams& params)
+ : _params(params),
+ _document_types(make_document_types()),
_repo(document::DocumentTypeRepoFactory::make(*_document_types)),
- _bm_cluster(std::make_unique<BmCluster>(base_dir, base_port, params, _document_types, _repo)),
+ _cluster(std::make_unique<BmCluster>(base_dir, base_port, _params, _document_types, _repo)),
_feed(_repo)
{
- _bm_cluster->make_nodes();
+ _cluster->make_nodes();
}
-PersistenceProviderFixture::~PersistenceProviderFixture() = default;
+Benchmark::~Benchmark() = default;
void
-benchmark_feed(BmFeeder& feeder, int64_t& time_bias, const std::vector<vespalib::nbostream>& serialized_feed, const BMParams& params, uint32_t passes, const vespalib::string &op_name)
+Benchmark::benchmark_feed(BmFeeder& feeder, int64_t& time_bias, const std::vector<vespalib::nbostream>& serialized_feed, uint32_t passes, const vespalib::string &op_name)
{
if (passes == 0) {
return;
}
AvgSampler sampler;
LOG(info, "--------------------------------");
- LOG(info, "%sAsync: %u small documents, passes=%u", op_name.c_str(), params.get_documents(), passes);
+ LOG(info, "%sAsync: %u small documents, passes=%u", op_name.c_str(), _params.get_documents(), passes);
for (uint32_t pass = 0; pass < passes; ++pass) {
- feeder.run_feed_tasks(pass, time_bias, serialized_feed, params, sampler, op_name);
+ feeder.run_feed_tasks(pass, time_bias, serialized_feed, _params, sampler, op_name);
}
LOG(info, "%sAsync: AVG %s/s: %8.2f", op_name.c_str(), op_name.c_str(), sampler.avg());
}
-void benchmark(const BMParams &bm_params)
+void
+Benchmark::run()
{
- vespalib::rmdir(base_dir, true);
- PersistenceProviderFixture f(bm_params);
- auto& cluster = *f._bm_cluster;
- cluster.start(f._feed);
- vespalib::ThreadStackExecutor executor(bm_params.get_client_threads(), 128_Ki);
- BmFeeder feeder(f._repo, *cluster.get_feed_handler(), executor);
- auto& feed = f._feed;
- auto put_feed = feed.make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_put_feed(range, bucket_selector); }, f._feed.num_buckets(), "put");
- auto update_feed = feed.make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_update_feed(range, bucket_selector); }, f._feed.num_buckets(), "update");
- auto get_feed = feed.make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_get_feed(range, bucket_selector); }, f._feed.num_buckets(), "get");
- auto remove_feed = feed.make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_remove_feed(range, bucket_selector); }, f._feed.num_buckets(), "remove");
- BmNodeStatsReporter reporter(cluster);
+ _cluster->start(_feed);
+ vespalib::ThreadStackExecutor executor(_params.get_client_threads(), 128_Ki);
+ BmFeeder feeder(_repo, *_cluster->get_feed_handler(), executor);
+ auto put_feed = _feed.make_feed(executor, _params, [this](BmRange range, BucketSelector bucket_selector) { return _feed.make_put_feed(range, bucket_selector); }, _feed.num_buckets(), "put");
+ auto update_feed = _feed.make_feed(executor, _params, [this](BmRange range, BucketSelector bucket_selector) { return _feed.make_update_feed(range, bucket_selector); }, _feed.num_buckets(), "update");
+ auto get_feed = _feed.make_feed(executor, _params, [this](BmRange range, BucketSelector bucket_selector) { return _feed.make_get_feed(range, bucket_selector); }, _feed.num_buckets(), "get");
+ auto remove_feed = _feed.make_feed(executor, _params, [this](BmRange range, BucketSelector bucket_selector) { return _feed.make_remove_feed(range, bucket_selector); }, _feed.num_buckets(), "remove");
+ BmNodeStatsReporter reporter(*_cluster);
reporter.start(500ms);
int64_t time_bias = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch() - 24h).count();
LOG(info, "Feed handler is '%s'", feeder.get_feed_handler().get_name().c_str());
- benchmark_feed(feeder, time_bias, put_feed, bm_params, bm_params.get_put_passes(), "put");
+ benchmark_feed(feeder, time_bias, put_feed, _params.get_put_passes(), "put");
reporter.report_now();
- benchmark_feed(feeder, time_bias, update_feed, bm_params, bm_params.get_update_passes(), "update");
+ benchmark_feed(feeder, time_bias, update_feed, _params.get_update_passes(), "update");
reporter.report_now();
- benchmark_feed(feeder, time_bias, get_feed, bm_params, bm_params.get_get_passes(), "get");
+ benchmark_feed(feeder, time_bias, get_feed, _params.get_get_passes(), "get");
reporter.report_now();
- benchmark_feed(feeder, time_bias, remove_feed, bm_params, bm_params.get_remove_passes(), "remove");
+ benchmark_feed(feeder, time_bias, remove_feed, _params.get_remove_passes(), "remove");
reporter.report_now();
reporter.stop();
LOG(info, "--------------------------------");
- cluster.stop();
+ _cluster->stop();
}
class App : public FastOS_Application
@@ -382,7 +382,9 @@ App::Main()
usage();
return 1;
}
- benchmark(_bm_params);
+ vespalib::rmdir(base_dir, true);
+ Benchmark bm(_bm_params);
+ bm.run();
return 0;
}
diff --git a/searchcore/src/apps/vespa-redistribute-bm/.gitignore b/searchcore/src/apps/vespa-redistribute-bm/.gitignore
new file mode 100644
index 00000000000..4a7424f7ef4
--- /dev/null
+++ b/searchcore/src/apps/vespa-redistribute-bm/.gitignore
@@ -0,0 +1 @@
+vespa-redistribute-bm
diff --git a/searchcore/src/apps/vespa-redistribute-bm/CMakeLists.txt b/searchcore/src/apps/vespa-redistribute-bm/CMakeLists.txt
new file mode 100644
index 00000000000..5b34c1aefb8
--- /dev/null
+++ b/searchcore/src/apps/vespa-redistribute-bm/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_vespa_redistribute_bm_app
+ SOURCES
+ vespa_redistribute_bm.cpp
+ OUTPUT_NAME vespa-redistribute-bm
+ DEPENDS
+ searchcore_bmcluster
+)
diff --git a/searchcore/src/apps/vespa-redistribute-bm/vespa_redistribute_bm.cpp b/searchcore/src/apps/vespa-redistribute-bm/vespa_redistribute_bm.cpp
new file mode 100644
index 00000000000..e5c3959d2d4
--- /dev/null
+++ b/searchcore/src/apps/vespa-redistribute-bm/vespa_redistribute_bm.cpp
@@ -0,0 +1,626 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/document/repo/configbuilder.h>
+#include <vespa/document/repo/document_type_repo_factory.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/fastos/app.h>
+#include <vespa/searchcore/bmcluster/avg_sampler.h>
+#include <vespa/searchcore/bmcluster/bm_cluster.h>
+#include <vespa/searchcore/bmcluster/bm_cluster_controller.h>
+#include <vespa/searchcore/bmcluster/bm_cluster_params.h>
+#include <vespa/searchcore/bmcluster/bm_distribution.h>
+#include <vespa/searchcore/bmcluster/bm_feed.h>
+#include <vespa/searchcore/bmcluster/bm_feeder.h>
+#include <vespa/searchcore/bmcluster/bm_feed_params.h>
+#include <vespa/searchcore/bmcluster/bm_node.h>
+#include <vespa/searchcore/bmcluster/bm_node_stats.h>
+#include <vespa/searchcore/bmcluster/bm_node_stats_reporter.h>
+#include <vespa/searchcore/bmcluster/bm_range.h>
+#include <vespa/searchcore/bmcluster/bucket_selector.h>
+#include <vespa/searchcore/bmcluster/spi_bm_feed_handler.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/objects/nbostream.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/util/lambdatask.h>
+#include <vespa/vespalib/util/size_literals.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <getopt.h>
+#include <iostream>
+#include <thread>
+
+#include <vespa/log/log.h>
+LOG_SETUP("vespa-redistribute-bm");
+
+using namespace proton;
+using namespace std::chrono_literals;
+
+using document::DocumentTypeRepo;
+using document::DocumentTypeRepoFactory;
+using document::DocumenttypesConfig;
+using document::DocumenttypesConfigBuilder;
+using search::bmcluster::AvgSampler;
+using search::bmcluster::BmClusterController;
+using search::bmcluster::IBmFeedHandler;
+using search::bmcluster::BmClusterParams;
+using search::bmcluster::BmCluster;
+using search::bmcluster::BmFeed;
+using search::bmcluster::BmFeedParams;
+using search::bmcluster::BmFeeder;
+using search::bmcluster::BmNode;
+using search::bmcluster::BmNodeStatsReporter;
+using search::bmcluster::BmRange;
+using search::bmcluster::BucketSelector;
+using search::index::DummyFileHeaderContext;
+using storage::lib::State;
+
+namespace {
+
+vespalib::string base_dir = "testdb";  // working directory for all node data; wiped in App::Main() and again in main()
+constexpr int base_port = 9017;  // first port assigned to the benchmark cluster's services
+
+std::shared_ptr<DocumenttypesConfig> make_document_types() {  // Builds the config for a single "test" doctype with one int header field.
+    using Struct = document::config_builder::Struct;
+    using DataType = document::DataType;
+    document::config_builder::DocumenttypesConfigBuilderHelper builder;
+    builder.document(42, "test", Struct("test.header").addField("int", DataType::T_INT), Struct("test.body"));  // 42 = doctype id
+    return std::make_shared<DocumenttypesConfig>(builder.config());
+}
+
+enum class Mode {  // Redistribution scenario; enumerator order must match mode_names below (get_mode casts the index).
+    GROW,        // flip nodes start DOWN, brought UP after the feed (see adjust_cluster_state_*)
+    SHRINK,      // flip nodes set RETIRED after the feed
+    PERM_CRASH,  // flip nodes set DOWN after the feed and left down
+    TEMP_CRASH,  // flip nodes set DOWN after the feed, brought back UP after the first redistribution
+    REPLACE,     // flip_nodes new nodes come UP while an equal number of old nodes are RETIRED
+    BAD,         // sentinel for an unrecognized --mode argument; rejected by BMParams::check()
+};
+
+std::vector<vespalib::string> mode_names = {  // accepted --mode values; index corresponds to the Mode enumerator
+    "grow",
+    "shrink",
+    "perm-crash",
+    "temp-crash",
+    "replace"
+};
+
+vespalib::string bad_mode_name("bad");  // name reported for Mode::BAD / out-of-range modes
+
+Mode get_mode(const vespalib::string& mode_name) {  // Maps a --mode argument to a Mode; Mode::BAD if unknown.
+    for (uint32_t i = 0; i < mode_names.size(); ++i) {
+        if (mode_name == mode_names[i]) {
+            return static_cast<Mode>(i);  // relies on mode_names order matching the enum
+        }
+    }
+    return Mode::BAD;
+}
+
+vespalib::string& get_mode_name(Mode mode) {  // Reverse of get_mode(); returns "bad" for Mode::BAD or any out-of-range value.
+    uint32_t i = static_cast<uint32_t>(mode);
+    return (i < mode_names.size()) ? mode_names[i] : bad_mode_name;
+}
+
+double
+estimate_lost_docs_base_ratio(uint32_t redundancy, uint32_t lost_nodes, uint32_t num_nodes)  // Fraction of docs whose replicas were all on lost nodes.
+{
+    if (redundancy > lost_nodes) {
+        return 0.0;  // more replicas than lost nodes => at least one copy always survives
+    }
+    double loss_ratio = 1.0;
+    for (uint32_t i = 0; i < redundancy; ++i) {
+        loss_ratio *= ((double) (lost_nodes - i)) / (num_nodes - i);  // P(i-th replica also on a lost node) — assumes replicas on distinct, uniformly spread nodes
+    }
+    LOG(info, "estimated lost docs base ratio: %4.2f", loss_ratio);
+    return loss_ratio;
+}
+
+double
+estimate_moved_docs_ratio_grow(uint32_t redundancy, uint32_t added_nodes, uint32_t num_nodes)  // Ratio of corpus moved when added_nodes join (num_nodes = final cluster size).
+{
+    double new_redundancy = redundancy;
+    double new_per_node_doc_ratio = new_redundancy / num_nodes;  // per-node share of the corpus after growth
+    double moved_ratio = new_per_node_doc_ratio * added_nodes;  // the new nodes are filled entirely by moves
+    LOG(info, "estimated_moved_docs_ratio_grow(%u,%u,%u)=%4.2f", redundancy, added_nodes, num_nodes, moved_ratio);
+    return moved_ratio;
+}
+
+double
+estimate_moved_docs_ratio_shrink(uint32_t redundancy, uint32_t retired_nodes, uint32_t num_nodes)  // Ratio of corpus moved off retiring nodes onto the survivors.
+{
+    double old_redundancy = redundancy;
+    double old_per_node_doc_ratio = old_redundancy / num_nodes;
+    uint32_t new_nodes = num_nodes - retired_nodes;
+    double new_redundancy = std::min(redundancy, new_nodes);  // redundancy is capped by the surviving node count
+    double new_per_node_doc_ratio = new_redundancy / new_nodes;
+    double moved_ratio = (new_per_node_doc_ratio - old_per_node_doc_ratio) * new_nodes;  // growth of the per-node share on survivors comes from moves
+    LOG(info, "estimated_moved_docs_ratio_shrink(%u,%u,%u)=%4.2f", redundancy, retired_nodes, num_nodes, moved_ratio);
+    return moved_ratio;
+}
+
+double
+estimate_moved_docs_ratio_crash(uint32_t redundancy, uint32_t crashed_nodes, uint32_t num_nodes)  // Ratio of corpus re-replicated after crashed_nodes go down.
+{
+    double old_redundancy = redundancy;
+    double old_per_node_doc_ratio = old_redundancy / num_nodes;
+    uint32_t new_nodes = num_nodes - crashed_nodes;
+    double new_redundancy = std::min(redundancy, new_nodes);
+    double new_per_node_doc_ratio = new_redundancy / new_nodes;
+    double lost_docs_ratio = estimate_lost_docs_base_ratio(redundancy, crashed_nodes, num_nodes) * new_redundancy;  // docs with every replica lost cannot be re-created
+    double moved_ratio = (new_per_node_doc_ratio - old_per_node_doc_ratio) * new_nodes - lost_docs_ratio;
+    LOG(info, "estimated_moved_docs_ratio_crash(%u,%u,%u)=%4.2f", redundancy, crashed_nodes, num_nodes, moved_ratio);
+    return moved_ratio;
+}
+
+double
+estimate_moved_docs_ratio_replace(uint32_t redundancy, uint32_t added_nodes, uint32_t retired_nodes, uint32_t num_nodes)  // Ratio moved when added_nodes join while retired_nodes leave.
+{
+    uint32_t old_nodes = num_nodes - added_nodes;  // cluster size before the replacement nodes joined
+    double old_redundancy = std::min(redundancy, old_nodes);
+    double old_per_node_doc_ratio = old_redundancy / old_nodes;
+    uint32_t new_nodes = num_nodes - retired_nodes;  // cluster size once the retired nodes are drained
+    double new_redundancy = std::min(redundancy, new_nodes);
+    double new_per_node_doc_ratio = new_redundancy / new_nodes;
+    double moved_ratio = new_per_node_doc_ratio * added_nodes;  // new nodes fill up entirely by moves
+    uint32_t stable_nodes = num_nodes - added_nodes - retired_nodes;
+    // Account for extra documents moved from retired nodes to stable nodes
+    double extra_per_stable_node_doc_ratio = new_per_node_doc_ratio * added_nodes / old_nodes;
+    double extra_moved_ratio = (std::min(1.0, new_per_node_doc_ratio + extra_per_stable_node_doc_ratio) - old_per_node_doc_ratio) * stable_nodes;  // capped at 1.0: a node cannot hold more than the whole corpus
+    moved_ratio += extra_moved_ratio;
+    LOG(info, "estimated_moved_docs_ratio_replace(%u,%u,%u,%u)=%4.2f, (of which %4.2f extra)", redundancy, added_nodes, retired_nodes, num_nodes, moved_ratio, extra_moved_ratio);
+    return moved_ratio;
+}
+
+class BMParams : public BmClusterParams,
+                 public BmFeedParams
+{
+    uint32_t _flip_nodes;  // number of nodes the scenario flips (added / retired / crashed)
+    Mode _mode;  // which redistribution scenario to benchmark
+    bool _use_feed_settle;  // sleep 2s after feed / redistribution to let background work settle
+public:
+    BMParams();
+    uint32_t get_flip_nodes() const noexcept { return _flip_nodes; }
+    Mode get_mode() const noexcept { return _mode; }
+    bool get_use_feed_settle() const noexcept { return _use_feed_settle; }
+    void set_flip_nodes(uint32_t value) { _flip_nodes = value; }
+    void set_mode(Mode value) { _mode = value; }
+    void set_use_feed_settle(bool value) { _use_feed_settle = value; }
+    bool check() const;  // validates the combined cluster / feed / scenario parameters
+};
+
+BMParams::BMParams()  // Defaults: 4 nodes, grow mode, 1 flip node, no settling.
+    : BmClusterParams(),
+      BmFeedParams(),
+      _flip_nodes(1u),
+      _mode(Mode::GROW),
+      _use_feed_settle(false)
+{
+    set_enable_service_layer(true);  // redistribution needs the full stack:
+    set_enable_distributor(true);    // service layer + distributors ...
+    set_use_document_api(true);      // ... fed through the document API
+    set_num_nodes(4);
+}
+
+
+bool
+BMParams::check() const  // Returns false (with a message on stderr) for any inconsistent parameter combination.
+{
+    if (!BmClusterParams::check()) {
+        return false;
+    }
+    if (!BmFeedParams::check()) {
+        return false;
+    }
+    if (get_num_nodes() < 2u) {  // redistribution needs at least a source and a target node
+        std::cerr << "Too few nodes: " << get_num_nodes() << std::endl;
+        return false;
+    }
+    if (_mode == Mode::REPLACE) {
+        if (_flip_nodes * 2 > get_num_nodes()) {  // replace flips 2 * _flip_nodes distinct nodes (new up, old retired)
+            std::cerr << "Too many flip nodes (" << _flip_nodes << ") with " << get_num_nodes() << " nodes (replace mode)" << std::endl;
+            return false;
+        }
+    } else {
+        if (_flip_nodes >= get_num_nodes()) {  // at least one node must stay up
+            std::cerr << "Too many flip nodes (" << _flip_nodes << ") with " << get_num_nodes() << " nodes (" << get_mode_name(_mode) << " mode)" << std::endl;
+            return false;
+        }
+    }
+    if (_mode == Mode::BAD) {  // unrecognized --mode argument
+        std::cerr << "Bad mode" << std::endl;
+        return false;
+    }
+    return true;
+}
+
+}
+
+class Benchmark {  // Owns the benchmark cluster and drives one feed-then-redistribute cycle.
+    BMParams _params;
+    std::shared_ptr<const DocumenttypesConfig> _document_types;
+    std::shared_ptr<const DocumentTypeRepo> _repo;
+    std::unique_ptr<BmCluster> _cluster;  // cluster under test, rooted at base_dir/base_port
+    BmFeed _feed;  // generates the document operations
+
+    void adjust_cluster_state_before_feed();  // initial node states (e.g. grow starts with nodes down)
+    void adjust_cluster_state_after_feed();   // the flip that triggers redistribution
+    void adjust_cluster_state_after_first_redistribution();  // second flip, used by temp-crash
+    double estimate_lost_docs();
+    double estimate_moved_docs();
+    void feed();
+    std::chrono::duration<double> redistribute();  // returns wall time until bucket activity stops
+
+public:
+    explicit Benchmark(const BMParams& params);
+    ~Benchmark();
+    void run();  // entry point: feed, flip, measure
+};
+
+Benchmark::Benchmark(const BMParams& params)  // Builds repo + cluster from the parameters; nodes are created but not started.
+    : _params(params),
+      _document_types(make_document_types()),
+      _repo(document::DocumentTypeRepoFactory::make(*_document_types)),
+      _cluster(std::make_unique<BmCluster>(base_dir, base_port, _params, _document_types, _repo)),
+      _feed(_repo)
+{
+    _cluster->make_nodes();  // instantiate the configured number of nodes
+}
+
+Benchmark::~Benchmark() = default;
+
+void
+Benchmark::adjust_cluster_state_before_feed()  // Prepares the initial cluster state for the chosen mode.
+{
+    auto& dist = _cluster->get_real_distribution();
+    auto& mode_name = get_mode_name(_params.get_mode());
+    switch (_params.get_mode()) {
+    case Mode::GROW:
+    case Mode::REPLACE:
+        for (uint32_t i = 0; i < _params.get_flip_nodes(); ++i) {
+            dist.set_node_state(i, State::DOWN);  // these nodes are "added" after the feed
+        }
+        LOG(info, "Mode %s: Taking down %u node(s) initially", mode_name.c_str(), _params.get_flip_nodes());
+        break;
+    default:  // shrink / perm-crash / temp-crash feed against the full cluster
+        LOG(info, "Mode %s: No cluster state adjust before feed", mode_name.c_str());
+    }
+    dist.commit_cluster_state_change();
+}
+
+void
+Benchmark::adjust_cluster_state_after_feed()  // Applies the mode's node flips once the corpus is in place; this triggers the redistribution being measured.
+{
+    auto& dist = _cluster->get_real_distribution();
+    auto& mode_name = get_mode_name(_params.get_mode());
+    switch (_params.get_mode()) {
+    case Mode::GROW:
+        for (uint32_t i = 0; i < _params.get_flip_nodes(); ++i) {
+            dist.set_node_state(i, State::UP);  // the initially-down nodes join now
+        }
+        LOG(info, "Mode %s: taking up %u node(s)", mode_name.c_str(), _params.get_flip_nodes());
+        break;
+    case Mode::SHRINK:
+        for (uint32_t i = 0; i < _params.get_flip_nodes(); ++i) {
+            dist.set_node_state(i, State::RETIRED);  // retired nodes drain gracefully
+        }
+        LOG(info, "Mode %s: Retiring %u node(s)", mode_name.c_str(), _params.get_flip_nodes());
+        break;
+    case Mode::PERM_CRASH:
+    case Mode::TEMP_CRASH:
+        for (uint32_t i = 0; i < _params.get_flip_nodes(); ++i) {
+            dist.set_node_state(i, State::DOWN);  // abrupt loss; replicas must be rebuilt
+        }
+        LOG(info, "Mode %s: taking down %u node(s)", mode_name.c_str(), _params.get_flip_nodes());
+        break;
+    case Mode::REPLACE:
+        for (uint32_t i = 0; i < _params.get_flip_nodes(); ++i) {
+            dist.set_node_state(i, State::UP);  // the replacement nodes join ...
+        }
+        for (uint32_t i = 0; i < _params.get_flip_nodes(); ++i) {
+            dist.set_node_state(i + _params.get_flip_nodes(), State::RETIRED);  // ... while an equal number of old nodes retire
+        }
+        LOG(info, "Mode %s: Taking up %u node(s) and retiring %u node(s)", mode_name.c_str(), _params.get_flip_nodes(), _params.get_flip_nodes());
+        break;
+    default:
+        LOG(info, "Mode %s: No cluster state adjust after feed", mode_name.c_str());
+    }
+    dist.commit_cluster_state_change();
+}
+
+void
+Benchmark::adjust_cluster_state_after_first_redistribution()  // Second flip, used by temp-crash: bring the crashed nodes back up.
+{
+    auto& dist = _cluster->get_real_distribution();
+    auto& mode_name = get_mode_name(_params.get_mode());
+    switch (_params.get_mode()) {
+    case Mode::TEMP_CRASH:
+        for (uint32_t i = 0; i < _params.get_flip_nodes(); ++i) {
+            dist.set_node_state(i, State::UP);  // crashed nodes return; second redistribution is timed by run()
+        }
+        LOG(info, "Mode %s: taking up %u node(s)", mode_name.c_str(), _params.get_flip_nodes());
+        break;
+    default:
+        LOG(info, "Mode %s: No cluster state adjust after first redistribution", mode_name.c_str());
+    }
+    dist.commit_cluster_state_change();
+}
+
+void
+Benchmark::feed()  // Feeds the put corpus once, reporting node stats along the way.
+{
+    vespalib::ThreadStackExecutor executor(_params.get_client_threads(), 128_Ki);
+    BmFeeder feeder(_repo, *_cluster->get_feed_handler(), executor);
+    auto put_feed = _feed.make_feed(executor, _params, [this](BmRange range, BucketSelector bucket_selector) { return _feed.make_put_feed(range, bucket_selector); }, _feed.num_buckets(), "put");
+    BmNodeStatsReporter reporter(*_cluster);
+    reporter.start(500ms);  // background stats reporting every 500ms
+    int64_t time_bias = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::system_clock::now().time_since_epoch() - 24h).count();  // operation timestamps start 24h in the past
+    LOG(info, "Feed handler is '%s'", feeder.get_feed_handler().get_name().c_str());
+    AvgSampler sampler;
+    feeder.run_feed_tasks(0, time_bias, put_feed, _params, sampler, "put");
+    reporter.report_now();
+    if (_params.get_use_feed_settle()) {
+        LOG(info, "Settling feed");
+        std::this_thread::sleep_for(2s);  // let background work triggered by the feed finish
+        reporter.report_now();
+    }
+}
+
+
+std::chrono::duration<double>
+Benchmark::redistribute()  // Pushes the committed cluster state and waits for bucket activity to go quiet.
+{
+    BmNodeStatsReporter reporter(*_cluster);
+    auto before = std::chrono::steady_clock::now();
+    reporter.start(500ms);
+    _cluster->propagate_cluster_state();  // this kicks off the redistribution being measured
+    reporter.report_now();
+    for (;;) {
+        auto duration = std::chrono::steady_clock::now() - reporter.get_change_time();
+        if (duration >= 6s) {  // no observed stats change for 6s => redistribution considered finished
+            break;
+        }
+        std::this_thread::sleep_for(100ms);
+    }
+    return reporter.get_change_time() - before;  // elapsed time up to the last observed change, excluding the quiet tail
+}
+
+double
+Benchmark::estimate_lost_docs()  // Expected number of docs whose replicas were all on crashed nodes; 0 for non-crash modes.
+{
+    switch (_params.get_mode()) {
+    case Mode::PERM_CRASH:
+    case Mode::TEMP_CRASH:
+    {
+        double new_redundancy = std::min(_params.get_redundancy(), _params.get_num_nodes() - _params.get_flip_nodes());  // redundancy capped by surviving nodes
+        auto lost_docs_ratio = estimate_lost_docs_base_ratio(_params.get_redundancy(), _params.get_flip_nodes(), _params.get_num_nodes()) * new_redundancy;
+        return _params.get_documents() * lost_docs_ratio;
+    }
+    default:
+        return 0.0;  // grow/shrink/replace never lose documents
+    }
+}
+
+double
+Benchmark::estimate_moved_docs()  // Dispatches to the per-mode ratio estimate and scales by corpus size.
+{
+    switch(_params.get_mode()) {
+    case Mode::GROW:
+        return _params.get_documents() * estimate_moved_docs_ratio_grow(_params.get_redundancy(), _params.get_flip_nodes(), _params.get_num_nodes());
+    case Mode::SHRINK:
+        return _params.get_documents() * estimate_moved_docs_ratio_shrink(_params.get_redundancy(), _params.get_flip_nodes(), _params.get_num_nodes());
+    case Mode::PERM_CRASH:
+    case Mode::TEMP_CRASH:
+        return _params.get_documents() * estimate_moved_docs_ratio_crash(_params.get_redundancy(), _params.get_flip_nodes(), _params.get_num_nodes());
+    case Mode::REPLACE:
+        return _params.get_documents() * estimate_moved_docs_ratio_replace(_params.get_redundancy(), _params.get_flip_nodes(), _params.get_flip_nodes(), _params.get_num_nodes());  // same count added and retired
+    default:
+        return 0.0;  // Mode::BAD never reaches here (rejected by check())
+    }
+}
+
+void
+Benchmark::run()  // Full cycle: feed under the initial state, flip nodes, time the redistribution.
+{
+    adjust_cluster_state_before_feed();  // e.g. grow/replace start with the new nodes down
+    _cluster->start(_feed);
+    feed();
+    LOG(info, "--------------------------------");
+    adjust_cluster_state_after_feed();  // the flip that triggers redistribution
+    auto elapsed = redistribute();
+    double moved_docs = estimate_moved_docs();
+    double lost_docs = estimate_lost_docs();
+    LOG(info, "Redistributed estimated %4.2f docs in %5.3f seconds, %4.2f docs/s, estimated %4.2f lost docs", moved_docs, elapsed.count(), moved_docs / elapsed.count(), lost_docs);
+    if (_params.get_mode() == Mode::TEMP_CRASH) {  // temp-crash times a second pass after the nodes come back
+        if (_params.get_use_feed_settle()) {
+            LOG(info, "Settling redistribution");
+            std::this_thread::sleep_for(2s);
+        }
+        adjust_cluster_state_after_first_redistribution();
+        elapsed = redistribute();
+        LOG(info, "Cleanup of %4.2f docs in %5.3f seconds, %4.2f docs/s, estimated %4.2f refound docs", moved_docs, elapsed.count(), moved_docs / elapsed.count(), lost_docs);
+    }
+    _cluster->stop();
+}
+
+class App : public FastOS_Application  // Command line front end for the redistribute benchmark.
+{
+    BMParams _bm_params;  // populated from command line options by get_options()
+public:
+    App();
+    ~App() override;
+    void usage();  // prints help text to stderr
+    bool get_options();  // parses argv into _bm_params; false on error
+    int Main() override;
+};
+
+App::App()  // Parameters keep the BMParams() defaults until get_options() runs.
+    : _bm_params()
+{
+}
+
+App::~App() = default;
+
+void
+App::usage()  // Prints command line help to stderr; option list must be kept in sync with get_options().
+{
+    std::cerr <<
+        "vespa-redistribute-bm version 0.0\n"
+        "\n"
+        "USAGE:\n";
+    std::cerr <<
+        "vespa-redistribute-bm\n"
+        "[--bucket-db-stripe-bits bits]\n"
+        "[--client-threads threads]\n"
+        "[--distributor-stripes stripes]\n"
+        "[--documents documents]\n"
+        "[--flip-nodes flip-nodes]\n"
+        "[--indexing-sequencer [latency,throughput,adaptive]]\n"
+        "[--max-pending max-pending]\n"
+        "[--mode [grow, shrink, perm-crash, temp-crash, replace]]\n"
+        "[--nodes nodes]\n"
+        "[--redundancy redundancy]\n"
+        "[--rpc-events-before-wakeup events]\n"
+        "[--rpc-network-threads threads]\n"
+        "[--rpc-targets-per-node targets]\n"
+        "[--response-threads threads]\n"
+        "[--skip-communicationmanager-thread]\n"
+        "[--use-async-message-handling]\n"
+        "[--use-feed-settle]" << std::endl;
+}
+
+bool
+App::get_options()  // Parses long options into _bm_params; returns the result of _bm_params.check().
+{
+    int c;
+    const char *opt_argument = nullptr;
+    int long_opt_index = 0;
+    static struct option long_opts[] = {  // NOTE: entry order must match longopts_enum below
+        { "bucket-db-stripe-bits", 1, nullptr, 0 },
+        { "client-threads", 1, nullptr, 0 },
+        { "distributor-stripes", 1, nullptr, 0 },
+        { "documents", 1, nullptr, 0 },
+        { "flip-nodes", 1, nullptr, 0 },
+        { "indexing-sequencer", 1, nullptr, 0 },
+        { "max-pending", 1, nullptr, 0 },
+        { "mode", 1, nullptr, 0 },
+        { "nodes", 1, nullptr, 0 },
+        { "redundancy", 1, nullptr, 0 },
+        { "response-threads", 1, nullptr, 0 },
+        { "rpc-events-before-wakeup", 1, nullptr, 0 },
+        { "rpc-network-threads", 1, nullptr, 0 },
+        { "rpc-targets-per-node", 1, nullptr, 0 },
+        { "skip-communicationmanager-thread", 0, nullptr, 0 },
+        { "use-async-message-handling", 0, nullptr, 0 },
+        { "use-feed-settle", 0, nullptr, 0 }
+    };
+    enum longopts_enum {  // indexes into long_opts[]; keep in the same order
+        LONGOPT_BUCKET_DB_STRIPE_BITS,
+        LONGOPT_CLIENT_THREADS,
+        LONGOPT_DISTRIBUTOR_STRIPES,
+        LONGOPT_DOCUMENTS,
+        LONGOPT_FLIP_NODES,
+        LONGOPT_INDEXING_SEQUENCER,
+        LONGOPT_MAX_PENDING,
+        LONGOPT_MODE,
+        LONGOPT_NODES,
+        LONGOPT_REDUNDANCY,
+        LONGOPT_RESPONSE_THREADS,
+        LONGOPT_RPC_EVENTS_BEFORE_WAKEUP,
+        LONGOPT_RPC_NETWORK_THREADS,
+        LONGOPT_RPC_TARGETS_PER_NODE,
+        LONGOPT_SKIP_COMMUNICATIONMANAGER_THREAD,
+        LONGOPT_USE_ASYNC_MESSAGE_HANDLING,
+        LONGOPT_USE_FEED_SETTLE
+    };
+    int opt_index = 1;
+    resetOptIndex(opt_index);
+    while ((c = GetOptLong("", opt_argument, opt_index, long_opts, &long_opt_index)) != -1) {
+        switch (c) {
+        case 0:  // long option; long_opt_index selects which one
+            switch(long_opt_index) {
+            case LONGOPT_BUCKET_DB_STRIPE_BITS:
+                _bm_params.set_bucket_db_stripe_bits(atoi(opt_argument));
+                break;
+            case LONGOPT_CLIENT_THREADS:
+                _bm_params.set_client_threads(atoi(opt_argument));
+                break;
+            case LONGOPT_DISTRIBUTOR_STRIPES:
+                _bm_params.set_distributor_stripes(atoi(opt_argument));
+                break;
+            case LONGOPT_DOCUMENTS:
+                _bm_params.set_documents(atoi(opt_argument));
+                break;
+            case LONGOPT_FLIP_NODES:
+                _bm_params.set_flip_nodes(atoi(opt_argument));
+                break;
+            case LONGOPT_INDEXING_SEQUENCER:
+                _bm_params.set_indexing_sequencer(opt_argument);
+                break;
+            case LONGOPT_MAX_PENDING:
+                _bm_params.set_max_pending(atoi(opt_argument));
+                break;
+            case LONGOPT_MODE:
+                _bm_params.set_mode(get_mode(opt_argument));
+                if (_bm_params.get_mode() == Mode::BAD) {  // message here; _bm_params.check() rejects Mode::BAD below
+                    std::cerr << "Unknown mode name " << opt_argument << std::endl;
+                }
+                break;
+            case LONGOPT_NODES:
+                _bm_params.set_num_nodes(atoi(opt_argument));
+                break;
+            case LONGOPT_REDUNDANCY:
+                _bm_params.set_redundancy(atoi(opt_argument));
+                break;
+            case LONGOPT_RESPONSE_THREADS:
+                _bm_params.set_response_threads(atoi(opt_argument));
+                break;
+            case LONGOPT_RPC_EVENTS_BEFORE_WAKEUP:
+                _bm_params.set_rpc_events_before_wakeup(atoi(opt_argument));
+                break;
+            case LONGOPT_RPC_NETWORK_THREADS:
+                _bm_params.set_rpc_network_threads(atoi(opt_argument));
+                break;
+            case LONGOPT_RPC_TARGETS_PER_NODE:
+                _bm_params.set_rpc_targets_per_node(atoi(opt_argument));
+                break;
+            case LONGOPT_SKIP_COMMUNICATIONMANAGER_THREAD:
+                _bm_params.set_skip_communicationmanager_thread(true);
+                break;
+            case LONGOPT_USE_ASYNC_MESSAGE_HANDLING:
+                _bm_params.set_use_async_message_handling_on_schedule(true);
+                break;
+            case LONGOPT_USE_FEED_SETTLE:
+                _bm_params.set_use_feed_settle(true);
+                break;
+            default:
+                return false;
+            }
+            break;
+        default:
+            return false;
+        }
+    }
+    return _bm_params.check();  // final validation of the combined parameters
+}
+
+int
+App::Main()  // Parse options, wipe any stale working directory, run the benchmark.
+{
+    if (!get_options()) {
+        usage();
+        return 1;
+    }
+    vespalib::rmdir(base_dir, true);  // start from a clean working directory
+    Benchmark bm(_bm_params);
+    bm.run();
+    return 0;
+}
+
+int
+main(int argc, char* argv[])
+{
+    DummyFileHeaderContext::setCreator("vespa-redistribute-bm");
+    App app;
+    auto exit_value = app.Entry(argc, argv);
+    vespalib::rmdir(base_dir, true);  // clean up the working directory regardless of outcome
+    return exit_value;
+}
diff --git a/searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp b/searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp
index 18b9962003c..b19c9163254 100644
--- a/searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp
+++ b/searchcore/src/tests/proton/attribute/attribute_usage_filter/attribute_usage_filter_test.cpp
@@ -12,6 +12,7 @@ LOG_SETUP("attribute_usage_filter_test");
using proton::AttributeUsageFilter;
using proton::AttributeUsageStats;
using proton::IAttributeUsageListener;
+using search::AddressSpaceComponents;
using search::AddressSpaceUsage;
using vespalib::AddressSpace;
@@ -27,17 +28,15 @@ class MyAttributeStats : public AttributeUsageStats
{
public:
void triggerEnumStoreLimit() {
- merge({ enumStoreOverLoad,
- search::AddressSpaceComponents::default_multi_value_usage() },
- "enumeratedName",
- "ready");
+ AddressSpaceUsage usage;
+ usage.set(AddressSpaceComponents::enum_store, enumStoreOverLoad);
+ merge(usage, "enumeratedName", "ready");
}
void triggerMultiValueLimit() {
- merge({ search::AddressSpaceComponents::default_enum_store_usage(),
- multiValueOverLoad },
- "multiValueName",
- "ready");
+ AddressSpaceUsage usage;
+ usage.set(AddressSpaceComponents::multi_value, multiValueOverLoad);
+ merge(usage, "multiValueName", "ready");
}
};
@@ -130,7 +129,8 @@ TEST_F("Check that multivalue limit can be reached", Fixture)
TEST_F("listener is updated when attribute stats change", Fixture)
{
AttributeUsageStats stats;
- AddressSpaceUsage usage(AddressSpace(12, 10, 15), AddressSpace(22, 20, 25));
+ AddressSpaceUsage usage;
+ usage.set("my_comp", AddressSpace(12, 10, 15));
stats.merge(usage, "my_attr", "my_subdb");
f.setAttributeStats(stats);
EXPECT_EQUAL(stats, f.listener->stats);
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp
index 36e1e394f40..9fc0b26fff5 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp
@@ -148,7 +148,8 @@ BmCluster::BmCluster(const vespalib::string& base_dir, int base_port, const BmCl
_document_types(std::move(document_types)),
_repo(std::move(repo)),
_field_set_repo(std::make_unique<const document::FieldSetRepo>(*_repo)),
- _distribution(std::make_shared<const BmDistribution>(params.get_num_nodes())),
+ _real_distribution(std::make_shared<BmDistribution>(params.get_num_nodes(), params.get_redundancy())),
+ _distribution(_real_distribution),
_nodes(params.get_num_nodes()),
_cluster_controller(std::make_shared<BmClusterController>(*this, *_distribution)),
_feed_handler()
@@ -438,4 +439,10 @@ BmCluster::get_node_stats()
return node_stats;
}
+void
+BmCluster::propagate_cluster_state()  // Delegates to the cluster controller to push the current state bundle to the nodes.
+{
+    _cluster_controller->propagate_cluster_state();
+}
+
}
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h
index 8615b44bd7e..62ec710c296 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h
@@ -26,6 +26,7 @@ namespace storage::rpc { class SharedRpcResources; }
namespace search::bmcluster {
class BmClusterController;
+class BmDistribution;
class BmFeed;
class BmMessageBus;
class BmNode;
@@ -55,6 +56,7 @@ class BmCluster {
std::shared_ptr<DocumenttypesConfig> _document_types;
std::shared_ptr<const document::DocumentTypeRepo> _repo;
std::unique_ptr<const document::FieldSetRepo> _field_set_repo;
+ std::shared_ptr<BmDistribution> _real_distribution;
std::shared_ptr<const IBmDistribution> _distribution;
std::vector<std::unique_ptr<BmNode>> _nodes;
std::shared_ptr<BmClusterController> _cluster_controller;
@@ -90,6 +92,8 @@ public:
uint32_t get_num_nodes() const { return _nodes.size(); }
BmNode *get_node(uint32_t node_idx) const { return node_idx < _nodes.size() ? _nodes[node_idx].get() : nullptr; }
std::vector<BmNodeStats> get_node_stats();
+ BmDistribution& get_real_distribution() { return *_real_distribution; }
+ void propagate_cluster_state();
};
}
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp
index 7766ab6c5b3..2a214130392 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp
@@ -12,6 +12,7 @@ BmClusterParams::BmClusterParams()
_enable_service_layer(false),
_indexing_sequencer(),
_num_nodes(1),
+ _redundancy(1),
_response_threads(2), // Same default as in stor-filestor.def
_rpc_events_before_wakeup(1), // Same default as in stor-communicationmanager.def
_rpc_network_threads(1), // Same default as previous in stor-communicationmanager.def
@@ -42,6 +43,10 @@ BmClusterParams::check() const
std::cerr << "Too few rpc targets per node: " << _rpc_targets_per_node << std::endl;
return false;
}
+ if (_num_nodes < _redundancy) {
+ std::cerr << "Too high redundancy " << _redundancy << " with " << _num_nodes << " nodes" << std::endl;
+ return false;
+ }
return true;
}
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h
index 5bc6b97487c..68dff7f52ed 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h
@@ -18,6 +18,7 @@ class BmClusterParams
bool _enable_service_layer;
vespalib::string _indexing_sequencer;
uint32_t _num_nodes;
+ uint32_t _redundancy;
uint32_t _response_threads;
uint32_t _rpc_events_before_wakeup;
uint32_t _rpc_network_threads;
@@ -36,6 +37,7 @@ public:
bool get_enable_distributor() const { return _enable_distributor; }
const vespalib::string & get_indexing_sequencer() const { return _indexing_sequencer; }
uint32_t get_num_nodes() const { return _num_nodes; }
+ uint32_t get_redundancy() const { return _redundancy; }
uint32_t get_response_threads() const { return _response_threads; }
uint32_t get_rpc_events_before_wakeup() const { return _rpc_events_before_wakeup; }
uint32_t get_rpc_network_threads() const { return _rpc_network_threads; }
@@ -55,6 +57,7 @@ public:
void set_enable_service_layer(bool value) { _enable_service_layer = value; }
void set_indexing_sequencer(vespalib::stringref sequencer) { _indexing_sequencer = sequencer; }
void set_num_nodes(uint32_t value) { _num_nodes = value; }
+ void set_redundancy(uint32_t value) { _redundancy = value; }
void set_response_threads(uint32_t threads_in) { _response_threads = threads_in; }
void set_rpc_events_before_wakeup(uint32_t value) { _rpc_events_before_wakeup = value; }
void set_rpc_network_threads(uint32_t threads_in) { _rpc_network_threads = threads_in; }
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_distribution.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_distribution.cpp
index 44b253b3d35..2ab1d4fd7f7 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_distribution.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_distribution.cpp
@@ -5,7 +5,12 @@
#include <vespa/vespalib/stllike/asciistream.h>
#include <cassert>
+using storage::lib::ClusterState;
using storage::lib::ClusterStateBundle;
+using storage::lib::Node;
+using storage::lib::NodeState;
+using storage::lib::NodeType;
+using storage::lib::State;
namespace search::bmcluster {
@@ -14,7 +19,7 @@ using DistributionConfigBuilder = BmDistribution::DistributionConfigBuilder;
namespace {
BmDistribution::DistributionConfig
-make_distribution_config(uint32_t num_nodes)
+make_distribution_config(uint32_t num_nodes, uint32_t redundancy)
{
DistributionConfigBuilder dc;
{
@@ -32,27 +37,28 @@ make_distribution_config(uint32_t num_nodes)
group.partitions = "";
dc.group.push_back(std::move(group));
}
- dc.redundancy = 1;
- dc.readyCopies = 1;
+ dc.redundancy = redundancy;
+ dc.readyCopies = redundancy;
return dc;
}
-ClusterStateBundle
-make_cluster_state_bundle(uint32_t num_nodes)
+ClusterState
+make_cluster_state(uint32_t num_nodes)
{
vespalib::asciistream s;
s << "version:2 distributor:" << num_nodes << " storage:" << num_nodes;
- storage::lib::ClusterStateBundle bundle(storage::lib::ClusterState(s.str()));
- return bundle;
+ return storage::lib::ClusterState(s.str());
}
}
-BmDistribution::BmDistribution(uint32_t num_nodes)
+BmDistribution::BmDistribution(uint32_t num_nodes, uint32_t redundancy)
: _num_nodes(num_nodes),
- _distribution_config(make_distribution_config(num_nodes)),
+ _distribution_config(make_distribution_config(num_nodes, redundancy)),
_distribution(_distribution_config),
- _cluster_state_bundle(make_cluster_state_bundle(num_nodes))
+ _pending_cluster_state(make_cluster_state(num_nodes)),
+ _cluster_state_bundle(_pending_cluster_state),
+ _has_pending_cluster_state(false)
{
}
@@ -95,4 +101,33 @@ BmDistribution::get_cluster_state_bundle() const
return _cluster_state_bundle;
}
+void
+BmDistribution::set_node_state(uint32_t node_idx, bool distributor, const State& state)  // Stage a node state change; takes effect on commit_cluster_state_change().
+{
+    const NodeType& node_type = distributor ? NodeType::DISTRIBUTOR : NodeType::STORAGE;
+    Node node(node_type, node_idx);
+    NodeState node_state(node_type, state);
+    _pending_cluster_state.setNodeState(node, node_state);
+    if (!_has_pending_cluster_state) {
+        _pending_cluster_state.setVersion(_pending_cluster_state.getVersion() + 1);  // one version bump per batch of staged changes
+        _has_pending_cluster_state = true;
+    }
+}
+
+void
+BmDistribution::set_node_state(uint32_t node_idx, const State& state)  // Stage the same state for both the storage and distributor role of the node.
+{
+    set_node_state(node_idx, false, state);
+    set_node_state(node_idx, true, state);
+}
+
+void
+BmDistribution::commit_cluster_state_change()  // Publish staged changes as a new cluster state bundle; no-op if nothing is staged.
+{
+    if (_has_pending_cluster_state) {
+        _cluster_state_bundle = ClusterStateBundle(_pending_cluster_state);
+        _has_pending_cluster_state = false;
+    }
+}
+
};
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_distribution.h b/searchcore/src/vespa/searchcore/bmcluster/bm_distribution.h
index fde89a0d766..6be7592f561 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_distribution.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_distribution.h
@@ -19,16 +19,20 @@ class BmDistribution : public IBmDistribution
uint32_t _num_nodes;
DistributionConfigBuilder _distribution_config;
storage::lib::Distribution _distribution;
+ storage::lib::ClusterState _pending_cluster_state;
storage::lib::ClusterStateBundle _cluster_state_bundle;
-
+ bool _has_pending_cluster_state;
public:
- BmDistribution(uint32_t num_nodes);
+ BmDistribution(uint32_t num_nodes, uint32_t redundancy);
~BmDistribution() override;
uint32_t get_num_nodes() const override;
uint32_t get_service_layer_node_idx(const document::Bucket & bucket) const override;
uint32_t get_distributor_node_idx(const document::Bucket & bucket) const override;
DistributionConfig get_distribution_config() const override;
storage::lib::ClusterStateBundle get_cluster_state_bundle() const override;
+ void set_node_state(uint32_t node_idx, bool distributor, const storage::lib::State& state);
+ void set_node_state(uint32_t node_idx, const storage::lib::State& state);
+ void commit_cluster_state_change();
};
};
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.cpp
index 4d464923efa..83956dfc274 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.cpp
@@ -16,11 +16,29 @@ using vespalib::makeLambdaTask;
namespace search::bmcluster {
+namespace {
+
+bool steady_buckets_stats(const std::optional<BmBucketsStats> buckets)
+{
+ if (!buckets.has_value()) {
+ return false; // No info available
+ }
+ auto& value = buckets.value();
+ if (!value.get_valid()) {
+ return false; // Some information still missing
+ }
+ return value.get_buckets_pending() == 0u;
+}
+
+}
+
BmNodeStatsReporter::BmNodeStatsReporter(BmCluster &cluster)
: _cluster(cluster),
_executor(1, 128_Ki),
_mutex(),
_cond(),
+ _change_time(),
+ _prev_node_stats(),
_pending_report(1u),
_started(false),
_stop(false)
@@ -99,6 +117,10 @@ BmNodeStatsReporter::report()
}
vespalib::string ss(s.str());
LOG(info, "%s", ss.c_str());
+ if (!(node_stats == _prev_node_stats) || !steady_buckets_stats(total_buckets)) {
+ _change_time = std::chrono::steady_clock::now();
+ _prev_node_stats = node_stats;
+ }
}
void
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.h b/searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.h
index 356d35474d1..1c8f013f5e6 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node_stats_reporter.h
@@ -10,6 +10,7 @@
namespace search::bmcluster {
class BmCluster;
+class BmNodeStats;
/*
* Class handling background reporting of node stats during feed or
@@ -20,6 +21,8 @@ class BmNodeStatsReporter {
vespalib::ThreadStackExecutor _executor;
std::mutex _mutex;
std::condition_variable _cond;
+ std::chrono::time_point<std::chrono::steady_clock> _change_time;
+ std::vector<BmNodeStats> _prev_node_stats;
uint32_t _pending_report;
bool _started;
bool _stop;
@@ -32,6 +35,7 @@ public:
void start(std::chrono::milliseconds interval);
void stop();
void report_now();
+ std::chrono::time_point<std::chrono::steady_clock> get_change_time() const noexcept { return _change_time; }
};
}
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index daaab2b823a..43743c23629 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -435,18 +435,6 @@ initialize.threads int default = 0
## before put and update operations in feed is blocked.
writefilter.attribute.address_space_limit double default = 0.9
-## Portion of enumstore address space that can be used before put and update
-## portion of feed is blocked.
-## Deprecated -> Use address_space_limit
-## TODO: remove this when enum store is removed from AttributeUsageStats
-writefilter.attribute.enumstorelimit double default = 0.9
-
-## Portion of attribute multivalue mapping address space that can be used
-## before put and update portion of feed is blocked.
-## Deprecated -> Use address_space_limit
-## TODO: remove this when multi value is removed from AttributeUsageStats
-writefilter.attribute.multivaluelimit double default = 0.9
-
## Portion of physical memory that can be resident memory in anonymous mapping
## by the proton process before put and update portion of feed is blocked.
writefilter.memorylimit double default = 0.8
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.cpp
index f0ab56562a6..d89e273df27 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.cpp
@@ -9,9 +9,7 @@ using search::AddressSpaceComponents;
namespace proton {
AttributeUsageStats::AttributeUsageStats()
- : _enumStoreUsage(AddressSpaceComponents::default_enum_store_usage()),
- _multiValueUsage(AddressSpaceComponents::default_multi_value_usage()),
- _max_usage(vespalib::AddressSpace())
+ : _max_usage(vespalib::AddressSpace())
{
}
@@ -22,8 +20,6 @@ AttributeUsageStats::merge(const search::AddressSpaceUsage &usage,
const vespalib::string &attributeName,
const vespalib::string &subDbName)
{
- _enumStoreUsage.merge(usage.enum_store_usage(), attributeName, AddressSpaceComponents::enum_store, subDbName);
- _multiValueUsage.merge(usage.multi_value_usage(), attributeName, AddressSpaceComponents::multi_value, subDbName);
for (const auto& entry : usage.get_all()) {
_max_usage.merge(entry.second, attributeName, entry.first, subDbName);
}
@@ -32,9 +28,7 @@ AttributeUsageStats::merge(const search::AddressSpaceUsage &usage,
std::ostream&
operator<<(std::ostream& out, const AttributeUsageStats& rhs)
{
- out << "{enum_store=" << rhs.enumStoreUsage() <<
- ", multi_value=" << rhs.multiValueUsage() <<
- ", max_address_space_usage=" << rhs.max_address_space_usage() << "}";
+ out << "{max_address_space_usage=" << rhs.max_address_space_usage() << "}";
return out;
}
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.h b/searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.h
index 762cc324f89..1411c626bfb 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.h
+++ b/searchcore/src/vespa/searchcore/proton/attribute/attribute_usage_stats.h
@@ -8,14 +8,11 @@
namespace proton {
/**
- * Class representing aggregated attribute usage, with info about
- * the most bloated attributes with regards to enum store and
- * multivalue mapping.
+ * Class representing aggregated max address space usage
+ * among components in attributes vectors in all sub databases.
*/
class AttributeUsageStats
{
- AddressSpaceUsageStats _enumStoreUsage;
- AddressSpaceUsageStats _multiValueUsage;
AddressSpaceUsageStats _max_usage;
public:
@@ -25,14 +22,10 @@ public:
const vespalib::string &attributeName,
const vespalib::string &subDbName);
- const AddressSpaceUsageStats& enumStoreUsage() const { return _enumStoreUsage; }
- const AddressSpaceUsageStats& multiValueUsage() const { return _multiValueUsage; }
const AddressSpaceUsageStats& max_address_space_usage() const { return _max_usage; }
bool operator==(const AttributeUsageStats& rhs) const {
- return (_enumStoreUsage == rhs._enumStoreUsage) &&
- (_multiValueUsage == rhs._multiValueUsage) &&
- (_max_usage == rhs._max_usage);
+ return (_max_usage == rhs._max_usage);
}
};
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
index 7bd07dba505..d44f9ff6d2e 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
@@ -92,10 +92,6 @@ DocumentDBTaggedMetrics::AttributeMetrics::~AttributeMetrics() = default;
DocumentDBTaggedMetrics::AttributeMetrics::ResourceUsageMetrics::ResourceUsageMetrics(MetricSet *parent)
: MetricSet("resource_usage", {}, "Metrics for various attribute vector resources usage", parent),
- enumStore("enum_store", {}, "The highest relative amount of enum store address space used among "
- "all enumerated attribute vectors in this document db (value in the range [0, 1])", this),
- multiValue("multi_value", {}, "The highest relative amount of multi-value address space used among "
- "all multi-value attribute vectors in this document db (value in the range [0, 1])", this),
address_space("address_space", {}, "The max relative address space used among "
"components in all attribute vectors in this document db (value in the range [0, 1])", this),
feedingBlocked("feeding_blocked", {}, "Whether feeding is blocked due to attribute resource limits being reached (value is either 0 or 1)", this)
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
index 8d225115c37..04e16cd5cb7 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
+++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
@@ -86,8 +86,6 @@ struct DocumentDBTaggedMetrics : metrics::MetricSet
{
struct ResourceUsageMetrics : metrics::MetricSet
{
- metrics::DoubleValueMetric enumStore;
- metrics::DoubleValueMetric multiValue;
metrics::DoubleValueMetric address_space;
metrics::LongValueMetric feedingBlocked;
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp
index 956e9ea198e..753bd1cd148 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp
@@ -304,11 +304,7 @@ DocumentDBMetricsUpdater::updateAttributeResourceUsageMetrics(DocumentDBTaggedMe
{
AttributeUsageStats stats = _writeFilter.getAttributeUsageStats();
bool feedBlocked = !_writeFilter.acceptWriteOperation();
- double enumStoreUsed = stats.enumStoreUsage().getUsage().usage();
- double multiValueUsed = stats.multiValueUsage().getUsage().usage();
double address_space_used = stats.max_address_space_usage().getUsage().usage();
- metrics.resourceUsage.enumStore.set(enumStoreUsed);
- metrics.resourceUsage.multiValue.set(multiValueUsed);
metrics.resourceUsage.address_space.set(address_space_used);
metrics.resourceUsage.feedingBlocked.set(feedBlocked ? 1 : 0);
}
diff --git a/searchlib/src/vespa/searchlib/attribute/address_space_usage.cpp b/searchlib/src/vespa/searchlib/attribute/address_space_usage.cpp
index da2e376719c..6783ea84354 100644
--- a/searchlib/src/vespa/searchlib/attribute/address_space_usage.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/address_space_usage.cpp
@@ -12,15 +12,6 @@ AddressSpaceUsage::AddressSpaceUsage()
{
}
-AddressSpaceUsage::AddressSpaceUsage(const AddressSpace& enum_store_usage,
- const AddressSpace& multi_value_usage)
- : _map()
-{
- // TODO: Remove this constructor and instead add usage for each relevant component explicit.
- set(AddressSpaceComponents::enum_store, enum_store_usage);
- set(AddressSpaceComponents::multi_value, multi_value_usage);
-}
-
void
AddressSpaceUsage::set(const vespalib::string& component, const vespalib::AddressSpace& usage)
{
diff --git a/searchlib/src/vespa/searchlib/attribute/address_space_usage.h b/searchlib/src/vespa/searchlib/attribute/address_space_usage.h
index 9a92bb5d858..3fe24e39a14 100644
--- a/searchlib/src/vespa/searchlib/attribute/address_space_usage.h
+++ b/searchlib/src/vespa/searchlib/attribute/address_space_usage.h
@@ -20,8 +20,6 @@ private:
public:
AddressSpaceUsage();
- AddressSpaceUsage(const vespalib::AddressSpace& enum_store_usage,
- const vespalib::AddressSpace& multi_value_usage);
void set(const vespalib::string& component, const vespalib::AddressSpace& usage);
vespalib::AddressSpace get(const vespalib::string& component) const;
const AddressSpaceMap& get_all() const { return _map; }
diff --git a/slobrok/CMakeLists.txt b/slobrok/CMakeLists.txt
index c6c6313cf68..92fc393418f 100644
--- a/slobrok/CMakeLists.txt
+++ b/slobrok/CMakeLists.txt
@@ -22,6 +22,7 @@ vespa_define_module(
src/tests/local_rpc_monitor_map
src/tests/mirrorapi
src/tests/registerapi
+ src/tests/rpc_mapping_monitor
src/tests/service_map_history
src/tests/service_map_mirror
src/tests/standalone
diff --git a/slobrok/src/tests/rpc_mapping_monitor/CMakeLists.txt b/slobrok/src/tests/rpc_mapping_monitor/CMakeLists.txt
new file mode 100644
index 00000000000..a5de3338309
--- /dev/null
+++ b/slobrok/src/tests/rpc_mapping_monitor/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(slobrok_rpc_mapping_monitor_test_app TEST
+ SOURCES
+ rpc_mapping_monitor_test.cpp
+ DEPENDS
+ slobrok_slobrokserver
+ GTest::GTest
+)
+vespa_add_test(NAME slobrok_rpc_mapping_monitor_test_app COMMAND slobrok_rpc_mapping_monitor_test_app)
diff --git a/slobrok/src/tests/rpc_mapping_monitor/rpc_mapping_monitor_test.cpp b/slobrok/src/tests/rpc_mapping_monitor/rpc_mapping_monitor_test.cpp
new file mode 100644
index 00000000000..f503453f934
--- /dev/null
+++ b/slobrok/src/tests/rpc_mapping_monitor/rpc_mapping_monitor_test.cpp
@@ -0,0 +1,224 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/slobrok/server/rpc_mapping_monitor.h>
+#include <vespa/fnet/transport_debugger.h>
+#include <vespa/fnet/transport_thread.h>
+#include <vespa/fnet/frt/supervisor.h>
+#include <vespa/vespalib/util/require.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <map>
+
+using namespace vespalib;
+using namespace slobrok;
+using vespalib::make_string_short::fmt;
+
+// simple rpc server implementing the required slobrok call-back API
+struct Server : FRT_Invokable {
+ fnet::frt::StandaloneFRT frt;
+ std::vector<vespalib::string> names;
+ size_t inject_fail_cnt;
+ FNET_Connection *last_conn;
+ void set_last_conn(FNET_Connection *conn) {
+ if (last_conn) {
+ last_conn->SubRef();
+ }
+ last_conn = conn;
+ if (last_conn) {
+ last_conn->AddRef();
+ }
+ }
+ Server(fnet::TimeTools::SP time_tools) : frt(TransportConfig().time_tools(time_tools)), names(),
+ inject_fail_cnt(0), last_conn(nullptr)
+ {
+ FRT_ReflectionBuilder rb(&frt.supervisor());
+ rb.DefineMethod("slobrok.callback.listNamesServed", "", "S", FRT_METHOD(Server::rpc_listNamesServed), this);
+ rb.DefineMethod("slobrok.callback.notifyUnregistered", "s", "", FRT_METHOD(Server::rpc_notifyUnregistered), this);
+ REQUIRE(frt.supervisor().Listen(0));
+ }
+ ~Server() { set_last_conn(nullptr); }
+ vespalib::string spec() const { return fmt("tcp/%d", frt.supervisor().GetListenPort()); }
+ FNET_Transport &transport() { return *frt.supervisor().GetTransport(); }
+ void rpc_listNamesServed(FRT_RPCRequest *req) {
+ set_last_conn(req->GetConnection());
+ if (inject_fail_cnt > 0) {
+ req->SetError(FRTE_RPC_METHOD_FAILED, "fail injected by unit test");
+ --inject_fail_cnt;
+ } else {
+ FRT_Values &dst = *req->GetReturn();
+ FRT_StringValue *names_out = dst.AddStringArray(names.size());
+ for (size_t i = 0; i < names.size(); ++i) {
+ dst.SetString(&names_out[i], names[i].c_str());
+ }
+ }
+ }
+ void rpc_notifyUnregistered(FRT_RPCRequest *) {}
+};
+
+enum class State { ANY, UP, DOWN };
+
+// Run-Length-Encoded historic state samples for a single service mapping
+struct States {
+ struct Entry {
+ State state;
+ size_t cnt;
+ };
+ std::vector<Entry> hist;
+ State state() const { return hist.back().state; }
+ States() : hist({{State::ANY, 0}}) {}
+ void sample(State state) {
+ if (state == hist.back().state) {
+ ++hist.back().cnt;
+ } else {
+ hist.push_back(Entry{state, 1});
+ }
+ }
+ size_t samples(State state = State::ANY) const {
+ size_t n = 0;
+ for (const auto &entry: hist) {
+ if ((entry.state == state) || (state == State::ANY)) {
+ n += entry.cnt;
+ }
+ }
+ return n;
+ }
+};
+
+// history of which call-backs we have gotten so far
+struct History : MappingMonitorOwner {
+ std::map<ServiceMapping, States> map;
+ void up(const ServiceMapping &mapping) override { map[mapping].sample(State::UP); }
+ void down(const ServiceMapping &mapping) override { map[mapping].sample(State::DOWN); }
+};
+
+struct RpcMappingMonitorTest : public ::testing::Test {
+ fnet::TransportDebugger debugger;
+ fnet::frt::StandaloneFRT my_frt;
+ Server a;
+ Server b;
+ History hist;
+ std::unique_ptr<RpcMappingMonitor> monitor;
+ ServiceMapping foo_a;
+ ServiceMapping bar_a;
+ ServiceMapping baz_b;
+ RpcMappingMonitorTest()
+ : debugger(),
+ my_frt(TransportConfig().time_tools(debugger.time_tools())),
+ a(debugger.time_tools()),
+ b(debugger.time_tools()),
+ hist(),
+ monitor(),
+ foo_a("foo", a.spec()),
+ bar_a("bar", a.spec()),
+ baz_b("baz", b.spec())
+ {
+ debugger.attach({*my_frt.supervisor().GetTransport(), a.transport(), b.transport()});
+ monitor = std::make_unique<RpcMappingMonitor>(my_frt.supervisor(), hist);
+ a.names.push_back(foo_a.name);
+ a.names.push_back(bar_a.name);
+ b.names.push_back(baz_b.name);
+ }
+ ~RpcMappingMonitorTest() {
+ monitor.reset();
+ debugger.detach();
+ }
+};
+
+TEST_F(RpcMappingMonitorTest, services_can_be_monitored) {
+ monitor->start(foo_a, false);
+ monitor->start(bar_a, false);
+ monitor->start(baz_b, false);
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return ((hist.map[foo_a].samples() >= 3) &&
+ (hist.map[bar_a].samples() >= 3) &&
+ (hist.map[baz_b].samples() >= 3)); }));
+ EXPECT_EQ(hist.map[foo_a].samples(State::DOWN), 0);
+ EXPECT_EQ(hist.map[bar_a].samples(State::DOWN), 0);
+ EXPECT_EQ(hist.map[baz_b].samples(State::DOWN), 0);
+}
+
+TEST_F(RpcMappingMonitorTest, hurry_means_faster) {
+ monitor->start(foo_a, false);
+ monitor->start(baz_b, true);
+ auto t0 = debugger.time();
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return ((hist.map[baz_b].samples() > 0)); }));
+ EXPECT_EQ(hist.map[foo_a].samples(), 0);
+ auto t1 = debugger.time();
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return ((hist.map[foo_a].samples() > 0)); }));
+ auto t2 = debugger.time();
+ fprintf(stderr, "hurry: ~%zu ms, normal: ~%zu ms\n", count_ms(t1-t0), count_ms(t2-t0));
+ EXPECT_GT((t2 - t0), 10 * (t1 - t0));
+ EXPECT_EQ(hist.map[foo_a].state(), State::UP);
+ EXPECT_EQ(hist.map[baz_b].state(), State::UP);
+}
+
+TEST_F(RpcMappingMonitorTest, stop_means_stop) {
+ monitor->start(foo_a, false);
+ monitor->start(baz_b, true);
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return ((hist.map[baz_b].samples() == 1)); }));
+ monitor->stop(baz_b);
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return ((hist.map[foo_a].samples() == 3)); }));
+ EXPECT_EQ(hist.map[baz_b].samples(), 1);
+ EXPECT_EQ(hist.map[foo_a].state(), State::UP);
+ EXPECT_EQ(hist.map[baz_b].state(), State::UP);
+}
+
+TEST_F(RpcMappingMonitorTest, health_checks_may_fail) {
+ ServiceMapping bad_spec("foo", "this spec is invalid");
+ ServiceMapping failed_ping("foo", a.spec());
+ ServiceMapping missing_name("foo", b.spec());
+ a.inject_fail_cnt = 2;
+ monitor->start(bad_spec, true);
+ monitor->start(failed_ping, true);
+ monitor->start(missing_name, true);
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return (hist.map[failed_ping].state() == State::UP); }));
+ EXPECT_EQ(hist.map[bad_spec].state(), State::DOWN);
+ EXPECT_EQ(hist.map[missing_name].state(), State::DOWN);
+ EXPECT_EQ(hist.map[failed_ping].samples(State::DOWN), 2);
+ EXPECT_EQ(hist.map[bad_spec].samples(State::UP), 0);
+ EXPECT_EQ(hist.map[missing_name].samples(State::UP), 0);
+}
+
+TEST_F(RpcMappingMonitorTest, loss_of_idle_connection_is_detected_and_recovered) {
+ monitor->start(foo_a, true);
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return (hist.map[foo_a].state() == State::UP); }));
+ ASSERT_TRUE(a.last_conn);
+ a.last_conn->Owner()->Close(a.last_conn);
+ a.set_last_conn(nullptr);
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return (hist.map[foo_a].state() == State::DOWN); }));
+ // down without new rpc check, will re-connect and come back up
+ EXPECT_FALSE(a.last_conn);
+ EXPECT_TRUE(debugger.step_until([&]() {
+ return (hist.map[foo_a].state() == State::UP); }));
+ EXPECT_EQ(hist.map[foo_a].samples(State::DOWN), 1);
+}
+
+TEST_F(RpcMappingMonitorTest, up_connection_is_reused) {
+ monitor->start(foo_a, true);
+ EXPECT_TRUE(debugger.step_until([&]() { return (a.last_conn); }));
+ auto my_conn = a.last_conn;
+ a.last_conn = nullptr;
+ EXPECT_TRUE(debugger.step_until([&]() { return (a.last_conn); }));
+ EXPECT_EQ(a.last_conn, my_conn);
+ my_conn->SubRef();
+ EXPECT_EQ(hist.map[foo_a].state(), State::UP);
+}
+
+TEST_F(RpcMappingMonitorTest, detect_ping_interval) {
+ monitor->start(foo_a, true);
+ EXPECT_TRUE(debugger.step_until([&]() { return (a.last_conn); }));
+ auto t1 = debugger.time();
+ a.set_last_conn(nullptr);
+ EXPECT_TRUE(debugger.step_until([&]() { return (a.last_conn); }));
+ auto t2 = debugger.time();
+ fprintf(stderr, "ping interval: ~%zu ms\n", count_ms(t2-t1));
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/slobrok/src/vespa/slobrok/server/mapping_monitor.h b/slobrok/src/vespa/slobrok/server/mapping_monitor.h
index 4ac89e7521a..1a83b32f87f 100644
--- a/slobrok/src/vespa/slobrok/server/mapping_monitor.h
+++ b/slobrok/src/vespa/slobrok/server/mapping_monitor.h
@@ -12,7 +12,7 @@ struct MappingMonitorOwner {
virtual void up(const ServiceMapping& mapping) = 0;
virtual void down(const ServiceMapping& mapping) = 0;
protected:
- ~MappingMonitorOwner() = default;
+ virtual ~MappingMonitorOwner() = default;
};
struct MappingMonitor {
diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt
index 67a7fed8d0b..5a5422156d1 100644
--- a/storage/src/tests/distributor/CMakeLists.txt
+++ b/storage/src/tests/distributor/CMakeLists.txt
@@ -15,15 +15,12 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST
distributor_stripe_pool_test.cpp
distributor_stripe_test.cpp
distributor_stripe_test_util.cpp
- distributortestutil.cpp
externaloperationhandlertest.cpp
garbagecollectiontest.cpp
getoperationtest.cpp
gtest_runner.cpp
idealstatemanagertest.cpp
joinbuckettest.cpp
- legacy_bucket_db_updater_test.cpp
- legacy_distributor_test.cpp
maintenanceschedulertest.cpp
mergelimitertest.cpp
mergeoperationtest.cpp
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.cpp b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
index 043f996a4a1..4b7e73d3e43 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.cpp
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
@@ -20,7 +20,6 @@ namespace storage::distributor {
DistributorStripeTestUtil::DistributorStripeTestUtil()
: _config(),
_node(),
- _threadPool(),
_stripe(),
_sender(),
_senderDown(),
@@ -37,18 +36,14 @@ void
DistributorStripeTestUtil::createLinks()
{
_node.reset(new TestDistributorApp(_config.getConfigId()));
- _threadPool = framework::TickingThreadPool::createDefault("distributor");
_metrics = std::make_shared<DistributorMetricSet>();
_ideal_state_metrics = std::make_shared<IdealStateMetricSet>();
_stripe = std::make_unique<DistributorStripe>(_node->getComponentRegister(),
*_metrics,
*_ideal_state_metrics,
_node->node_identity(),
- *_threadPool,
- *this,
_messageSender,
*this,
- false,
_done_initializing);
}
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.h b/storage/src/tests/distributor/distributor_stripe_test_util.h
index ccade98fd01..e73d1a3baa1 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.h
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.h
@@ -212,7 +212,6 @@ public:
protected:
vdstestlib::DirConfig _config;
std::unique_ptr<TestDistributorApp> _node;
- std::unique_ptr<framework::TickingThreadPool> _threadPool;
std::shared_ptr<DistributorMetricSet> _metrics;
std::shared_ptr<IdealStateMetricSet> _ideal_state_metrics;
std::unique_ptr<DistributorStripe> _stripe;
diff --git a/storage/src/tests/distributor/distributortestutil.cpp b/storage/src/tests/distributor/distributortestutil.cpp
deleted file mode 100644
index 1e24bd72c9b..00000000000
--- a/storage/src/tests/distributor/distributortestutil.cpp
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "distributortestutil.h"
-#include <vespa/config-stor-distribution.h>
-#include <vespa/document/test/make_bucket_space.h>
-#include <vespa/document/test/make_document_bucket.h>
-#include <vespa/storage/distributor/top_level_distributor.h>
-#include <vespa/storage/distributor/distributor_bucket_space.h>
-#include <vespa/storage/distributor/distributor_stripe.h>
-#include <vespa/storage/distributor/distributor_stripe_component.h>
-#include <vespa/storage/distributor/distributor_stripe_pool.h>
-#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vespalib/text/stringtokenizer.h>
-
-using document::test::makeBucketSpace;
-using document::test::makeDocumentBucket;
-
-namespace storage::distributor {
-
-DistributorTestUtil::DistributorTestUtil()
- : _messageSender(_sender, _senderDown),
- _num_distributor_stripes(0) // TODO STRIPE change default
-{
- _config = getStandardConfig(false);
-}
-DistributorTestUtil::~DistributorTestUtil() { }
-
-void
-DistributorTestUtil::createLinks()
-{
- _node.reset(new TestDistributorApp(_config.getConfigId()));
- _threadPool = framework::TickingThreadPool::createDefault("distributor");
- _stripe_pool = std::make_unique<DistributorStripePool>();
- _distributor.reset(new TopLevelDistributor(
- _node->getComponentRegister(),
- _node->node_identity(),
- *_threadPool,
- *_stripe_pool,
- *this,
- _num_distributor_stripes,
- _hostInfo,
- &_messageSender));
- _component.reset(new storage::DistributorComponent(_node->getComponentRegister(), "distrtestutil"));
-};
-
-void
-DistributorTestUtil::setupDistributor(int redundancy,
- int nodeCount,
- const std::string& systemState,
- uint32_t earlyReturn,
- bool requirePrimaryToBeWritten)
-{
- setup_distributor(redundancy, nodeCount, lib::ClusterStateBundle(lib::ClusterState(systemState)), earlyReturn, requirePrimaryToBeWritten);
-}
-
-void
-DistributorTestUtil::setup_distributor(int redundancy,
- int node_count,
- const lib::ClusterStateBundle& state,
- uint32_t early_return,
- bool require_primary_to_be_written)
-{
- lib::Distribution::DistributionConfigBuilder config(
- lib::Distribution::getDefaultDistributionConfig(redundancy, node_count).get());
- config.redundancy = redundancy;
- config.initialRedundancy = early_return;
- config.ensurePrimaryPersisted = require_primary_to_be_written;
- auto distribution = std::make_shared<lib::Distribution>(config);
- _node->getComponentRegister().setDistribution(distribution);
- enable_distributor_cluster_state(state);
- // This is for all intents and purposes a hack to avoid having the
- // distributor treat setting the distribution explicitly as a signal that
- // it should send RequestBucketInfo to all configured nodes.
- // If we called storage_distribution_changed followed by enableDistribution
- // explicitly (which is what happens in "real life"), that is what would
- // take place.
- // The inverse case of this can be explicitly accomplished by calling
- // triggerDistributionChange().
- // This isn't pretty, folks, but it avoids breaking the world for now,
- // as many tests have implicit assumptions about this being the behavior.
- _distributor->propagateDefaultDistribution(distribution);
-}
-
-void
-DistributorTestUtil::setRedundancy(uint32_t redundancy)
-{
- auto distribution = std::make_shared<lib::Distribution>(
- lib::Distribution::getDefaultDistributionConfig(
- redundancy, 100));
- // Same rationale for not triggering a full distribution change as
- // in setupDistributor()
- _node->getComponentRegister().setDistribution(distribution);
- _distributor->propagateDefaultDistribution(std::move(distribution));
-}
-
-void
-DistributorTestUtil::triggerDistributionChange(lib::Distribution::SP distr)
-{
- _node->getComponentRegister().setDistribution(std::move(distr));
- _distributor->storageDistributionChanged();
- _distributor->enableNextDistribution();
-}
-
-void
-DistributorTestUtil::receive_set_system_state_command(const vespalib::string& state_str)
-{
- auto state_cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState(state_str));
- _distributor->handleMessage(state_cmd); // TODO move semantics
-}
-
-void
-DistributorTestUtil::handle_top_level_message(const std::shared_ptr<api::StorageMessage>& msg)
-{
- _distributor->handleMessage(msg);
-}
-
-void
-DistributorTestUtil::setTypeRepo(const std::shared_ptr<const document::DocumentTypeRepo> &repo)
-{
- _node->getComponentRegister().setDocumentTypeRepo(repo);
-}
-
-void
-DistributorTestUtil::close()
-{
- _component.reset(0);
- if (_distributor.get()) {
- _distributor->onClose();
- }
- _sender.clear();
- _node.reset(0);
- _config = getStandardConfig(false);
-}
-
-namespace {
- std::string dumpVector(const std::vector<uint16_t>& vec) {
- std::ostringstream ost;
- for (uint32_t i = 0; i < vec.size(); ++i) {
- if (i != 0) {
- ost << ",";
- }
- ost << vec[i];
- }
- return ost.str();
- }
-}
-
-std::string
-DistributorTestUtil::getNodes(document::BucketId id)
-{
- BucketDatabase::Entry entry = getBucket(id);
-
- if (!entry.valid()) {
- return id.toString();
- } else {
- std::vector<uint16_t> nodes = entry->getNodes();
- std::sort(nodes.begin(), nodes.end());
-
- std::ostringstream ost;
- ost << id << ": " << dumpVector(nodes);
- return ost.str();
- }
-}
-
-std::string
-DistributorTestUtil::getIdealStr(document::BucketId id, const lib::ClusterState& state)
-{
- if (!getDistributorBucketSpace().owns_bucket_in_state(state, id)) {
- return id.toString();
- }
-
- std::vector<uint16_t> nodes;
- getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, id, nodes);
- std::sort(nodes.begin(), nodes.end());
- std::ostringstream ost;
- ost << id << ": " << dumpVector(nodes);
- return ost.str();
-}
-
-void
-DistributorTestUtil::addIdealNodes(const lib::ClusterState& state,
- const document::BucketId& id)
-{
- BucketDatabase::Entry entry = getBucket(id);
-
- if (!entry.valid()) {
- entry = BucketDatabase::Entry(id);
- }
-
- std::vector<uint16_t> res;
- assert(_component.get());
- getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, id, res);
-
- for (uint32_t i = 0; i < res.size(); ++i) {
- if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() !=
- lib::State::MAINTENANCE)
- {
- entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)),
- toVector<uint16_t>(0));
- }
- }
-
- getBucketDatabase().update(entry);
-}
-
-void DistributorTestUtil::addNodesToBucketDB(const document::Bucket& bucket, const std::string& nodeStr) {
- BucketDatabase::Entry entry = getBucket(bucket);
-
- if (!entry.valid()) {
- entry = BucketDatabase::Entry(bucket.getBucketId());
- }
-
- entry->clear();
-
- vespalib::StringTokenizer tokenizer(nodeStr, ",");
- for (uint32_t i = 0; i < tokenizer.size(); ++i) {
- vespalib::StringTokenizer tok2(tokenizer[i], "=");
- vespalib::StringTokenizer tok3(tok2[1], "/");
-
- api::BucketInfo info(atoi(tok3[0].data()),
- atoi(tok3.size() > 1 ? tok3[1].data() : tok3[0].data()),
- atoi(tok3.size() > 2 ? tok3[2].data() : tok3[0].data()));
-
- size_t flagsIdx = 3;
-
- // Meta info override? For simplicity, require both meta count and size
- if (tok3.size() > 4 && (!tok3[3].empty() && isdigit(tok3[3][0]))) {
- info.setMetaCount(atoi(tok3[3].data()));
- info.setUsedFileSize(atoi(tok3[4].data()));
- flagsIdx = 5;
- }
-
- if ((tok3.size() > flagsIdx + 1) && tok3[flagsIdx + 1] == "a") {
- info.setActive();
- } else {
- info.setActive(false);
- }
- if ((tok3.size() > flagsIdx + 2) && tok3[flagsIdx + 2] == "r") {
- info.setReady();
- } else {
- info.setReady(false);
- }
-
- uint16_t idx = atoi(tok2[0].data());
- BucketCopy node(
- 0,
- idx,
- info);
-
- // Allow user to manually override trusted and active.
- if (tok3.size() > flagsIdx && tok3[flagsIdx] == "t") {
- node.setTrusted();
- }
-
- entry->addNodeManual(node);
- }
-
- getBucketDatabase(bucket.getBucketSpace()).update(entry);
-}
-
-void
-DistributorTestUtil::addNodesToBucketDB(const document::BucketId& id,
- const std::string& nodeStr)
-{
- addNodesToBucketDB(document::Bucket(makeBucketSpace(), id), nodeStr);
-}
-
-void
-DistributorTestUtil::removeFromBucketDB(const document::BucketId& id)
-{
- getBucketDatabase().remove(id);
-}
-
-void
-DistributorTestUtil::addIdealNodes(const document::BucketId& id)
-{
- // TODO STRIPE roundabout way of getting state bundle..!
- addIdealNodes(*operation_context().cluster_state_bundle().getBaselineClusterState(), id);
-}
-
-void
-DistributorTestUtil::insertBucketInfo(document::BucketId id,
- uint16_t node,
- uint32_t checksum,
- uint32_t count,
- uint32_t size,
- bool trusted,
- bool active)
-{
- api::BucketInfo info(checksum, count, size);
- insertBucketInfo(id, node, info, trusted, active);
-}
-
-void
-DistributorTestUtil::insertBucketInfo(document::BucketId id,
- uint16_t node,
- const api::BucketInfo& info,
- bool trusted,
- bool active)
-{
- BucketDatabase::Entry entry = getBucketDatabase().get(id);
- if (!entry.valid()) {
- entry = BucketDatabase::Entry(id, BucketInfo());
- }
-
- api::BucketInfo info2(info);
- if (active) {
- info2.setActive();
- }
- BucketCopy copy(operation_context().generate_unique_timestamp(), node, info2);
-
- entry->addNode(copy.setTrusted(trusted), toVector<uint16_t>(0));
-
- getBucketDatabase().update(entry);
-}
-
-std::string
-DistributorTestUtil::dumpBucket(const document::BucketId& bid)
-{
- return getBucketDatabase().get(bid).toString();
-}
-
-void
-DistributorTestUtil::sendReply(Operation& op,
- int idx,
- api::ReturnCode::Result result)
-{
- if (idx == -1) {
- idx = _sender.commands().size() - 1;
- }
- assert(idx >= 0 && idx < static_cast<int>(_sender.commands().size()));
-
- std::shared_ptr<api::StorageCommand> cmd = _sender.command(idx);
- api::StorageReply::SP reply(cmd->makeReply().release());
- reply->setResult(result);
- op.receive(_sender, reply);
-}
-
-BucketDatabase::Entry DistributorTestUtil::getBucket(const document::Bucket& bucket) const {
- return getBucketDatabase(bucket.getBucketSpace()).get(bucket.getBucketId());
-}
-
-BucketDatabase::Entry
-DistributorTestUtil::getBucket(const document::BucketId& bId) const
-{
- return getBucketDatabase().get(bId);
-}
-
-void
-DistributorTestUtil::disableBucketActivationInConfig(bool disable)
-{
- vespa::config::content::core::StorDistributormanagerConfigBuilder config;
- config.disableBucketActivation = disable;
- getConfig().configure(config);
-}
-
-StripeBucketDBUpdater&
-DistributorTestUtil::getBucketDBUpdater() {
- return _distributor->bucket_db_updater();
-}
-IdealStateManager&
-DistributorTestUtil::getIdealStateManager() {
- return _distributor->ideal_state_manager();
-}
-ExternalOperationHandler&
-DistributorTestUtil::getExternalOperationHandler() {
- return _distributor->external_operation_handler();
-}
-
-const storage::distributor::DistributorNodeContext&
-DistributorTestUtil::node_context() const {
- return _distributor->distributor_component();
-}
-
-storage::distributor::DistributorStripeOperationContext&
-DistributorTestUtil::operation_context() {
- return _distributor->distributor_component();
-}
-
-const DocumentSelectionParser&
-DistributorTestUtil::doc_selection_parser() const {
- return _distributor->distributor_component();
-}
-
-bool
-DistributorTestUtil::tick() {
- framework::ThreadWaitInfo res(
- framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN);
- {
- framework::TickingLockGuard lock(
- _distributor->_threadPool.freezeCriticalTicks());
- res.merge(_distributor->doCriticalTick(0));
- }
- res.merge(_distributor->doNonCriticalTick(0));
- return !res.waitWanted();
-}
-
-DistributorConfiguration&
-DistributorTestUtil::getConfig() {
- // TODO STRIPE avoid const cast
- return const_cast<DistributorConfiguration&>(_distributor->getConfig());
-}
-
-DistributorBucketSpace &
-DistributorTestUtil::getDistributorBucketSpace()
-{
- return getBucketSpaceRepo().get(makeBucketSpace());
-}
-
-BucketDatabase&
-DistributorTestUtil::getBucketDatabase() {
- return getDistributorBucketSpace().getBucketDatabase();
-}
-
-BucketDatabase& DistributorTestUtil::getBucketDatabase(document::BucketSpace space) {
- return getBucketSpaceRepo().get(space).getBucketDatabase();
-}
-
-const BucketDatabase&
-DistributorTestUtil::getBucketDatabase() const {
- return getBucketSpaceRepo().get(makeBucketSpace()).getBucketDatabase();
-}
-
-const BucketDatabase& DistributorTestUtil::getBucketDatabase(document::BucketSpace space) const {
- return getBucketSpaceRepo().get(space).getBucketDatabase();
-}
-
-DistributorBucketSpaceRepo &
-DistributorTestUtil::getBucketSpaceRepo() {
- return _distributor->getBucketSpaceRepo();
-}
-
-const DistributorBucketSpaceRepo &
-DistributorTestUtil::getBucketSpaceRepo() const {
- return _distributor->getBucketSpaceRepo();
-}
-
-DistributorBucketSpaceRepo &
-DistributorTestUtil::getReadOnlyBucketSpaceRepo() {
- return _distributor->getReadOnlyBucketSpaceRepo();
-}
-
-const DistributorBucketSpaceRepo &
-DistributorTestUtil::getReadOnlyBucketSpaceRepo() const {
- return _distributor->getReadOnlyBucketSpaceRepo();
-}
-
-bool
-DistributorTestUtil::distributor_is_in_recovery_mode() const noexcept {
- return _distributor->isInRecoveryMode();
-}
-
-const lib::ClusterStateBundle&
-DistributorTestUtil::current_distributor_cluster_state_bundle() const noexcept {
- return getDistributor().getClusterStateBundle();
-}
-
-std::string
-DistributorTestUtil::active_ideal_state_operations() const {
- return _distributor->getActiveIdealStateOperations();
-}
-
-const PendingMessageTracker&
-DistributorTestUtil::pending_message_tracker() const noexcept {
- return _distributor->getPendingMessageTracker();
-}
-
-PendingMessageTracker&
-DistributorTestUtil::pending_message_tracker() noexcept {
- return _distributor->getPendingMessageTracker();
-}
-
-std::chrono::steady_clock::duration
-DistributorTestUtil::db_memory_sample_interval() const noexcept {
- return _distributor->db_memory_sample_interval();
-}
-
-const lib::Distribution&
-DistributorTestUtil::getDistribution() const {
- return getBucketSpaceRepo().get(makeBucketSpace()).getDistribution();
-}
-
-std::vector<document::BucketSpace>
-DistributorTestUtil::getBucketSpaces() const
-{
- std::vector<document::BucketSpace> res;
- for (const auto &repo : getBucketSpaceRepo()) {
- res.push_back(repo.first);
- }
- return res;
-}
-
-void
-DistributorTestUtil::enableDistributorClusterState(vespalib::stringref state)
-{
- getBucketDBUpdater().simulate_cluster_state_bundle_activation(
- lib::ClusterStateBundle(lib::ClusterState(state)));
-}
-
-void
-DistributorTestUtil::enable_distributor_cluster_state(const lib::ClusterStateBundle& state)
-{
- getBucketDBUpdater().simulate_cluster_state_bundle_activation(state);
-}
-
-void
-DistributorTestUtil::setSystemState(const lib::ClusterState& systemState) {
- _distributor->enableClusterStateBundle(lib::ClusterStateBundle(systemState));
-}
-
-}
diff --git a/storage/src/tests/distributor/distributortestutil.h b/storage/src/tests/distributor/distributortestutil.h
deleted file mode 100644
index 9457bfeba83..00000000000
--- a/storage/src/tests/distributor/distributortestutil.h
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-
-#include "distributor_message_sender_stub.h"
-#include <tests/common/dummystoragelink.h>
-#include <tests/common/testhelper.h>
-#include <tests/common/teststorageapp.h>
-#include <vespa/storage/common/hostreporter/hostinfo.h>
-#include <vespa/storage/frameworkimpl/component/distributorcomponentregisterimpl.h>
-#include <vespa/storage/storageutil/utils.h>
-#include <vespa/storageapi/message/state.h>
-#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
-
-namespace storage {
-
-namespace framework { struct TickingThreadPool; }
-
-namespace distributor {
-
-class TopLevelDistributor;
-class DistributorBucketSpace;
-class DistributorBucketSpaceRepo;
-class DistributorNodeContext;
-class DistributorStripe;
-class DistributorStripeComponent;
-class DistributorStripeOperationContext;
-class DistributorStripePool;
-class DocumentSelectionParser;
-class ExternalOperationHandler;
-class IdealStateManager;
-class Operation;
-class StripeBucketDBUpdater;
-
-// TODO STRIPE rename to DistributorStripeTestUtil?
-class DistributorTestUtil : private DoneInitializeHandler
-{
-public:
- DistributorTestUtil();
- ~DistributorTestUtil();
-
- /**
- * Sets up the storage link chain.
- */
- void createLinks();
- void setTypeRepo(const std::shared_ptr<const document::DocumentTypeRepo> &repo);
-
- void close();
-
- /**
- * Returns a string with the nodes currently stored in the bucket
- * database for the given bucket.
- */
- std::string getNodes(document::BucketId id);
-
- /**
- * Returns a string with the ideal state nodes for the given bucket.
- */
- std::string getIdealStr(document::BucketId id, const lib::ClusterState& state);
-
- /**
- * Adds the ideal nodes for the given bucket and the given cluster state
- * to the bucket database.
- */
- void addIdealNodes(const lib::ClusterState& state, const document::BucketId& id);
-
- /**
- * Adds all the ideal nodes for the given bucket to the bucket database.
- */
- void addIdealNodes(const document::BucketId& id);
-
- /**
- * Parses the given string to a set of node => bucket info data,
- * and inserts them as nodes in the given bucket.
- * Format:
- * "node1=checksum/docs/size,node2=checksum/docs/size"
- */
- void addNodesToBucketDB(const document::Bucket& bucket, const std::string& nodeStr);
- // As the above, but always inserts into default bucket space
- void addNodesToBucketDB(const document::BucketId& id, const std::string& nodeStr);
-
- /**
- * Removes the given bucket from the bucket database.
- */
- void removeFromBucketDB(const document::BucketId& id);
-
- /**
- * Inserts the given bucket information for the given bucket and node in
- * the bucket database.
- */
- void insertBucketInfo(document::BucketId id,
- uint16_t node,
- uint32_t checksum,
- uint32_t count,
- uint32_t size,
- bool trusted = false,
- bool active = false);
-
- /**
- * Inserts the given bucket information for the given bucket and node in
- * the bucket database.
- */
- void insertBucketInfo(document::BucketId id,
- uint16_t node,
- const api::BucketInfo& info,
- bool trusted = false,
- bool active = false);
-
- std::string dumpBucket(const document::BucketId& bucket);
-
- /**
- * Replies to message idx sent upwards with the given result code.
- * If idx = -1, replies to the last command received upwards.
- */
- void sendReply(Operation& op,
- int idx = -1,
- api::ReturnCode::Result result = api::ReturnCode::OK);
-
- StripeBucketDBUpdater& getBucketDBUpdater();
- IdealStateManager& getIdealStateManager();
- ExternalOperationHandler& getExternalOperationHandler();
- const storage::distributor::DistributorNodeContext& node_context() const;
- storage::distributor::DistributorStripeOperationContext& operation_context();
- const DocumentSelectionParser& doc_selection_parser() const;
-
- TopLevelDistributor& getDistributor() noexcept { return *_distributor; }
- const TopLevelDistributor& getDistributor() const noexcept { return *_distributor; }
-
- bool tick();
-
- DistributorConfiguration& getConfig();
-
- vdstestlib::DirConfig& getDirConfig() {
- return _config;
- }
-
- // TODO explicit notion of bucket spaces for tests
- DistributorBucketSpace &getDistributorBucketSpace();
- BucketDatabase& getBucketDatabase(); // Implicit default space only
- BucketDatabase& getBucketDatabase(document::BucketSpace space);
- const BucketDatabase& getBucketDatabase() const; // Implicit default space only
- const BucketDatabase& getBucketDatabase(document::BucketSpace space) const;
- DistributorBucketSpaceRepo &getBucketSpaceRepo();
- const DistributorBucketSpaceRepo &getBucketSpaceRepo() const;
- DistributorBucketSpaceRepo& getReadOnlyBucketSpaceRepo();
- const DistributorBucketSpaceRepo& getReadOnlyBucketSpaceRepo() const;
- [[nodiscard]] bool distributor_is_in_recovery_mode() const noexcept;
- [[nodiscard]] const lib::ClusterStateBundle& current_distributor_cluster_state_bundle() const noexcept;
- [[nodiscard]] std::string active_ideal_state_operations() const;
- [[nodiscard]] const PendingMessageTracker& pending_message_tracker() const noexcept;
- [[nodiscard]] PendingMessageTracker& pending_message_tracker() noexcept;
- [[nodiscard]] std::chrono::steady_clock::duration db_memory_sample_interval() const noexcept;
-
- const lib::Distribution& getDistribution() const;
- // "End to end" distribution change trigger, which will invoke the bucket
- // DB updater as expected based on the previous and new cluster state
- // and config.
- void triggerDistributionChange(std::shared_ptr<lib::Distribution> distr);
-
- framework::defaultimplementation::FakeClock& getClock() { return _node->getClock(); }
- DistributorComponentRegister& getComponentRegister() { return _node->getComponentRegister(); }
- DistributorComponentRegisterImpl& getComponentRegisterImpl() { return _node->getComponentRegister(); }
-
- StorageComponent& getComponent() {
- if (_component.get() == 0) {
- _component.reset(new storage::DistributorComponent(
- _node->getComponentRegister(), "distributor_test_utils"));
- }
- return *_component;
- }
-
- void setupDistributor(int redundancy,
- int nodeCount,
- const std::string& systemState,
- uint32_t earlyReturn = false,
- bool requirePrimaryToBeWritten = true);
-
- void setup_distributor(int redundancy,
- int node_count,
- const lib::ClusterStateBundle& state,
- uint32_t early_return = false,
- bool require_primary_to_be_written = true);
-
- void setRedundancy(uint32_t redundancy);
-
- void notifyDoneInitializing() override {}
-
- // Must implement this for storage server interface for now
- virtual api::Timestamp getUniqueTimestamp() {
- return _component->getUniqueTimestamp();
- }
-
- void disableBucketActivationInConfig(bool disable);
-
- BucketDatabase::Entry getBucket(const document::Bucket& bucket) const;
- // Gets bucket entry from default space only
- BucketDatabase::Entry getBucket(const document::BucketId& bId) const;
-
- std::vector<document::BucketSpace> getBucketSpaces() const;
-
- DistributorMessageSenderStub& sender() noexcept { return _sender; }
- const DistributorMessageSenderStub& sender() const noexcept { return _sender; }
-
- void setSystemState(const lib::ClusterState& systemState);
-
- // Invokes full cluster state transition pipeline rather than directly applying
- // the state and just pretending everything has been completed.
- void receive_set_system_state_command(const vespalib::string& state_str);
-
- void handle_top_level_message(const std::shared_ptr<api::StorageMessage>& msg);
-
- // Must be called prior to createLinks() to have any effect
- void set_num_distributor_stripes(uint32_t n_stripes) noexcept {
- _num_distributor_stripes = n_stripes;
- }
-protected:
- vdstestlib::DirConfig _config;
- std::unique_ptr<TestDistributorApp> _node;
- std::unique_ptr<framework::TickingThreadPool> _threadPool;
- std::unique_ptr<DistributorStripePool> _stripe_pool;
- std::unique_ptr<TopLevelDistributor> _distributor;
- std::unique_ptr<storage::DistributorComponent> _component;
- DistributorMessageSenderStub _sender;
- DistributorMessageSenderStub _senderDown;
- HostInfo _hostInfo;
-
- struct MessageSenderImpl : public ChainedMessageSender {
- DistributorMessageSenderStub& _sender;
- DistributorMessageSenderStub& _senderDown;
- MessageSenderImpl(DistributorMessageSenderStub& up, DistributorMessageSenderStub& down)
- : _sender(up), _senderDown(down) {}
-
- void sendUp(const std::shared_ptr<api::StorageMessage>& msg) override {
- _sender.send(msg);
- }
- void sendDown(const std::shared_ptr<api::StorageMessage>& msg) override {
- _senderDown.send(msg);
- }
- };
- MessageSenderImpl _messageSender;
- uint32_t _num_distributor_stripes;
-
- void enableDistributorClusterState(vespalib::stringref state);
- void enable_distributor_cluster_state(const lib::ClusterStateBundle& state);
-};
-
-}
-
-}
diff --git a/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
deleted file mode 100644
index 5fa3ae5840b..00000000000
--- a/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
+++ /dev/null
@@ -1,2893 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/storageapi/message/persistence.h>
-#include <vespa/storage/distributor/top_level_bucket_db_updater.h>
-#include <vespa/storage/distributor/bucket_space_distribution_context.h>
-#include <vespa/storage/distributor/distributormetricsset.h>
-#include <vespa/storage/distributor/pending_bucket_space_db_transition.h>
-#include <vespa/storage/distributor/outdated_nodes_map.h>
-#include <vespa/storage/storageutil/distributorstatecache.h>
-#include <tests/distributor/distributortestutil.h>
-#include <vespa/document/test/make_document_bucket.h>
-#include <vespa/document/test/make_bucket_space.h>
-#include <vespa/document/bucket/fixed_bucket_spaces.h>
-#include <vespa/storage/distributor/simpleclusterinformation.h>
-#include <vespa/storage/distributor/top_level_distributor.h>
-#include <vespa/storage/distributor/distributor_stripe.h>
-#include <vespa/storage/distributor/distributor_bucket_space.h>
-#include <vespa/vespalib/gtest/gtest.h>
-#include <vespa/vespalib/text/stringtokenizer.h>
-#include <vespa/vespalib/util/benchmark_timer.h>
-#include <sstream>
-#include <iomanip>
-
-using namespace storage::api;
-using namespace storage::lib;
-using document::test::makeDocumentBucket;
-using document::test::makeBucketSpace;
-using document::BucketSpace;
-using document::FixedBucketSpaces;
-using document::BucketId;
-using document::Bucket;
-
-using namespace ::testing;
-
-namespace storage::distributor {
-
-namespace {
-
-std::string
-getStringList(std::string s, uint32_t count)
-{
- std::ostringstream ost;
- for (uint32_t i = 0; i < count; ++i) {
- if (i > 0) {
- ost << ",";
- }
- ost << s;
- }
- return ost.str();
-}
-
-std::string
-getRequestBucketInfoStrings(uint32_t count)
-{
- return getStringList("Request bucket info", count);
-}
-
-}
-
-// TODO STRIPE: Remove this test when legacy mode is gone.
-class LegacyBucketDBUpdaterTest : public Test,
- public DistributorTestUtil
-{
-public:
- LegacyBucketDBUpdaterTest();
- ~LegacyBucketDBUpdaterTest() override;
-
- auto &defaultDistributorBucketSpace() { return getBucketSpaceRepo().get(makeBucketSpace()); }
-
- bool bucketExistsThatHasNode(int bucketCount, uint16_t node) const;
-
- ClusterInformation::CSP createClusterInfo(const std::string& clusterStateString) {
- lib::ClusterState baselineClusterState(clusterStateString);
- lib::ClusterStateBundle clusterStateBundle(baselineClusterState);
- ClusterInformation::CSP clusterInfo(
- new SimpleClusterInformation(
- getBucketDBUpdater().node_context().node_index(),
- clusterStateBundle,
- "ui"));
- for (auto* repo : {&mutable_repo(), &read_only_repo()}) {
- for (auto& space : *repo) {
- space.second->setClusterState(clusterStateBundle.getDerivedClusterState(space.first));
- }
- }
- return clusterInfo;
- }
-
- DistributorBucketSpaceRepo& mutable_repo() noexcept { return getBucketSpaceRepo(); }
- // Note: not calling this "immutable_repo" since it may actually be modified by the pending
- // cluster state component (just not by operations), so it would not have the expected semantics.
- DistributorBucketSpaceRepo& read_only_repo() noexcept { return getReadOnlyBucketSpaceRepo(); }
-
- BucketDatabase& mutable_default_db() noexcept {
- return mutable_repo().get(FixedBucketSpaces::default_space()).getBucketDatabase();
- }
- BucketDatabase& mutable_global_db() noexcept {
- return mutable_repo().get(FixedBucketSpaces::global_space()).getBucketDatabase();
- }
- BucketDatabase& read_only_default_db() noexcept {
- return read_only_repo().get(FixedBucketSpaces::default_space()).getBucketDatabase();
- }
- BucketDatabase& read_only_global_db() noexcept {
- return read_only_repo().get(FixedBucketSpaces::global_space()).getBucketDatabase();
- }
-
- static std::string getNodeList(std::vector<uint16_t> nodes, size_t count);
-
- std::string getNodeList(std::vector<uint16_t> nodes);
-
- std::vector<uint16_t>
- expandNodeVec(const std::vector<uint16_t> &nodes);
-
- std::vector<document::BucketSpace> _bucketSpaces;
-
- size_t messageCount(size_t messagesPerBucketSpace) const {
- return messagesPerBucketSpace * _bucketSpaces.size();
- }
-
- void trigger_completed_but_not_yet_activated_transition(
- vespalib::stringref initial_state, uint32_t initial_buckets, uint32_t initial_expected_msgs,
- vespalib::stringref pending_state, uint32_t pending_buckets, uint32_t pending_expected_msgs);
-
-public:
- using OutdatedNodesMap = dbtransition::OutdatedNodesMap;
- void SetUp() override {
- createLinks();
- _bucketSpaces = getBucketSpaces();
- // Disable deferred activation by default (at least for now) to avoid breaking the entire world.
- getBucketDBUpdater().set_stale_reads_enabled(false);
- };
-
- void TearDown() override {
- close();
- }
-
- std::shared_ptr<RequestBucketInfoReply> getFakeBucketReply(
- const lib::ClusterState& state,
- const RequestBucketInfoCommand& cmd,
- int storageIndex,
- uint32_t bucketCount,
- uint32_t invalidBucketCount = 0)
- {
- auto sreply = std::make_shared<RequestBucketInfoReply>(cmd);
- sreply->setAddress(storageAddress(storageIndex));
-
- api::RequestBucketInfoReply::EntryVector &vec = sreply->getBucketInfo();
-
- for (uint32_t i=0; i<bucketCount + invalidBucketCount; i++) {
- if (!getDistributorBucketSpace().owns_bucket_in_state(state, document::BucketId(16, i))) {
- continue;
- }
-
- std::vector<uint16_t> nodes;
- defaultDistributorBucketSpace().getDistribution().getIdealNodes(
- lib::NodeType::STORAGE,
- state,
- document::BucketId(16, i),
- nodes);
-
- for (uint32_t j=0; j<nodes.size(); j++) {
- if (nodes[j] == storageIndex) {
- if (i >= bucketCount) {
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo()));
- } else {
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo(10,1,1)));
- }
- }
- }
- }
-
- return sreply;
- }
-
- void fakeBucketReply(const lib::ClusterState &state,
- const api::StorageCommand &cmd,
- uint32_t bucketCount,
- uint32_t invalidBucketCount = 0)
- {
- ASSERT_EQ(cmd.getType(), MessageType::REQUESTBUCKETINFO);
- const api::StorageMessageAddress &address(*cmd.getAddress());
- getBucketDBUpdater().onRequestBucketInfoReply(
- getFakeBucketReply(state,
- dynamic_cast<const RequestBucketInfoCommand &>(cmd),
- address.getIndex(),
- bucketCount,
- invalidBucketCount));
- }
-
- void sendFakeReplyForSingleBucketRequest(
- const api::RequestBucketInfoCommand& rbi)
- {
- ASSERT_EQ(size_t(1), rbi.getBuckets().size());
- const document::BucketId& bucket(rbi.getBuckets()[0]);
-
- std::shared_ptr<api::RequestBucketInfoReply> reply(
- new api::RequestBucketInfoReply(rbi));
- reply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(bucket,
- api::BucketInfo(20, 10, 12, 50, 60, true, true)));
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
- }
-
- std::string verifyBucket(document::BucketId id, const lib::ClusterState& state) {
- BucketDatabase::Entry entry = getBucketDatabase().get(id);
- if (!entry.valid()) {
- return vespalib::make_string("%s doesn't exist in DB",
- id.toString().c_str());
- }
-
- std::vector<uint16_t> nodes;
- defaultDistributorBucketSpace().getDistribution().getIdealNodes(
- lib::NodeType::STORAGE,
- state,
- document::BucketId(id),
- nodes);
-
- if (nodes.size() != entry->getNodeCount()) {
- return vespalib::make_string("Bucket Id %s has %d nodes in "
- "ideal state, but has only %d in DB",
- id.toString().c_str(),
- (int)nodes.size(),
- (int)entry->getNodeCount());
- }
-
- for (uint32_t i = 0; i<nodes.size(); i++) {
- bool found = false;
-
- for (uint32_t j = 0; j<entry->getNodeCount(); j++) {
- if (nodes[i] == entry->getNodeRef(j).getNode()) {
- found = true;
- }
- }
-
- if (!found) {
- return vespalib::make_string(
- "Bucket Id %s has no copy from node %d",
- id.toString().c_str(),
- nodes[i]);
- }
- }
-
- return "";
- }
-
-
- void verifyInvalid(document::BucketId id, int storageNode) {
- BucketDatabase::Entry entry = getBucketDatabase().get(id);
-
- ASSERT_TRUE(entry.valid());
-
- bool found = false;
- for (uint32_t j = 0; j<entry->getNodeCount(); j++) {
- if (entry->getNodeRef(j).getNode() == storageNode) {
- ASSERT_FALSE(entry->getNodeRef(j).valid());
- found = true;
- }
- }
-
- ASSERT_TRUE(found);
- }
-
- struct OrderByIncreasingNodeIndex {
- template <typename T>
- bool operator()(const T& lhs, const T& rhs) {
- return (lhs->getAddress()->getIndex()
- < rhs->getAddress()->getIndex());
- }
- };
-
- void sortSentMessagesByIndex(DistributorMessageSenderStub& sender,
- size_t sortFromOffset = 0)
- {
- std::sort(sender.commands().begin() + sortFromOffset,
- sender.commands().end(),
- OrderByIncreasingNodeIndex());
- }
-
- void setSystemState(const lib::ClusterState& state) {
- const size_t sizeBeforeState = _sender.commands().size();
- getBucketDBUpdater().onSetSystemState(
- std::make_shared<api::SetSystemStateCommand>(state));
- // A lot of test logic has the assumption that all messages sent as a
- // result of cluster state changes will be in increasing index order
- // (for simplicity, not because this is required for correctness).
- // Only sort the messages that arrived as a result of the state, don't
- // jumble the sorting with any existing messages.
- sortSentMessagesByIndex(_sender, sizeBeforeState);
- }
-
- void set_cluster_state_bundle(const lib::ClusterStateBundle& state) {
- const size_t sizeBeforeState = _sender.commands().size();
- getBucketDBUpdater().onSetSystemState(
- std::make_shared<api::SetSystemStateCommand>(state));
- sortSentMessagesByIndex(_sender, sizeBeforeState);
- }
-
- bool activate_cluster_state_version(uint32_t version) {
- return getBucketDBUpdater().onActivateClusterStateVersion(
- std::make_shared<api::ActivateClusterStateVersionCommand>(version));
- }
-
- void assert_has_activate_cluster_state_reply_with_actual_version(uint32_t version) {
- ASSERT_EQ(size_t(1), _sender.replies().size());
- auto* response = dynamic_cast<api::ActivateClusterStateVersionReply*>(_sender.replies().back().get());
- ASSERT_TRUE(response != nullptr);
- ASSERT_EQ(version, response->actualVersion());
- _sender.clear();
- }
-
- void completeBucketInfoGathering(const lib::ClusterState& state,
- size_t expectedMsgs,
- uint32_t bucketCount = 1,
- uint32_t invalidBucketCount = 0)
- {
- ASSERT_EQ(expectedMsgs, _sender.commands().size());
-
- for (uint32_t i = 0; i < _sender.commands().size(); i++) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(state, *_sender.command(i),
- bucketCount, invalidBucketCount));
- }
- }
-
- void setAndEnableClusterState(const lib::ClusterState& state,
- uint32_t expectedMsgs,
- uint32_t nBuckets)
- {
- _sender.clear();
- setSystemState(state);
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(state, expectedMsgs, nBuckets));
- }
-
- void completeStateTransitionInSeconds(const std::string& stateStr,
- uint32_t seconds,
- uint32_t expectedMsgs)
- {
- _sender.clear();
- lib::ClusterState state(stateStr);
- setSystemState(state);
- getClock().addSecondsToTime(seconds);
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(state, expectedMsgs));
- }
-
- uint64_t lastTransitionTimeInMillis() {
- return uint64_t(getDistributor().getMetrics().stateTransitionTime.getLast());
- }
-
- void setStorageNodes(uint32_t numStorageNodes) {
- _sender.clear();
-
- lib::ClusterState newState(
- vespalib::make_string("distributor:1 storage:%d", numStorageNodes));
-
- setSystemState(newState);
-
- for (uint32_t i=0; i< messageCount(numStorageNodes); i++) {
- ASSERT_EQ(_sender.command(i)->getType(), MessageType::REQUESTBUCKETINFO);
-
- const api::StorageMessageAddress *address = _sender.command(i)->getAddress();
- ASSERT_EQ((uint32_t)(i / _bucketSpaces.size()), (uint32_t)address->getIndex());
- }
- }
-
- void initializeNodesAndBuckets(uint32_t numStorageNodes,
- uint32_t numBuckets)
- {
- ASSERT_NO_FATAL_FAILURE(setStorageNodes(numStorageNodes));
-
- vespalib::string state(vespalib::make_string(
- "distributor:1 storage:%d", numStorageNodes));
- lib::ClusterState newState(state);
-
- for (uint32_t i=0; i< messageCount(numStorageNodes); i++) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(newState, *_sender.command(i), numBuckets));
- }
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, state));
- }
-
- bool bucketHasNode(document::BucketId id, uint16_t node) const {
- BucketDatabase::Entry entry = getBucket(id);
- assert(entry.valid());
-
- for (uint32_t j=0; j<entry->getNodeCount(); j++) {
- if (entry->getNodeRef(j).getNode() == node) {
- return true;
- }
- }
-
- return false;
- }
-
- api::StorageMessageAddress storageAddress(uint16_t node) {
- static vespalib::string _storage("storage");
- return api::StorageMessageAddress(&_storage, lib::NodeType::STORAGE, node);
- }
-
- std::string getSentNodes(const std::string& oldClusterState,
- const std::string& newClusterState);
-
- std::string getSentNodesDistributionChanged(
- const std::string& oldClusterState);
-
- std::vector<uint16_t> getSentNodesWithPreemption(
- const std::string& oldClusterState,
- uint32_t expectedOldStateMessages,
- const std::string& preemptedClusterState,
- const std::string& newClusterState);
-
- std::vector<uint16_t> getSendSet() const;
-
- std::string mergeBucketLists(
- const lib::ClusterState& oldState,
- const std::string& existingData,
- const lib::ClusterState& newState,
- const std::string& newData,
- bool includeBucketInfo = false);
-
- std::string mergeBucketLists(
- const std::string& existingData,
- const std::string& newData,
- bool includeBucketInfo = false);
-
- void assertCorrectBuckets(int numBuckets, const std::string& stateStr) {
- lib::ClusterState state(stateStr);
- for (int i=0; i<numBuckets; i++) {
- ASSERT_EQ(getIdealStr(document::BucketId(16, i), state),
- getNodes(document::BucketId(16, i)));
- }
- }
-
- void setDistribution(const std::string& distConfig) {
- triggerDistributionChange(
- std::make_shared<lib::Distribution>(distConfig));
- }
-
- std::string getDistConfig6Nodes2Groups() const {
- return ("redundancy 2\n"
- "group[3]\n"
- "group[0].name \"invalid\"\n"
- "group[0].index \"invalid\"\n"
- "group[0].partitions 1|*\n"
- "group[0].nodes[0]\n"
- "group[1].name rack0\n"
- "group[1].index 0\n"
- "group[1].nodes[3]\n"
- "group[1].nodes[0].index 0\n"
- "group[1].nodes[1].index 1\n"
- "group[1].nodes[2].index 2\n"
- "group[2].name rack1\n"
- "group[2].index 1\n"
- "group[2].nodes[3]\n"
- "group[2].nodes[0].index 3\n"
- "group[2].nodes[1].index 4\n"
- "group[2].nodes[2].index 5\n");
- }
-
- std::string getDistConfig6Nodes4Groups() const {
- return ("redundancy 2\n"
- "group[4]\n"
- "group[0].name \"invalid\"\n"
- "group[0].index \"invalid\"\n"
- "group[0].partitions 1|*\n"
- "group[0].nodes[0]\n"
- "group[1].name rack0\n"
- "group[1].index 0\n"
- "group[1].nodes[2]\n"
- "group[1].nodes[0].index 0\n"
- "group[1].nodes[1].index 1\n"
- "group[2].name rack1\n"
- "group[2].index 1\n"
- "group[2].nodes[2]\n"
- "group[2].nodes[0].index 2\n"
- "group[2].nodes[1].index 3\n"
- "group[3].name rack2\n"
- "group[3].index 2\n"
- "group[3].nodes[2]\n"
- "group[3].nodes[0].index 4\n"
- "group[3].nodes[1].index 5\n");
- }
-
- std::string getDistConfig3Nodes1Group() const {
- return ("redundancy 2\n"
- "group[2]\n"
- "group[0].name \"invalid\"\n"
- "group[0].index \"invalid\"\n"
- "group[0].partitions 1|*\n"
- "group[0].nodes[0]\n"
- "group[1].name rack0\n"
- "group[1].index 0\n"
- "group[1].nodes[3]\n"
- "group[1].nodes[0].index 0\n"
- "group[1].nodes[1].index 1\n"
- "group[1].nodes[2].index 2\n");
- }
-
- struct PendingClusterStateFixture {
- DistributorMessageSenderStub sender;
- framework::defaultimplementation::FakeClock clock;
- std::unique_ptr<PendingClusterState> state;
-
- PendingClusterStateFixture(
- LegacyBucketDBUpdaterTest& owner,
- const std::string& oldClusterState,
- const std::string& newClusterState)
- {
- std::shared_ptr<api::SetSystemStateCommand> cmd(
- new api::SetSystemStateCommand(
- lib::ClusterState(newClusterState)));
-
- ClusterInformation::CSP clusterInfo(
- owner.createClusterInfo(oldClusterState));
-
- OutdatedNodesMap outdatedNodesMap;
- state = PendingClusterState::createForClusterStateChange(
- clock, clusterInfo, sender,
- owner.getBucketSpaceRepo(),
- cmd, outdatedNodesMap, api::Timestamp(1));
- }
-
- PendingClusterStateFixture(
- LegacyBucketDBUpdaterTest& owner,
- const std::string& oldClusterState)
- {
- ClusterInformation::CSP clusterInfo(
- owner.createClusterInfo(oldClusterState));
-
- state = PendingClusterState::createForDistributionChange(
- clock, clusterInfo, sender, owner.getBucketSpaceRepo(), api::Timestamp(1));
- }
- };
-
- std::unique_ptr<PendingClusterStateFixture> createPendingStateFixtureForStateChange(
- const std::string& oldClusterState,
- const std::string& newClusterState)
- {
- return std::make_unique<PendingClusterStateFixture>(*this, oldClusterState, newClusterState);
- }
-
- std::unique_ptr<PendingClusterStateFixture> createPendingStateFixtureForDistributionChange(
- const std::string& oldClusterState)
- {
- return std::make_unique<PendingClusterStateFixture>(*this, oldClusterState);
- }
-
- uint32_t populate_bucket_db_via_request_bucket_info_for_benchmarking();
-
- void complete_recovery_mode() {
- _distributor->scanAllBuckets();
- }
-};
-
-LegacyBucketDBUpdaterTest::LegacyBucketDBUpdaterTest()
- : DistributorTestUtil(),
- _bucketSpaces()
-{
-}
-
-LegacyBucketDBUpdaterTest::~LegacyBucketDBUpdaterTest() = default;
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, normal_usage) {
- setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
-
- ASSERT_EQ(messageCount(3), _sender.commands().size());
-
- // Ensure distribution hash is set correctly
- ASSERT_EQ(
- defaultDistributorBucketSpace().getDistribution()
- .getNodeGraph().getDistributionConfigHash(),
- dynamic_cast<const RequestBucketInfoCommand&>(
- *_sender.command(0)).getDistributionHash());
-
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
- *_sender.command(0), 10));
-
- _sender.clear();
-
- // Optimization for not refetching unneeded data after cluster state
- // change is only implemented after completion of previous cluster state
- setSystemState(lib::ClusterState("distributor:2 .0.s:i storage:3"));
-
- ASSERT_EQ(messageCount(3), _sender.commands().size());
- // Expect reply of first set SystemState request.
- ASSERT_EQ(size_t(1), _sender.replies().size());
-
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(
- lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
- messageCount(3), 10));
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(10, "distributor:2 storage:3"));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, distributor_change) {
- int numBuckets = 100;
-
- // First sends request
- setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
- ASSERT_EQ(messageCount(3), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
- messageCount(3), numBuckets));
- _sender.clear();
-
- // No change from initializing to up (when done with last job)
- setSystemState(lib::ClusterState("distributor:2 storage:3"));
- ASSERT_EQ(size_t(0), _sender.commands().size());
- _sender.clear();
-
- // Adding node. No new read requests, but buckets thrown
- setSystemState(lib::ClusterState("distributor:3 storage:3"));
- ASSERT_EQ(size_t(0), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:3 storage:3"));
- _sender.clear();
-
- // Removing distributor. Need to refetch new data from all nodes.
- setSystemState(lib::ClusterState("distributor:2 storage:3"));
- ASSERT_EQ(messageCount(3), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:2 storage:3"),
- messageCount(3), numBuckets));
- _sender.clear();
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:2 storage:3"));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, distributor_change_with_grouping) {
- std::string distConfig(getDistConfig6Nodes2Groups());
- setDistribution(distConfig);
- int numBuckets = 100;
-
- setSystemState(lib::ClusterState("distributor:6 storage:6"));
- ASSERT_EQ(messageCount(6), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:6 storage:6"),
- messageCount(6), numBuckets));
- _sender.clear();
-
- // Distributor going down in other group, no change
- setSystemState(lib::ClusterState("distributor:6 .5.s:d storage:6"));
- ASSERT_EQ(size_t(0), _sender.commands().size());
- _sender.clear();
-
- setSystemState(lib::ClusterState("distributor:6 storage:6"));
- ASSERT_EQ(size_t(0), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:6 storage:6"));
- _sender.clear();
-
- // Unchanged grouping cause no change.
- setDistribution(distConfig);
- ASSERT_EQ(size_t(0), _sender.commands().size());
-
- // Changed grouping cause change
- setDistribution(getDistConfig6Nodes4Groups());
-
- ASSERT_EQ(messageCount(6), _sender.commands().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, normal_usage_initializing) {
- setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1 .0.s:i"));
-
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
-
- // Not yet passing on system state.
- ASSERT_EQ(size_t(0), _senderDown.commands().size());
-
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:1 .0.s:i storage:1"),
- _bucketSpaces.size(), 10, 10));
-
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(10, "distributor:1 storage:1"));
-
- for (int i=10; i<20; i++) {
- ASSERT_NO_FATAL_FAILURE(verifyInvalid(document::BucketId(16, i), 0));
- }
-
- // Pass on cluster state and recheck buckets now.
- ASSERT_EQ(size_t(1), _senderDown.commands().size());
-
- _sender.clear();
- _senderDown.clear();
-
- setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1"));
-
- // Send a new request bucket info up.
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
-
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:1 .0.s:i storage:1"),
- _bucketSpaces.size(), 20));
-
- // Pass on cluster state and recheck buckets now.
- ASSERT_EQ(size_t(1), _senderDown.commands().size());
-
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(20, "distributor:1 storage:1"));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, failed_request_bucket_info) {
- setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1"));
-
- // 2 messages sent up: 1 to the nodes, and one reply to the setsystemstate.
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
-
- {
- for (uint32_t i = 0; i < _bucketSpaces.size(); ++i) {
- std::shared_ptr<api::RequestBucketInfoReply> reply =
- getFakeBucketReply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
- *((RequestBucketInfoCommand*)_sender.command(i).get()),
- 0,
- 10);
- reply->setResult(api::ReturnCode::NOT_CONNECTED);
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
- }
-
- // Trigger that delayed message is sent
- getClock().addSecondsToTime(10);
- getBucketDBUpdater().resendDelayedMessages();
- }
-
- // Should be resent.
- ASSERT_EQ(getRequestBucketInfoStrings(messageCount(2)), _sender.getCommands());
-
- ASSERT_EQ(size_t(0), _senderDown.commands().size());
-
- for (uint32_t i = 0; i < _bucketSpaces.size(); ++i) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
- *_sender.command(_bucketSpaces.size() + i), 10));
- }
-
- for (int i=0; i<10; i++) {
- EXPECT_EQ(std::string(""),
- verifyBucket(document::BucketId(16, i),
- lib::ClusterState("distributor:1 storage:1")));
- }
-
- // Set system state should now be passed on
- EXPECT_EQ(std::string("Set system state"), _senderDown.getCommands());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, down_while_init) {
- ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
-
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
- *_sender.command(0), 5));
-
- setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:d"));
-
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
- *_sender.command(2), 5));
-
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
- *_sender.command(1), 5));
-}
-
-bool
-LegacyBucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) const
-{
- for (int i=1; i<bucketCount; i++) {
- if (bucketHasNode(document::BucketId(16, i), node)) {
- return true;
- }
- }
-
- return false;
-}
-
-std::string
-LegacyBucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count)
-{
- std::ostringstream ost;
- bool first = true;
- for (const auto &node : nodes) {
- for (uint32_t i = 0; i < count; ++i) {
- if (!first) {
- ost << ",";
- }
- ost << node;
- first = false;
- }
- }
- return ost.str();
-}
-
-std::string
-LegacyBucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes)
-{
- return getNodeList(std::move(nodes), _bucketSpaces.size());
-}
-
-std::vector<uint16_t>
-LegacyBucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes)
-{
- std::vector<uint16_t> res;
- size_t count = _bucketSpaces.size();
- for (const auto &node : nodes) {
- for (uint32_t i = 0; i < count; ++i) {
- res.push_back(node);
- }
- }
- return res;
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, node_down) {
- ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
- enableDistributorClusterState("distributor:1 storage:3");
-
- for (int i=1; i<100; i++) {
- addIdealNodes(document::BucketId(16, i));
- }
-
- EXPECT_TRUE(bucketExistsThatHasNode(100, 1));
-
- setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:d"));
-
- EXPECT_FALSE(bucketExistsThatHasNode(100, 1));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) {
- ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
- enableDistributorClusterState("distributor:1 storage:3");
-
- for (int i=1; i<100; i++) {
- addIdealNodes(document::BucketId(16, i));
- }
-
- EXPECT_TRUE(bucketExistsThatHasNode(100, 1));
-
- setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:m"));
-
- EXPECT_FALSE(bucketExistsThatHasNode(100, 1));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, node_down_copies_get_in_sync) {
- ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
-
- lib::ClusterState systemState("distributor:1 storage:3");
- document::BucketId bid(16, 1);
-
- addNodesToBucketDB(bid, "0=3,1=2,2=3");
-
- setSystemState(lib::ClusterState("distributor:1 storage:3 .1.s:d"));
-
- EXPECT_EQ(
- std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false,ready=false), "
- "node(idx=2,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false,ready=false)"),
- dumpBucket(bid));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, initializing_while_recheck) {
- lib::ClusterState systemState("distributor:1 storage:2 .0.s:i .0.i:0.1");
- setSystemState(systemState);
-
- ASSERT_EQ(messageCount(2), _sender.commands().size());
- ASSERT_EQ(size_t(0), _senderDown.commands().size());
-
- getBucketDBUpdater().recheckBucketInfo(1, makeDocumentBucket(document::BucketId(16, 3)));
-
- for (uint32_t i = 0; i < messageCount(2); ++i) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(systemState, *_sender.command(i), 100));
- }
-
- // Now we can pass on system state.
- ASSERT_EQ(size_t(1), _senderDown.commands().size());
- EXPECT_EQ(MessageType::SETSYSTEMSTATE, _senderDown.command(0)->getType());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, bit_change) {
- std::vector<document::BucketId> bucketlist;
-
- {
- setSystemState(lib::ClusterState("bits:14 storage:1 distributor:2"));
-
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
-
- for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
- ASSERT_EQ(_sender.command(bsi)->getType(), MessageType::REQUESTBUCKETINFO);
- const auto &req = dynamic_cast<const RequestBucketInfoCommand &>(*_sender.command(bsi));
- auto sreply = std::make_shared<RequestBucketInfoReply>(req);
- sreply->setAddress(storageAddress(0));
- auto& vec = sreply->getBucketInfo();
- if (req.getBucketSpace() == FixedBucketSpaces::default_space()) {
- int cnt=0;
- for (int i=0; cnt < 2; i++) {
- lib::Distribution distribution = defaultDistributorBucketSpace().getDistribution();
- std::vector<uint16_t> distributors;
- if (distribution.getIdealDistributorNode(
- lib::ClusterState("bits:14 storage:1 distributor:2"),
- document::BucketId(16, i))
- == 0)
- {
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo(10,1,1)));
-
- bucketlist.push_back(document::BucketId(16, i));
- cnt++;
- }
- }
- }
-
- getBucketDBUpdater().onRequestBucketInfoReply(sreply);
- }
- }
-
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
- dumpBucket(bucketlist[0]));
- EXPECT_EQ(std::string("BucketId(0x4000000000000002) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
- dumpBucket(bucketlist[1]));
-
- {
- _sender.clear();
- setSystemState(lib::ClusterState("bits:16 storage:1 distributor:2"));
-
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
- for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
-
- ASSERT_EQ(_sender.command(bsi)->getType(), MessageType::REQUESTBUCKETINFO);
- const auto &req = dynamic_cast<const RequestBucketInfoCommand &>(*_sender.command(bsi));
- auto sreply = std::make_shared<RequestBucketInfoReply>(req);
- sreply->setAddress(storageAddress(0));
- sreply->setResult(api::ReturnCode::OK);
- if (req.getBucketSpace() == FixedBucketSpaces::default_space()) {
- api::RequestBucketInfoReply::EntryVector &vec = sreply->getBucketInfo();
-
- for (uint32_t i = 0; i < 3; ++i) {
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo(10,1,1)));
- }
-
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, 4),
- api::BucketInfo(10,1,1)));
- }
-
- getBucketDBUpdater().onRequestBucketInfoReply(sreply);
- }
- }
-
- EXPECT_EQ(std::string("BucketId(0x4000000000000000) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 0)));
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1)));
- EXPECT_EQ(std::string("BucketId(0x4000000000000002) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 2)));
- EXPECT_EQ(std::string("BucketId(0x4000000000000004) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 4)));
-
- {
- _sender.clear();
- setSystemState(lib::ClusterState("storage:1 distributor:2 .1.s:i"));
- }
-
- {
- _sender.clear();
- setSystemState(lib::ClusterState("storage:1 distributor:2"));
- }
-};
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, recheck_node_with_failure) {
- ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5));
-
- _sender.clear();
-
- getBucketDBUpdater().recheckBucketInfo(1, makeDocumentBucket(document::BucketId(16, 3)));
-
- ASSERT_EQ(size_t(1), _sender.commands().size());
-
- uint16_t index = 0;
- {
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
- ASSERT_EQ(size_t(1), rbi.getBuckets().size());
- EXPECT_EQ(document::BucketId(16, 3), rbi.getBuckets()[0]);
- auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
- const api::StorageMessageAddress *address = _sender.command(0)->getAddress();
- index = address->getIndex();
- reply->setResult(api::ReturnCode::NOT_CONNECTED);
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
- // Trigger that delayed message is sent
- getClock().addSecondsToTime(10);
- getBucketDBUpdater().resendDelayedMessages();
- }
-
- ASSERT_EQ(size_t(2), _sender.commands().size());
-
- setSystemState(
- lib::ClusterState(vespalib::make_string("distributor:1 storage:3 .%d.s:d", index)));
-
- // Recheck bucket.
- {
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(1));
- ASSERT_EQ(size_t(1), rbi.getBuckets().size());
- EXPECT_EQ(document::BucketId(16, 3), rbi.getBuckets()[0]);
- auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
- reply->setResult(api::ReturnCode::NOT_CONNECTED);
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
- }
-
- // Should not retry since node is down.
- EXPECT_EQ(size_t(2), _sender.commands().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, recheck_node) {
- ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5));
-
- _sender.clear();
-
- getBucketDBUpdater().recheckBucketInfo(1, makeDocumentBucket(document::BucketId(16, 3)));
-
- ASSERT_EQ(size_t(1), _sender.commands().size());
-
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
- ASSERT_EQ(size_t(1), rbi.getBuckets().size());
- EXPECT_EQ(document::BucketId(16, 3), rbi.getBuckets()[0]);
-
- auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
- reply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(document::BucketId(16, 3),
- api::BucketInfo(20, 10, 12, 50, 60, true, true)));
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
-
- lib::ClusterState state("distributor:1 storage:3");
- for (uint32_t i = 0; i < 3; i++) {
- EXPECT_EQ(getIdealStr(document::BucketId(16, i), state),
- getNodes(document::BucketId(16, i)));
- }
-
- for (uint32_t i = 4; i < 5; i++) {
- EXPECT_EQ(getIdealStr(document::BucketId(16, i), state),
- getNodes(document::BucketId(16, i)));
- }
-
- BucketDatabase::Entry entry = getBucketDatabase().get(document::BucketId(16, 3));
- ASSERT_TRUE(entry.valid());
-
- const BucketCopy* copy = entry->getNode(1);
- ASSERT_TRUE(copy != nullptr);
- EXPECT_EQ(api::BucketInfo(20,10,12, 50, 60, true, true), copy->getBucketInfo());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change) {
- enableDistributorClusterState("distributor:1 storage:1");
-
- addNodesToBucketDB(document::BucketId(16, 1), "0=1234");
- _sender.replies().clear();
-
- {
- api::BucketInfo info(1, 2, 3, 4, 5, true, true);
- auto cmd(std::make_shared<api::NotifyBucketChangeCommand>(
- makeDocumentBucket(document::BucketId(16, 1)), info));
- cmd->setSourceIndex(0);
- getBucketDBUpdater().onNotifyBucketChange(cmd);
- }
-
- {
- api::BucketInfo info(10, 11, 12, 13, 14, false, false);
- auto cmd(std::make_shared<api::NotifyBucketChangeCommand>(
- makeDocumentBucket(document::BucketId(16, 2)), info));
- cmd->setSourceIndex(0);
- getBucketDBUpdater().onNotifyBucketChange(cmd);
- }
-
- // Must receive reply
- ASSERT_EQ(size_t(2), _sender.replies().size());
-
- for (int i = 0; i < 2; ++i) {
- ASSERT_EQ(MessageType::NOTIFYBUCKETCHANGE_REPLY,
- _sender.reply(i)->getType());
- }
-
- // No database update until request bucket info replies have been received.
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x4d2,docs=1234/1234,bytes=1234/1234,"
- "trusted=false,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1)));
- EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(document::BucketId(16, 2)));
-
- ASSERT_EQ(size_t(2), _sender.commands().size());
-
- std::vector<api::BucketInfo> infos;
- infos.push_back(api::BucketInfo(4567, 200, 2000, 400, 4000, true, true));
- infos.push_back(api::BucketInfo(8999, 300, 3000, 500, 5000, false, false));
-
- for (int i = 0; i < 2; ++i) {
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(i));
- ASSERT_EQ(size_t(1), rbi.getBuckets().size());
- EXPECT_EQ(document::BucketId(16, i + 1), rbi.getBuckets()[0]);
-
- auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
- reply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(document::BucketId(16, i + 1),
- infos[i]));
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
- }
-
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x11d7,docs=200/400,bytes=2000/4000,trusted=true,active=true,ready=true)"),
- dumpBucket(document::BucketId(16, 1)));
- EXPECT_EQ(std::string("BucketId(0x4000000000000002) : "
- "node(idx=0,crc=0x2327,docs=300/500,bytes=3000/5000,trusted=true,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 2)));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change_from_node_down) {
- enableDistributorClusterState("distributor:1 storage:2");
-
- addNodesToBucketDB(document::BucketId(16, 1), "1=1234");
-
- _sender.replies().clear();
-
- {
- api::BucketInfo info(8999, 300, 3000, 500, 5000, false, false);
- auto cmd(std::make_shared<api::NotifyBucketChangeCommand>(
- makeDocumentBucket(document::BucketId(16, 1)), info));
- cmd->setSourceIndex(0);
- getBucketDBUpdater().onNotifyBucketChange(cmd);
- }
- // Enable here to avoid having request bucket info be silently swallowed
- // (send_request_bucket_info drops message if node is down).
- enableDistributorClusterState("distributor:1 storage:2 .0.s:d");
-
- ASSERT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=1,crc=0x4d2,docs=1234/1234,bytes=1234/1234,trusted=false,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1)));
-
- ASSERT_EQ(size_t(1), _sender.replies().size());
- ASSERT_EQ(MessageType::NOTIFYBUCKETCHANGE_REPLY, _sender.reply(0)->getType());
-
- // Currently, this pending operation will be auto-flushed when the cluster state
- // changes so the behavior is still correct. Keep this test around to prevent
- // regressions here.
- ASSERT_EQ(size_t(1), _sender.commands().size());
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
- ASSERT_EQ(size_t(1), rbi.getBuckets().size());
- EXPECT_EQ(document::BucketId(16, 1), rbi.getBuckets()[0]);
-
- auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
- reply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(
- document::BucketId(16, 1),
- api::BucketInfo(8999, 300, 3000, 500, 5000, false, false)));
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
-
- // No change
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=1,crc=0x4d2,docs=1234/1234,bytes=1234/1234,trusted=false,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1)));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-/**
- * Test that NotifyBucketChange received while there's a pending cluster state
- * waits until the cluster state has been enabled as current before it sends off
- * the single bucket info requests. This is to prevent a race condition where
- * the replies to bucket info requests for buckets that would be owned by the
- * distributor in the pending state but not by the current state would be
- * discarded when attempted inserted into the bucket database.
- */
-TEST_F(LegacyBucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) {
- setSystemState(lib::ClusterState("distributor:1 storage:1"));
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
-
- {
- api::BucketInfo info(8999, 300, 3000, 500, 5000, false, false);
- auto cmd(std::make_shared<api::NotifyBucketChangeCommand>(
- makeDocumentBucket(document::BucketId(16, 1)), info));
- cmd->setSourceIndex(0);
- getBucketDBUpdater().onNotifyBucketChange(cmd);
- }
-
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
-
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(lib::ClusterState("distributor:1 storage:1"),
- _bucketSpaces.size(), 10));
-
- ASSERT_EQ(_bucketSpaces.size() + 1, _sender.commands().size());
-
- {
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(_bucketSpaces.size()));
- ASSERT_EQ(size_t(1), rbi.getBuckets().size());
- EXPECT_EQ(document::BucketId(16, 1), rbi.getBuckets()[0]);
- }
- _sender.clear();
-
- // Queue must be cleared once pending state is enabled.
- {
- lib::ClusterState state("distributor:1 storage:2");
- uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 1;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(state, expectedMsgs, dummyBucketsToReturn));
- }
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
- {
- auto& rbi = dynamic_cast<RequestBucketInfoCommand&>(*_sender.command(0));
- EXPECT_EQ(size_t(0), rbi.getBuckets().size());
- }
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, merge_reply) {
- enableDistributorClusterState("distributor:1 storage:3");
-
- addNodesToBucketDB(document::BucketId(16, 1234),
- "0=1234,1=1234,2=1234");
-
- std::vector<api::MergeBucketCommand::Node> nodes;
- nodes.push_back(api::MergeBucketCommand::Node(0));
- nodes.push_back(api::MergeBucketCommand::Node(1));
- nodes.push_back(api::MergeBucketCommand::Node(2));
-
- api::MergeBucketCommand cmd(makeDocumentBucket(document::BucketId(16, 1234)), nodes, 0);
-
- auto reply = std::make_shared<api::MergeBucketReply>(cmd);
-
- _sender.clear();
- getBucketDBUpdater().onMergeBucketReply(reply);
-
- ASSERT_EQ(size_t(3), _sender.commands().size());
-
- for (uint32_t i = 0; i < 3; i++) {
- auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
-
- ASSERT_TRUE(req.get() != nullptr);
- ASSERT_EQ(size_t(1), req->getBuckets().size());
- EXPECT_EQ(document::BucketId(16, 1234), req->getBuckets()[0]);
-
- auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req);
- reqreply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(document::BucketId(16, 1234),
- api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1))));
-
- getBucketDBUpdater().onRequestBucketInfoReply(reqreply);
- }
-
- EXPECT_EQ(std::string("BucketId(0x40000000000004d2) : "
- "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), "
- "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false), "
- "node(idx=2,crc=0x1e,docs=300/300,bytes=3000/3000,trusted=false,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1234)));
-};
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down) {
- enableDistributorClusterState("distributor:1 storage:3");
- std::vector<api::MergeBucketCommand::Node> nodes;
-
- addNodesToBucketDB(document::BucketId(16, 1234), "0=1234,1=1234,2=1234");
-
- for (uint32_t i = 0; i < 3; ++i) {
- nodes.push_back(api::MergeBucketCommand::Node(i));
- }
-
- api::MergeBucketCommand cmd(makeDocumentBucket(document::BucketId(16, 1234)), nodes, 0);
-
- auto reply = std::make_shared<api::MergeBucketReply>(cmd);
-
- setSystemState(lib::ClusterState("distributor:1 storage:2"));
-
- _sender.clear();
- getBucketDBUpdater().onMergeBucketReply(reply);
-
- ASSERT_EQ(size_t(2), _sender.commands().size());
-
- for (uint32_t i = 0; i < 2; i++) {
- auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
-
- ASSERT_TRUE(req.get() != nullptr);
- ASSERT_EQ(size_t(1), req->getBuckets().size());
- EXPECT_EQ(document::BucketId(16, 1234), req->getBuckets()[0]);
-
- auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req);
- reqreply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(
- document::BucketId(16, 1234),
- api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1))));
- getBucketDBUpdater().onRequestBucketInfoReply(reqreply);
- }
-
- EXPECT_EQ(std::string("BucketId(0x40000000000004d2) : "
- "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), "
- "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1234)));
-};
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
- enableDistributorClusterState("distributor:1 storage:3");
- std::vector<api::MergeBucketCommand::Node> nodes;
-
- addNodesToBucketDB(document::BucketId(16, 1234), "0=1234,1=1234,2=1234");
-
- for (uint32_t i = 0; i < 3; ++i) {
- nodes.push_back(api::MergeBucketCommand::Node(i));
- }
-
- api::MergeBucketCommand cmd(makeDocumentBucket(document::BucketId(16, 1234)), nodes, 0);
-
- auto reply(std::make_shared<api::MergeBucketReply>(cmd));
-
- _sender.clear();
- getBucketDBUpdater().onMergeBucketReply(reply);
-
- ASSERT_EQ(size_t(3), _sender.commands().size());
-
- setSystemState(lib::ClusterState("distributor:1 storage:2"));
-
- for (uint32_t i = 0; i < 3; i++) {
- auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
-
- ASSERT_TRUE(req.get() != nullptr);
- ASSERT_EQ(size_t(1), req->getBuckets().size());
- EXPECT_EQ(document::BucketId(16, 1234), req->getBuckets()[0]);
-
- auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req);
- reqreply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(
- document::BucketId(16, 1234),
- api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1))));
- getBucketDBUpdater().onRequestBucketInfoReply(reqreply);
- }
-
- EXPECT_EQ(std::string("BucketId(0x40000000000004d2) : "
- "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), "
- "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false)"),
- dumpBucket(document::BucketId(16, 1234)));
-};
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, flush) {
- enableDistributorClusterState("distributor:1 storage:3");
- _sender.clear();
-
- addNodesToBucketDB(document::BucketId(16, 1234), "0=1234,1=1234,2=1234");
-
- std::vector<api::MergeBucketCommand::Node> nodes;
- for (uint32_t i = 0; i < 3; ++i) {
- nodes.push_back(api::MergeBucketCommand::Node(i));
- }
-
- api::MergeBucketCommand cmd(makeDocumentBucket(document::BucketId(16, 1234)), nodes, 0);
-
- auto reply(std::make_shared<api::MergeBucketReply>(cmd));
-
- _sender.clear();
- getBucketDBUpdater().onMergeBucketReply(reply);
-
- ASSERT_EQ(size_t(3), _sender.commands().size());
- ASSERT_EQ(size_t(0), _senderDown.replies().size());
-
- getBucketDBUpdater().flush();
- // Flushing should drop all merge bucket replies
- EXPECT_EQ(size_t(0), _senderDown.commands().size());
-}
-
-std::string
-LegacyBucketDBUpdaterTest::getSentNodes(
- const std::string& oldClusterState,
- const std::string& newClusterState)
-{
- auto fixture = createPendingStateFixtureForStateChange(
- oldClusterState, newClusterState);
-
- sortSentMessagesByIndex(fixture->sender);
-
- std::ostringstream ost;
- for (uint32_t i = 0; i < fixture->sender.commands().size(); i++) {
- auto& req = dynamic_cast<RequestBucketInfoCommand&>(*fixture->sender.command(i));
-
- if (i > 0) {
- ost << ",";
- }
-
- ost << req.getAddress()->getIndex();
- }
-
- return ost.str();
-}
-
-std::string
-LegacyBucketDBUpdaterTest::getSentNodesDistributionChanged(
- const std::string& oldClusterState)
-{
- DistributorMessageSenderStub sender;
-
- framework::defaultimplementation::FakeClock clock;
- ClusterInformation::CSP clusterInfo(createClusterInfo(oldClusterState));
- std::unique_ptr<PendingClusterState> state(
- PendingClusterState::createForDistributionChange(
- clock, clusterInfo, sender, getBucketSpaceRepo(), api::Timestamp(1)));
-
- sortSentMessagesByIndex(sender);
-
- std::ostringstream ost;
- for (uint32_t i = 0; i < sender.commands().size(); i++) {
- auto& req = dynamic_cast<RequestBucketInfoCommand&>(*sender.command(i));
-
- if (i > 0) {
- ost << ",";
- }
-
- ost << req.getAddress()->getIndex();
- }
-
- return ost.str();
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_send_messages) {
- EXPECT_EQ(getNodeList({0, 1, 2}),
- getSentNodes("cluster:d",
- "distributor:1 storage:3"));
-
- EXPECT_EQ(getNodeList({0, 1}),
- getSentNodes("cluster:d",
- "distributor:1 storage:3 .2.s:m"));
-
- EXPECT_EQ(getNodeList({2}),
- getSentNodes("distributor:1 storage:2",
- "distributor:1 storage:3"));
-
- EXPECT_EQ(getNodeList({2, 3, 4, 5}),
- getSentNodes("distributor:1 storage:2",
- "distributor:1 storage:6"));
-
- EXPECT_EQ(getNodeList({0, 1, 2}),
- getSentNodes("distributor:4 storage:3",
- "distributor:3 storage:3"));
-
- EXPECT_EQ(getNodeList({0, 1, 2, 3}),
- getSentNodes("distributor:4 storage:3",
- "distributor:4 .2.s:d storage:4"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:4 storage:3",
- "distributor:4 .0.s:d storage:4"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:3 storage:3",
- "distributor:4 storage:3"));
-
- EXPECT_EQ(getNodeList({2}),
- getSentNodes("distributor:3 storage:3 .2.s:i",
- "distributor:3 storage:3"));
-
- EXPECT_EQ(getNodeList({1}),
- getSentNodes("distributor:3 storage:3 .1.s:d",
- "distributor:3 storage:3"));
-
- EXPECT_EQ(getNodeList({1, 2, 4}),
- getSentNodes("distributor:3 storage:4 .1.s:d .2.s:i",
- "distributor:3 storage:5"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:1 storage:3",
- "cluster:d"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:1 storage:3",
- "distributor:1 storage:3"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:1 storage:3",
- "cluster:d distributor:1 storage:6"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:3 storage:3",
- "distributor:3 .2.s:m storage:3"));
-
- EXPECT_EQ(getNodeList({0, 1, 2}),
- getSentNodes("distributor:3 .2.s:m storage:3",
- "distributor:3 .2.s:d storage:3"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:3 .2.s:m storage:3",
- "distributor:3 storage:3"));
-
- EXPECT_EQ(getNodeList({0, 1, 2}),
- getSentNodesDistributionChanged("distributor:3 storage:3"));
-
- EXPECT_EQ(getNodeList({0, 1}),
- getSentNodes("distributor:10 storage:2",
- "distributor:10 .1.s:d storage:2"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:2 storage:2",
- "distributor:3 .2.s:i storage:2"));
-
- EXPECT_EQ(getNodeList({0, 1, 2}),
- getSentNodes("distributor:3 storage:3",
- "distributor:3 .2.s:s storage:3"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:3 .2.s:s storage:3",
- "distributor:3 .2.s:d storage:3"));
-
- EXPECT_EQ(getNodeList({1}),
- getSentNodes("distributor:3 storage:3 .1.s:m",
- "distributor:3 storage:3"));
-
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:3 storage:3",
- "distributor:3 storage:3 .1.s:m"));
-};
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_receive) {
- DistributorMessageSenderStub sender;
-
- auto cmd(std::make_shared<api::SetSystemStateCommand>(
- lib::ClusterState("distributor:1 storage:3")));
-
- framework::defaultimplementation::FakeClock clock;
- ClusterInformation::CSP clusterInfo(createClusterInfo("cluster:d"));
- OutdatedNodesMap outdatedNodesMap;
- std::unique_ptr<PendingClusterState> state(
- PendingClusterState::createForClusterStateChange(
- clock, clusterInfo, sender, getBucketSpaceRepo(),
- cmd, outdatedNodesMap, api::Timestamp(1)));
-
- ASSERT_EQ(messageCount(3), sender.commands().size());
-
- sortSentMessagesByIndex(sender);
-
- std::ostringstream ost;
- for (uint32_t i = 0; i < sender.commands().size(); i++) {
- auto* req = dynamic_cast<RequestBucketInfoCommand*>(sender.command(i).get());
- ASSERT_TRUE(req != nullptr);
-
- auto rep = std::make_shared<RequestBucketInfoReply>(*req);
-
- rep->getBucketInfo().push_back(
- RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo(i, i, i, i, i)));
-
- ASSERT_TRUE(state->onRequestBucketInfoReply(rep));
- ASSERT_EQ((i == (sender.commands().size() - 1)), state->done());
- }
-
- auto& pendingTransition = state->getPendingBucketSpaceDbTransition(makeBucketSpace());
- EXPECT_EQ(3, (int)pendingTransition.results().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down) {
- std::string config(getDistConfig6Nodes4Groups());
- config += "distributor_auto_ownership_transfer_on_whole_group_down true\n";
- setDistribution(config);
-
- // Group config has nodes {0, 1}, {2, 3}, {4, 5}
- // We're node index 0.
-
- // Entire group 1 goes down. Must refetch from all nodes.
- EXPECT_EQ(getNodeList({0, 1, 2, 3, 4, 5}),
- getSentNodes("distributor:6 storage:6",
- "distributor:6 .2.s:d .3.s:d storage:6"));
-
- // But don't fetch if not the entire group is down.
- EXPECT_EQ(std::string(""),
- getSentNodes("distributor:6 storage:6",
- "distributor:6 .2.s:d storage:6"));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) {
- std::string config(getDistConfig6Nodes4Groups());
- config += "distributor_auto_ownership_transfer_on_whole_group_down false\n";
- setDistribution(config);
-
- // Group is down, but config says to not do anything about it.
- EXPECT_EQ(getNodeList({0, 1, 2, 3, 4, 5}, _bucketSpaces.size() - 1),
- getSentNodes("distributor:6 storage:6",
- "distributor:6 .2.s:d .3.s:d storage:6"));
-}
-
-namespace {
-
-void
-parseInputData(const std::string& data,
- uint64_t timestamp,
- PendingClusterState& state,
- bool includeBucketInfo)
-{
- vespalib::StringTokenizer tokenizer(data, "|");
- for (uint32_t i = 0; i < tokenizer.size(); i++) {
- vespalib::StringTokenizer tok2(tokenizer[i], ":");
-
- uint16_t node = atoi(tok2[0].data());
-
- state.setNodeReplied(node);
- auto& pendingTransition = state.getPendingBucketSpaceDbTransition(makeBucketSpace());
-
- vespalib::StringTokenizer tok3(tok2[1], ",");
- for (uint32_t j = 0; j < tok3.size(); j++) {
- if (includeBucketInfo) {
- vespalib::StringTokenizer tok4(tok3[j], "/");
-
- pendingTransition.addNodeInfo(
- document::BucketId(16, atoi(tok4[0].data())),
- BucketCopy(
- timestamp,
- node,
- api::BucketInfo(
- atoi(tok4[1].data()),
- atoi(tok4[2].data()),
- atoi(tok4[3].data()),
- atoi(tok4[2].data()),
- atoi(tok4[3].data()))));
- } else {
- pendingTransition.addNodeInfo(
- document::BucketId(16, atoi(tok3[j].data())),
- BucketCopy(timestamp,
- node,
- api::BucketInfo(3, 3, 3, 3, 3)));
- }
- }
- }
-}
-
-struct BucketDumper : public BucketDatabase::EntryProcessor
-{
- std::ostringstream ost;
- bool _includeBucketInfo;
-
- explicit BucketDumper(bool includeBucketInfo)
- : _includeBucketInfo(includeBucketInfo)
- {
- }
-
- bool process(const BucketDatabase::ConstEntryRef& e) override {
- document::BucketId bucketId(e.getBucketId());
-
- ost << (uint32_t)bucketId.getRawId() << ":";
- for (uint32_t i = 0; i < e->getNodeCount(); ++i) {
- if (i > 0) {
- ost << ",";
- }
- const BucketCopy& copy(e->getNodeRef(i));
- ost << copy.getNode();
- if (_includeBucketInfo) {
- ost << "/" << copy.getChecksum()
- << "/" << copy.getDocumentCount()
- << "/" << copy.getTotalDocumentSize()
- << "/" << (copy.trusted() ? "t" : "u");
- }
- }
- ost << "|";
- return true;
- }
-};
-
-}
-
-std::string
-LegacyBucketDBUpdaterTest::mergeBucketLists(
- const lib::ClusterState& oldState,
- const std::string& existingData,
- const lib::ClusterState& newState,
- const std::string& newData,
- bool includeBucketInfo)
-{
- framework::defaultimplementation::FakeClock clock;
- framework::MilliSecTimer timer(clock);
-
- DistributorMessageSenderStub sender;
- OutdatedNodesMap outdatedNodesMap;
-
- {
- auto cmd(std::make_shared<api::SetSystemStateCommand>(oldState));
-
- api::Timestamp beforeTime(1);
-
- ClusterInformation::CSP clusterInfo(createClusterInfo("cluster:d"));
- std::unique_ptr<PendingClusterState> state(
- PendingClusterState::createForClusterStateChange(
- clock, clusterInfo, sender, getBucketSpaceRepo(),
- cmd, outdatedNodesMap, beforeTime));
-
- parseInputData(existingData, beforeTime, *state, includeBucketInfo);
- state->mergeIntoBucketDatabases();
- }
-
- BucketDumper dumper_tmp(true);
- getBucketDatabase().forEach(dumper_tmp);
-
- {
- auto cmd(std::make_shared<api::SetSystemStateCommand>(
- lib::ClusterState(newState)));
-
- api::Timestamp afterTime(2);
-
- ClusterInformation::CSP clusterInfo(createClusterInfo(oldState.toString()));
- std::unique_ptr<PendingClusterState> state(
- PendingClusterState::createForClusterStateChange(
- clock, clusterInfo, sender, getBucketSpaceRepo(),
- cmd, outdatedNodesMap, afterTime));
-
- parseInputData(newData, afterTime, *state, includeBucketInfo);
- state->mergeIntoBucketDatabases();
- }
-
- BucketDumper dumper(includeBucketInfo);
- auto &bucketDb(defaultDistributorBucketSpace().getBucketDatabase());
- bucketDb.forEach(dumper);
- bucketDb.clear();
- return dumper.ost.str();
-}
-
-std::string
-LegacyBucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
- const std::string& newData,
- bool includeBucketInfo)
-{
- return mergeBucketLists(
- lib::ClusterState("distributor:1 storage:3"),
- existingData,
- lib::ClusterState("distributor:1 storage:3"),
- newData,
- includeBucketInfo);
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge) {
- // Simple initializing case - ask all nodes for info
- EXPECT_EQ(
- // Result is on the form: [bucket w/o count bits]:[node indexes]|..
- std::string("4:0,1|2:0,1|6:1,2|1:0,2|5:2,0|3:2,1|"),
- // Input is on the form: [node]:[bucket w/o count bits]|...
- mergeBucketLists(
- "",
- "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6"));
-
- // New node came up
- EXPECT_EQ(
- std::string("4:0,1|2:0,1|6:1,2,3|1:0,2,3|5:2,0,3|3:2,1,3|"),
- mergeBucketLists(
- "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
- "3:1,3,5,6"));
-
- // Node came up with some buckets removed and some added
- // Buckets that were removed should not be removed as the node
- // didn't lose a disk.
- EXPECT_EQ(
- std::string("8:0|4:0,1|2:0,1|6:1,0,2|1:0,2|5:2,0|3:2,1|"),
- mergeBucketLists(
- "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
- "0:1,2,6,8"));
-
- // Bucket info format is "bucketid/checksum/count/size"
- // Node went from initializing to up and invalid bucket went to empty.
- EXPECT_EQ(
- std::string("2:0/0/0/0/t|"),
- mergeBucketLists(
- "0:2/0/0/1",
- "0:2/0/0/0",
- true));
-
- EXPECT_EQ(std::string("5:1/2/3/4/u,0/0/0/0/u|"),
- mergeBucketLists("", "0:5/0/0/0|1:5/2/3/4", true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
- // Node went from initializing to up and non-invalid bucket changed.
- EXPECT_EQ(
- std::string("2:0/2/3/4/t|3:0/2/4/6/t|"),
- mergeBucketLists(
- lib::ClusterState("distributor:1 storage:1 .0.s:i"),
- "0:2/1/2/3,3/2/4/6",
- lib::ClusterState("distributor:1 storage:1"),
- "0:2/2/3/4,3/2/4/6",
- true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) {
- document::BucketId bucket(16, 3);
- lib::ClusterState stateBefore("distributor:1 storage:1");
- {
- uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 1;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn));
- }
- _sender.clear();
-
- getBucketDBUpdater().recheckBucketInfo(0, makeDocumentBucket(bucket));
-
- ASSERT_EQ(size_t(1), _sender.commands().size());
- std::shared_ptr<api::RequestBucketInfoCommand> rbi(
- std::dynamic_pointer_cast<RequestBucketInfoCommand>(
- _sender.command(0)));
-
- lib::ClusterState stateAfter("distributor:3 storage:3");
-
- {
- uint32_t expectedMsgs = messageCount(2), dummyBucketsToReturn = 1;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(stateAfter, expectedMsgs, dummyBucketsToReturn));
- }
- EXPECT_FALSE(getDistributorBucketSpace().get_bucket_ownership_flags(bucket).owned_in_current_state());
-
- ASSERT_NO_FATAL_FAILURE(sendFakeReplyForSingleBucketRequest(*rbi));
-
- EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
- document::BucketId bucket(16, 3);
- lib::ClusterState stateBefore("distributor:1 storage:1");
- {
- uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 1;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn));
- }
- _sender.clear();
-
- getBucketDBUpdater().recheckBucketInfo(0, makeDocumentBucket(bucket));
-
- ASSERT_EQ(size_t(1), _sender.commands().size());
- std::shared_ptr<api::RequestBucketInfoCommand> rbi(
- std::dynamic_pointer_cast<RequestBucketInfoCommand>(
- _sender.command(0)));
-
- lib::ClusterState stateAfter("distributor:3 storage:3");
- // Set, but _don't_ enable cluster state. We want it to be pending.
- setSystemState(stateAfter);
- EXPECT_TRUE(getDistributorBucketSpace().get_bucket_ownership_flags(bucket).owned_in_current_state());
- EXPECT_FALSE(getDistributorBucketSpace().get_bucket_ownership_flags(bucket).owned_in_pending_state());
-
- ASSERT_NO_FATAL_FAILURE(sendFakeReplyForSingleBucketRequest(*rbi));
-
- EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-/*
- * If we get a distribution config change, it's important that cluster states that
- * arrive after this--but _before_ the pending cluster state has finished--must trigger
- * a full bucket info fetch no matter what the cluster state change was! Otherwise, we
- * will with a high likelihood end up not getting the complete view of the buckets in
- * the cluster.
- */
-TEST_F(LegacyBucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) {
- lib::ClusterState stateBefore("distributor:6 storage:6");
- {
- uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 1;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn));
- }
- _sender.clear();
- std::string distConfig(getDistConfig6Nodes2Groups());
- setDistribution(distConfig);
-
- sortSentMessagesByIndex(_sender);
- ASSERT_EQ(messageCount(6), _sender.commands().size());
- // Suddenly, a wild cluster state change appears! Even though this state
- // does not in itself imply any bucket changes, it will still overwrite the
- // pending cluster state and thus its state of pending bucket info requests.
- setSystemState(lib::ClusterState("distributor:6 .2.t:12345 storage:6"));
-
- ASSERT_EQ(messageCount(12), _sender.commands().size());
-
- // Send replies for first messageCount(6) (outdated requests).
- int numBuckets = 10;
- for (uint32_t i = 0; i < messageCount(6); ++i) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:6 storage:6"),
- *_sender.command(i), numBuckets));
- }
- // No change from these.
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(1, "distributor:6 storage:6"));
-
- // Send for current pending.
- for (uint32_t i = 0; i < messageCount(6); ++i) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:6 .2.t:12345 storage:6"),
- *_sender.command(i + messageCount(6)),
- numBuckets));
- }
- ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:6 storage:6"));
- _sender.clear();
-
- // No more pending global fetch; this should be a no-op state.
- setSystemState(lib::ClusterState("distributor:6 .3.t:12345 storage:6"));
- EXPECT_EQ(size_t(0), _sender.commands().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), 20));
- _sender.clear();
- EXPECT_TRUE(distributor_is_in_recovery_mode());
- complete_recovery_mode();
- EXPECT_FALSE(distributor_is_in_recovery_mode());
-
- std::string distConfig(getDistConfig6Nodes4Groups());
- setDistribution(distConfig);
- sortSentMessagesByIndex(_sender);
- // No replies received yet, still no recovery mode.
- EXPECT_FALSE(distributor_is_in_recovery_mode());
-
- ASSERT_EQ(messageCount(6), _sender.commands().size());
- uint32_t numBuckets = 10;
- for (uint32_t i = 0; i < messageCount(6); ++i) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:6 storage:6"),
- *_sender.command(i), numBuckets));
- }
-
- // Pending cluster state (i.e. distribution) has been enabled, which should
- // cause recovery mode to be entered.
- EXPECT_TRUE(distributor_is_in_recovery_mode());
- complete_recovery_mode();
- EXPECT_FALSE(distributor_is_in_recovery_mode());
-}
-
-namespace {
-
-template <typename Func>
-struct FunctorProcessor : BucketDatabase::EntryProcessor {
- Func _f;
-
- template <typename F>
- explicit FunctorProcessor(F&& f) : _f(std::forward<F>(f)) {}
-
- bool process(const BucketDatabase::ConstEntryRef& e) override {
- _f(e);
- return true;
- }
-};
-
-template <typename Func>
-std::unique_ptr<BucketDatabase::EntryProcessor> func_processor(Func&& f) {
- return std::make_unique<FunctorProcessor<Func>>(std::forward<Func>(f));
-}
-
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) {
- setDistribution(getDistConfig3Nodes1Group());
-
- constexpr uint32_t n_buckets = 100;
- ASSERT_NO_FATAL_FAILURE(
- setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), n_buckets));
- _sender.clear();
-
- // Config implies a different node set than the current cluster state, so it's crucial that
- // DB pruning is _not_ elided. Yes, this is inherently racing with cluster state changes and
- // should be changed to be atomic and controlled by the cluster controller instead of config.
- // But this is where we currently are.
- setDistribution(getDistConfig6Nodes2Groups());
-
- getBucketDatabase().forEach(*func_processor([&](const auto& e) {
- EXPECT_TRUE(getDistributorBucketSpace().get_bucket_ownership_flags(e.getBucketId()).owned_in_pending_state());
- }));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) {
- getClock().setAbsoluteTimeInSeconds(101234);
- lib::ClusterState stateBefore("distributor:1 storage:1");
- {
- uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 1;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn));
- }
-
- // setAndEnableClusterState adds n buckets with id (16, i)
- document::BucketId bucket(16, 0);
- BucketDatabase::Entry e(getBucket(bucket));
- ASSERT_TRUE(e.valid());
- EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) {
- {
- lib::ClusterState stateBefore("distributor:1 storage:1 .0.s:i");
- uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 0;
- // This step is required to make the distributor ready for accepting
- // the below explicit database insertion towards node 0.
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(stateBefore, expectedMsgs,
- dummyBucketsToReturn));
- }
- _sender.clear();
- getClock().setAbsoluteTimeInSeconds(1000);
- lib::ClusterState state("distributor:1 storage:1");
- setSystemState(state);
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
-
- // Before replying with the bucket info, simulate the arrival of a mutation
- // reply that alters the state of the bucket with information that will be
- // more recent that what is returned by the bucket info. This information
- // must not be lost when the bucket info is later merged into the database.
- document::BucketId bucket(16, 1);
- constexpr uint64_t insertionTimestamp = 1001ULL * 1000000;
- api::BucketInfo wantedInfo(5, 6, 7);
- getBucketDBUpdater().operation_context().update_bucket_database(
- makeDocumentBucket(bucket),
- BucketCopy(insertionTimestamp, 0, wantedInfo),
- DatabaseUpdate::CREATE_IF_NONEXISTING);
-
- getClock().setAbsoluteTimeInSeconds(1002);
- constexpr uint32_t bucketsReturned = 10; // Buckets (16, 0) ... (16, 9)
- // Return bucket information which on the timeline might originate from
- // anywhere between [1000, 1002]. Our assumption is that any mutations
- // taking place after t=1000 must have its reply received and processed
- // by this distributor and timestamped strictly higher than t=1000 (modulo
- // clock skew, of course, but that is outside the scope of this). A mutation
- // happening before t=1000 but receiving a reply at t>1000 does not affect
- // correctness, as this should contain the same bucket info as that
- // contained in the full bucket reply and the DB update is thus idempotent.
- for (uint32_t i = 0; i < _bucketSpaces.size(); ++i) {
- ASSERT_NO_FATAL_FAILURE(fakeBucketReply(state, *_sender.command(i), bucketsReturned));
- }
-
- BucketDatabase::Entry e(getBucket(bucket));
- ASSERT_EQ(uint32_t(1), e->getNodeCount());
- EXPECT_EQ(wantedInfo, e->getNodeRef(0).getBucketInfo());
-}
-
-std::vector<uint16_t>
-LegacyBucketDBUpdaterTest::getSendSet() const
-{
- std::vector<uint16_t> nodes;
- std::transform(_sender.commands().begin(),
- _sender.commands().end(),
- std::back_inserter(nodes),
- [](auto& cmd)
- {
- auto& req(dynamic_cast<const api::RequestBucketInfoCommand&>(*cmd));
- return req.getAddress()->getIndex();
- });
- return nodes;
-}
-
-std::vector<uint16_t>
-LegacyBucketDBUpdaterTest::getSentNodesWithPreemption(
- const std::string& oldClusterState,
- uint32_t expectedOldStateMessages,
- const std::string& preemptedClusterState,
- const std::string& newClusterState)
-{
- lib::ClusterState stateBefore(oldClusterState);
- uint32_t dummyBucketsToReturn = 10;
- // FIXME cannot chain assertion checks in non-void function
- setAndEnableClusterState(lib::ClusterState(oldClusterState),
- expectedOldStateMessages,
- dummyBucketsToReturn);
-
- _sender.clear();
-
- setSystemState(lib::ClusterState(preemptedClusterState));
- _sender.clear();
- // Do not allow the pending state to become the active state; trigger a
- // new transition without ACKing the info requests first. This will
- // overwrite the pending state entirely.
- setSystemState(lib::ClusterState(newClusterState));
- return getSendSet();
-}
-
-using nodeVec = std::vector<uint16_t>;
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-/*
- * If we don't carry over the set of nodes that we need to fetch from,
- * a naive comparison between the active state and the new state will
- * make it appear to the distributor that nothing has changed, as any
- * database modifications caused by intermediate states will not be
- * accounted for (basically the ABA problem in a distributed setting).
- */
-TEST_F(LegacyBucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) {
- EXPECT_EQ(
- expandNodeVec({0, 1, 2, 3, 4, 5}),
- getSentNodesWithPreemption("version:1 distributor:6 storage:6",
- messageCount(6),
- "version:2 distributor:6 .5.s:d storage:6",
- "version:3 distributor:6 storage:6"));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) {
- EXPECT_EQ(
- expandNodeVec({2, 3}),
- getSentNodesWithPreemption(
- "version:1 distributor:6 storage:6 .2.s:d",
- messageCount(5),
- "version:2 distributor:6 storage:6 .2.s:d .3.s:d",
- "version:3 distributor:6 storage:6"));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
- EXPECT_EQ(
- expandNodeVec({2}),
- getSentNodesWithPreemption(
- "version:1 distributor:6 storage:6",
- messageCount(6),
- "version:2 distributor:6 storage:6 .2.s:d",
- "version:3 distributor:6 storage:6"));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
- EXPECT_EQ(
- nodeVec{},
- getSentNodesWithPreemption(
- "version:1 distributor:6 storage:6 .2.s:d",
- messageCount(5),
- "version:2 distributor:6 storage:6", // Sends to 2.
- "version:3 distributor:6 storage:6 .2.s:d")); // 2 down again.
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
- // Even though 100 nodes are preempted, not all of these should be part
- // of the request afterwards when only 6 are part of the state.
- EXPECT_EQ(
- expandNodeVec({0, 1, 2, 3, 4, 5}),
- getSentNodesWithPreemption(
- "version:1 distributor:6 storage:100",
- messageCount(100),
- "version:2 distributor:5 .4.s:d storage:100",
- "version:3 distributor:6 storage:6"));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) {
- lib::ClusterState stateBefore(
- "version:1 distributor:6 storage:6 .1.t:1234");
- uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 10;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(stateBefore, expectedMsgs, dummyBucketsToReturn));
- _sender.clear();
- // New cluster state that should not by itself trigger any new fetches,
- // unless outdated node set is somehow not cleared after an enabled
- // (completed) cluster state has been set.
- lib::ClusterState stateAfter("version:3 distributor:6 storage:6");
- setSystemState(stateAfter);
- EXPECT_EQ(size_t(0), _sender.commands().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest (despite being disabled)
-// XXX test currently disabled since distribution config currently isn't used
-// at all in order to deduce the set of nodes to send to. This might not matter
-// in practice since it is assumed that the cluster state matching the new
-// distribution config will follow very shortly after the config has been
-// applied to the node. The new cluster state will then send out requests to
-// the correct node set.
-TEST_F(LegacyBucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) {
- uint32_t expectedMsgs = 6, dummyBucketsToReturn = 20;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"),
- expectedMsgs, dummyBucketsToReturn));
- _sender.clear();
-
- // Intentionally trigger a racing config change which arrives before the
- // new cluster state representing it.
- std::string distConfig(getDistConfig3Nodes1Group());
- setDistribution(distConfig);
- sortSentMessagesByIndex(_sender);
-
- EXPECT_EQ((nodeVec{0, 1, 2}), getSendSet());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-/**
- * Test scenario where a cluster is downsized by removing a subset of the nodes
- * from the distribution configuration. The system must be able to deal with
- * a scenario where the set of nodes between two cluster states across a config
- * change may differ.
- *
- * See VESPA-790 for details.
- */
-TEST_F(LegacyBucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) {
- uint32_t expectedMsgs = messageCount(3), dummyBucketsToReturn = 1;
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:3 storage:3"),
- expectedMsgs, dummyBucketsToReturn));
- _sender.clear();
-
- // Cluster goes from {0, 1, 2} -> {0, 1}. This leaves us with a config
- // that does not contain node 2 while the _active_ cluster state still
- // contains this node.
- const char* downsizeCfg =
- "redundancy 2\n"
- "distributor_auto_ownership_transfer_on_whole_group_down true\n"
- "group[2]\n"
- "group[0].name \"invalid\"\n"
- "group[0].index \"invalid\"\n"
- "group[0].partitions 1|*\n"
- "group[0].nodes[0]\n"
- "group[1].name rack0\n"
- "group[1].index 0\n"
- "group[1].nodes[2]\n"
- "group[1].nodes[0].index 0\n"
- "group[1].nodes[1].index 1\n";
-
- setDistribution(downsizeCfg);
- sortSentMessagesByIndex(_sender);
- _sender.clear();
-
- // Attempt to apply state with {0, 1} set. This will compare the new state
- // with the previous state, which still has node 2.
- expectedMsgs = messageCount(2);
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:2 storage:2"),
- expectedMsgs, dummyBucketsToReturn));
-
- EXPECT_EQ(expandNodeVec({0, 1}), getSendSet());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) {
- auto fixture = createPendingStateFixtureForStateChange(
- "distributor:2 storage:2", "distributor:1 storage:2");
- EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
-
- fixture = createPendingStateFixtureForStateChange(
- "distributor:2 storage:2", "distributor:2 .1.s:d storage:2");
- EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) {
- auto fixture = createPendingStateFixtureForStateChange(
- "distributor:2 storage:2", "distributor:2 storage:1");
- EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
-
- fixture = createPendingStateFixtureForStateChange(
- "distributor:2 storage:2", "distributor:2 storage:2 .1.s:d");
- EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) {
- auto fixture = createPendingStateFixtureForDistributionChange(
- "distributor:2 storage:2");
- EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_single_state_change) {
- ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2)));
-
- EXPECT_EQ(uint64_t(5000), lastTransitionTimeInMillis());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) {
- ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2)));
- ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:3", 3, messageCount(1)));
-
- EXPECT_EQ(uint64_t(3000), lastTransitionTimeInMillis());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) {
- lib::ClusterState state("distributor:2 storage:2");
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(state, messageCount(2), 1));
-
- _sender.clear();
- std::string distConfig(getDistConfig3Nodes1Group());
- setDistribution(distConfig);
- getClock().addSecondsToTime(4);
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(state, messageCount(2)));
- EXPECT_EQ(uint64_t(4000), lastTransitionTimeInMillis());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) {
- _sender.clear();
- lib::ClusterState state("distributor:2 storage:2");
- setSystemState(state);
- getClock().addSecondsToTime(5);
- // Pre-empted with new state here, which will push out the old pending
- // state and replace it with a new one. We should still count the time
- // used processing the old state.
- ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:3", 3, messageCount(3)));
-
- EXPECT_EQ(uint64_t(8000), lastTransitionTimeInMillis());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-/*
- * Brief reminder on test DSL for checking bucket merge operations:
- *
- * mergeBucketLists() takes as input strings of the format
- * <node>:<raw bucket id>/<checksum>/<num docs>/<doc size>|<node>:
- * and returns a string describing the bucket DB post-merge with the format
- * <raw bucket id>:<node>/<checksum>/<num docs>/<doc size>,<node>:....|<raw bucket id>:....
- *
- * Yes, the order of node<->bucket id is reversed between the two, perhaps to make sure you're awake.
- */
-
-TEST_F(LegacyBucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) {
- // Replacing bucket information for content node 0 should not mark existing
- // untrusted replica as trusted as a side effect.
- EXPECT_EQ(
- std::string("5:1/7/8/9/u,0/1/2/3/u|"),
- mergeBucketLists(
- lib::ClusterState("distributor:1 storage:3 .0.s:i"),
- "0:5/0/0/0|1:5/7/8/9",
- lib::ClusterState("distributor:1 storage:3 .0.s:u"),
- "0:5/1/2/3|1:5/7/8/9", true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) {
- EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"),
- mergeBucketLists("", "0:5/1/2/3|1:5/7/8/9", true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) {
- EXPECT_EQ(std::string("5:0/1/2/3/t|"),
- mergeBucketLists("", "0:5/1/2/3", true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) {
- EXPECT_EQ(std::string("5:0/1/2/3/t|"),
- mergeBucketLists("0:5/1/2/3", "0:5/1/2/3", true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) {
- EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"),
- mergeBucketLists("0:5/1/2/3|1:5/7/8/9", "0:5/1/2/3|1:5/7/8/9", true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) {
- EXPECT_EQ(std::string("5:1/2/3/4/u,0/1/2/3/t|"),
- mergeBucketLists("0:5/1/2/3", "0:5/1/2/3|1:5/2/3/4", true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) {
- // This differs from batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted
- // in that _all_ content nodes are considered outdated when distributor changes take place,
- // and therefore a slightly different code path is taken. In particular, bucket info for
- // outdated nodes gets removed before possibly being re-added (if present in the bucket info
- // response).
- EXPECT_EQ(
- std::string("5:1/7/8/9/u,0/1/2/3/u|"),
- mergeBucketLists(
- lib::ClusterState("distributor:2 storage:3"),
- "0:5/1/2/3|1:5/7/8/9",
- lib::ClusterState("distributor:1 storage:3"),
- "0:5/1/2/3|1:5/7/8/9", true));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
-TEST_F(LegacyBucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) {
- std::string distConfig(getDistConfig6Nodes2Groups());
- setDistribution(distConfig);
-
- const vespalib::string current_hash = "(0d*|*(0;0;1;2)(1;3;4;5))";
- const vespalib::string legacy_hash = "(0d3|3|*(0;0;1;2)(1;3;4;5))";
-
- setSystemState(lib::ClusterState("distributor:6 storage:6"));
- ASSERT_EQ(messageCount(6), _sender.commands().size());
-
- api::RequestBucketInfoCommand* global_req = nullptr;
- for (auto& cmd : _sender.commands()) {
- auto& req_cmd = dynamic_cast<api::RequestBucketInfoCommand&>(*cmd);
- if (req_cmd.getBucketSpace() == document::FixedBucketSpaces::global_space()) {
- global_req = &req_cmd;
- break;
- }
- }
- ASSERT_TRUE(global_req != nullptr);
- ASSERT_EQ(current_hash, global_req->getDistributionHash());
-
- auto reply = std::make_shared<api::RequestBucketInfoReply>(*global_req);
- reply->setResult(api::ReturnCode::REJECTED);
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
-
- getClock().addSecondsToTime(10);
- getBucketDBUpdater().resendDelayedMessages();
-
- // Should now be a resent request with legacy distribution hash
- ASSERT_EQ(messageCount(6) + 1, _sender.commands().size());
- auto& legacy_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands().back());
- ASSERT_EQ(legacy_hash, legacy_req.getDistributionHash());
-
- // Now if we reject it _again_ we should cycle back to the current hash
- // in case it wasn't a hash-based rejection after all. And the circle of life continues.
- reply = std::make_shared<api::RequestBucketInfoReply>(legacy_req);
- reply->setResult(api::ReturnCode::REJECTED);
- getBucketDBUpdater().onRequestBucketInfoReply(reply);
-
- getClock().addSecondsToTime(10);
- getBucketDBUpdater().resendDelayedMessages();
-
- ASSERT_EQ(messageCount(6) + 2, _sender.commands().size());
- auto& new_current_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands().back());
- ASSERT_EQ(current_hash, new_current_req.getDistributionHash());
-}
-
-namespace {
-
-template <typename Func>
-void for_each_bucket(const BucketDatabase& db, const document::BucketSpace& space, Func&& f) {
- BucketId last(0);
- auto e = db.getNext(last);
- while (e.valid()) {
- f(space, e);
- e = db.getNext(e.getBucketId());
- }
-}
-
-template <typename Func>
-void for_each_bucket(const DistributorBucketSpaceRepo& repo, Func&& f) {
- for (const auto& space : repo) {
- for_each_bucket(space.second->getBucketDatabase(), space.first, f);
- }
-}
-
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) {
- getBucketDBUpdater().set_stale_reads_enabled(true);
-
- lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
- set_cluster_state_bundle(lib::ClusterStateBundle(initial_state, {}, false)); // Skip activation step for simplicity
-
- ASSERT_EQ(messageCount(4), _sender.commands().size());
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(initial_state, messageCount(4), n_buckets));
- _sender.clear();
-
- EXPECT_EQ(size_t(n_buckets), mutable_default_db().size());
- EXPECT_EQ(size_t(n_buckets), mutable_global_db().size());
- EXPECT_EQ(size_t(0), read_only_default_db().size());
- EXPECT_EQ(size_t(0), read_only_global_db().size());
-
- lib::ClusterState pending_state("distributor:2 storage:4");
-
- std::unordered_set<Bucket, Bucket::hash> buckets_not_owned_in_pending_state;
- for_each_bucket(mutable_repo(), [&](const auto& space, const auto& entry) {
- if (!getDistributorBucketSpace().owns_bucket_in_state(pending_state, entry.getBucketId())) {
- buckets_not_owned_in_pending_state.insert(Bucket(space, entry.getBucketId()));
- }
- });
- EXPECT_FALSE(buckets_not_owned_in_pending_state.empty());
-
- set_cluster_state_bundle(lib::ClusterStateBundle(pending_state, {}, true)); // Now requires activation
-
- const auto buckets_not_owned_per_space = (buckets_not_owned_in_pending_state.size() / 2); // 2 spaces
- const auto expected_mutable_buckets = n_buckets - buckets_not_owned_per_space;
- EXPECT_EQ(expected_mutable_buckets, mutable_default_db().size());
- EXPECT_EQ(expected_mutable_buckets, mutable_global_db().size());
- EXPECT_EQ(buckets_not_owned_per_space, read_only_default_db().size());
- EXPECT_EQ(buckets_not_owned_per_space, read_only_global_db().size());
-
- for_each_bucket(read_only_repo(), [&](const auto& space, const auto& entry) {
- EXPECT_TRUE(buckets_not_owned_in_pending_state.find(Bucket(space, entry.getBucketId()))
- != buckets_not_owned_in_pending_state.end());
- });
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) {
- constexpr uint32_t n_buckets = 10;
- // No ownership change, just node down. Test redundancy is 2, so removing 2 nodes will
- // cause some buckets to be entirely unavailable.
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
- "version:2 distributor:1 storage:4 .0.s:d .1.s:m", n_buckets, 0);
-
- EXPECT_EQ(size_t(0), read_only_default_db().size());
- EXPECT_EQ(size_t(0), read_only_global_db().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) {
- getBucketDBUpdater().set_stale_reads_enabled(false);
-
- lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
- set_cluster_state_bundle(lib::ClusterStateBundle(initial_state, {}, false)); // Skip activation step for simplicity
-
- ASSERT_EQ(messageCount(4), _sender.commands().size());
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(initial_state, messageCount(4), n_buckets));
- _sender.clear();
-
- // Nothing in read-only DB after first bulk load of buckets.
- EXPECT_EQ(size_t(0), read_only_default_db().size());
- EXPECT_EQ(size_t(0), read_only_global_db().size());
-
- lib::ClusterState pending_state("distributor:2 storage:4");
- setSystemState(pending_state);
- // No buckets should be moved into read only db after ownership changes.
- EXPECT_EQ(size_t(0), read_only_default_db().size());
- EXPECT_EQ(size_t(0), read_only_global_db().size());
-}
-
-void LegacyBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
- vespalib::stringref initial_state_str,
- uint32_t initial_buckets,
- uint32_t initial_expected_msgs,
- vespalib::stringref pending_state_str,
- uint32_t pending_buckets,
- uint32_t pending_expected_msgs)
-{
- lib::ClusterState initial_state(initial_state_str);
- setSystemState(initial_state);
- ASSERT_EQ(messageCount(initial_expected_msgs), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(
- initial_state, messageCount(initial_expected_msgs), initial_buckets));
- _sender.clear();
-
- lib::ClusterState pending_state(pending_state_str); // Ownership change
- set_cluster_state_bundle(lib::ClusterStateBundle(pending_state, {}, true));
- ASSERT_EQ(messageCount(pending_expected_msgs), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(
- pending_state, messageCount(pending_expected_msgs), pending_buckets));
- _sender.clear();
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) {
- getBucketDBUpdater().set_stale_reads_enabled(true);
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
- "version:2 distributor:1 storage:4", n_buckets, 4));
-
- // Version should not be switched over yet
- EXPECT_EQ(uint32_t(1), current_distributor_cluster_state_bundle().getVersion());
-
- EXPECT_EQ(uint64_t(0), mutable_default_db().size());
- EXPECT_EQ(uint64_t(0), mutable_global_db().size());
-
- EXPECT_FALSE(activate_cluster_state_version(2));
-
- EXPECT_EQ(uint32_t(2), current_distributor_cluster_state_bundle().getVersion());
- EXPECT_EQ(uint64_t(n_buckets), mutable_default_db().size());
- EXPECT_EQ(uint64_t(n_buckets), mutable_global_db().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) {
- getBucketDBUpdater().set_stale_reads_enabled(true);
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
- "version:2 distributor:2 storage:4", n_buckets, 0));
- EXPECT_FALSE(activate_cluster_state_version(2));
-
- EXPECT_EQ(uint64_t(0), read_only_default_db().size());
- EXPECT_EQ(uint64_t(0), read_only_global_db().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) {
- getBucketDBUpdater().set_stale_reads_enabled(true);
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
- "version:2 distributor:1 .0.s:d storage:4", n_buckets, 0));
-
- // State not yet activated, so read-only DBs have got all the buckets we used to have.
- EXPECT_EQ(uint64_t(0), mutable_default_db().size());
- EXPECT_EQ(uint64_t(0), mutable_global_db().size());
- EXPECT_EQ(uint64_t(n_buckets), read_only_default_db().size());
- EXPECT_EQ(uint64_t(n_buckets), read_only_global_db().size());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) {
- getBucketDBUpdater().set_stale_reads_enabled(true);
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:4 distributor:1 storage:4", n_buckets, 4,
- "version:5 distributor:2 storage:4", n_buckets, 0));
-
- EXPECT_TRUE(activate_cluster_state_version(4)); // Too old version
- ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5));
-
- EXPECT_TRUE(activate_cluster_state_version(6)); // More recent version than what has been observed
- ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5));
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) {
- getBucketDBUpdater().set_stale_reads_enabled(true);
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
- "version:2 distributor:1 storage:4", n_buckets, 4));
- // Activate version 2; no pending cluster state after this.
- EXPECT_FALSE(activate_cluster_state_version(2));
-
- // No pending cluster state for version 3, just passed through to be implicitly bounced by state manager.
- // Note: state manager is not modelled in this test, so we just check that the message handler returns
- // false (meaning "didn't take message ownership") and there's no auto-generated reply.
- EXPECT_FALSE(activate_cluster_state_version(3));
- EXPECT_EQ(size_t(0), _sender.replies().size());
-}
-
-// TODO STRIPE disabled benchmark tests are NOT migrated to new test suite
-TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
- // Need to trigger an initial edge to complete first bucket scan
- ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"),
- messageCount(1), 0));
- _sender.clear();
-
- lib::ClusterState state("distributor:1 storage:1");
- setSystemState(state);
-
- constexpr uint32_t superbuckets = 1u << 16u;
- constexpr uint32_t sub_buckets = 14;
- constexpr uint32_t n_buckets = superbuckets * sub_buckets;
-
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
- for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
- ASSERT_EQ(_sender.command(bsi)->getType(), MessageType::REQUESTBUCKETINFO);
- const auto& req = dynamic_cast<const RequestBucketInfoCommand&>(*_sender.command(bsi));
-
- auto sreply = std::make_shared<RequestBucketInfoReply>(req);
- sreply->setAddress(storageAddress(0));
- auto& vec = sreply->getBucketInfo();
- if (req.getBucketSpace() == FixedBucketSpaces::default_space()) {
- for (uint32_t sb = 0; sb < superbuckets; ++sb) {
- for (uint64_t i = 0; i < sub_buckets; ++i) {
- document::BucketId bucket(48, (i << 32ULL) | sb);
- vec.push_back(api::RequestBucketInfoReply::Entry(bucket, api::BucketInfo(10, 1, 1)));
- }
- }
- }
-
- vespalib::BenchmarkTimer timer(1.0);
- // Global space has no buckets but will serve as a trigger for merging
- // buckets into the DB. This lets us measure the overhead of just this part.
- if (req.getBucketSpace() == FixedBucketSpaces::global_space()) {
- timer.before();
- }
- getBucketDBUpdater().onRequestBucketInfoReply(sreply);
- if (req.getBucketSpace() == FixedBucketSpaces::global_space()) {
- timer.after();
- fprintf(stderr, "Took %g seconds to merge %u buckets into DB\n", timer.min_time(), n_buckets);
- }
- }
-
- EXPECT_EQ(size_t(n_buckets), mutable_default_db().size());
- EXPECT_EQ(size_t(0), mutable_global_db().size());
-}
-
-uint32_t LegacyBucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_benchmarking() {
- // Need to trigger an initial edge to complete first bucket scan
- setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"), messageCount(1), 0);
- _sender.clear();
-
- lib::ClusterState state("distributor:1 storage:1");
- setSystemState(state);
-
- constexpr uint32_t superbuckets = 1u << 16u;
- constexpr uint32_t sub_buckets = 14;
- constexpr uint32_t n_buckets = superbuckets * sub_buckets;
-
- assert(_bucketSpaces.size() == _sender.commands().size());
- for (uint32_t bsi = 0; bsi < _bucketSpaces.size(); ++bsi) {
- assert(_sender.command(bsi)->getType() == MessageType::REQUESTBUCKETINFO);
- const auto& req = dynamic_cast<const RequestBucketInfoCommand&>(*_sender.command(bsi));
-
- auto sreply = std::make_shared<RequestBucketInfoReply>(req);
- sreply->setAddress(storageAddress(0));
- auto& vec = sreply->getBucketInfo();
- if (req.getBucketSpace() == FixedBucketSpaces::default_space()) {
- for (uint32_t sb = 0; sb < superbuckets; ++sb) {
- for (uint64_t i = 0; i < sub_buckets; ++i) {
- document::BucketId bucket(48, (i << 32ULL) | sb);
- vec.push_back(api::RequestBucketInfoReply::Entry(bucket, api::BucketInfo(10, 1, 1)));
- }
- }
- }
- getBucketDBUpdater().onRequestBucketInfoReply(sreply);
- }
-
- assert(mutable_default_db().size() == n_buckets);
- assert(mutable_global_db().size() == 0);
- return n_buckets;
-}
-
-TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_storage_nodes) {
- const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
-
- lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via ownership
- vespalib::BenchmarkTimer timer(1.0);
- timer.before();
- setSystemState(no_op_state);
- timer.after();
- fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
-}
-
-TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_remover_db_pass) {
- const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
-
- // TODO this benchmark is void if we further restrict the pruning elision logic to allow
- // elision when storage nodes come online.
- lib::ClusterState no_op_state("distributor:1 storage:2"); // Not removing any buckets
- vespalib::BenchmarkTimer timer(1.0);
- timer.before();
- setSystemState(no_op_state);
- timer.after();
- fprintf(stderr, "Took %g seconds to scan %u buckets with no-op action\n", timer.min_time(), n_buckets);
-}
-
-TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_remover_db_pass) {
- const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
-
- lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via all replicas gone
- vespalib::BenchmarkTimer timer(1.0);
- timer.before();
- setSystemState(no_op_state);
- timer.after();
- fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) {
- auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d");
- auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m");
-
- lib::ClusterStateBundle initial_bundle(*initial_baseline, {{FixedBucketSpaces::default_space(), initial_default},
- {FixedBucketSpaces::global_space(), initial_baseline}});
- set_cluster_state_bundle(initial_bundle);
-
- auto* state = getBucketDBUpdater().pendingClusterStateOrNull(FixedBucketSpaces::default_space());
- ASSERT_TRUE(state != nullptr);
- EXPECT_EQ(*initial_default, *state);
-
- state = getBucketDBUpdater().pendingClusterStateOrNull(FixedBucketSpaces::global_space());
- ASSERT_TRUE(state != nullptr);
- EXPECT_EQ(*initial_baseline, *state);
-
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(*initial_baseline, messageCount(1), 0));
-
- state = getBucketDBUpdater().pendingClusterStateOrNull(FixedBucketSpaces::default_space());
- EXPECT_TRUE(state == nullptr);
-
- state = getBucketDBUpdater().pendingClusterStateOrNull(FixedBucketSpaces::global_space());
- EXPECT_TRUE(state == nullptr);
-}
-
-struct LegacyBucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest {
- lib::ClusterState empty_state;
- std::shared_ptr<lib::ClusterState> initial_baseline;
- std::shared_ptr<lib::ClusterState> initial_default;
- lib::ClusterStateBundle initial_bundle;
- Bucket default_bucket;
- Bucket global_bucket;
-
- LegacyBucketDBUpdaterSnapshotTest()
- : LegacyBucketDBUpdaterTest(),
- empty_state(),
- initial_baseline(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d")),
- initial_default(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m")),
- initial_bundle(*initial_baseline, {{FixedBucketSpaces::default_space(), initial_default},
- {FixedBucketSpaces::global_space(), initial_baseline}}),
- default_bucket(FixedBucketSpaces::default_space(), BucketId(16, 1234)),
- global_bucket(FixedBucketSpaces::global_space(), BucketId(16, 1234))
- {
- }
- ~LegacyBucketDBUpdaterSnapshotTest() override;
-
- void SetUp() override {
- LegacyBucketDBUpdaterTest::SetUp();
- getBucketDBUpdater().set_stale_reads_enabled(true);
- };
-
- // Assumes that the distributor owns all buckets, so it may choose any arbitrary bucket in the bucket space
- uint32_t buckets_in_snapshot_matching_current_db(DistributorBucketSpaceRepo& repo, BucketSpace bucket_space) {
- auto rs = getBucketDBUpdater().read_snapshot_for_bucket(Bucket(bucket_space, BucketId(16, 1234)));
- if (!rs.is_routable()) {
- return 0;
- }
- auto guard = rs.steal_read_guard();
- uint32_t found_buckets = 0;
- for_each_bucket(repo, [&](const auto& space, const auto& entry) {
- if (space == bucket_space) {
- auto entries = guard->find_parents_and_self(entry.getBucketId());
- if (entries.size() == 1) {
- ++found_buckets;
- }
- }
- });
- return found_buckets;
- }
-};
-
-LegacyBucketDBUpdaterSnapshotTest::~LegacyBucketDBUpdaterSnapshotTest() = default;
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterSnapshotTest, default_space_snapshot_prior_to_activated_state_is_non_routable) {
- auto rs = getBucketDBUpdater().read_snapshot_for_bucket(default_bucket);
- EXPECT_FALSE(rs.is_routable());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterSnapshotTest, global_space_snapshot_prior_to_activated_state_is_non_routable) {
- auto rs = getBucketDBUpdater().read_snapshot_for_bucket(global_bucket);
- EXPECT_FALSE(rs.is_routable());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_states) {
- set_cluster_state_bundle(initial_bundle);
- // State currently pending, empty initial state is active
-
- auto def_rs = getBucketDBUpdater().read_snapshot_for_bucket(default_bucket);
- EXPECT_EQ(def_rs.context().active_cluster_state()->toString(), empty_state.toString());
- EXPECT_EQ(def_rs.context().default_active_cluster_state()->toString(), empty_state.toString());
- ASSERT_TRUE(def_rs.context().has_pending_state_transition());
- EXPECT_EQ(def_rs.context().pending_cluster_state()->toString(), initial_default->toString());
-
- auto global_rs = getBucketDBUpdater().read_snapshot_for_bucket(global_bucket);
- EXPECT_EQ(global_rs.context().active_cluster_state()->toString(), empty_state.toString());
- EXPECT_EQ(global_rs.context().default_active_cluster_state()->toString(), empty_state.toString());
- ASSERT_TRUE(global_rs.context().has_pending_state_transition());
- EXPECT_EQ(global_rs.context().pending_cluster_state()->toString(), initial_baseline->toString());
-
- ASSERT_NO_FATAL_FAILURE(completeBucketInfoGathering(*initial_baseline, messageCount(1), 0));
- // State now activated, no pending
-
- def_rs = getBucketDBUpdater().read_snapshot_for_bucket(default_bucket);
- EXPECT_EQ(def_rs.context().active_cluster_state()->toString(), initial_default->toString());
- EXPECT_EQ(def_rs.context().default_active_cluster_state()->toString(), initial_default->toString());
- EXPECT_FALSE(def_rs.context().has_pending_state_transition());
-
- global_rs = getBucketDBUpdater().read_snapshot_for_bucket(global_bucket);
- EXPECT_EQ(global_rs.context().active_cluster_state()->toString(), initial_baseline->toString());
- EXPECT_EQ(global_rs.context().default_active_cluster_state()->toString(), initial_default->toString());
- EXPECT_FALSE(global_rs.context().has_pending_state_transition());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_returns_mutable_db_guard) {
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
- "version:2 distributor:1 storage:4", n_buckets, 4));
- EXPECT_FALSE(activate_cluster_state_version(2));
- EXPECT_EQ(buckets_in_snapshot_matching_current_db(mutable_repo(), FixedBucketSpaces::default_space()),
- n_buckets);
- EXPECT_EQ(buckets_in_snapshot_matching_current_db(mutable_repo(), FixedBucketSpaces::global_space()),
- n_buckets);
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bucket_in_current_state) {
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
- "version:2 distributor:2 .0.s:d storage:4", 0, 0));
- EXPECT_FALSE(activate_cluster_state_version(2));
- // We're down in state 2 and therefore do not own any buckets
- auto def_rs = getBucketDBUpdater().read_snapshot_for_bucket(default_bucket);
- EXPECT_FALSE(def_rs.is_routable());
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_only_guard_for_bucket_only_owned_in_current_state) {
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
- "version:2 distributor:2 .0.s:d storage:4", 0, 0));
- EXPECT_EQ(buckets_in_snapshot_matching_current_db(read_only_repo(), FixedBucketSpaces::default_space()),
- n_buckets);
- EXPECT_EQ(buckets_in_snapshot_matching_current_db(read_only_repo(), FixedBucketSpaces::global_space()),
- n_buckets);
-}
-
-// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
-TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_is_unroutable_if_stale_reads_disabled_and_bucket_not_owned_in_pending_state) {
- getBucketDBUpdater().set_stale_reads_enabled(false);
- constexpr uint32_t n_buckets = 10;
- ASSERT_NO_FATAL_FAILURE(
- trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
- "version:2 distributor:2 .0.s:d storage:4", 0, 0));
- auto def_rs = getBucketDBUpdater().read_snapshot_for_bucket(default_bucket);
- EXPECT_FALSE(def_rs.is_routable());
-}
-
-}
diff --git a/storage/src/tests/distributor/legacy_distributor_test.cpp b/storage/src/tests/distributor/legacy_distributor_test.cpp
deleted file mode 100644
index 90d64ddb130..00000000000
--- a/storage/src/tests/distributor/legacy_distributor_test.cpp
+++ /dev/null
@@ -1,1326 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/storage/distributor/idealstatemetricsset.h>
-#include <vespa/storageapi/message/persistence.h>
-#include <vespa/storageapi/message/bucketsplitting.h>
-#include <vespa/storageapi/message/visitor.h>
-#include <vespa/storageapi/message/removelocation.h>
-#include <vespa/storageframework/defaultimplementation/thread/threadpoolimpl.h>
-#include <tests/distributor/distributortestutil.h>
-#include <vespa/document/bucket/fixed_bucket_spaces.h>
-#include <vespa/document/fieldset/fieldsets.h>
-#include <vespa/document/test/make_document_bucket.h>
-#include <vespa/document/test/make_bucket_space.h>
-#include <vespa/storage/config/config-stor-distributormanager.h>
-#include <vespa/storage/distributor/top_level_distributor.h>
-#include <vespa/storage/distributor/distributor_stripe.h>
-#include <vespa/storage/distributor/distributor_status.h>
-#include <vespa/storage/distributor/distributor_bucket_space.h>
-#include <vespa/storage/distributor/distributormetricsset.h>
-#include <vespa/vespalib/text/stringtokenizer.h>
-#include <vespa/metrics/updatehook.h>
-#include <thread>
-#include <vespa/vespalib/gtest/gtest.h>
-#include <gmock/gmock.h>
-
-using document::test::makeDocumentBucket;
-using document::test::makeBucketSpace;
-using document::FixedBucketSpaces;
-using document::BucketSpace;
-using document::Bucket;
-using document::BucketId;
-using namespace ::testing;
-
-namespace storage::distributor {
-
-// TODO STRIPE: Remove this test when legacy mode is gone.
-struct LegacyDistributorTest : Test, DistributorTestUtil {
- LegacyDistributorTest();
- ~LegacyDistributorTest() override;
-
- // TODO handle edge case for window between getnodestate reply already
- // sent and new request not yet received
-
- void assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node, const vespalib::string &bucketSpace,
- const BucketSpacesStatsProvider::PerNodeBucketSpacesStats &stats);
- std::vector<document::BucketSpace> _bucketSpaces;
-
- void SetUp() override {
- createLinks();
- _bucketSpaces = getBucketSpaces();
- };
-
- void TearDown() override {
- close();
- }
-
- // Simple type aliases to make interfacing with certain utility functions
- // easier. Note that this is only for readability and does not provide any
- // added type safety.
- using NodeCount = int;
- using Redundancy = int;
-
- using ConfigBuilder = vespa::config::content::core::StorDistributormanagerConfigBuilder;
-
- void configureDistributor(const ConfigBuilder& config) {
- getConfig().configure(config);
- _distributor->enable_next_config_if_changed();
- }
-
- auto currentReplicaCountingMode() const noexcept {
- return _distributor->bucket_db_metric_updater().getMinimumReplicaCountingMode();
- }
-
- std::string testOp(std::shared_ptr<api::StorageMessage> msg)
- {
- _distributor->handleMessage(msg);
-
- std::string tmp = _sender.getCommands();
- _sender.clear();
- return tmp;
- }
-
- void tickDistributorNTimes(uint32_t n) {
- for (uint32_t i = 0; i < n; ++i) {
- tick();
- }
- }
-
- typedef bool ResetTrusted;
-
- std::string updateBucketDB(const std::string& firstState,
- const std::string& secondState,
- bool resetTrusted = false)
- {
- std::vector<std::string> states(toVector<std::string>(firstState, secondState));
-
- for (uint32_t i = 0; i < states.size(); ++i) {
- std::vector<uint16_t> removedNodes;
- std::vector<BucketCopy> changedNodes;
-
- vespalib::StringTokenizer tokenizer(states[i], ",");
- for (uint32_t j = 0; j < tokenizer.size(); ++j) {
- vespalib::StringTokenizer tokenizer2(tokenizer[j], ":");
-
- bool trusted = false;
- if (tokenizer2.size() > 2) {
- trusted = true;
- }
-
- uint16_t node = atoi(tokenizer2[0].data());
- if (tokenizer2[1] == "r") {
- removedNodes.push_back(node);
- } else {
- uint32_t checksum = atoi(tokenizer2[1].data());
- changedNodes.push_back(
- BucketCopy(
- i + 1,
- node,
- api::BucketInfo(
- checksum,
- checksum / 2,
- checksum / 4)).setTrusted(trusted));
- }
- }
-
- operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(document::BucketId(16, 1)), removedNodes);
-
- uint32_t flags(DatabaseUpdate::CREATE_IF_NONEXISTING
- | (resetTrusted ? DatabaseUpdate::RESET_TRUSTED : 0));
-
- operation_context().update_bucket_database(makeDocumentBucket(document::BucketId(16, 1)),
- changedNodes,
- flags);
- }
-
- std::string retVal = dumpBucket(document::BucketId(16, 1));
- getBucketDatabase().clear();
- return retVal;
- }
-
- size_t explicit_node_state_reply_send_invocations() const noexcept {
- return _node->getNodeStateUpdater().explicit_node_state_reply_send_invocations();
- }
-
- StatusReporterDelegate& distributor_status_delegate() {
- // TODO STRIPE
- return _distributor->_stripe->_distributorStatusDelegate;
- }
-
- framework::TickingThreadPool& distributor_thread_pool() {
- return _distributor->_threadPool;
- }
-
- const std::vector<std::shared_ptr<DistributorStatus>>& distributor_status_todos() {
- // TODO STRIPE
- return _distributor->_stripe->_statusToDo;
- }
-
- TopLevelDistributor::MetricUpdateHook distributor_metric_update_hook() {
- return _distributor->_metricUpdateHook;
- }
-
- SimpleMaintenanceScanner::PendingMaintenanceStats distributor_maintenance_stats() {
- return _distributor->pending_maintenance_stats();
- }
-
- BucketSpacesStatsProvider::PerNodeBucketSpacesStats distributor_bucket_spaces_stats() {
- return _distributor->getBucketSpacesStats();
- }
-
- DistributorHostInfoReporter& distributor_host_info_reporter() {
- return _distributor->_hostInfoReporter;
- }
-
- bool distributor_handle_message(const std::shared_ptr<api::StorageMessage>& msg) {
- return _distributor->handleMessage(msg);
- }
-
- uint64_t db_sample_interval_sec() const noexcept {
- return std::chrono::duration_cast<std::chrono::seconds>(_distributor->db_memory_sample_interval()).count();
- }
-
- void configure_stale_reads_enabled(bool enabled) {
- ConfigBuilder builder;
- builder.allowStaleReadsDuringClusterStateTransitions = enabled;
- configureDistributor(builder);
- }
-
- void configure_update_fast_path_restart_enabled(bool enabled) {
- ConfigBuilder builder;
- builder.restartWithFastUpdatePathIfAllGetTimestampsAreConsistent = enabled;
- configureDistributor(builder);
- }
-
- void configure_merge_operations_disabled(bool disabled) {
- ConfigBuilder builder;
- builder.mergeOperationsDisabled = disabled;
- configureDistributor(builder);
- }
-
- void configure_use_weak_internal_read_consistency(bool use_weak) {
- ConfigBuilder builder;
- builder.useWeakInternalReadConsistencyForClientGets = use_weak;
- configureDistributor(builder);
- }
-
- void configure_metadata_update_phase_enabled(bool enabled) {
- ConfigBuilder builder;
- builder.enableMetadataOnlyFetchPhaseForInconsistentUpdates = enabled;
- configureDistributor(builder);
- }
-
- void configure_prioritize_global_bucket_merges(bool enabled) {
- ConfigBuilder builder;
- builder.prioritizeGlobalBucketMerges = enabled;
- configureDistributor(builder);
- }
-
- void configure_max_activation_inhibited_out_of_sync_groups(uint32_t n_groups) {
- ConfigBuilder builder;
- builder.maxActivationInhibitedOutOfSyncGroups = n_groups;
- configureDistributor(builder);
- }
-
- void configureMaxClusterClockSkew(int seconds);
- void replyToSingleRequestBucketInfoCommandWith1Bucket();
- void sendDownDummyRemoveCommand();
- void assertSingleBouncedRemoveReplyPresent();
- void assertNoMessageBounced();
- void configure_mutation_sequencing(bool enabled);
- void configure_merge_busy_inhibit_duration(int seconds);
- void do_test_pending_merge_getnodestate_reply_edge(BucketSpace space);
-
- void set_up_and_start_get_op_with_stale_reads_enabled(bool enabled);
-};
-
-LegacyDistributorTest::LegacyDistributorTest()
- : Test(),
- DistributorTestUtil(),
- _bucketSpaces()
-{
-}
-
-LegacyDistributorTest::~LegacyDistributorTest() = default;
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, operation_generation) {
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
-
- document::BucketId bid;
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
-
- EXPECT_EQ("Remove", testOp(std::make_shared<api::RemoveCommand>(
- makeDocumentBucket(bid),
- document::DocumentId("id:m:test:n=1:foo"),
- api::Timestamp(1234))));
-
- auto cmd = std::make_shared<api::CreateVisitorCommand>(makeBucketSpace(), "foo", "bar", "");
- cmd->addBucketToBeVisited(document::BucketId(16, 1));
- cmd->addBucketToBeVisited(document::BucketId());
-
- EXPECT_EQ("Visitor Create", testOp(cmd));
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, operations_generated_and_started_without_duplicates) {
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
-
- for (uint32_t i = 0; i < 6; ++i) {
- addNodesToBucketDB(document::BucketId(16, i), "0=1");
- }
-
- tickDistributorNTimes(20);
-
- ASSERT_FALSE(tick());
-
- ASSERT_EQ(6, _sender.commands().size());
-}
-
-
-// Migrated to DistributorStripeTest
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, recovery_mode_on_cluster_state_change) {
- setupDistributor(Redundancy(1), NodeCount(2),
- "storage:1 .0.s:d distributor:1");
- enableDistributorClusterState("storage:1 distributor:1");
-
- EXPECT_TRUE(distributor_is_in_recovery_mode());
- for (uint32_t i = 0; i < 3; ++i) {
- addNodesToBucketDB(document::BucketId(16, i), "0=1");
- }
- for (int i = 0; i < 3; ++i) {
- tick();
- EXPECT_TRUE(distributor_is_in_recovery_mode());
- }
- tick();
- EXPECT_FALSE(distributor_is_in_recovery_mode());
-
- enableDistributorClusterState("storage:2 distributor:1");
- EXPECT_TRUE(distributor_is_in_recovery_mode());
-}
-
-TEST_F(LegacyDistributorTest, distributor_considered_initialized_once_self_observed_up) {
- setupDistributor(Redundancy(1), NodeCount(2), "distributor:1 .0.s:d storage:1"); // We're down D:
- EXPECT_FALSE(_distributor->done_initializing());
- enableDistributorClusterState("distributor:1 storage:1"); // We're up :D
- EXPECT_TRUE(_distributor->done_initializing());
- enableDistributorClusterState("distributor:1 .0.s:d storage:1"); // And down again :I but that does not change init state
- EXPECT_TRUE(_distributor->done_initializing());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, operations_are_throttled) {
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
- getConfig().setMinPendingMaintenanceOps(1);
- getConfig().setMaxPendingMaintenanceOps(1);
-
- for (uint32_t i = 0; i < 6; ++i) {
- addNodesToBucketDB(document::BucketId(16, i), "0=1");
- }
- tickDistributorNTimes(20);
- ASSERT_EQ(1, _sender.commands().size());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, handle_unknown_maintenance_reply) {
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
-
- {
- auto cmd = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(document::BucketId(16, 1234)));
- auto reply = std::make_shared<api::SplitBucketReply>(*cmd);
- ASSERT_TRUE(_distributor->handleReply(reply));
- }
-
- {
- // RemoveLocationReply must be treated as a maintenance reply since
- // it's what GC is currently built around.
- auto cmd = std::make_shared<api::RemoveLocationCommand>(
- "false", makeDocumentBucket(document::BucketId(30, 1234)));
- auto reply = std::shared_ptr<api::StorageReply>(cmd->makeReply());
- ASSERT_TRUE(_distributor->handleReply(reply));
- }
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, contains_time_statement) {
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
-
- EXPECT_FALSE(getConfig().containsTimeStatement(""));
- EXPECT_FALSE(getConfig().containsTimeStatement("testdoctype1"));
- EXPECT_FALSE(getConfig().containsTimeStatement("testdoctype1.headerfield > 42"));
- EXPECT_TRUE(getConfig().containsTimeStatement("testdoctype1.headerfield > now()"));
- EXPECT_TRUE(getConfig().containsTimeStatement("testdoctype1.headerfield > now() - 3600"));
- EXPECT_TRUE(getConfig().containsTimeStatement("testdoctype1.headerfield == now() - 3600"));
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, update_bucket_database) {
- enableDistributorClusterState("distributor:1 storage:3");
-
- EXPECT_EQ("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)",
- updateBucketDB("0:456,1:456,2:789", "2:r"));
-
- EXPECT_EQ("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=2,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)",
- updateBucketDB("0:456,1:456", "2:456"));
-
- EXPECT_EQ("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x315,docs=394/394,bytes=197/197,trusted=false,active=false,ready=false), "
- "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false), "
- "node(idx=1,crc=0x34a,docs=421/421,bytes=210/210,trusted=false,active=false,ready=false)",
- updateBucketDB("0:456:t,1:456:t,2:123", "0:789,1:842,2:333"));
-
- EXPECT_EQ("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x315,docs=394/394,bytes=197/197,trusted=true,active=false,ready=false), "
- "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false), "
- "node(idx=1,crc=0x315,docs=394/394,bytes=197/197,trusted=true,active=false,ready=false)",
- updateBucketDB("0:456:t,1:456:t,2:123", "0:789,1:789,2:333"));
-
- EXPECT_EQ("BucketId(0x4000000000000001) : "
- "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=true,active=false,ready=false)",
- updateBucketDB("0:456:t,1:456:t", "0:r,1:r,2:333"));
-
- // Copies are in sync so should still be trusted even if explicitly reset.
- EXPECT_EQ("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=2,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false), "
- "node(idx=1,crc=0x1c8,docs=228/228,bytes=114/114,trusted=true,active=false,ready=false)",
- updateBucketDB("0:456,1:456", "2:456", ResetTrusted(true)));
-
- // When resetting, first inserted copy should not end up as implicitly trusted.
- EXPECT_EQ("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x1c8,docs=228/228,bytes=114/114,trusted=false,active=false,ready=false), "
- "node(idx=2,crc=0x14d,docs=166/166,bytes=83/83,trusted=false,active=false,ready=false)",
- updateBucketDB("0:456", "2:333", ResetTrusted(true)));
-}
-
-namespace {
-
-using namespace framework::defaultimplementation;
-
-class StatusRequestThread : public framework::Runnable {
- StatusReporterDelegate& _reporter;
- std::string _result;
-public:
- explicit StatusRequestThread(StatusReporterDelegate& reporter)
- : _reporter(reporter)
- {}
- void run(framework::ThreadHandle&) override {
- framework::HttpUrlPath path("/distributor?page=buckets");
- std::ostringstream stream;
- _reporter.reportStatus(stream, path);
- _result = stream.str();
- }
-
- std::string getResult() const {
- return _result;
- }
-};
-
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, tick_processes_status_requests) {
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
-
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
-
- // Must go via delegate since reportStatus is now just a rendering
- // function and not a request enqueuer (see Distributor::handleStatusRequest).
- StatusRequestThread thread(distributor_status_delegate());
- FakeClock clock;
- ThreadPoolImpl pool(clock);
- int ticksBeforeWait = 1;
- framework::Thread::UP tp(pool.startThread(thread, "statustest", 5ms, 5s, ticksBeforeWait));
-
- while (true) {
- std::this_thread::sleep_for(1ms);
- framework::TickingLockGuard guard(distributor_thread_pool().freezeCriticalTicks());
- if (!distributor_status_todos().empty()) {
- break;
- }
-
- }
- ASSERT_TRUE(tick());
-
- tp->interruptAndJoin();
-
- EXPECT_THAT(thread.getResult(), HasSubstr("BucketId(0x4000000000000001)"));
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, metric_update_hook_updates_pending_maintenance_metrics) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- // To ensure we count all operations, not just those fitting within the
- // pending window.
- getConfig().setMinPendingMaintenanceOps(1);
- getConfig().setMaxPendingMaintenanceOps(1);
-
- // 1 bucket must be merged, 1 must be split, 1 should be activated.
- addNodesToBucketDB(document::BucketId(16, 1), "0=2/2/2/t/a,1=1/1/1");
- addNodesToBucketDB(document::BucketId(16, 2),
- "0=100/10000000/200000/t/a,1=100/10000000/200000/t");
- addNodesToBucketDB(document::BucketId(16, 3),
- "0=200/300/400/t,1=200/300/400/t");
-
- // Go many full scanner rounds to check that metrics are set, not
- // added to existing.
- tickDistributorNTimes(50);
-
- // By this point, no hook has been called so the metrics have not been
- // set.
- using MO = MaintenanceOperation;
- {
- const IdealStateMetricSet& metrics(getIdealStateManager().getMetrics());
- EXPECT_EQ(0, metrics.operations[MO::MERGE_BUCKET]->pending.getLast());
- EXPECT_EQ(0, metrics.operations[MO::SPLIT_BUCKET]->pending.getLast());
- EXPECT_EQ(0, metrics.operations[MO::SET_BUCKET_STATE]->pending.getLast());
- EXPECT_EQ(0, metrics.operations[MO::DELETE_BUCKET]->pending.getLast());
- EXPECT_EQ(0, metrics.operations[MO::JOIN_BUCKET]->pending.getLast());
- EXPECT_EQ(0, metrics.operations[MO::GARBAGE_COLLECTION]->pending.getLast());
- }
-
- // Force trigger update hook
- std::mutex l;
- distributor_metric_update_hook().updateMetrics(metrics::MetricLockGuard(l));
- // Metrics should now be updated to the last complete working state
- {
- const IdealStateMetricSet& metrics(getIdealStateManager().getMetrics());
- EXPECT_EQ(1, metrics.operations[MO::MERGE_BUCKET]->pending.getLast());
- EXPECT_EQ(1, metrics.operations[MO::SPLIT_BUCKET]->pending.getLast());
- EXPECT_EQ(1, metrics.operations[MO::SET_BUCKET_STATE]->pending.getLast());
- EXPECT_EQ(0, metrics.operations[MO::DELETE_BUCKET]->pending.getLast());
- EXPECT_EQ(0, metrics.operations[MO::JOIN_BUCKET]->pending.getLast());
- EXPECT_EQ(0, metrics.operations[MO::GARBAGE_COLLECTION]->pending.getLast());
- }
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) {
- getClock().setAbsoluteTimeInSeconds(1000);
-
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a,1=2/2/2");
- tickDistributorNTimes(10);
-
- std::mutex l;
- distributor_metric_update_hook().updateMetrics(metrics::MetricLockGuard(l));
- auto* m = getDistributor().getMetrics().mutable_dbs.memory_usage.getMetric("used_bytes");
- ASSERT_TRUE(m != nullptr);
- auto last_used = m->getLongValue("last");
- EXPECT_GT(last_used, 0);
-
- // Add another bucket to the DB. This should increase the underlying used number of
- // bytes, but this should not be aggregated into the metrics until the sampling time
- // interval has passed. Instead, old metric gauge values should be preserved.
- addNodesToBucketDB(document::BucketId(16, 2), "0=1/1/1/t/a,1=2/2/2");
-
- const auto sample_interval_sec = db_sample_interval_sec();
- getClock().setAbsoluteTimeInSeconds(1000 + sample_interval_sec - 1); // Not there yet.
- tickDistributorNTimes(50);
- distributor_metric_update_hook().updateMetrics(metrics::MetricLockGuard(l));
-
- m = getDistributor().getMetrics().mutable_dbs.memory_usage.getMetric("used_bytes");
- auto now_used = m->getLongValue("last");
- EXPECT_EQ(now_used, last_used);
-
- getClock().setAbsoluteTimeInSeconds(1000 + sample_interval_sec + 1);
- tickDistributorNTimes(10);
- distributor_metric_update_hook().updateMetrics(metrics::MetricLockGuard(l));
-
- m = getDistributor().getMetrics().mutable_dbs.memory_usage.getMetric("used_bytes");
- now_used = m->getLongValue("last");
- EXPECT_GT(now_used, last_used);
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, priority_config_is_propagated_to_distributor_configuration) {
- using namespace vespa::config::content::core;
-
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
-
- ConfigBuilder builder;
- builder.priorityMergeMoveToIdealNode = 1;
- builder.priorityMergeOutOfSyncCopies = 2;
- builder.priorityMergeTooFewCopies = 3;
- builder.priorityActivateNoExistingActive = 4;
- builder.priorityActivateWithExistingActive = 5;
- builder.priorityDeleteBucketCopy = 6;
- builder.priorityJoinBuckets = 7;
- builder.prioritySplitDistributionBits = 8;
- builder.prioritySplitLargeBucket = 9;
- builder.prioritySplitInconsistentBucket = 10;
- builder.priorityGarbageCollection = 11;
- builder.priorityMergeGlobalBuckets = 12;
-
- getConfig().configure(builder);
-
- const auto& mp = getConfig().getMaintenancePriorities();
- EXPECT_EQ(1, static_cast<int>(mp.mergeMoveToIdealNode));
- EXPECT_EQ(2, static_cast<int>(mp.mergeOutOfSyncCopies));
- EXPECT_EQ(3, static_cast<int>(mp.mergeTooFewCopies));
- EXPECT_EQ(4, static_cast<int>(mp.activateNoExistingActive));
- EXPECT_EQ(5, static_cast<int>(mp.activateWithExistingActive));
- EXPECT_EQ(6, static_cast<int>(mp.deleteBucketCopy));
- EXPECT_EQ(7, static_cast<int>(mp.joinBuckets));
- EXPECT_EQ(8, static_cast<int>(mp.splitDistributionBits));
- EXPECT_EQ(9, static_cast<int>(mp.splitLargeBucket));
- EXPECT_EQ(10, static_cast<int>(mp.splitInconsistentBucket));
- EXPECT_EQ(11, static_cast<int>(mp.garbageCollection));
- EXPECT_EQ(12, static_cast<int>(mp.mergeGlobalBuckets));
-}
-
-// Migrated to DistributorStripeTest
-// Explicit cluster state edge test added in TopLevelDistributorTest::cluster_state_lifecycle_is_propagated_to_stripes
-TEST_F(LegacyDistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
- setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
- lib::ClusterState newState("storage:10 distributor:10");
- auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState);
- // Force newState into being the pending state. According to the initial
- // state we own the bucket, but according to the pending state, we do
- // not. This must be handled correctly by the database update code.
- getBucketDBUpdater().onSetSystemState(stateCmd);
-
- document::BucketId nonOwnedBucket(16, 3);
- EXPECT_FALSE(getDistributorBucketSpace().get_bucket_ownership_flags(nonOwnedBucket).owned_in_pending_state());
- EXPECT_FALSE(getDistributorBucketSpace().check_ownership_in_pending_and_current_state(nonOwnedBucket).isOwned());
-
- std::vector<BucketCopy> copies;
- copies.emplace_back(1234, 0, api::BucketInfo(0x567, 1, 2));
- operation_context().update_bucket_database(makeDocumentBucket(nonOwnedBucket), copies,
- DatabaseUpdate::CREATE_IF_NONEXISTING);
-
- EXPECT_EQ("NONEXISTING", dumpBucket(nonOwnedBucket));
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) {
- setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
- getClock().setAbsoluteTimeInSeconds(101234);
- document::BucketId bucket(16, 7654);
-
- std::vector<BucketCopy> copies;
- copies.emplace_back(1234, 0, api::BucketInfo(0x567, 1, 2));
- operation_context().update_bucket_database(makeDocumentBucket(bucket), copies,
- DatabaseUpdate::CREATE_IF_NONEXISTING);
- BucketDatabase::Entry e(getBucket(bucket));
- EXPECT_EQ(101234, e->getLastGarbageCollectionTime());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, merge_stats_are_accumulated_during_database_iteration) {
- setupDistributor(Redundancy(2), NodeCount(3), "storage:3 distributor:1");
- // Copies out of sync. Not possible for distributor to _reliably_ tell
- // which direction(s) data will flow, so for simplicity assume that we
- // must sync both copies.
- // Note that we mark certain copies as active to prevent the bucketstate
- // checker from pre-empting the merges.
- // -> syncing[0] += 1, syncing[2] += 1
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a,2=2/2/2");
- // Must add missing node 2 for bucket
- // -> copyingOut[0] += 1, copyingIn[2] += 1
- addNodesToBucketDB(document::BucketId(16, 2), "0=1/1/1/t/a");
- // Moving from non-ideal node 1 to ideal node 2. Both nodes 0 and 1 will
- // be involved in this merge, but only node 1 will be tagged as source only
- // (i.e. to be deleted after the merge is completed).
- // -> copyingOut[0] += 1, movingOut[1] += 1, copyingIn[2] += 1
- addNodesToBucketDB(document::BucketId(16, 3), "0=2/2/2/t/a,1=2/2/2/t");
-
- // Go many full scanner rounds to check that stats are set, not
- // added to existing.
- tickDistributorNTimes(50);
-
- const auto& stats = distributor_maintenance_stats();
- {
- NodeMaintenanceStats wanted;
- wanted.syncing = 1;
- wanted.copyingOut = 2;
- wanted.total = 3;
- EXPECT_EQ(wanted, stats.perNodeStats.forNode(0, makeBucketSpace()));
- }
- {
- NodeMaintenanceStats wanted;
- wanted.movingOut = 1;
- wanted.total = 1;
- EXPECT_EQ(wanted, stats.perNodeStats.forNode(1, makeBucketSpace()));
- }
- {
- NodeMaintenanceStats wanted;
- wanted.syncing = 1;
- wanted.copyingIn = 2;
- wanted.total = 1;
- EXPECT_EQ(wanted, stats.perNodeStats.forNode(2, makeBucketSpace()));
- }
- auto bucketStats = distributor_bucket_spaces_stats();
- ASSERT_EQ(3, bucketStats.size());
- assertBucketSpaceStats(1, 3, 0, "default", bucketStats);
- assertBucketSpaceStats(0, 1, 1, "default", bucketStats);
- assertBucketSpaceStats(3, 1, 2, "default", bucketStats);
-}
-
-void
-LegacyDistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node,
- const vespalib::string& bucketSpace,
- const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats)
-{
- auto nodeItr = stats.find(node);
- ASSERT_TRUE(nodeItr != stats.end());
- ASSERT_EQ(1, nodeItr->second.size());
- auto bucketSpaceItr = nodeItr->second.find(bucketSpace);
- ASSERT_TRUE(bucketSpaceItr != nodeItr->second.end());
- ASSERT_TRUE(bucketSpaceItr->second.valid());
- ASSERT_EQ(expBucketTotal, bucketSpaceItr->second.bucketsTotal());
- ASSERT_EQ(expBucketPending, bucketSpaceItr->second.bucketsPending());
-}
-
-/**
- * Since maintenance operations are prioritized differently, activation
- * pre-empts merging and other ops. If this also implies pre-empting running
- * their state checkers at all, we won't get any statistics from any other
- * operations for the bucket.
- */
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, stats_generated_for_preempted_operations) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- // For this test it suffices to have a single bucket with multiple aspects
- // wrong about it. In this case, let a bucket be both out of sync _and_
- // missing an active copy. This _should_ give a statistic with both nodes 0
- // and 1 requiring a sync. If instead merge stats generation is preempted
- // by activation, we'll see no merge stats at all.
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1,1=2/2/2");
- tickDistributorNTimes(50);
- const auto& stats = distributor_maintenance_stats();
- {
- NodeMaintenanceStats wanted;
- wanted.syncing = 1;
- wanted.total = 1;
- EXPECT_EQ(wanted, stats.perNodeStats.forNode(0, makeBucketSpace()));
- }
- {
- NodeMaintenanceStats wanted;
- wanted.syncing = 1;
- wanted.total = 1;
- EXPECT_EQ(wanted, stats.perNodeStats.forNode(1, makeBucketSpace()));
- }
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
-
- // Default is enabled=true.
- EXPECT_TRUE(distributor_host_info_reporter().isReportingEnabled());
-
- ConfigBuilder builder;
- builder.enableHostInfoReporting = false;
- configureDistributor(builder);
-
- EXPECT_FALSE(distributor_host_info_reporter().isReportingEnabled());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- EXPECT_EQ(ConfigBuilder::MinimumReplicaCountingMode::TRUSTED, currentReplicaCountingMode());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- ConfigBuilder builder;
- builder.minimumReplicaCountingMode = ConfigBuilder::MinimumReplicaCountingMode::ANY;
- configureDistributor(builder);
- EXPECT_EQ(ConfigBuilder::MinimumReplicaCountingMode::ANY, currentReplicaCountingMode());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_propagated_to_internal_config) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- ConfigBuilder builder;
- builder.maxConsecutivelyInhibitedMaintenanceTicks = 123;
- getConfig().configure(builder);
- EXPECT_EQ(getConfig().max_consecutively_inhibited_maintenance_ticks(), 123);
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, bucket_activation_is_enabled_by_default) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
- EXPECT_FALSE(getConfig().isBucketActivationDisabled());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) {
- using namespace vespa::config::content::core;
-
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
-
- ConfigBuilder builder;
- builder.disableBucketActivation = true;
- getConfig().configure(builder);
-
- EXPECT_TRUE(getConfig().isBucketActivationDisabled());
-}
-
-void
-LegacyDistributorTest::configureMaxClusterClockSkew(int seconds) {
- using namespace vespa::config::content::core;
-
- ConfigBuilder builder;
- builder.maxClusterClockSkewSec = seconds;
- getConfig().configure(builder);
- _distributor->enable_next_config_if_changed();
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
-
- configureMaxClusterClockSkew(5);
- EXPECT_EQ(getConfig().getMaxClusterClockSkew(), std::chrono::seconds(5));
-}
-
-namespace {
-
-auto makeDummyRemoveCommand() {
- return std::make_shared<api::RemoveCommand>(
- makeDocumentBucket(document::BucketId(0)),
- document::DocumentId("id:foo:testdoctype1:n=1:foo"),
- api::Timestamp(0));
-}
-
-auto make_dummy_get_command_for_bucket_1() {
- return std::make_shared<api::GetCommand>(
- makeDocumentBucket(document::BucketId(0)),
- document::DocumentId("id:foo:testdoctype1:n=1:foo"),
- document::AllFields::NAME);
-}
-
-}
-
-void LegacyDistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() {
- ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
- for (uint32_t i = 0; i < _sender.commands().size(); ++i) {
- ASSERT_EQ(api::MessageType::REQUESTBUCKETINFO, _sender.command(i)->getType());
- auto& bucketReq(static_cast<api::RequestBucketInfoCommand&>
- (*_sender.command(i)));
- auto bucketReply = bucketReq.makeReply();
- if (bucketReq.getBucketSpace() == FixedBucketSpaces::default_space()) {
- // Make sure we have a bucket to route our remove op to, or we'd get
- // an immediate reply anyway.
- dynamic_cast<api::RequestBucketInfoReply&>(*bucketReply)
- .getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(document::BucketId(1, 1),
- api::BucketInfo(20, 10, 12, 50, 60, true, true)));
- }
- _distributor->handleMessage(std::move(bucketReply));
- }
- _sender.commands().clear();
-}
-
-void LegacyDistributorTest::sendDownDummyRemoveCommand() {
- _distributor->handleMessage(makeDummyRemoveCommand());
-}
-
-void LegacyDistributorTest::assertSingleBouncedRemoveReplyPresent() {
- ASSERT_EQ(1, _sender.replies().size()); // Rejected remove
- ASSERT_EQ(api::MessageType::REMOVE_REPLY, _sender.reply(0)->getType());
- auto& reply(static_cast<api::RemoveReply&>(*_sender.reply(0)));
- ASSERT_EQ(api::ReturnCode::STALE_TIMESTAMP, reply.getResult().getResult());
- _sender.replies().clear();
-}
-
-void LegacyDistributorTest::assertNoMessageBounced() {
- ASSERT_EQ(0, _sender.replies().size());
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
- setupDistributor(Redundancy(2), NodeCount(2),
- "bits:1 storage:1 distributor:2");
- getClock().setAbsoluteTimeInSeconds(1000);
- configureMaxClusterClockSkew(10);
-
- receive_set_system_state_command("bits:1 storage:1 distributor:1");
- ASSERT_NO_FATAL_FAILURE(replyToSingleRequestBucketInfoCommandWith1Bucket());
- // SetSystemStateCommand sent down chain at this point.
- sendDownDummyRemoveCommand();
- ASSERT_NO_FATAL_FAILURE(assertSingleBouncedRemoveReplyPresent());
-
- // Increment time to first whole second of clock + 10 seconds of skew.
- // Should now not get any feed rejections.
- getClock().setAbsoluteTimeInSeconds(1011);
-
- sendDownDummyRemoveCommand();
- ASSERT_NO_FATAL_FAILURE(assertNoMessageBounced());
-}
-
-void LegacyDistributorTest::configure_mutation_sequencing(bool enabled) {
- using namespace vespa::config::content::core;
-
- ConfigBuilder builder;
- builder.sequenceMutatingOperations = enabled;
- getConfig().configure(builder);
- _distributor->enable_next_config_if_changed();
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, sequencing_config_is_propagated_to_distributor_config) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
-
- // Should be enabled by default
- EXPECT_TRUE(getConfig().getSequenceMutatingOperations());
-
- // Explicitly disabled.
- configure_mutation_sequencing(false);
- EXPECT_FALSE(getConfig().getSequenceMutatingOperations());
-
- // Explicitly enabled.
- configure_mutation_sequencing(true);
- EXPECT_TRUE(getConfig().getSequenceMutatingOperations());
-}
-
-void
-LegacyDistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
- using namespace vespa::config::content::core;
-
- ConfigBuilder builder;
- builder.inhibitMergeSendingOnBusyNodeDurationSec = seconds;
- getConfig().configure(builder);
- _distributor->enable_next_config_if_changed();
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
-
- configure_merge_busy_inhibit_duration(7);
- EXPECT_EQ(getConfig().getInhibitMergesOnBusyNodeDuration(), std::chrono::seconds(7));
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) {
- setupDistributor(Redundancy(2), NodeCount(2), "storage:1 distributor:1");
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
-
- configure_merge_busy_inhibit_duration(100);
- auto cmd = makeDummyRemoveCommand(); // Remove is for bucket 1
- distributor_handle_message(cmd);
-
- // Should send to content node 0
- ASSERT_EQ(1, _sender.commands().size());
- ASSERT_EQ(api::MessageType::REMOVE, _sender.command(0)->getType());
- auto& fwd_cmd = dynamic_cast<api::RemoveCommand&>(*_sender.command(0));
- auto reply = fwd_cmd.makeReply();
- reply->setResult(api::ReturnCode(api::ReturnCode::BUSY));
- _distributor->handleReply(std::shared_ptr<api::StorageReply>(std::move(reply)));
-
- auto& node_info = pending_message_tracker().getNodeInfo();
-
- EXPECT_TRUE(node_info.isBusy(0));
- getClock().addSecondsToTime(99);
- EXPECT_TRUE(node_info.isBusy(0));
- getClock().addSecondsToTime(2);
- EXPECT_FALSE(node_info.isBusy(0));
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, external_client_requests_are_handled_individually_in_priority_order) {
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
-
- std::vector<api::StorageMessage::Priority> priorities({50, 255, 10, 40, 0});
- document::DocumentId id("id:foo:testdoctype1:n=1:foo");
- vespalib::stringref field_set = "";
- for (auto pri : priorities) {
- auto cmd = std::make_shared<api::GetCommand>(makeDocumentBucket(document::BucketId()), id, field_set);
- cmd->setPriority(pri);
- // onDown appends to internal message FIFO queue, awaiting hand-off.
- _distributor->onDown(cmd);
- }
- // At the hand-off point we expect client requests to be prioritized.
- // For each tick, a priority-order client request is processed and sent off.
- for (size_t i = 1; i <= priorities.size(); ++i) {
- tickDistributorNTimes(1);
- ASSERT_EQ(i, _sender.commands().size());
- }
-
- std::vector<int> expected({0, 10, 40, 50, 255});
- std::vector<int> actual;
- for (auto& msg : _sender.commands()) {
- actual.emplace_back(static_cast<int>(msg->getPriority()));
- }
- EXPECT_THAT(actual, ContainerEq(expected));
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, internal_messages_are_started_in_fifo_order_batch) {
- // To test internal request ordering, we use NotifyBucketChangeCommand
- // for the reason that it explicitly updates the bucket database for
- // each individual invocation.
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
- document::BucketId bucket(16, 1);
- addNodesToBucketDB(bucket, "0=1/1/1/t");
-
- std::vector<api::StorageMessage::Priority> priorities({50, 255, 10, 40, 1});
- for (auto pri : priorities) {
- api::BucketInfo fake_info(pri, pri, pri);
- auto cmd = std::make_shared<api::NotifyBucketChangeCommand>(makeDocumentBucket(bucket), fake_info);
- cmd->setSourceIndex(0);
- cmd->setPriority(pri);
- _distributor->onDown(cmd);
- }
-
- // Doing a single tick should process all internal requests in one batch
- tickDistributorNTimes(1);
- ASSERT_EQ(5, _sender.replies().size());
-
- // The bucket info for priority 1 (last FIFO-order change command received, but
- // highest priority) should be the end-state of the bucket database, _not_ that
- // of lowest priority 255.
- BucketDatabase::Entry e(getBucket(bucket));
- EXPECT_EQ(api::BucketInfo(1, 1, 1), e.getBucketInfo().getNode(0)->getBucketInfo());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, closing_aborts_priority_queued_client_requests) {
- setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
- document::BucketId bucket(16, 1);
- addNodesToBucketDB(bucket, "0=1/1/1/t");
-
- document::DocumentId id("id:foo:testdoctype1:n=1:foo");
- vespalib::stringref field_set = "";
- for (int i = 0; i < 10; ++i) {
- auto cmd = std::make_shared<api::GetCommand>(makeDocumentBucket(document::BucketId()), id, field_set);
- _distributor->onDown(cmd);
- }
- tickDistributorNTimes(1);
- // Closing should trigger 1 abort via startet GetOperation and 9 aborts from pri queue
- _distributor->close();
- ASSERT_EQ(10, _sender.replies().size());
- for (auto& msg : _sender.replies()) {
- EXPECT_EQ(api::ReturnCode::ABORTED, dynamic_cast<api::StorageReply&>(*msg).getResult().getResult());
- }
-}
-
-namespace {
-
-void assert_invalid_stats_for_all_spaces(
- const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats,
- uint16_t node_index) {
- auto stats_iter = stats.find(node_index);
- ASSERT_TRUE(stats_iter != stats.cend());
- ASSERT_EQ(2, stats_iter->second.size());
- auto space_iter = stats_iter->second.find(document::FixedBucketSpaces::default_space_name());
- ASSERT_TRUE(space_iter != stats_iter->second.cend());
- ASSERT_FALSE(space_iter->second.valid());
- space_iter = stats_iter->second.find(document::FixedBucketSpaces::global_space_name());
- ASSERT_TRUE(space_iter != stats_iter->second.cend());
- ASSERT_FALSE(space_iter->second.valid());
-}
-
-}
-
-// Migrated to DistributorStripeTest
-// Cross-stripe bucket stats test added in TopLevelDistributorTest::entering_recovery_mode_resets_bucket_space_stats_across_all_stripes
-TEST_F(LegacyDistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
- // Set up a cluster state + DB contents which implies merge maintenance ops
- setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
- addNodesToBucketDB(document::BucketId(16, 2), "0=1/1/1/t/a");
- addNodesToBucketDB(document::BucketId(16, 3), "0=2/2/2/t/a");
-
- tickDistributorNTimes(5); // 1/3rds into second round through database
-
- enableDistributorClusterState("version:2 distributor:1 storage:3 .1.s:d");
- EXPECT_TRUE(distributor_is_in_recovery_mode());
- // Bucket space stats should now be invalid per space per node, pending stats
- // from state version 2. Exposing stats from version 1 risks reporting stale
- // information back to the cluster controller.
- const auto stats = distributor_bucket_spaces_stats();
- ASSERT_EQ(2, stats.size());
-
- assert_invalid_stats_for_all_spaces(stats, 0);
- assert_invalid_stats_for_all_spaces(stats, 2);
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) {
- setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
- // Should not send explicit replies during init stage
- ASSERT_EQ(0, explicit_node_state_reply_send_invocations());
- // Add a couple of buckets so we have something to iterate over
- addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
- addNodesToBucketDB(document::BucketId(16, 2), "0=1/1/1/t/a");
-
- enableDistributorClusterState("version:2 distributor:1 storage:3 .1.s:d");
- EXPECT_TRUE(distributor_is_in_recovery_mode());
- EXPECT_EQ(0, explicit_node_state_reply_send_invocations());
- tickDistributorNTimes(1); // DB round not yet complete
- EXPECT_EQ(0, explicit_node_state_reply_send_invocations());
- tickDistributorNTimes(2); // DB round complete after 2nd bucket + "scan done" discovery tick
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
- EXPECT_FALSE(distributor_is_in_recovery_mode());
- // Now out of recovery mode, subsequent round completions should not send replies
- tickDistributorNTimes(10);
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
-}
-
-void LegacyDistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) {
- setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
- EXPECT_TRUE(distributor_is_in_recovery_mode());
- // 2 buckets with missing replicas triggering merge pending stats
- addNodesToBucketDB(Bucket(space, BucketId(16, 1)), "0=1/1/1/t/a");
- addNodesToBucketDB(Bucket(space, BucketId(16, 2)), "0=1/1/1/t/a");
- tickDistributorNTimes(3);
- EXPECT_FALSE(distributor_is_in_recovery_mode());
- const auto space_name = FixedBucketSpaces::to_string(space);
- assertBucketSpaceStats(2, 0, 1, space_name, _distributor->getBucketSpacesStats());
- // First completed scan sends off merge stats et al to cluster controller
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
-
- // Edge not triggered when 1 bucket with missing replica left
- addNodesToBucketDB(Bucket(space, BucketId(16, 1)), "0=1/1/1/t/a,1=1/1/1/t");
- tickDistributorNTimes(3);
- assertBucketSpaceStats(1, 1, 1, space_name, _distributor->getBucketSpacesStats());
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
-
- // Edge triggered when no more buckets with requiring merge
- addNodesToBucketDB(Bucket(space, BucketId(16, 2)), "0=1/1/1/t/a,1=1/1/1/t");
- tickDistributorNTimes(3);
- assertBucketSpaceStats(0, 2, 1, space_name, _distributor->getBucketSpacesStats());
- EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
-
- // Should only send when edge happens, not in subsequent DB iterations
- tickDistributorNTimes(10);
- EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
-
- // Going back to merges pending should _not_ send a getnodestate reply (at least for now)
- addNodesToBucketDB(Bucket(space, BucketId(16, 1)), "0=1/1/1/t/a");
- tickDistributorNTimes(3);
- assertBucketSpaceStats(1, 1, 1, space_name, _distributor->getBucketSpacesStats());
- EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
-}
-
-// TODO: rewrite into DistributorStripeTest
-TEST_F(LegacyDistributorTest, pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies) {
- do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::default_space());
-}
-
-// TODO: rewrite into DistributorStripeTest
-TEST_F(LegacyDistributorTest, pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies) {
- do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::global_space());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, stale_reads_config_is_propagated_to_external_operation_handler) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
-
- configure_stale_reads_enabled(true);
- EXPECT_TRUE(getExternalOperationHandler().concurrent_gets_enabled());
-
- configure_stale_reads_enabled(false);
- EXPECT_FALSE(getExternalOperationHandler().concurrent_gets_enabled());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_internal_config) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
-
- configure_update_fast_path_restart_enabled(true);
- EXPECT_TRUE(getConfig().update_fast_path_restart_enabled());
-
- configure_update_fast_path_restart_enabled(false);
- EXPECT_FALSE(getConfig().update_fast_path_restart_enabled());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, merge_disabling_config_is_propagated_to_internal_config) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
-
- configure_merge_operations_disabled(true);
- EXPECT_TRUE(getConfig().merge_operations_disabled());
-
- configure_merge_operations_disabled(false);
- EXPECT_FALSE(getConfig().merge_operations_disabled());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, metadata_update_phase_config_is_propagated_to_internal_config) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
-
- configure_metadata_update_phase_enabled(true);
- EXPECT_TRUE(getConfig().enable_metadata_only_fetch_phase_for_inconsistent_updates());
-
- configure_metadata_update_phase_enabled(false);
- EXPECT_FALSE(getConfig().enable_metadata_only_fetch_phase_for_inconsistent_updates());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, weak_internal_read_consistency_config_is_propagated_to_internal_configs) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
-
- configure_use_weak_internal_read_consistency(true);
- EXPECT_TRUE(getConfig().use_weak_internal_read_consistency_for_client_gets());
- EXPECT_TRUE(getExternalOperationHandler().use_weak_internal_read_consistency_for_gets());
-
- configure_use_weak_internal_read_consistency(false);
- EXPECT_FALSE(getConfig().use_weak_internal_read_consistency_for_client_gets());
- EXPECT_FALSE(getExternalOperationHandler().use_weak_internal_read_consistency_for_gets());
-}
-
-void LegacyDistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enabled) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
- configure_stale_reads_enabled(enabled);
-
- document::BucketId bucket(16, 1);
- addNodesToBucketDB(bucket, "0=1/1/1/t");
- _distributor->onDown(make_dummy_get_command_for_bucket_1());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, gets_are_started_outside_main_distributor_logic_if_stale_reads_enabled) {
- set_up_and_start_get_op_with_stale_reads_enabled(true);
- ASSERT_THAT(_sender.commands(), SizeIs(1));
- EXPECT_THAT(_sender.replies(), SizeIs(0));
-
- // Reply is routed to the correct owner
- auto reply = std::shared_ptr<api::StorageReply>(_sender.command(0)->makeReply());
- _distributor->onDown(reply);
- ASSERT_THAT(_sender.commands(), SizeIs(1));
- EXPECT_THAT(_sender.replies(), SizeIs(1));
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, gets_are_not_started_outside_main_distributor_logic_if_stale_reads_disabled) {
- set_up_and_start_get_op_with_stale_reads_enabled(false);
- // Get has been placed into distributor queue, so no external messages are produced.
- EXPECT_THAT(_sender.commands(), SizeIs(0));
- EXPECT_THAT(_sender.replies(), SizeIs(0));
-}
-
-// Migrated to DistributorStripeTest
-// There's no need or desire to track "lockfree" Gets in the main pending message tracker,
-// as we only have to track mutations to inhibit maintenance ops safely. Furthermore,
-// the message tracker is a multi-index and therefore has some runtime cost.
-TEST_F(LegacyDistributorTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) {
- set_up_and_start_get_op_with_stale_reads_enabled(true);
- Bucket bucket(FixedBucketSpaces::default_space(), BucketId(16, 1));
- EXPECT_FALSE(pending_message_tracker().hasPendingMessage(
- 0, bucket, api::MessageType::GET_ID));
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, closing_aborts_gets_started_outside_main_distributor_thread) {
- set_up_and_start_get_op_with_stale_reads_enabled(true);
- _distributor->close();
- ASSERT_EQ(1, _sender.replies().size());
- EXPECT_EQ(api::ReturnCode::ABORTED, _sender.reply(0)->getResult().getResult());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_internal_config) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
-
- configure_prioritize_global_bucket_merges(true);
- EXPECT_TRUE(getConfig().prioritize_global_bucket_merges());
-
- configure_prioritize_global_bucket_merges(false);
- EXPECT_FALSE(getConfig().prioritize_global_bucket_merges());
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_propagated_to_internal_config) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
-
- configure_max_activation_inhibited_out_of_sync_groups(3);
- EXPECT_EQ(getConfig().max_activation_inhibited_out_of_sync_groups(), 3);
-
- configure_max_activation_inhibited_out_of_sync_groups(0);
- EXPECT_EQ(getConfig().max_activation_inhibited_out_of_sync_groups(), 0);
-}
-
-// Migrated to DistributorStripeTest
-TEST_F(LegacyDistributorTest, wanted_split_bit_count_is_lower_bounded) {
- createLinks();
- setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
-
- ConfigBuilder builder;
- builder.minsplitcount = 7;
- configureDistributor(builder);
-
- EXPECT_EQ(getConfig().getMinimalBucketSplit(), 8);
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, host_info_sent_immediately_once_all_stripes_first_reported) {
- set_num_distributor_stripes(4);
- createLinks();
- getClock().setAbsoluteTimeInSeconds(1000);
- // TODO STRIPE can't call this currently since it touches the bucket DB updater directly:
- // setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
-
- tickDistributorNTimes(1);
- EXPECT_EQ(0, explicit_node_state_reply_send_invocations()); // Nothing yet
- getDistributor().notify_stripe_wants_to_send_host_info(1);
- getDistributor().notify_stripe_wants_to_send_host_info(2);
- getDistributor().notify_stripe_wants_to_send_host_info(3);
-
- tickDistributorNTimes(1);
- // Still nothing. Missing initial report from stripe 0
- EXPECT_EQ(0, explicit_node_state_reply_send_invocations());
-
- getDistributor().notify_stripe_wants_to_send_host_info(0);
- tickDistributorNTimes(1);
- // All stripes have reported in, it's time to party!
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
-
- // No further sends if stripes haven't requested it yet.
- getClock().setAbsoluteTimeInSeconds(2000);
- tickDistributorNTimes(10);
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
-}
-
-// Migrated to TopLevelDistributorTest
-TEST_F(LegacyDistributorTest, non_bootstrap_host_info_send_request_delays_sending) {
- set_num_distributor_stripes(4);
- createLinks();
- getClock().setAbsoluteTimeInSeconds(1000);
-
- for (uint16_t i = 0; i < 4; ++i) {
- getDistributor().notify_stripe_wants_to_send_host_info(i);
- }
- tickDistributorNTimes(1);
- // Bootstrap case
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
-
- // Stripe 1 suddenly really wants to tell the cluster controller something again
- getDistributor().notify_stripe_wants_to_send_host_info(1);
- tickDistributorNTimes(1);
- // But its cry for attention is not yet honored since the delay hasn't passed.
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
-
- getClock().addMilliSecondsToTime(999);
- tickDistributorNTimes(1);
- // 1 sec delay has still not passed
- EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
-
- getClock().addMilliSecondsToTime(1);
- tickDistributorNTimes(1);
- // But now it has
- EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
-}
-
-}
diff --git a/storage/src/tests/distributor/statusreporterdelegatetest.cpp b/storage/src/tests/distributor/statusreporterdelegatetest.cpp
index 9e66f1920e2..3cac901619e 100644
--- a/storage/src/tests/distributor/statusreporterdelegatetest.cpp
+++ b/storage/src/tests/distributor/statusreporterdelegatetest.cpp
@@ -1,7 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <tests/common/testhelper.h>
-#include <tests/distributor/distributortestutil.h>
+#include <tests/common/teststorageapp.h>
#include <vespa/storage/distributor/statusreporterdelegate.h>
#include <vespa/vespalib/gtest/gtest.h>
diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
index 01f7d5a4f0a..fe8a607c9ae 100644
--- a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
+++ b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
@@ -364,7 +364,7 @@ public:
OutdatedNodesMap outdated_nodes_map;
state = PendingClusterState::createForClusterStateChange(
clock, cluster_info, sender,
- owner.top_level_bucket_space_repo(),
+ owner.bucket_space_states(),
cmd, outdated_nodes_map, api::Timestamp(1));
}
@@ -374,7 +374,7 @@ public:
{
auto cluster_info = owner.create_cluster_info(old_cluster_state);
state = PendingClusterState::createForDistributionChange(
- clock, cluster_info, sender, owner.top_level_bucket_space_repo(), api::Timestamp(1));
+ clock, cluster_info, sender, owner.bucket_space_states(), api::Timestamp(1));
}
};
@@ -630,7 +630,7 @@ TopLevelBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
}
TEST_F(TopLevelBucketDBUpdaterTest, normal_usage) {
- set_cluster_state(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
+ set_cluster_state(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3")); // FIXME init mode why?
ASSERT_EQ(message_count(3), _sender.commands().size());
@@ -638,21 +638,21 @@ TEST_F(TopLevelBucketDBUpdaterTest, normal_usage) {
ASSERT_EQ(_component->getDistribution()->getNodeGraph().getDistributionConfigHash(),
dynamic_cast<const RequestBucketInfoCommand&>(*_sender.command(0)).getDistributionHash());
- ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
+ ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"), // FIXME init mode why?
*_sender.command(0), 10));
_sender.clear();
// Optimization for not refetching unneeded data after cluster state
// change is only implemented after completion of previous cluster state
- set_cluster_state("distributor:2 .0.s:i storage:3");
+ set_cluster_state("distributor:2 .0.s:i storage:3"); // FIXME init mode why?
ASSERT_EQ(message_count(3), _sender.commands().size());
// Expect reply of first set SystemState request.
ASSERT_EQ(size_t(1), _sender.replies().size());
ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(
- lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
+ lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"), // FIXME init mode why?
message_count(3), 10));
ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(10, "distributor:2 storage:3"));
}
@@ -661,9 +661,9 @@ TEST_F(TopLevelBucketDBUpdaterTest, distributor_change) {
int num_buckets = 100;
// First sends request
- set_cluster_state("distributor:2 .0.s:i .1.s:i storage:3");
+ set_cluster_state("distributor:2 .0.s:i .1.s:i storage:3"); // FIXME init mode why?
ASSERT_EQ(message_count(3), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"),
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"), // FIXME init mode why?
message_count(3), num_buckets));
_sender.clear();
@@ -718,14 +718,14 @@ TEST_F(TopLevelBucketDBUpdaterTest, distributor_change_with_grouping) {
}
TEST_F(TopLevelBucketDBUpdaterTest, normal_usage_initializing) {
- set_cluster_state("distributor:1 .0.s:i storage:1 .0.s:i");
+ set_cluster_state("distributor:1 .0.s:i storage:1 .0.s:i"); // FIXME init mode why?
ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size());
// Not yet passing on system state.
ASSERT_EQ(size_t(0), _sender_down.commands().size());
- ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:1 .0.s:i storage:1"),
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:1 .0.s:i storage:1"), // FIXME init mode why?
_bucket_spaces.size(), 10, 10));
ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(10, "distributor:1 storage:1"));
@@ -740,12 +740,12 @@ TEST_F(TopLevelBucketDBUpdaterTest, normal_usage_initializing) {
_sender.clear();
_sender_down.clear();
- set_cluster_state("distributor:1 .0.s:i storage:1");
+ set_cluster_state("distributor:1 .0.s:i storage:1"); // FIXME init mode why?
// Send a new request bucket info up.
ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size());
- ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:1 .0.s:i storage:1"),
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(lib::ClusterState("distributor:1 .0.s:i storage:1"), // FIXME init mode why?
_bucket_spaces.size(), 20));
// Pass on cluster state and recheck buckets now.
@@ -755,7 +755,7 @@ TEST_F(TopLevelBucketDBUpdaterTest, normal_usage_initializing) {
}
TEST_F(TopLevelBucketDBUpdaterTest, failed_request_bucket_info) {
- set_cluster_state("distributor:1 .0.s:i storage:1");
+ set_cluster_state("distributor:1 .0.s:i storage:1"); // FIXME init mode why?
// 2 messages sent up: 1 to the nodes, and one reply to the setsystemstate.
ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size());
@@ -781,7 +781,7 @@ TEST_F(TopLevelBucketDBUpdaterTest, failed_request_bucket_info) {
ASSERT_EQ(size_t(0), _sender_down.commands().size());
for (uint32_t i = 0; i < _bucket_spaces.size(); ++i) {
- ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:1 .0.s:i storage:1"),
+ ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:1 .0.s:i storage:1"), // FIXME init mode why?
*_sender.command(_bucket_spaces.size() + i), 10));
}
@@ -1389,7 +1389,7 @@ TopLevelBucketDBUpdaterTest::get_sent_nodes_distribution_changed(const std::stri
auto cluster_info = create_cluster_info(old_cluster_state);
std::unique_ptr<PendingClusterState> state(
PendingClusterState::createForDistributionChange(
- clock, cluster_info, sender, top_level_bucket_space_repo(), api::Timestamp(1)));
+ clock, cluster_info, sender, bucket_space_states(), api::Timestamp(1)));
sort_sent_messages_by_index(sender);
@@ -1514,7 +1514,7 @@ TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_receive) {
OutdatedNodesMap outdated_nodes_map;
std::unique_ptr<PendingClusterState> state(
PendingClusterState::createForClusterStateChange(
- clock, cluster_info, sender, top_level_bucket_space_repo(),
+ clock, cluster_info, sender, bucket_space_states(),
cmd, outdated_nodes_map, api::Timestamp(1)));
ASSERT_EQ(message_count(3), sender.commands().size());
@@ -1670,7 +1670,7 @@ TopLevelBucketDBUpdaterTest::merge_bucket_lists(
auto cluster_info = create_cluster_info("cluster:d");
auto state = PendingClusterState::createForClusterStateChange(
- clock, cluster_info, sender, top_level_bucket_space_repo(),
+ clock, cluster_info, sender, bucket_space_states(),
cmd, outdated_nodes_map, before_time);
parse_input_data(existing_data, before_time, *state, include_bucket_info);
@@ -1690,7 +1690,7 @@ TopLevelBucketDBUpdaterTest::merge_bucket_lists(
auto cluster_info = create_cluster_info(old_state.toString());
auto state = PendingClusterState::createForClusterStateChange(
- clock, cluster_info, sender, top_level_bucket_space_repo(),
+ clock, cluster_info, sender, bucket_space_states(),
cmd, outdated_nodes_map, after_time);
parse_input_data(new_data, after_time, *state, include_bucket_info);
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.cpp b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
index b6e9beb38ae..6a0aa015ba4 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.cpp
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
@@ -265,16 +265,16 @@ TopLevelDistributorTestUtil::get_bucket(const document::BucketId& bId) const
return stripe_bucket_database(stripe_index_of_bucket(bId)).get(bId);
}
-DistributorBucketSpaceRepo&
-TopLevelDistributorTestUtil::top_level_bucket_space_repo() noexcept
+BucketSpaceStateMap&
+TopLevelDistributorTestUtil::bucket_space_states() noexcept
{
- return _distributor->_component.bucket_space_repo();
+ return _distributor->_component.bucket_space_states();
}
-const DistributorBucketSpaceRepo&
-TopLevelDistributorTestUtil::top_level_bucket_space_repo() const noexcept
+const BucketSpaceStateMap&
+TopLevelDistributorTestUtil::bucket_space_states() const noexcept
{
- return _distributor->_component.bucket_space_repo();
+ return _distributor->_component.bucket_space_states();
}
std::unique_ptr<StripeAccessGuard>
@@ -303,11 +303,6 @@ TopLevelDistributorTestUtil::total_distributor_metrics() const
return *_distributor->_total_metrics;
}
-const storage::distributor::DistributorNodeContext&
-TopLevelDistributorTestUtil::node_context() const {
- return _distributor->distributor_component();
-}
-
DistributorBucketSpace&
TopLevelDistributorTestUtil::distributor_bucket_space(const document::BucketId& id)
{
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.h b/storage/src/tests/distributor/top_level_distributor_test_util.h
index 8832f8ada6e..6efc36a8215 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.h
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.h
@@ -17,7 +17,7 @@ namespace framework { struct TickingThreadPool; }
namespace distributor {
-class TopLevelDistributor;
+class BucketSpaceStateMap;
class DistributorBucketSpace;
class DistributorBucketSpaceRepo;
class DistributorMetricSet;
@@ -26,10 +26,11 @@ class DistributorStripe;
class DistributorStripeComponent;
class DistributorStripeOperationContext;
class DistributorStripePool;
-class StripeAccessGuard;
class IdealStateMetricSet;
class Operation;
+class StripeAccessGuard;
class TopLevelBucketDBUpdater;
+class TopLevelDistributor;
class TopLevelDistributorTestUtil : private DoneInitializeHandler
{
@@ -60,16 +61,14 @@ public:
// As the above, but always inserts into default bucket space
void add_nodes_to_stripe_bucket_db(const document::BucketId& id, const std::string& nodeStr);
- // TODO STRIPE replace with BucketSpaceStateMap once legacy is gone
- DistributorBucketSpaceRepo& top_level_bucket_space_repo() noexcept;
- const DistributorBucketSpaceRepo& top_level_bucket_space_repo() const noexcept;
+ BucketSpaceStateMap& bucket_space_states() noexcept;
+ const BucketSpaceStateMap& bucket_space_states() const noexcept;
std::unique_ptr<StripeAccessGuard> acquire_stripe_guard();
TopLevelBucketDBUpdater& bucket_db_updater();
const IdealStateMetricSet& total_ideal_state_metrics() const;
const DistributorMetricSet& total_distributor_metrics() const;
- const storage::distributor::DistributorNodeContext& node_context() const;
DistributorBucketSpace& distributor_bucket_space(const document::BucketId& id);
const DistributorBucketSpace& distributor_bucket_space(const document::BucketId& id) const;
diff --git a/storage/src/vespa/storage/bucketdb/bucketinfo.h b/storage/src/vespa/storage/bucketdb/bucketinfo.h
index 690fd3e36a9..eae13cfc34c 100644
--- a/storage/src/vespa/storage/bucketdb/bucketinfo.h
+++ b/storage/src/vespa/storage/bucketdb/bucketinfo.h
@@ -9,7 +9,6 @@ namespace storage {
namespace distributor {
class DistributorStripeTestUtil;
- class DistributorTestUtil;
class TopLevelDistributorTestUtil;
}
@@ -205,7 +204,6 @@ public:
private:
friend class distributor::DistributorStripeTestUtil;
- friend class distributor::DistributorTestUtil;
friend class distributor::TopLevelDistributorTestUtil;
/**
diff --git a/storage/src/vespa/storage/distributor/bucket_space_state_map.cpp b/storage/src/vespa/storage/distributor/bucket_space_state_map.cpp
index 63c408f7e1e..54c6f887a8b 100644
--- a/storage/src/vespa/storage/distributor/bucket_space_state_map.cpp
+++ b/storage/src/vespa/storage/distributor/bucket_space_state_map.cpp
@@ -32,6 +32,22 @@ BucketSpaceStateMap::BucketSpaceStateMap()
_map.emplace(document::FixedBucketSpaces::global_space(), std::make_unique<BucketSpaceState>());
}
+const BucketSpaceState&
+BucketSpaceStateMap::get(document::BucketSpace space) const
+{
+ auto itr = _map.find(space);
+ assert(itr != _map.end());
+ return *itr->second;
+}
+
+BucketSpaceState&
+BucketSpaceStateMap::get(document::BucketSpace space)
+{
+ auto itr = _map.find(space);
+ assert(itr != _map.end());
+ return *itr->second;
+}
+
void
BucketSpaceStateMap::set_cluster_state(std::shared_ptr<const lib::ClusterState> cluster_state)
{
diff --git a/storage/src/vespa/storage/distributor/bucket_space_state_map.h b/storage/src/vespa/storage/distributor/bucket_space_state_map.h
index 57eac9eac0d..ccf79e001f7 100644
--- a/storage/src/vespa/storage/distributor/bucket_space_state_map.h
+++ b/storage/src/vespa/storage/distributor/bucket_space_state_map.h
@@ -16,7 +16,6 @@ namespace storage::distributor {
/**
* Represents cluster state and distribution for a given bucket space.
- * TODO STRIPE: Make DistributorBucketSpace inherit this class.
*/
class BucketSpaceState {
private:
@@ -64,6 +63,9 @@ public:
StateMap::const_iterator begin() const { return _map.begin(); }
StateMap::const_iterator end() const { return _map.end(); }
+ const BucketSpaceState& get(document::BucketSpace space) const;
+ BucketSpaceState& get(document::BucketSpace space);
+
void set_cluster_state(std::shared_ptr<const lib::ClusterState> cluster_state);
void set_distribution(std::shared_ptr<const lib::Distribution> distribution);
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
index 37e7dc86e43..9ec4d31eb32 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
@@ -22,8 +22,8 @@ DistributorBucketSpace::DistributorBucketSpace()
{
}
-DistributorBucketSpace::DistributorBucketSpace(uint16_t node_index, bool use_bucket_db)
- : _bucketDatabase(use_bucket_db ? std::make_unique<BTreeBucketDatabase>() : std::unique_ptr<BTreeBucketDatabase>()),
+DistributorBucketSpace::DistributorBucketSpace(uint16_t node_index)
+ : _bucketDatabase(std::make_unique<BTreeBucketDatabase>()),
_clusterState(),
_distribution(),
_node_index(node_index),
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.h b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
index 8898039eb02..794bb442400 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
@@ -47,8 +47,7 @@ class DistributorBucketSpace {
bool owns_bucket_in_state(const lib::Distribution& distribution, const lib::ClusterState& cluster_state, document::BucketId bucket) const;
public:
explicit DistributorBucketSpace();
- // TODO STRIPE: Remove the use_bucket_db parameter when legacy mode is gone.
- explicit DistributorBucketSpace(uint16_t node_index, bool use_bucket_db = true);
+ explicit DistributorBucketSpace(uint16_t node_index);
~DistributorBucketSpace();
DistributorBucketSpace(const DistributorBucketSpace&) = delete;
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
index 368483d3f2d..4f64dab9a68 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.cpp
@@ -13,11 +13,11 @@ using document::BucketSpace;
namespace storage::distributor {
-DistributorBucketSpaceRepo::DistributorBucketSpaceRepo(uint16_t node_index, bool use_bucket_db)
+DistributorBucketSpaceRepo::DistributorBucketSpaceRepo(uint16_t node_index)
: _map()
{
- add(document::FixedBucketSpaces::default_space(), std::make_unique<DistributorBucketSpace>(node_index, use_bucket_db));
- add(document::FixedBucketSpaces::global_space(), std::make_unique<DistributorBucketSpace>(node_index, use_bucket_db));
+ add(document::FixedBucketSpaces::default_space(), std::make_unique<DistributorBucketSpace>(node_index));
+ add(document::FixedBucketSpaces::global_space(), std::make_unique<DistributorBucketSpace>(node_index));
}
DistributorBucketSpaceRepo::~DistributorBucketSpaceRepo() = default;
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
index e7552f058d8..f012b25e351 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space_repo.h
@@ -19,8 +19,7 @@ private:
BucketSpaceMap _map;
public:
- // TODO STRIPE: Remove the use_bucket_db parameter when legacy mode is gone.
- explicit DistributorBucketSpaceRepo(uint16_t node_index, bool use_bucket_db = true);
+ explicit DistributorBucketSpaceRepo(uint16_t node_index);
~DistributorBucketSpaceRepo();
DistributorBucketSpaceRepo(const DistributorBucketSpaceRepo&&) = delete;
diff --git a/storage/src/vespa/storage/distributor/distributor_component.cpp b/storage/src/vespa/storage/distributor/distributor_component.cpp
index e01d7e7cb6d..a3b2b3c8e99 100644
--- a/storage/src/vespa/storage/distributor/distributor_component.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_component.cpp
@@ -11,8 +11,7 @@ DistributorComponent::DistributorComponent(DistributorInterface& distributor,
const std::string& name)
: storage::DistributorComponent(comp_reg, name),
_distributor(distributor),
- _bucket_space_repo(std::make_unique<DistributorBucketSpaceRepo>(node_index(), false)),
- _read_only_bucket_space_repo(std::make_unique<DistributorBucketSpaceRepo>(node_index(), false))
+ _bucket_space_states()
{
}
diff --git a/storage/src/vespa/storage/distributor/distributor_component.h b/storage/src/vespa/storage/distributor/distributor_component.h
index 68db5a3c483..2aaa9f421ae 100644
--- a/storage/src/vespa/storage/distributor/distributor_component.h
+++ b/storage/src/vespa/storage/distributor/distributor_component.h
@@ -2,6 +2,7 @@
#pragma once
+#include "bucket_space_state_map.h"
#include "distributor_interface.h"
#include "distributor_node_context.h"
#include "distributor_operation_context.h"
@@ -22,9 +23,8 @@ class DistributorComponent : public storage::DistributorComponent,
public DistributorOperationContext {
private:
DistributorInterface& _distributor;
- // TODO STRIPE: When legacy mode is removed, replace this with BucketSpaceStateMap.
- std::unique_ptr<DistributorBucketSpaceRepo> _bucket_space_repo;
- std::unique_ptr<DistributorBucketSpaceRepo> _read_only_bucket_space_repo;
+ BucketSpaceStateMap _bucket_space_states;
+
public:
DistributorComponent(DistributorInterface& distributor,
@@ -45,23 +45,15 @@ public:
api::Timestamp generate_unique_timestamp() override {
return getUniqueTimestamp();
}
- const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept override {
- return *_bucket_space_repo;
- }
- DistributorBucketSpaceRepo& bucket_space_repo() noexcept override {
- return *_bucket_space_repo;
- }
- const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept override {
- return *_read_only_bucket_space_repo;
+ const BucketSpaceStateMap& bucket_space_states() const noexcept override {
+ return _bucket_space_states;
}
- DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept override {
- return *_read_only_bucket_space_repo;
+ BucketSpaceStateMap& bucket_space_states() noexcept override {
+ return _bucket_space_states;
}
const storage::DistributorConfiguration& distributor_config() const noexcept override {
return _distributor.config();
}
-
-
};
}
diff --git a/storage/src/vespa/storage/distributor/distributor_operation_context.h b/storage/src/vespa/storage/distributor/distributor_operation_context.h
index e0d481a322a..9dd853c7e46 100644
--- a/storage/src/vespa/storage/distributor/distributor_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_operation_context.h
@@ -6,10 +6,10 @@
#include <vespa/storageapi/defs.h>
namespace storage { class DistributorConfiguration; }
-namespace storage::lib { class ClusterStateBundle; }
namespace storage::distributor {
+class BucketSpaceStateMap;
class DistributorBucketSpaceRepo;
/**
@@ -19,11 +19,8 @@ class DistributorOperationContext {
public:
virtual ~DistributorOperationContext() {}
virtual api::Timestamp generate_unique_timestamp() = 0;
- // TODO STRIPE: Access to bucket space repos is only temporary at this level.
- virtual const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept = 0;
- virtual DistributorBucketSpaceRepo& bucket_space_repo() noexcept = 0;
- virtual const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept = 0;
- virtual DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept = 0;
+ virtual const BucketSpaceStateMap& bucket_space_states() const noexcept = 0;
+ virtual BucketSpaceStateMap& bucket_space_states() noexcept = 0;
virtual const DistributorConfiguration& distributor_config() const noexcept = 0;
};
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index 1751b05b25d..543264d97b9 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -1,14 +1,15 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "blockingoperationstarter.h"
-#include "distributor_stripe.h"
-#include "distributor_status.h"
#include "distributor_bucket_space.h"
+#include "distributor_status.h"
+#include "distributor_stripe.h"
#include "distributormetricsset.h"
#include "idealstatemetricsset.h"
-#include "stripe_host_info_notifier.h"
#include "operation_sequencer.h"
#include "ownership_transfer_safe_time_point_calculator.h"
+#include "storage_node_up_states.h"
+#include "stripe_host_info_notifier.h"
#include "throttlingoperationstarter.h"
#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/storage/common/global_bucket_space_distribution_converter.h>
@@ -32,15 +33,11 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
DistributorMetricSet& metrics,
IdealStateMetricSet& ideal_state_metrics,
const NodeIdentity& node_identity,
- framework::TickingThreadPool& threadPool,
- DoneInitializeHandler& doneInitHandler,
ChainedMessageSender& messageSender,
StripeHostInfoNotifier& stripe_host_info_notifier,
- bool use_legacy_mode,
- bool& done_initializing_ref,
+ const bool& done_initializing_ref,
uint32_t stripe_index)
: DistributorStripeInterface(),
- framework::StatusReporter("distributor", "Distributor"),
_clusterStateBundle(lib::ClusterState()),
_bucketSpaceRepo(std::make_unique<DistributorBucketSpaceRepo>(node_identity.node_index())),
_readOnlyBucketSpaceRepo(std::make_unique<DistributorBucketSpaceRepo>(node_identity.node_index())),
@@ -51,9 +48,7 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_maintenanceOperationOwner(*this, _component.getClock()),
_operation_sequencer(std::make_unique<OperationSequencer>()),
_pendingMessageTracker(compReg, stripe_index),
- _bucketDBUpdater(_component, _component, *this, *this, use_legacy_mode),
- _distributorStatusDelegate(compReg, *this, *this),
- _bucketDBStatusDelegate(compReg, *this, _bucketDBUpdater),
+ _bucketDBUpdater(_component, _component, *this, *this),
_idealStateManager(_component, _component, ideal_state_metrics),
_messageSender(messageSender),
_stripe_host_info_notifier(stripe_host_info_notifier),
@@ -61,8 +56,6 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
*_operation_sequencer, *this, _component,
_idealStateManager, _operationOwner),
_external_message_mutex(),
- _threadPool(threadPool),
- _doneInitializeHandler(doneInitHandler),
_done_initializing_ref(done_initializing_ref),
_bucketPriorityDb(std::make_unique<SimpleBucketPriorityDatabase>()),
_scanner(std::make_unique<SimpleMaintenanceScanner>(*_bucketPriorityDb, _idealStateManager, *_bucketSpaceRepo)),
@@ -83,13 +76,8 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_last_db_memory_sample_time_point(),
_inhibited_maintenance_tick_count(0),
_must_send_updated_host_info(false),
- _use_legacy_mode(use_legacy_mode),
_stripe_index(stripe_index)
{
- if (use_legacy_mode) {
- _distributorStatusDelegate.registerStatusPage();
- _bucketDBStatusDelegate.registerStatusPage();
- }
propagateDefaultDistribution(_component.getDistribution());
propagateClusterStates();
};
@@ -173,15 +161,8 @@ DistributorStripe::handle_or_enqueue_message(const std::shared_ptr<api::StorageM
if (_externalOperationHandler.try_handle_message_outside_main_thread(msg)) {
return true;
}
- MBUS_TRACE(msg->getTrace(), 9,
- vespalib::make_string("DistributorStripe[%u]: Added to message queue. Thread state: ", _stripe_index)
- + _threadPool.getStatus());
- if (_use_legacy_mode) {
- // TODO STRIPE remove
- framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
- _messageQueue.push_back(msg);
- guard.broadcast();
- } else {
+ MBUS_TRACE(msg->getTrace(), 9, vespalib::make_string("DistributorStripe[%u]: Added to message queue.", _stripe_index));
+ {
std::lock_guard lock(_external_message_mutex);
_messageQueue.push_back(msg);
// Caller has the responsibility to wake up correct stripe
@@ -294,10 +275,6 @@ DistributorStripe::enableClusterStateBundle(const lib::ClusterStateBundle& state
propagateClusterStates();
const auto& baseline_state = *state.getBaselineClusterState();
- if (_use_legacy_mode && !_done_initializing_ref && (baseline_state.getNodeState(my_node).getState() == lib::State::UP)) {
- _done_initializing_ref = true; // TODO STRIPE remove; responsibility moved to TopLevelDistributor in non-legacy
- _doneInitializeHandler.notifyDoneInitializing();
- }
enterRecoveryMode();
// Clear all active messages on nodes that are down.
@@ -314,18 +291,6 @@ DistributorStripe::enableClusterStateBundle(const lib::ClusterStateBundle& state
}
}
}
-
- // TODO STRIPE remove when legacy is gone; the stripe bucket DB updater does not have this info!
- if (_use_legacy_mode && _bucketDBUpdater.bucketOwnershipHasChanged()) {
- using TimePoint = OwnershipTransferSafeTimePointCalculator::TimePoint;
- // Note: this assumes that std::chrono::system_clock and the framework
- // system clock have the same epoch, which should be a reasonable
- // assumption.
- const auto now = TimePoint(std::chrono::milliseconds(
- _component.getClock().getTimeInMillis().getTime()));
- _externalOperationHandler.rejectFeedBeforeTimeReached(
- _ownershipSafeTimeCalc->safeTimePoint(now));
- }
}
OperationRoutingSnapshot DistributorStripe::read_snapshot_for_bucket(const document::Bucket& bucket) const {
@@ -400,28 +365,6 @@ void DistributorStripe::invalidate_bucket_spaces_stats() {
}
void
-DistributorStripe::storage_distribution_changed()
-{
- assert(_use_legacy_mode);
- if (!_distribution.get()
- || *_component.getDistribution() != *_distribution)
- {
- LOG(debug,
- "Distribution changed to %s, must refetch bucket information",
- _component.getDistribution()->toString().c_str());
-
- // FIXME this is not thread safe
- _nextDistribution = _component.getDistribution();
- } else {
- LOG(debug,
- "Got distribution change, but the distribution %s was the same as "
- "before: %s",
- _component.getDistribution()->toString().c_str(),
- _distribution->toString().c_str());
- }
-}
-
-void
DistributorStripe::recheckBucketInfo(uint16_t nodeIdx, const document::Bucket &bucket) {
_bucketDBUpdater.recheckBucketInfo(nodeIdx, bucket);
}
@@ -479,21 +422,6 @@ DistributorStripe::checkBucketForSplit(document::BucketSpace bucketSpace,
}
}
-// TODO STRIPE must only be called when operating in legacy single stripe mode!
-// In other cases, distribution config switching is controlled by top-level distributor, not via framework(tm).
-void
-DistributorStripe::enableNextDistribution()
-{
- assert(_use_legacy_mode);
- if (_nextDistribution.get()) {
- _distribution = _nextDistribution;
- propagateDefaultDistribution(_distribution);
- _nextDistribution = std::shared_ptr<lib::Distribution>();
- // TODO conditional on whether top-level DB updater is in charge
- _bucketDBUpdater.storageDistributionChanged();
- }
-}
-
// TODO STRIPE must be invoked by top-level bucket db updater probably
void
DistributorStripe::propagateDefaultDistribution(
@@ -509,7 +437,6 @@ DistributorStripe::propagateDefaultDistribution(
// Only called when stripe is in rendezvous freeze
void
DistributorStripe::update_distribution_config(const BucketSpaceDistributionConfigs& new_configs) {
- assert(!_use_legacy_mode);
auto default_distr = new_configs.get_or_nullptr(document::FixedBucketSpaces::default_space());
auto global_distr = new_configs.get_or_nullptr(document::FixedBucketSpaces::global_space());
assert(default_distr && global_distr);
@@ -741,11 +668,7 @@ DistributorStripe::scanNextBucket()
void DistributorStripe::send_updated_host_info_if_required() {
if (_must_send_updated_host_info) {
- if (_use_legacy_mode) {
- _component.getStateUpdater().immediately_send_get_node_state_replies();
- } else {
- _stripe_host_info_notifier.notify_stripe_wants_to_send_host_info(_stripe_index);
- }
+ _stripe_host_info_notifier.notify_stripe_wants_to_send_host_info(_stripe_index);
_must_send_updated_host_info = false;
}
}
@@ -758,30 +681,14 @@ DistributorStripe::startNextMaintenanceOperation()
_scheduler->tick(_schedulingMode);
}
-// TODO STRIPE begone with this!
-framework::ThreadWaitInfo
-DistributorStripe::doCriticalTick(framework::ThreadIndex)
-{
- _tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
- assert(_use_legacy_mode);
- enableNextDistribution();
- enableNextConfig();
- fetchStatusRequests();
- fetchExternalMessages();
- return _tickResult;
-}
-
framework::ThreadWaitInfo
DistributorStripe::doNonCriticalTick(framework::ThreadIndex)
{
_tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
- if (!_use_legacy_mode) {
+ {
std::lock_guard lock(_external_message_mutex);
fetchExternalMessages();
}
- if (_use_legacy_mode) {
- handleStatusRequests();
- }
startExternalOperations();
if (initializing()) {
_bucketDBUpdater.resendDelayedMessages();
@@ -804,7 +711,6 @@ DistributorStripe::doNonCriticalTick(framework::ThreadIndex)
}
bool DistributorStripe::tick() {
- assert(!_use_legacy_mode);
auto wait_info = doNonCriticalTick(framework::ThreadIndex(0));
return !wait_info.waitWanted(); // If we don't want to wait, we presumably did some useful stuff.
}
@@ -823,14 +729,6 @@ void DistributorStripe::mark_maintenance_tick_as_no_longer_inhibited() noexcept
}
void
-DistributorStripe::enableNextConfig()
-{
- assert(_use_legacy_mode);
- propagate_config_snapshot_to_internal_components();
-
-}
-
-void
DistributorStripe::update_total_distributor_config(std::shared_ptr<const DistributorConfiguration> config)
{
_total_config = std::move(config);
@@ -851,52 +749,12 @@ DistributorStripe::propagate_config_snapshot_to_internal_components()
}
void
-DistributorStripe::fetchStatusRequests()
-{
- assert(_use_legacy_mode);
- if (_fetchedStatusRequests.empty()) {
- _fetchedStatusRequests.swap(_statusToDo);
- }
-}
-
-void
DistributorStripe::fetchExternalMessages()
{
assert(_fetchedMessages.empty());
_fetchedMessages.swap(_messageQueue);
}
-void
-DistributorStripe::handleStatusRequests()
-{
- assert(_use_legacy_mode);
- uint32_t sz = _fetchedStatusRequests.size();
- for (uint32_t i = 0; i < sz; ++i) {
- auto& s = *_fetchedStatusRequests[i];
- s.getReporter().reportStatus(s.getStream(), s.getPath());
- s.notifyCompleted();
- }
- _fetchedStatusRequests.clear();
- if (sz > 0) {
- signalWorkWasDone();
- }
-}
-
-vespalib::string
-DistributorStripe::getReportContentType(const framework::HttpUrlPath& path) const
-{
- assert(_use_legacy_mode);
- if (path.hasAttribute("page")) {
- if (path.getAttribute("page") == "buckets") {
- return "text/html";
- } else {
- return "application/xml";
- }
- } else {
- return "text/html";
- }
-}
-
std::string
DistributorStripe::getActiveIdealStateOperations() const
{
@@ -909,54 +767,6 @@ DistributorStripe::getActiveOperations() const
return _operationOwner.toString();
}
-// TODO STRIPE remove this; delegated to top-level Distributor only
-bool
-DistributorStripe::reportStatus(std::ostream& out,
- const framework::HttpUrlPath& path) const
-{
- assert(_use_legacy_mode);
- if (!path.hasAttribute("page") || path.getAttribute("page") == "buckets") {
- framework::PartlyHtmlStatusReporter htmlReporter(*this);
- htmlReporter.reportHtmlHeader(out, path);
- if (!path.hasAttribute("page")) {
- out << "<a href=\"?page=pending\">Count of pending messages to storage nodes</a><br>\n"
- << "<a href=\"?page=buckets\">List all buckets, highlight non-ideal state</a><br>\n";
- } else {
- const_cast<IdealStateManager&>(_idealStateManager)
- .getBucketStatus(out);
- }
- htmlReporter.reportHtmlFooter(out, path);
- } else {
- framework::PartlyXmlStatusReporter xmlReporter(*this, out, path);
- using namespace vespalib::xml;
- std::string page(path.getAttribute("page"));
-
- if (page == "pending") {
- xmlReporter << XmlTag("pending")
- << XmlAttribute("externalload", _operationOwner.size())
- << XmlAttribute("maintenance",_maintenanceOperationOwner.size())
- << XmlEndTag();
- }
- }
-
- return true;
-}
-
-// TODO STRIPE remove this; delegated to top-level Distributor only
-bool
-DistributorStripe::handleStatusRequest(const DelegatedStatusRequest& request) const
-{
- assert(_use_legacy_mode);
- auto wrappedRequest = std::make_shared<DistributorStatus>(request);
- {
- framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
- _statusToDo.push_back(wrappedRequest);
- guard.broadcast();
- }
- wrappedRequest->waitForCompletion();
- return true;
-}
-
StripeAccessGuard::PendingOperationStats
DistributorStripe::pending_operation_stats() const
{
@@ -979,7 +789,6 @@ void
DistributorStripe::enable_cluster_state_bundle(const lib::ClusterStateBundle& new_state,
bool has_bucket_ownership_change)
{
- assert(!_use_legacy_mode);
// TODO STRIPE replace legacy func
enableClusterStateBundle(new_state);
if (has_bucket_ownership_change) {
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index d584960726a..0dcac9ea7b7 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -53,8 +53,6 @@ class ThrottlingOperationStarter;
*/
class DistributorStripe final
: public DistributorStripeInterface,
- public StatusDelegator,
- public framework::StatusReporter,
public MinReplicaProvider,
public BucketSpacesStatsProvider,
public NonTrackingMessageSender,
@@ -65,12 +63,9 @@ public:
DistributorMetricSet& metrics,
IdealStateMetricSet& ideal_state_metrics,
const NodeIdentity& node_identity,
- framework::TickingThreadPool&,
- DoneInitializeHandler&,
ChainedMessageSender& messageSender,
StripeHostInfoNotifier& stripe_host_info_notifier,
- bool use_legacy_mode,
- bool& done_initializing_ref, // TODO STRIPE const ref once legacy is gone and stripe can't mutate init state
+ const bool& done_initializing_ref,
uint32_t stripe_index = 0);
~DistributorStripe() override;
@@ -114,24 +109,15 @@ public:
*/
void notifyDistributionChangeEnabled() override;
- void storage_distribution_changed();
-
void recheckBucketInfo(uint16_t nodeIdx, const document::Bucket &bucket) override;
bool handleReply(const std::shared_ptr<api::StorageReply>& reply) override;
- // StatusReporter implementation
- vespalib::string getReportContentType(const framework::HttpUrlPath&) const override;
- bool reportStatus(std::ostream&, const framework::HttpUrlPath&) const override;
-
- bool handleStatusRequest(const DelegatedStatusRequest& request) const override;
-
StripeAccessGuard::PendingOperationStats pending_operation_stats() const override;
std::string getActiveIdealStateOperations() const;
std::string getActiveOperations() const;
- framework::ThreadWaitInfo doCriticalTick(framework::ThreadIndex);
framework::ThreadWaitInfo doNonCriticalTick(framework::ThreadIndex);
/**
@@ -199,22 +185,17 @@ public:
bool tick() override;
private:
- // TODO STRIPE: reduce number of friends. DistributorStripe too popular for its own good.
friend class TopLevelDistributor;
friend class DistributorStripeTestUtil;
- friend class DistributorTestUtil;
- friend class TopLevelDistributorTestUtil;
- friend class LegacyBucketDBUpdaterTest;
friend class MetricUpdateHook;
friend class MultiThreadedStripeAccessGuard;
friend struct DistributorStripeTest;
- friend struct LegacyDistributorTest;
friend struct TopLevelDistributorTest;
+ friend class TopLevelDistributorTestUtil;
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
bool isMaintenanceReply(const api::StorageReply& reply) const;
- void handleStatusRequests();
void send_shutdown_abort_reply(const std::shared_ptr<api::StorageMessage>&);
void handle_or_propagate_message(const std::shared_ptr<api::StorageMessage>& msg);
void startExternalOperations();
@@ -247,8 +228,6 @@ private:
bool should_inhibit_current_maintenance_scan_tick() const noexcept;
void mark_current_maintenance_tick_as_inhibited() noexcept;
void mark_maintenance_tick_as_no_longer_inhibited() noexcept;
- void enableNextConfig();
- void fetchStatusRequests();
void fetchExternalMessages();
void startNextMaintenanceOperation();
void signalWorkWasDone();
@@ -263,7 +242,6 @@ private:
bool generateOperation(const std::shared_ptr<api::StorageMessage>& msg,
Operation::SP& operation);
- void enableNextDistribution(); // TODO STRIPE remove once legacy is gone
void propagateDefaultDistribution(std::shared_ptr<const lib::Distribution>); // TODO STRIPE remove once legacy is gone
void propagateClusterStates();
@@ -316,15 +294,12 @@ private:
std::unique_ptr<OperationSequencer> _operation_sequencer;
PendingMessageTracker _pendingMessageTracker;
StripeBucketDBUpdater _bucketDBUpdater;
- StatusReporterDelegate _distributorStatusDelegate;
- StatusReporterDelegate _bucketDBStatusDelegate;
IdealStateManager _idealStateManager;
ChainedMessageSender& _messageSender;
StripeHostInfoNotifier& _stripe_host_info_notifier;
ExternalOperationHandler _externalOperationHandler;
std::shared_ptr<lib::Distribution> _distribution;
- std::shared_ptr<lib::Distribution> _nextDistribution;
using MessageQueue = std::vector<std::shared_ptr<api::StorageMessage>>;
struct IndirectHigherPriority {
@@ -342,13 +317,7 @@ private:
MessageQueue _messageQueue;
ClientRequestPriorityQueue _client_request_priority_queue;
MessageQueue _fetchedMessages;
- framework::TickingThreadPool& _threadPool;
-
- mutable std::vector<std::shared_ptr<DistributorStatus>> _statusToDo;
- mutable std::vector<std::shared_ptr<DistributorStatus>> _fetchedStatusRequests;
-
- DoneInitializeHandler& _doneInitializeHandler; // TODO STRIPE remove when legacy is gone
- bool& _done_initializing_ref;
+ const bool& _done_initializing_ref;
std::unique_ptr<BucketPriorityDatabase> _bucketPriorityDb;
std::unique_ptr<SimpleMaintenanceScanner> _scanner;
@@ -374,7 +343,6 @@ private:
std::chrono::steady_clock::time_point _last_db_memory_sample_time_point;
size_t _inhibited_maintenance_tick_count;
bool _must_send_updated_host_info;
- bool _use_legacy_mode;
uint32_t _stripe_index;
};
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
index 59029dec66a..cc5a8259c40 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
@@ -4,6 +4,7 @@
#include "distributor_bucket_space_repo.h"
#include "distributor_bucket_space.h"
#include "pendingmessagetracker.h"
+#include "storage_node_up_states.h"
#include <vespa/document/select/parser.h>
#include <vespa/vdslib/state/cluster_state_bundle.h>
#include <vespa/vdslib/state/clusterstate.h>
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
index 518c83d7ffa..a5afadad6a7 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
@@ -4,7 +4,6 @@
#include "bucketgctimecalculator.h"
#include "bucketownership.h"
-#include "distributor_operation_context.h"
#include "operation_routing_snapshot.h"
#include <vespa/document/bucket/bucketspace.h>
#include <vespa/storage/bucketdb/bucketdatabase.h>
@@ -12,6 +11,7 @@
#include <vespa/storageapi/defs.h>
namespace document { class Bucket; }
+namespace storage::lib { class ClusterStateBundle; }
namespace storage::distributor {
@@ -20,9 +20,17 @@ class PendingMessageTracker;
/**
* Interface with functionality that is used when handling distributor stripe operations.
*/
-class DistributorStripeOperationContext : public DistributorOperationContext {
+class DistributorStripeOperationContext {
public:
virtual ~DistributorStripeOperationContext() = default;
+
+ virtual api::Timestamp generate_unique_timestamp() = 0;
+ virtual const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept = 0;
+ virtual DistributorBucketSpaceRepo& bucket_space_repo() noexcept = 0;
+ virtual const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept = 0;
+ virtual DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept = 0;
+ virtual const DistributorConfiguration& distributor_config() const noexcept = 0;
+
virtual void update_bucket_database(const document::Bucket& bucket,
const BucketCopy& changed_node,
uint32_t update_flags = 0) = 0;
diff --git a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
index 1ca985947a2..0cf976c969c 100644
--- a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
+++ b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
@@ -159,14 +159,6 @@ void MultiThreadedStripeAccessGuard::report_delayed_single_bucket_requests(vespa
});
}
-TickableStripe& MultiThreadedStripeAccessGuard::first_stripe() noexcept {
- return _stripe_pool.stripe_thread(0).stripe();
-}
-
-const TickableStripe& MultiThreadedStripeAccessGuard::first_stripe() const noexcept {
- return _stripe_pool.stripe_thread(0).stripe();
-}
-
template <typename Func>
void MultiThreadedStripeAccessGuard::for_each_stripe(Func&& f) {
for (auto& stripe_thread : _stripe_pool) {
diff --git a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
index 62af21cc43f..0ecc9eb803d 100644
--- a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
+++ b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
@@ -60,10 +60,6 @@ public:
void report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const override;
private:
- // TODO STRIPE remove once multi threaded stripe support is implemented
- TickableStripe& first_stripe() noexcept;
- const TickableStripe& first_stripe() const noexcept;
-
template <typename Func>
void for_each_stripe(Func&& f);
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index 9d9a04e9dcc..aa02f937b6b 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -4,13 +4,14 @@
#include <vespa/document/fieldvalue/document.h>
#include <vespa/storage/distributor/activecopy.h>
+#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/operationtargetresolverimpl.h>
#include <vespa/storage/distributor/pendingmessagetracker.h>
+#include <vespa/storage/distributor/storage_node_up_states.h>
#include <vespa/storageapi/message/persistence.h>
+#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
#include <vespa/vdslib/state/clusterstate.h>
-#include <vespa/storage/distributor/distributor_bucket_space.h>
-#include <vespa/vdslib/distribution/distribution.h>
#include <algorithm>
#include <vespa/log/log.h>
diff --git a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
index 335d070ad7b..05045c43888 100644
--- a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
+++ b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
@@ -1,9 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "pending_bucket_space_db_transition.h"
+#include "bucket_space_state_map.h"
#include "clusterinformation.h"
+#include "pending_bucket_space_db_transition.h"
#include "pendingclusterstate.h"
-#include "distributor_bucket_space.h"
#include "stripe_access_guard.h"
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
@@ -19,7 +19,7 @@ using lib::NodeType;
using lib::NodeState;
PendingBucketSpaceDbTransition::PendingBucketSpaceDbTransition(document::BucketSpace bucket_space,
- DistributorBucketSpace &distributorBucketSpace,
+ const BucketSpaceState &bucket_space_state,
bool distributionChanged,
const OutdatedNodes &outdatedNodes,
std::shared_ptr<const ClusterInformation> clusterInfo,
@@ -31,10 +31,10 @@ PendingBucketSpaceDbTransition::PendingBucketSpaceDbTransition(document::BucketS
_missingEntries(),
_clusterInfo(std::move(clusterInfo)),
_outdatedNodes(newClusterState.getNodeCount(NodeType::STORAGE)),
- _prevClusterState(distributorBucketSpace.getClusterState()),
+ _prevClusterState(bucket_space_state.get_cluster_state()),
_newClusterState(newClusterState),
_creationTimestamp(creationTimestamp),
- _distributorBucketSpace(distributorBucketSpace),
+ _bucket_space_state(bucket_space_state),
_distributorIndex(_clusterInfo->getDistributorIndex()),
_bucketOwnershipTransfer(distributionChanged),
_rejectedRequests()
@@ -217,24 +217,11 @@ PendingBucketSpaceDbTransition::DbMerger::addToInserter(BucketDatabase::Trailing
inserter.insert_at_end(bucket_id, e);
}
-// TODO STRIPE remove legacy single stripe stuff
-void
-PendingBucketSpaceDbTransition::mergeIntoBucketDatabase()
-{
- BucketDatabase &db(_distributorBucketSpace.getBucketDatabase());
- std::sort(_entries.begin(), _entries.end());
-
- const auto& dist = _distributorBucketSpace.getDistribution();
- DbMerger merger(_creationTimestamp, dist, _newClusterState, _clusterInfo->getStorageUpStates(), _outdatedNodes, _entries);
-
- db.merge(merger);
-}
-
void
PendingBucketSpaceDbTransition::merge_into_bucket_databases(StripeAccessGuard& guard)
{
std::sort(_entries.begin(), _entries.end());
- const auto& dist = _distributorBucketSpace.getDistribution();
+ const auto& dist = _bucket_space_state.get_distribution();
guard.merge_entries_into_db(_bucket_space, _creationTimestamp, dist, _newClusterState,
_clusterInfo->getStorageUpStates(), _outdatedNodes, _entries);
}
@@ -296,7 +283,7 @@ PendingBucketSpaceDbTransition::nodeWasUpButNowIsDown(const lib::State& old,
bool
PendingBucketSpaceDbTransition::nodeInSameGroupAsSelf(uint16_t index) const
{
- const auto &dist(_distributorBucketSpace.getDistribution());
+ const auto &dist(_bucket_space_state.get_distribution());
if (dist.getNodeGraph().getGroupForNode(index) ==
dist.getNodeGraph().getGroupForNode(_distributorIndex)) {
LOG(debug,
@@ -317,7 +304,7 @@ PendingBucketSpaceDbTransition::nodeNeedsOwnershipTransferFromGroupDown(
uint16_t nodeIndex,
const lib::ClusterState& state) const
{
- const auto &dist(_distributorBucketSpace.getDistribution());
+ const auto &dist(_bucket_space_state.get_distribution());
if (!dist.distributorAutoOwnershipTransferOnWholeGroupDown()) {
return false; // Not doing anything for downed groups.
}
diff --git a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
index f7766cb265d..37d48323066 100644
--- a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
+++ b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
@@ -16,9 +16,9 @@ class State;
namespace storage::distributor {
+class BucketSpaceState;
class ClusterInformation;
class PendingClusterState;
-class DistributorBucketSpace;
class StripeAccessGuard;
/**
@@ -50,7 +50,7 @@ private:
const lib::ClusterState& _prevClusterState;
const lib::ClusterState& _newClusterState;
const api::Timestamp _creationTimestamp;
- DistributorBucketSpace& _distributorBucketSpace;
+ const BucketSpaceState& _bucket_space_state;
uint16_t _distributorIndex;
bool _bucketOwnershipTransfer;
std::unordered_map<uint16_t, size_t> _rejectedRequests;
@@ -126,7 +126,7 @@ public:
};
PendingBucketSpaceDbTransition(document::BucketSpace bucket_space,
- DistributorBucketSpace &distributorBucketSpace,
+ const BucketSpaceState &bucket_space_state,
bool distributionChanged,
const OutdatedNodes &outdatedNodes,
std::shared_ptr<const ClusterInformation> clusterInfo,
@@ -135,7 +135,6 @@ public:
~PendingBucketSpaceDbTransition();
// Merges all the results with the corresponding bucket database.
- void mergeIntoBucketDatabase();
void merge_into_bucket_databases(StripeAccessGuard& guard);
// Adds the info from the reply to our list of information.
diff --git a/storage/src/vespa/storage/distributor/pendingclusterstate.cpp b/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
index 69cf5486a8a..59f5d0a9322 100644
--- a/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
+++ b/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
@@ -1,13 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "pendingclusterstate.h"
+#include "bucket_space_state_map.h"
#include "pending_bucket_space_db_transition.h"
+#include "pendingclusterstate.h"
#include "top_level_bucket_db_updater.h"
-#include "distributor_bucket_space_repo.h"
-#include "distributor_bucket_space.h"
-#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
-#include <vespa/storage/common/global_bucket_space_distribution_converter.h>
#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/storage/common/global_bucket_space_distribution_converter.h>
+#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/util/xmlstream.hpp>
#include <climits>
@@ -27,7 +26,7 @@ PendingClusterState::PendingClusterState(
const framework::Clock& clock,
const ClusterInformation::CSP& clusterInfo,
DistributorMessageSender& sender,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
+ const BucketSpaceStateMap& bucket_space_states,
const std::shared_ptr<api::SetSystemStateCommand>& newStateCmd,
const OutdatedNodesMap &outdatedNodesMap,
api::Timestamp creationTimestamp)
@@ -41,7 +40,7 @@ PendingClusterState::PendingClusterState(
_clusterInfo(clusterInfo),
_creationTimestamp(creationTimestamp),
_sender(sender),
- _bucketSpaceRepo(bucketSpaceRepo),
+ _bucket_space_states(bucket_space_states),
_clusterStateVersion(_cmd->getClusterStateBundle().getVersion()),
_isVersionedTransition(true),
_bucketOwnershipTransfer(false),
@@ -55,7 +54,7 @@ PendingClusterState::PendingClusterState(
const framework::Clock& clock,
const ClusterInformation::CSP& clusterInfo,
DistributorMessageSender& sender,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
+ const BucketSpaceStateMap& bucket_space_states,
api::Timestamp creationTimestamp)
: _requestedNodes(clusterInfo->getStorageNodeCount()),
_prevClusterStateBundle(clusterInfo->getClusterStateBundle()),
@@ -64,7 +63,7 @@ PendingClusterState::PendingClusterState(
_clusterInfo(clusterInfo),
_creationTimestamp(creationTimestamp),
_sender(sender),
- _bucketSpaceRepo(bucketSpaceRepo),
+ _bucket_space_states(bucket_space_states),
_clusterStateVersion(0),
_isVersionedTransition(false),
_bucketOwnershipTransfer(true),
@@ -80,7 +79,7 @@ void
PendingClusterState::initializeBucketSpaceTransitions(bool distributionChanged, const OutdatedNodesMap &outdatedNodesMap)
{
OutdatedNodes emptyOutdatedNodes;
- for (auto &elem : _bucketSpaceRepo) {
+ for (const auto &elem : _bucket_space_states) {
auto onItr = outdatedNodesMap.find(elem.first);
const auto &outdatedNodes = (onItr == outdatedNodesMap.end()) ? emptyOutdatedNodes : onItr->second;
auto pendingTransition =
@@ -100,8 +99,7 @@ PendingClusterState::initializeBucketSpaceTransitions(bool distributionChanged,
void
PendingClusterState::logConstructionInformation() const
{
- const auto &distributorBucketSpace(_bucketSpaceRepo.get(document::FixedBucketSpaces::default_space()));
- const auto &distribution(distributorBucketSpace.getDistribution());
+ const auto &distribution = _bucket_space_states.get(document::FixedBucketSpaces::default_space()).get_distribution();
LOG(debug,
"New PendingClusterState constructed with previous cluster "
"state '%s', new cluster state '%s', distribution config "
@@ -190,8 +188,7 @@ PendingClusterState::requestBucketInfoFromStorageNodesWithChangedState()
void
PendingClusterState::requestNode(BucketSpaceAndNode bucketSpaceAndNode)
{
- const auto &distributorBucketSpace(_bucketSpaceRepo.get(bucketSpaceAndNode.bucketSpace));
- const auto &distribution(distributorBucketSpace.getDistribution());
+ const auto &distribution = _bucket_space_states.get(bucketSpaceAndNode.bucketSpace).get_distribution();
vespalib::string distributionHash;
// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
bool sendLegacyHash = false;
@@ -207,10 +204,10 @@ PendingClusterState::requestNode(BucketSpaceAndNode bucketSpaceAndNode)
if (!sendLegacyHash) {
distributionHash = distribution.getNodeGraph().getDistributionConfigHash();
} else {
- const auto& defaultSpace = _bucketSpaceRepo.get(document::FixedBucketSpaces::default_space());
+ const auto& defaultSpace = _bucket_space_states.get(document::FixedBucketSpaces::default_space());
// Generate legacy distribution hash explicitly.
auto legacyGlobalDistr = GlobalBucketSpaceDistributionConverter::convert_to_global(
- defaultSpace.getDistribution(), true/*use legacy mode*/);
+ defaultSpace.get_distribution(), true/*use legacy mode*/);
distributionHash = legacyGlobalDistr->getNodeGraph().getDistributionConfigHash();
LOG(debug, "Falling back to sending legacy hash to node %u: %s",
bucketSpaceAndNode.node, distributionHash.c_str());
@@ -323,14 +320,6 @@ PendingClusterState::requestNodesToString() const
}
void
-PendingClusterState::mergeIntoBucketDatabases()
-{
- for (auto &elem : _pendingTransitions) {
- elem.second->mergeIntoBucketDatabase();
- }
-}
-
-void
PendingClusterState::merge_into_bucket_databases(StripeAccessGuard& guard)
{
for (auto &elem : _pendingTransitions) {
diff --git a/storage/src/vespa/storage/distributor/pendingclusterstate.h b/storage/src/vespa/storage/distributor/pendingclusterstate.h
index babcebea69d..fd209197ec6 100644
--- a/storage/src/vespa/storage/distributor/pendingclusterstate.h
+++ b/storage/src/vespa/storage/distributor/pendingclusterstate.h
@@ -15,9 +15,9 @@
namespace storage::distributor {
+class BucketSpaceStateMap;
class DistributorMessageSender;
class PendingBucketSpaceDbTransition;
-class DistributorBucketSpaceRepo;
class StripeAccessGuard;
/**
@@ -45,14 +45,14 @@ public:
const framework::Clock& clock,
const ClusterInformation::CSP& clusterInfo,
DistributorMessageSender& sender,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
+ const BucketSpaceStateMap& bucket_space_states,
const std::shared_ptr<api::SetSystemStateCommand>& newStateCmd,
const OutdatedNodesMap &outdatedNodesMap,
api::Timestamp creationTimestamp)
{
// Naked new due to private constructor
return std::unique_ptr<PendingClusterState>(new PendingClusterState(
- clock, clusterInfo, sender, bucketSpaceRepo,
+ clock, clusterInfo, sender, bucket_space_states,
newStateCmd, outdatedNodesMap, creationTimestamp));
}
@@ -64,12 +64,12 @@ public:
const framework::Clock& clock,
const ClusterInformation::CSP& clusterInfo,
DistributorMessageSender& sender,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
+ const BucketSpaceStateMap& bucket_space_states,
api::Timestamp creationTimestamp)
{
// Naked new due to private constructor
return std::unique_ptr<PendingClusterState>(new PendingClusterState(
- clock, clusterInfo, sender, bucketSpaceRepo, creationTimestamp));
+ clock, clusterInfo, sender, bucket_space_states, creationTimestamp));
}
PendingClusterState(const PendingClusterState &) = delete;
@@ -146,7 +146,6 @@ public:
/**
* Merges all the results with the corresponding bucket databases.
*/
- void mergeIntoBucketDatabases();
void merge_into_bucket_databases(StripeAccessGuard& guard);
// Get pending transition for a specific bucket space. Only used by unit test.
@@ -169,7 +168,7 @@ private:
const framework::Clock&,
const ClusterInformation::CSP& clusterInfo,
DistributorMessageSender& sender,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
+ const BucketSpaceStateMap& bucket_space_states,
const std::shared_ptr<api::SetSystemStateCommand>& newStateCmd,
const OutdatedNodesMap &outdatedNodesMap,
api::Timestamp creationTimestamp);
@@ -182,7 +181,7 @@ private:
const framework::Clock&,
const ClusterInformation::CSP& clusterInfo,
DistributorMessageSender& sender,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
+ const BucketSpaceStateMap& bucket_space_states,
api::Timestamp creationTimestamp);
struct BucketSpaceAndNode {
@@ -229,7 +228,7 @@ private:
api::Timestamp _creationTimestamp;
DistributorMessageSender& _sender;
- DistributorBucketSpaceRepo& _bucketSpaceRepo;
+ const BucketSpaceStateMap& _bucket_space_states;
uint32_t _clusterStateVersion;
bool _isVersionedTransition;
bool _bucketOwnershipTransfer;
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
index c48434484d2..f0cc18b346e 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
@@ -31,25 +31,19 @@ namespace storage::distributor {
StripeBucketDBUpdater::StripeBucketDBUpdater(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorStripeInterface& owner,
- DistributorMessageSender& sender,
- bool use_legacy_mode)
+ DistributorMessageSender& sender)
: framework::StatusReporter("bucketdb", "Bucket DB Updater"),
_node_ctx(node_ctx),
_op_ctx(op_ctx),
_distributor_interface(owner),
_delayedRequests(),
_sentMessages(),
- _pendingClusterState(),
- _history(),
_sender(sender),
_enqueuedRechecks(),
- _outdatedNodesMap(),
- _transitionTimer(_node_ctx.clock()),
_stale_reads_enabled(false),
_active_distribution_contexts(),
_explicit_transition_read_guard(),
- _distribution_context_mutex(),
- _use_legacy_mode(use_legacy_mode)
+ _distribution_context_mutex()
{
for (auto& elem : _op_ctx.bucket_space_repo()) {
_active_distribution_contexts.emplace(
@@ -223,66 +217,18 @@ public:
}
-void
-StripeBucketDBUpdater::removeSuperfluousBuckets(
- const lib::ClusterStateBundle& newState,
- bool is_distribution_config_change)
-{
- assert(_use_legacy_mode);
- const bool move_to_read_only_db = shouldDeferStateEnabling();
- const char* up_states = storage_node_up_states();
- for (auto& elem : _op_ctx.bucket_space_repo()) {
- const auto& newDistribution(elem.second->getDistribution());
- const auto& oldClusterState(elem.second->getClusterState());
- const auto& new_cluster_state = newState.getDerivedClusterState(elem.first);
-
- // Running a full DB sweep is expensive, so if the cluster state transition does
- // not actually indicate that buckets should possibly be removed, we elide it entirely.
- if (!is_distribution_config_change
- && db_pruning_may_be_elided(oldClusterState, *new_cluster_state, up_states))
- {
- LOG(debug, "[bucket space '%s']: eliding DB pruning for state transition '%s' -> '%s'",
- document::FixedBucketSpaces::to_string(elem.first).data(),
- oldClusterState.toString().c_str(), new_cluster_state->toString().c_str());
- continue;
- }
-
- auto& bucketDb(elem.second->getBucketDatabase());
- auto& readOnlyDb(_op_ctx.read_only_bucket_space_repo().get(elem.first).getBucketDatabase());
-
- // Remove all buckets not belonging to this distributor, or
- // being on storage nodes that are no longer up.
- MergingNodeRemover proc(
- oldClusterState,
- *new_cluster_state,
- _node_ctx.node_index(),
- newDistribution,
- up_states,
- move_to_read_only_db);
-
- bucketDb.merge(proc);
- if (move_to_read_only_db) {
- ReadOnlyDbMergingInserter read_only_merger(proc.getNonOwnedEntries());
- readOnlyDb.merge(read_only_merger);
- }
- maybe_inject_simulated_db_pruning_delay();
- }
-}
-
PotentialDataLossReport
StripeBucketDBUpdater::remove_superfluous_buckets(
document::BucketSpace bucket_space,
const lib::ClusterState& new_state,
bool is_distribution_change)
{
- assert(!_use_legacy_mode);
(void)is_distribution_change; // TODO remove if not needed
const bool move_to_read_only_db = shouldDeferStateEnabling();
const char* up_states = storage_node_up_states();
auto& s = _op_ctx.bucket_space_repo().get(bucket_space);
const auto& new_distribution = s.getDistribution();
- const auto& old_cluster_state = s.getClusterState();
// Elision of DB sweep is done at a higher level, so we don't have to do that here.
auto& bucket_db = s.getBucketDatabase();
auto& read_only_db = _op_ctx.read_only_bucket_space_repo().get(bucket_space).getBucketDatabase();
@@ -290,7 +236,6 @@ StripeBucketDBUpdater::remove_superfluous_buckets(
// Remove all buckets not belonging to this distributor, or
// being on storage nodes that are no longer up.
MergingNodeRemover proc(
- old_cluster_state,
new_state,
_node_ctx.node_index(),
new_distribution,
@@ -303,7 +248,7 @@ StripeBucketDBUpdater::remove_superfluous_buckets(
read_only_db.merge(read_only_merger);
}
PotentialDataLossReport report;
- report.buckets = proc.removed_buckets();
+ report.buckets = proc.removed_buckets();
report.documents = proc.removed_documents();
return report;
}
@@ -317,7 +262,6 @@ StripeBucketDBUpdater::merge_entries_into_db(document::BucketSpace bucket_space,
const std::unordered_set<uint16_t>& outdated_nodes,
const std::vector<dbtransition::Entry>& entries)
{
- assert(!_use_legacy_mode);
auto& s = _op_ctx.bucket_space_repo().get(bucket_space);
auto& bucket_db = s.getBucketDatabase();
@@ -326,44 +270,6 @@ StripeBucketDBUpdater::merge_entries_into_db(document::BucketSpace bucket_space,
bucket_db.merge(merger);
}
-namespace {
-
-void maybe_sleep_for(std::chrono::milliseconds ms) {
- if (ms.count() > 0) {
- std::this_thread::sleep_for(ms);
- }
-}
-
-}
-
-void
-StripeBucketDBUpdater::maybe_inject_simulated_db_pruning_delay() {
- maybe_sleep_for(_op_ctx.distributor_config().simulated_db_pruning_latency());
-}
-
-void
-StripeBucketDBUpdater::maybe_inject_simulated_db_merging_delay() {
- maybe_sleep_for(_op_ctx.distributor_config().simulated_db_merging_latency());
-}
-
-void
-StripeBucketDBUpdater::ensureTransitionTimerStarted()
-{
- // Don't overwrite start time if we're already processing a state, as
- // that will make transition times appear artificially low.
- if (!hasPendingClusterState()) {
- _transitionTimer = framework::MilliSecTimer(
- _node_ctx.clock());
- }
-}
-
-void
-StripeBucketDBUpdater::completeTransitionTimer()
-{
- _distributor_interface.getMetrics()
- .stateTransitionTime.addValue(_transitionTimer.getElapsedTimeAsDouble());
-}
-
void
StripeBucketDBUpdater::clearReadOnlyBucketRepoDatabases()
{
@@ -372,46 +278,6 @@ StripeBucketDBUpdater::clearReadOnlyBucketRepoDatabases()
}
}
-void
-StripeBucketDBUpdater::storageDistributionChanged()
-{
- ensureTransitionTimerStarted();
-
- removeSuperfluousBuckets(_op_ctx.cluster_state_bundle(), true);
-
- auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
- _node_ctx.node_index(),
- _op_ctx.cluster_state_bundle(),
- storage_node_up_states());
- _pendingClusterState = PendingClusterState::createForDistributionChange(
- _node_ctx.clock(),
- std::move(clusterInfo),
- _sender,
- _op_ctx.bucket_space_repo(),
- _op_ctx.generate_unique_timestamp());
- _outdatedNodesMap = _pendingClusterState->getOutdatedNodesMap();
- _op_ctx.bucket_space_repo().set_pending_cluster_state_bundle(_pendingClusterState->getNewClusterStateBundle());
-}
-
-void
-StripeBucketDBUpdater::replyToPreviousPendingClusterStateIfAny()
-{
- if (_pendingClusterState.get() && _pendingClusterState->hasCommand()) {
- _distributor_interface.getMessageSender().sendUp(
- std::make_shared<api::SetSystemStateReply>(*_pendingClusterState->getCommand()));
- }
-}
-
-void
-StripeBucketDBUpdater::replyToActivationWithActualVersion(
- const api::ActivateClusterStateVersionCommand& cmd,
- uint32_t actualVersion)
-{
- auto reply = std::make_shared<api::ActivateClusterStateVersionReply>(cmd);
- reply->setActualVersion(actualVersion);
- _distributor_interface.getMessageSender().sendUp(reply); // TODO let API accept rvalues
-}
-
void StripeBucketDBUpdater::update_read_snapshot_before_db_pruning() {
std::lock_guard lock(_distribution_context_mutex);
for (auto& elem : _op_ctx.bucket_space_repo()) {
@@ -428,7 +294,6 @@ void StripeBucketDBUpdater::update_read_snapshot_before_db_pruning() {
}
}
-
void StripeBucketDBUpdater::update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) {
std::lock_guard lock(_distribution_context_mutex);
const auto old_default_state = _op_ctx.bucket_space_repo().get(
@@ -467,88 +332,6 @@ void StripeBucketDBUpdater::update_read_snapshot_after_activation(const lib::Clu
}
}
-bool
-StripeBucketDBUpdater::onSetSystemState(
- const std::shared_ptr<api::SetSystemStateCommand>& cmd)
-{
- assert(_use_legacy_mode);
- LOG(debug,
- "Received new cluster state %s",
- cmd->getSystemState().toString().c_str());
-
- const lib::ClusterStateBundle oldState = _op_ctx.cluster_state_bundle();
- const lib::ClusterStateBundle& state = cmd->getClusterStateBundle();
-
- if (state == oldState) {
- return false;
- }
- ensureTransitionTimerStarted();
- // Separate timer since _transition_timer might span multiple pending states.
- framework::MilliSecTimer process_timer(_node_ctx.clock());
- update_read_snapshot_before_db_pruning();
- const auto& bundle = cmd->getClusterStateBundle();
- removeSuperfluousBuckets(bundle, false);
- update_read_snapshot_after_db_pruning(bundle);
- replyToPreviousPendingClusterStateIfAny();
-
- auto clusterInfo = std::make_shared<const SimpleClusterInformation>(
- _node_ctx.node_index(),
- _op_ctx.cluster_state_bundle(),
- storage_node_up_states());
- _pendingClusterState = PendingClusterState::createForClusterStateChange(
- _node_ctx.clock(),
- std::move(clusterInfo),
- _sender,
- _op_ctx.bucket_space_repo(),
- cmd,
- _outdatedNodesMap,
- _op_ctx.generate_unique_timestamp());
- _outdatedNodesMap = _pendingClusterState->getOutdatedNodesMap();
-
- _distributor_interface.getMetrics().set_cluster_state_processing_time.addValue(
- process_timer.getElapsedTimeAsDouble());
-
- _op_ctx.bucket_space_repo().set_pending_cluster_state_bundle(_pendingClusterState->getNewClusterStateBundle());
- if (isPendingClusterStateCompleted()) {
- processCompletedPendingClusterState();
- }
- return true;
-}
-
-bool
-StripeBucketDBUpdater::onActivateClusterStateVersion(const std::shared_ptr<api::ActivateClusterStateVersionCommand>& cmd)
-{
- assert(_use_legacy_mode);
- if (hasPendingClusterState() && _pendingClusterState->isVersionedTransition()) {
- const auto pending_version = _pendingClusterState->clusterStateVersion();
- if (pending_version == cmd->version()) {
- if (isPendingClusterStateCompleted()) {
- assert(_pendingClusterState->isDeferred());
- activatePendingClusterState();
- } else {
- LOG(error, "Received cluster state activation for pending version %u "
- "without pending state being complete yet. This is not expected, "
- "as no activation should be sent before all distributors have "
- "reported that state processing is complete.", pending_version);
- replyToActivationWithActualVersion(*cmd, 0); // Invalid version, will cause re-send (hopefully when completed).
- return true;
- }
- } else {
- replyToActivationWithActualVersion(*cmd, pending_version);
- return true;
- }
- } else if (shouldDeferStateEnabling()) {
- // Likely just a resend, but log warn for now to get a feel of how common it is.
- LOG(warning, "Received cluster state activation command for version %u, which "
- "has no corresponding pending state. Likely resent operation.", cmd->version());
- } else {
- LOG(debug, "Received cluster state activation command for version %u, but distributor "
- "config does not have deferred activation enabled. Treating as no-op.", cmd->version());
- }
- // Fall through to next link in call chain that cares about this message.
- return false;
-}
-
StripeBucketDBUpdater::MergeReplyGuard::~MergeReplyGuard()
{
if (_reply) {
@@ -646,30 +429,9 @@ bool
StripeBucketDBUpdater::onRequestBucketInfoReply(
const std::shared_ptr<api::RequestBucketInfoReply> & repl)
{
- if (pendingClusterStateAccepted(repl)) {
- return true;
- }
return processSingleBucketInfoReply(repl);
}
-bool
-StripeBucketDBUpdater::pendingClusterStateAccepted(
- const std::shared_ptr<api::RequestBucketInfoReply> & repl)
-{
- if (_pendingClusterState.get()
- && _pendingClusterState->onRequestBucketInfoReply(repl))
- {
- if (isPendingClusterStateCompleted()) {
- processCompletedPendingClusterState();
- }
- return true;
- }
- LOG(spam,
- "Reply %s was not accepted by pending cluster state",
- repl->toString().c_str());
- return false;
-}
-
void
StripeBucketDBUpdater::handleSingleBucketInfoFailure(
const std::shared_ptr<api::RequestBucketInfoReply>& repl,
@@ -688,9 +450,6 @@ StripeBucketDBUpdater::handleSingleBucketInfoFailure(
void
StripeBucketDBUpdater::resendDelayedMessages()
{
- if (_pendingClusterState) {
- _pendingClusterState->resendDelayedMessages();
- }
if (_delayedRequests.empty()) {
return; // Don't fetch time if not needed
}
@@ -803,100 +562,11 @@ StripeBucketDBUpdater::updateDatabase(document::BucketSpace bucketSpace, uint16_
}
}
-bool
-StripeBucketDBUpdater::isPendingClusterStateCompleted() const
-{
- return _pendingClusterState.get() && _pendingClusterState->done();
-}
-
-void
-StripeBucketDBUpdater::processCompletedPendingClusterState()
-{
- if (_pendingClusterState->isDeferred()) {
- LOG(debug, "Deferring completion of pending cluster state version %u until explicitly activated",
- _pendingClusterState->clusterStateVersion());
- assert(_pendingClusterState->hasCommand()); // Deferred transitions should only ever be created by state commands.
- // Sending down SetSystemState command will reach the state manager and a reply
- // will be auto-sent back to the cluster controller in charge. Once this happens,
- // it will send an explicit activation command once all distributors have reported
- // that their pending cluster states have completed.
- // A booting distributor will treat itself as "system Up" before the state has actually
- // taken effect via activation. External operation handler will keep operations from
- // actually being scheduled until state has been activated. The external operation handler
- // needs to be explicitly aware of the case where no state has yet to be activated.
- _distributor_interface.getMessageSender().sendDown(
- _pendingClusterState->getCommand());
- _pendingClusterState->clearCommand();
- return;
- }
- // Distribution config change or non-deferred cluster state. Immediately activate
- // the pending state without being told to do so explicitly.
- activatePendingClusterState();
-}
-
-void
-StripeBucketDBUpdater::activatePendingClusterState()
-{
- framework::MilliSecTimer process_timer(_node_ctx.clock());
-
- _pendingClusterState->mergeIntoBucketDatabases();
- maybe_inject_simulated_db_merging_delay();
-
- if (_pendingClusterState->isVersionedTransition()) {
- LOG(debug, "Activating pending cluster state version %u", _pendingClusterState->clusterStateVersion());
- enableCurrentClusterStateBundleInDistributor();
- if (_pendingClusterState->hasCommand()) {
- _distributor_interface.getMessageSender().sendDown(
- _pendingClusterState->getCommand());
- }
- addCurrentStateToClusterStateHistory();
- } else {
- LOG(debug, "Activating pending distribution config");
- // TODO distribution changes cannot currently be deferred as they are not
- // initiated by the cluster controller!
- _distributor_interface.notifyDistributionChangeEnabled();
- }
-
- update_read_snapshot_after_activation(_pendingClusterState->getNewClusterStateBundle());
- _pendingClusterState.reset();
- _outdatedNodesMap.clear();
- _op_ctx.bucket_space_repo().clear_pending_cluster_state_bundle(); // TODO also read only bucket space..?
- sendAllQueuedBucketRechecks();
- completeTransitionTimer();
- clearReadOnlyBucketRepoDatabases();
-
- _distributor_interface.getMetrics().activate_cluster_state_processing_time.addValue(
- process_timer.getElapsedTimeAsDouble());
-}
-
-void
-StripeBucketDBUpdater::enableCurrentClusterStateBundleInDistributor()
-{
- const lib::ClusterStateBundle& state(
- _pendingClusterState->getNewClusterStateBundle());
-
- LOG(debug,
- "StripeBucketDBUpdater finished processing state %s",
- state.getBaselineClusterState()->toString().c_str());
-
- _distributor_interface.enableClusterStateBundle(state);
-}
-
void StripeBucketDBUpdater::simulate_cluster_state_bundle_activation(const lib::ClusterStateBundle& activated_state) {
update_read_snapshot_after_activation(activated_state);
_distributor_interface.enableClusterStateBundle(activated_state);
}
-void
-StripeBucketDBUpdater::addCurrentStateToClusterStateHistory()
-{
- _history.push_back(_pendingClusterState->getSummary());
-
- if (_history.size() > 50) {
- _history.pop_front();
- }
-}
-
vespalib::string
StripeBucketDBUpdater::getReportContentType(const framework::HttpUrlPath&) const
{
@@ -952,19 +622,7 @@ StripeBucketDBUpdater::reportXmlStatus(vespalib::xml::XmlOutputStream& xos,
<< XmlTag("systemstate_active")
<< XmlContent(_op_ctx.cluster_state_bundle().getBaselineClusterState()->toString())
<< XmlEndTag();
- if (_pendingClusterState) {
- xos << *_pendingClusterState;
- }
- xos << XmlTag("systemstate_history");
- for (auto i(_history.rbegin()), e(_history.rend()); i != e; ++i) {
- xos << XmlTag("change")
- << XmlAttribute("from", i->_prevClusterState)
- << XmlAttribute("to", i->_newClusterState)
- << XmlAttribute("processingtime", i->_processingTime)
- << XmlEndTag();
- }
- xos << XmlEndTag()
- << XmlTag("single_bucket_requests");
+ xos << XmlTag("single_bucket_requests");
report_single_bucket_requests(xos);
xos << XmlEndTag()
<< XmlTag("delayed_single_bucket_requests");
@@ -990,14 +648,12 @@ StripeBucketDBUpdater::report_delayed_single_bucket_requests(vespalib::xml::XmlO
}
StripeBucketDBUpdater::MergingNodeRemover::MergingNodeRemover(
- const lib::ClusterState& oldState,
const lib::ClusterState& s,
uint16_t localIndex,
const lib::Distribution& distribution,
const char* upStates,
bool track_non_owned_entries)
- : _oldState(oldState),
- _state(s),
+ : _state(s),
_available_nodes(),
_nonOwnedBuckets(),
_removed_buckets(0),
@@ -1140,15 +796,6 @@ StripeBucketDBUpdater::MergingNodeRemover::storage_node_is_available(uint16_t in
return ((index < _available_nodes.size()) && _available_nodes[index]);
}
-StripeBucketDBUpdater::MergingNodeRemover::~MergingNodeRemover()
-{
- if (_removed_buckets != 0) {
- LOGBM(info, "After cluster state change %s, %zu buckets no longer "
- "have available replicas. %zu documents in these buckets will "
- "be unavailable until nodes come back up",
- _oldState.getTextualDifference(_state).c_str(),
- _removed_buckets, _removed_documents);
- }
-}
+StripeBucketDBUpdater::MergingNodeRemover::~MergingNodeRemover() = default;
} // distributor
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
index 9bc91ca78e7..1d81aa014a8 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
@@ -35,12 +35,10 @@ class StripeBucketDBUpdater final
public api::MessageHandler
{
public:
- using OutdatedNodesMap = dbtransition::OutdatedNodesMap;
StripeBucketDBUpdater(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorStripeInterface& owner,
- DistributorMessageSender& sender,
- bool use_legacy_mode);
+ DistributorMessageSender& sender);
~StripeBucketDBUpdater() override;
void flush();
@@ -48,13 +46,10 @@ public:
void recheckBucketInfo(uint32_t nodeIdx, const document::Bucket& bucket);
void handle_activated_cluster_state_bundle();
- bool onSetSystemState(const std::shared_ptr<api::SetSystemStateCommand>& cmd) override;
- bool onActivateClusterStateVersion(const std::shared_ptr<api::ActivateClusterStateVersionCommand>& cmd) override;
bool onRequestBucketInfoReply(const std::shared_ptr<api::RequestBucketInfoReply> & repl) override;
bool onMergeBucketReply(const std::shared_ptr<api::MergeBucketReply>& reply) override;
bool onNotifyBucketChange(const std::shared_ptr<api::NotifyBucketChangeCommand>&) override;
void resendDelayedMessages();
- void storageDistributionChanged();
vespalib::string reportXmlStatus(vespalib::xml::XmlOutputStream&, const framework::HttpUrlPath&) const;
vespalib::string getReportContentType(const framework::HttpUrlPath&) const override;
@@ -68,16 +63,6 @@ public:
const DistributorNodeContext& node_context() const { return _node_ctx; }
DistributorStripeOperationContext& operation_context() { return _op_ctx; }
- /**
- * Returns whether the current PendingClusterState indicates that there has
- * been a transfer of bucket ownership amongst the distributors in the
- * cluster. This method only makes sense to call when _pending_cluster_state
- * is active, such as from within a enableClusterState() call.
- */
- bool bucketOwnershipHasChanged() const {
- return ((_pendingClusterState.get() != nullptr)
- && _pendingClusterState->hasBucketOwnershipTransfer());
- }
void set_stale_reads_enabled(bool enabled) noexcept {
_stale_reads_enabled.store(enabled, std::memory_order_relaxed);
}
@@ -144,8 +129,6 @@ private:
};
friend class DistributorStripeTestUtil;
- friend class DistributorTestUtil;
- friend class TopLevelDistributorTestUtil; // TODO STRIPE remove asap
// TODO refactor and rewire to avoid needing this direct meddling
friend class DistributorStripe;
@@ -156,13 +139,9 @@ private:
bool shouldDeferStateEnabling() const noexcept;
bool hasPendingClusterState() const;
- bool pendingClusterStateAccepted(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
bool processSingleBucketInfoReply(const std::shared_ptr<api::RequestBucketInfoReply>& repl);
void handleSingleBucketInfoFailure(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
const BucketRequest& req);
- bool isPendingClusterStateCompleted() const;
- void processCompletedPendingClusterState();
- void activatePendingClusterState();
void mergeBucketInfoWithDatabase(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
const BucketRequest& req);
void convertBucketInfoToBucketList(const std::shared_ptr<api::RequestBucketInfoReply>& repl,
@@ -171,8 +150,6 @@ private:
const std::shared_ptr<MergeReplyGuard>& mergeReply);
void addBucketInfoForNode(const BucketDatabase::Entry& e, uint16_t node,
BucketListMerger::BucketList& existing) const;
- void ensureTransitionTimerStarted();
- void completeTransitionTimer();
void clearReadOnlyBucketRepoDatabases();
/**
* Adds all buckets contained in the bucket database
@@ -189,15 +166,10 @@ private:
*/
void updateDatabase(document::BucketSpace bucketSpace, uint16_t node, BucketListMerger& merger);
- void updateState(const lib::ClusterState& oldState, const lib::ClusterState& newState);
-
void update_read_snapshot_before_db_pruning();
- void removeSuperfluousBuckets(const lib::ClusterStateBundle& newState,
- bool is_distribution_config_change);
void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state);
void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state);
- // TODO STRIPE only called when stripe guard is held
PotentialDataLossReport remove_superfluous_buckets(document::BucketSpace bucket_space,
const lib::ClusterState& new_state,
bool is_distribution_change);
@@ -209,26 +181,15 @@ private:
const std::unordered_set<uint16_t>& outdated_nodes,
const std::vector<dbtransition::Entry>& entries);
- void replyToPreviousPendingClusterStateIfAny();
- void replyToActivationWithActualVersion(
- const api::ActivateClusterStateVersionCommand& cmd,
- uint32_t actualVersion);
-
- void enableCurrentClusterStateBundleInDistributor();
- void addCurrentStateToClusterStateHistory();
void enqueueRecheckUntilPendingStateEnabled(uint16_t node, const document::Bucket&);
void sendAllQueuedBucketRechecks();
- void maybe_inject_simulated_db_pruning_delay();
- void maybe_inject_simulated_db_merging_delay();
-
/**
Removes all copies of buckets that are on nodes that are down.
*/
class MergingNodeRemover : public BucketDatabase::MergingProcessor {
public:
- MergingNodeRemover(const lib::ClusterState& oldState,
- const lib::ClusterState& s,
+ MergingNodeRemover(const lib::ClusterState& s,
uint16_t localIndex,
const lib::Distribution& distribution,
const char* upStates,
@@ -250,44 +211,38 @@ private:
bool has_unavailable_nodes(const BucketDatabase::Entry&) const;
bool storage_node_is_available(uint16_t index) const noexcept;
- const lib::ClusterState _oldState;
- const lib::ClusterState _state;
- std::vector<bool> _available_nodes;
+ const lib::ClusterState _state;
+ std::vector<bool> _available_nodes;
std::vector<BucketDatabase::Entry> _nonOwnedBuckets;
- size_t _removed_buckets;
- size_t _removed_documents;
-
- uint16_t _localIndex;
- const lib::Distribution& _distribution;
- const char* _upStates;
- bool _track_non_owned_entries;
-
- mutable uint64_t _cachedDecisionSuperbucket;
- mutable bool _cachedOwned;
+ size_t _removed_buckets;
+ size_t _removed_documents;
+ uint16_t _localIndex;
+ const lib::Distribution& _distribution;
+ const char* _upStates;
+ bool _track_non_owned_entries;
+ mutable uint64_t _cachedDecisionSuperbucket;
+ mutable bool _cachedOwned;
};
- const DistributorNodeContext& _node_ctx;
- DistributorStripeOperationContext& _op_ctx;
- DistributorStripeInterface& _distributor_interface;
- std::deque<std::pair<framework::MilliSecTime, BucketRequest> > _delayedRequests;
- std::map<uint64_t, BucketRequest> _sentMessages;
- std::unique_ptr<PendingClusterState> _pendingClusterState;
- std::list<PendingClusterState::Summary> _history;
- DistributorMessageSender& _sender;
- std::set<EnqueuedBucketRecheck> _enqueuedRechecks;
- OutdatedNodesMap _outdatedNodesMap;
- framework::MilliSecTimer _transitionTimer;
- std::atomic<bool> _stale_reads_enabled;
using DistributionContexts = std::unordered_map<document::BucketSpace,
std::shared_ptr<BucketSpaceDistributionContext>,
document::BucketSpace::hash>;
- DistributionContexts _active_distribution_contexts;
using DbGuards = std::unordered_map<document::BucketSpace,
std::shared_ptr<BucketDatabase::ReadGuard>,
document::BucketSpace::hash>;
- DbGuards _explicit_transition_read_guard;
- mutable std::mutex _distribution_context_mutex;
- bool _use_legacy_mode;
+ using DelayedRequestsQueue = std::deque<std::pair<framework::MilliSecTime, BucketRequest>>;
+
+ const DistributorNodeContext& _node_ctx;
+ DistributorStripeOperationContext& _op_ctx;
+ DistributorStripeInterface& _distributor_interface;
+ DelayedRequestsQueue _delayedRequests;
+ std::map<uint64_t, BucketRequest> _sentMessages;
+ DistributorMessageSender& _sender;
+ std::set<EnqueuedBucketRecheck> _enqueuedRechecks;
+ std::atomic<bool> _stale_reads_enabled;
+ DistributionContexts _active_distribution_contexts;
+ DbGuards _explicit_transition_read_guard;
+ mutable std::mutex _distribution_context_mutex;
};
}
diff --git a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp
index 191be2a6766..ac97dde6a0c 100644
--- a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp
+++ b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp
@@ -58,10 +58,8 @@ TopLevelBucketDBUpdater::~TopLevelBucketDBUpdater() = default;
void
TopLevelBucketDBUpdater::propagate_active_state_bundle_internally() {
- for (auto* repo : {&_op_ctx.bucket_space_repo(), &_op_ctx.read_only_bucket_space_repo()}) {
- for (auto& iter : *repo) {
- iter.second->setClusterState(_active_state_bundle.getDerivedClusterState(iter.first));
- }
+ for (auto& elem : _op_ctx.bucket_space_states()) {
+ elem.second->set_cluster_state(_active_state_bundle.getDerivedClusterState(elem.first));
}
if (_state_activation_listener) {
_state_activation_listener->on_cluster_state_bundle_activated(_active_state_bundle);
@@ -71,29 +69,22 @@ TopLevelBucketDBUpdater::propagate_active_state_bundle_internally() {
void
TopLevelBucketDBUpdater::bootstrap_distribution_config(std::shared_ptr<const lib::Distribution> distribution) {
auto global_distr = GlobalBucketSpaceDistributionConverter::convert_to_global(*distribution);
- for (auto* repo : {&_op_ctx.bucket_space_repo(), &_op_ctx.read_only_bucket_space_repo()}) {
- repo->get(document::FixedBucketSpaces::default_space()).setDistribution(distribution);
- repo->get(document::FixedBucketSpaces::global_space()).setDistribution(global_distr);
- }
+ _op_ctx.bucket_space_states().get(document::FixedBucketSpaces::default_space()).set_distribution(distribution);
+ _op_ctx.bucket_space_states().get(document::FixedBucketSpaces::global_space()).set_distribution(global_distr);
// TODO STRIPE do we need to bootstrap the stripes as well here? Or do they do this on their own volition?
// ... need to take a guard if so, so can probably not be done at ctor time..?
}
void
TopLevelBucketDBUpdater::propagate_distribution_config(const BucketSpaceDistributionConfigs& configs) {
- for (auto* repo : {&_op_ctx.bucket_space_repo(), &_op_ctx.read_only_bucket_space_repo()}) {
- if (auto distr = configs.get_or_nullptr(document::FixedBucketSpaces::default_space())) {
- repo->get(document::FixedBucketSpaces::default_space()).setDistribution(distr);
- }
- if (auto distr = configs.get_or_nullptr(document::FixedBucketSpaces::global_space())) {
- repo->get(document::FixedBucketSpaces::global_space()).setDistribution(distr);
- }
+ if (auto distr = configs.get_or_nullptr(document::FixedBucketSpaces::default_space())) {
+ _op_ctx.bucket_space_states().get(document::FixedBucketSpaces::default_space()).set_distribution(distr);
+ }
+ if (auto distr = configs.get_or_nullptr(document::FixedBucketSpaces::global_space())) {
+ _op_ctx.bucket_space_states().get(document::FixedBucketSpaces::global_space()).set_distribution(distr);
}
}
-// FIXME what about bucket DB replica update timestamp allocations?! Replace with u64 counter..?
-// Must at the very least ensure we use stripe-local TS generation for DB inserts...! i.e. no global TS
-// Or do we have to touch these at all here? Just defer all this via stripe interface?
void
TopLevelBucketDBUpdater::flush()
{
@@ -125,10 +116,8 @@ TopLevelBucketDBUpdater::remove_superfluous_buckets(
bool is_distribution_config_change)
{
const char* up_states = storage_node_up_states();
- // TODO STRIPE explicit space -> config mapping, don't get via repo
- // ... but we need to get the current cluster state per space..!
- for (auto& elem : _op_ctx.bucket_space_repo()) {
- const auto& old_cluster_state(elem.second->getClusterState());
+ for (auto& elem : _op_ctx.bucket_space_states()) {
+ const auto& old_cluster_state(elem.second->get_cluster_state());
const auto& new_cluster_state = new_state.getDerivedClusterState(elem.first);
// Running a full DB sweep is expensive, so if the cluster state transition does
@@ -209,7 +198,7 @@ TopLevelBucketDBUpdater::storage_distribution_changed(const BucketSpaceDistribut
_node_ctx.clock(),
std::move(clusterInfo),
_sender,
- _op_ctx.bucket_space_repo(), // TODO STRIPE cannot use!
+ _op_ctx.bucket_space_states(),
_op_ctx.generate_unique_timestamp());
_outdated_nodes_map = _pending_cluster_state->getOutdatedNodesMap();
@@ -266,7 +255,7 @@ TopLevelBucketDBUpdater::onSetSystemState(
_node_ctx.clock(),
std::move(clusterInfo),
_sender,
- _op_ctx.bucket_space_repo(), // TODO STRIPE remove
+ _op_ctx.bucket_space_states(),
cmd,
_outdated_nodes_map,
_op_ctx.generate_unique_timestamp()); // FIXME STRIPE must be atomic across all threads
diff --git a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h
index 2eccb70fdf9..e01ea30cbda 100644
--- a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h
+++ b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h
@@ -2,7 +2,7 @@
#pragma once
#include "bucketlistmerger.h"
-#include "distributor_stripe_component.h"
+#include "distributor_component.h"
#include "distributormessagesender.h"
#include "messageguard.h"
#include "operation_routing_snapshot.h"
diff --git a/storage/src/vespa/storage/distributor/top_level_distributor.cpp b/storage/src/vespa/storage/distributor/top_level_distributor.cpp
index ae414b2a85e..5f8f05c3ee0 100644
--- a/storage/src/vespa/storage/distributor/top_level_distributor.cpp
+++ b/storage/src/vespa/storage/distributor/top_level_distributor.cpp
@@ -22,13 +22,11 @@
#include <vespa/storage/common/node_identity.h>
#include <vespa/storage/common/nodestateupdater.h>
#include <vespa/storage/config/distributorconfiguration.h>
-#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/visitor.h>
#include <vespa/storageframework/generic/status/xmlstatusreporter.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/util/memoryusage.h>
-#include <vespa/vespalib/util/time.h>
#include <algorithm>
#include <vespa/log/log.h>
@@ -38,16 +36,6 @@ using namespace std::chrono_literals;
namespace storage::distributor {
-/* TODO STRIPE
- * - need a DistributorStripeComponent per stripe
- * - or better, remove entirely!
- * - probably also DistributorStripeInterface since it's used to send
- * - metrics aggregation
- * - host info aggregation..!!
- * - handled if Distributor getMinReplica etc delegates to stripes?
- * - these are already thread safe
- * - status aggregation
- */
TopLevelDistributor::TopLevelDistributor(DistributorComponentRegister& compReg,
const NodeIdentity& node_identity,
framework::TickingThreadPool& threadPool,
@@ -61,23 +49,11 @@ TopLevelDistributor::TopLevelDistributor(DistributorComponentRegister& compReg,
_node_identity(node_identity),
_comp_reg(compReg),
_done_init_handler(done_init_handler),
- _use_legacy_mode(num_distributor_stripes == 0),
_done_initializing(false),
- _metrics(std::make_shared<DistributorMetricSet>()),
- _total_metrics(_use_legacy_mode ? std::shared_ptr<DistributorTotalMetrics>()
- : std::make_shared<DistributorTotalMetrics>(num_distributor_stripes)),
- _ideal_state_metrics(_use_legacy_mode ? std::make_shared<IdealStateMetricSet>()
- : std::shared_ptr<IdealStateMetricSet>()),
- _ideal_state_total_metrics(_use_legacy_mode ? std::shared_ptr<IdealStateTotalMetrics>()
- : std::make_shared<IdealStateTotalMetrics>(num_distributor_stripes)),
+ _total_metrics(std::make_shared<DistributorTotalMetrics>(num_distributor_stripes)),
+ _ideal_state_total_metrics(std::make_shared<IdealStateTotalMetrics>(num_distributor_stripes)),
_messageSender(messageSender),
_n_stripe_bits(0),
- _stripe(std::make_unique<DistributorStripe>(compReg,
- _use_legacy_mode ? *_metrics : _total_metrics->stripe(0),
- (_use_legacy_mode ? *_ideal_state_metrics
- : _ideal_state_total_metrics->stripe(0)),
- node_identity, threadPool,
- _done_init_handler, *this, *this, _use_legacy_mode, _done_initializing)),
_stripe_pool(stripe_pool),
_stripes(),
_stripe_accessor(),
@@ -105,35 +81,33 @@ TopLevelDistributor::TopLevelDistributor(DistributorComponentRegister& compReg,
_next_distribution(),
_current_internal_config_generation(_component.internal_config_generation())
{
- _component.registerMetric(_use_legacy_mode ? *_metrics : *_total_metrics);
- _ideal_state_component.registerMetric(_use_legacy_mode ? *_ideal_state_metrics :
- *_ideal_state_total_metrics);
+ _component.registerMetric(*_total_metrics);
+ _ideal_state_component.registerMetric(*_ideal_state_total_metrics);
_component.registerMetricUpdateHook(_metricUpdateHook, framework::SecondTime(0));
- if (!_use_legacy_mode) {
- assert(num_distributor_stripes == adjusted_num_stripes(num_distributor_stripes));
- _n_stripe_bits = calc_num_stripe_bits(num_distributor_stripes);
- LOG(debug, "Setting up distributor with %u stripes using %u stripe bits",
- num_distributor_stripes, _n_stripe_bits);
- _stripe_accessor = std::make_unique<MultiThreadedStripeAccessor>(_stripe_pool);
- _bucket_db_updater = std::make_unique<TopLevelBucketDBUpdater>(_component, _component,
- *this, *this,
- _component.getDistribution(),
- *_stripe_accessor,
- this);
- _stripes.emplace_back(std::move(_stripe));
- for (size_t i = 1; i < num_distributor_stripes; ++i) {
- _stripes.emplace_back(std::make_unique<DistributorStripe>(compReg,
- _total_metrics->stripe(i),
- _ideal_state_total_metrics->stripe(i),
- node_identity, threadPool,
- _done_init_handler, *this, *this, _use_legacy_mode,
- _done_initializing, i));
- }
- _stripe_scan_stats.resize(num_distributor_stripes);
- _distributorStatusDelegate.registerStatusPage();
- _bucket_db_status_delegate = std::make_unique<StatusReporterDelegate>(compReg, *this, *_bucket_db_updater);
- _bucket_db_status_delegate->registerStatusPage();
- }
+
+ assert(num_distributor_stripes == adjusted_num_stripes(num_distributor_stripes));
+ _n_stripe_bits = calc_num_stripe_bits(num_distributor_stripes);
+ LOG(debug, "Setting up distributor with %u stripes using %u stripe bits",
+ num_distributor_stripes, _n_stripe_bits);
+ _stripe_accessor = std::make_unique<MultiThreadedStripeAccessor>(_stripe_pool);
+ _bucket_db_updater = std::make_unique<TopLevelBucketDBUpdater>(_component, _component,
+ *this, *this,
+ _component.getDistribution(),
+ *_stripe_accessor,
+ this);
+ for (size_t i = 0; i < num_distributor_stripes; ++i) {
+ _stripes.emplace_back(std::make_unique<DistributorStripe>(compReg,
+ _total_metrics->stripe(i),
+ _ideal_state_total_metrics->stripe(i),
+ node_identity,
+ *this, *this,
+ _done_initializing, i));
+ }
+ _stripe_scan_stats.resize(num_distributor_stripes);
+ _distributorStatusDelegate.registerStatusPage();
+ _bucket_db_status_delegate = std::make_unique<StatusReporterDelegate>(compReg, *this, *_bucket_db_updater);
+ _bucket_db_status_delegate->registerStatusPage();
+
_hostInfoReporter.enableReporting(config().getEnableHostInfoReporting());
hostInfoReporterRegistrar.registerReporter(&_hostInfoReporter);
propagateDefaultDistribution(_component.getDistribution());
@@ -148,114 +122,7 @@ TopLevelDistributor::~TopLevelDistributor()
DistributorMetricSet&
TopLevelDistributor::getMetrics()
{
- return _use_legacy_mode ? *_metrics : _total_metrics->bucket_db_updater_metrics();
-}
-
-// TODO STRIPE figure out how to handle inspection functions used by tests when legacy mode no longer exists.
-// All functions below that assert on _use_legacy_mode are only currently used by tests
-
-bool
-TopLevelDistributor::isInRecoveryMode() const noexcept {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->isInRecoveryMode();
-}
-
-const PendingMessageTracker&
-TopLevelDistributor::getPendingMessageTracker() const {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->getPendingMessageTracker();
-}
-
-PendingMessageTracker&
-TopLevelDistributor::getPendingMessageTracker() {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->getPendingMessageTracker();
-}
-
-DistributorBucketSpaceRepo&
-TopLevelDistributor::getBucketSpaceRepo() noexcept {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->getBucketSpaceRepo();
-}
-
-const DistributorBucketSpaceRepo&
-TopLevelDistributor::getBucketSpaceRepo() const noexcept {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->getBucketSpaceRepo();
-}
-
-DistributorBucketSpaceRepo&
-TopLevelDistributor::getReadOnlyBucketSpaceRepo() noexcept {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->getReadOnlyBucketSpaceRepo();
-}
-
-const DistributorBucketSpaceRepo&
-TopLevelDistributor::getReadyOnlyBucketSpaceRepo() const noexcept {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->getReadOnlyBucketSpaceRepo();;
-}
-
-storage::distributor::DistributorStripeComponent&
-TopLevelDistributor::distributor_component() noexcept {
- assert(_use_legacy_mode); // TODO STRIPE
- // TODO STRIPE We need to grab the stripe's component since tests like to access
- // these things uncomfortably directly.
- return _stripe->_component;
-}
-
-StripeBucketDBUpdater&
-TopLevelDistributor::bucket_db_updater() {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->bucket_db_updater();
-}
-
-const StripeBucketDBUpdater&
-TopLevelDistributor::bucket_db_updater() const {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->bucket_db_updater();
-}
-
-IdealStateManager&
-TopLevelDistributor::ideal_state_manager() {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->ideal_state_manager();
-}
-
-const IdealStateManager&
-TopLevelDistributor::ideal_state_manager() const {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->ideal_state_manager();
-}
-
-ExternalOperationHandler&
-TopLevelDistributor::external_operation_handler() {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->external_operation_handler();
-}
-
-const ExternalOperationHandler&
-TopLevelDistributor::external_operation_handler() const {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->external_operation_handler();
-}
-
-BucketDBMetricUpdater&
-TopLevelDistributor::bucket_db_metric_updater() const noexcept {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->_bucketDBMetricUpdater;
-}
-
-const DistributorConfiguration&
-TopLevelDistributor::getConfig() const {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->getConfig();
-}
-
-std::chrono::steady_clock::duration
-TopLevelDistributor::db_memory_sample_interval() const noexcept {
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->db_memory_sample_interval();
+ return _total_metrics->bucket_db_updater_metrics();
}
void
@@ -272,8 +139,6 @@ TopLevelDistributor::onOpen()
{
LOG(debug, "Distributor::onOpen invoked");
setNodeStateUp();
- framework::MilliSecTime maxProcessingTime(60 * 1000);
- framework::MilliSecTime waitTime(1000);
if (_component.getDistributorConfig().startDistributorThread) {
_threadPool.addThread(*this);
_threadPool.start(_component.getThreadPool());
@@ -289,33 +154,27 @@ void TopLevelDistributor::onClose() {
// Note: In a running system this function is called by the main thread in StorageApp as part of shutdown.
// The distributor and stripe thread pools are already stopped at this point.
LOG(debug, "Distributor::onClose invoked");
- if (_use_legacy_mode) {
- _stripe->flush_and_close();
- } else {
- // Tests may run with multiple stripes but without threads (for determinism's sake),
- // so only try to flush stripes if a pool is running.
- // TODO STRIPE probably also need to flush when running tests to handle any explicit close-tests.
- if (_stripe_pool.stripe_count() > 0) {
- assert(_stripe_pool.is_stopped());
- for (auto& thread : _stripe_pool) {
- thread->stripe().flush_and_close();
- }
+ // Tests may run with multiple stripes but without threads (for determinism's sake),
+ // so only try to flush stripes if a pool is running.
+ // TODO STRIPE probably also need to flush when running tests to handle any explicit close-tests.
+ if (_stripe_pool.stripe_count() > 0) {
+ assert(_stripe_pool.is_stopped());
+ for (auto& thread : _stripe_pool) {
+ thread->stripe().flush_and_close();
}
- assert(_bucket_db_updater);
- _bucket_db_updater->flush();
}
+ assert(_bucket_db_updater);
+ _bucket_db_updater->flush();
}
void
TopLevelDistributor::start_stripe_pool()
{
- if (!_use_legacy_mode) {
- std::vector<TickableStripe*> pool_stripes;
- for (auto& stripe : _stripes) {
- pool_stripes.push_back(stripe.get());
- }
- _stripe_pool.start(pool_stripes); // If unit testing, this won't actually start any OS threads
+ std::vector<TickableStripe*> pool_stripes;
+ for (auto& stripe : _stripes) {
+ pool_stripes.push_back(stripe.get());
}
+ _stripe_pool.start(pool_stripes); // If unit testing, this won't actually start any OS threads
}
void
@@ -410,37 +269,19 @@ TopLevelDistributor::stripe_of_bucket_id(const document::BucketId& bucket_id, co
bool
TopLevelDistributor::onDown(const std::shared_ptr<api::StorageMessage>& msg)
{
- if (_use_legacy_mode) {
- return _stripe->handle_or_enqueue_message(msg);
- } else {
- if (should_be_handled_by_top_level_bucket_db_updater(*msg)) {
- dispatch_to_main_distributor_thread_queue(msg);
- return true;
- }
- auto bucket_id = get_bucket_id_for_striping(*msg, _component);
- uint32_t stripe_idx = stripe_of_bucket_id(bucket_id, *msg);
- MBUS_TRACE(msg->getTrace(), 9,
- vespalib::make_string("Distributor::onDown(): Dispatch message to stripe %u", stripe_idx));
- bool handled = _stripes[stripe_idx]->handle_or_enqueue_message(msg);
- if (handled) {
- _stripe_pool.notify_stripe_event_has_triggered(stripe_idx);
- }
- return handled;
+ if (should_be_handled_by_top_level_bucket_db_updater(*msg)) {
+ dispatch_to_main_distributor_thread_queue(msg);
+ return true;
}
-}
-
-bool
-TopLevelDistributor::handleReply(const std::shared_ptr<api::StorageReply>& reply)
-{
- assert(_use_legacy_mode);
- return _stripe->handleReply(reply);
-}
-
-bool
-TopLevelDistributor::handleMessage(const std::shared_ptr<api::StorageMessage>& msg)
-{
- assert(_use_legacy_mode); // TODO STRIPE
- return _stripe->handleMessage(msg);
+ auto bucket_id = get_bucket_id_for_striping(*msg, _component);
+ uint32_t stripe_idx = stripe_of_bucket_id(bucket_id, *msg);
+ MBUS_TRACE(msg->getTrace(), 9,
+ vespalib::make_string("Distributor::onDown(): Dispatch message to stripe %u", stripe_idx));
+ bool handled = _stripes[stripe_idx]->handle_or_enqueue_message(msg);
+ if (handled) {
+ _stripe_pool.notify_stripe_event_has_triggered(stripe_idx);
+ }
+ return handled;
}
const DistributorConfiguration&
@@ -461,53 +302,28 @@ TopLevelDistributor::sendReply(const std::shared_ptr<api::StorageReply>& reply)
sendUp(reply);
}
-const lib::ClusterStateBundle&
-TopLevelDistributor::getClusterStateBundle() const
-{
- assert(_use_legacy_mode); // TODO STRIPE
- // TODO STRIPE must offer a single unifying state across stripes
- return _stripe->getClusterStateBundle();
-}
-
-void
-TopLevelDistributor::enableClusterStateBundle(const lib::ClusterStateBundle& state)
-{
- assert(_use_legacy_mode); // TODO STRIPE
- // TODO STRIPE make test injection/force-function
- _stripe->enableClusterStateBundle(state);
-}
-
void
TopLevelDistributor::storageDistributionChanged()
{
- if (!_use_legacy_mode) {
- if (!_distribution || (*_component.getDistribution() != *_distribution)) {
- LOG(debug, "Distribution changed to %s, must re-fetch bucket information",
- _component.getDistribution()->toString().c_str());
- _next_distribution = _component.getDistribution(); // FIXME this is not thread safe
- } else {
- LOG(debug, "Got distribution change, but the distribution %s was the same as before: %s",
- _component.getDistribution()->toString().c_str(),
- _distribution->toString().c_str());
- }
+ if (!_distribution || (*_component.getDistribution() != *_distribution)) {
+ LOG(debug, "Distribution changed to %s, must re-fetch bucket information",
+ _component.getDistribution()->toString().c_str());
+ _next_distribution = _component.getDistribution(); // FIXME this is not thread safe
} else {
- // May happen from any thread.
- _stripe->storage_distribution_changed();
+ LOG(debug, "Got distribution change, but the distribution %s was the same as before: %s",
+ _component.getDistribution()->toString().c_str(),
+ _distribution->toString().c_str());
}
}
void
TopLevelDistributor::enableNextDistribution()
{
- if (!_use_legacy_mode) {
- if (_next_distribution) {
- _distribution = _next_distribution;
- _next_distribution = std::shared_ptr<lib::Distribution>();
- auto new_configs = BucketSpaceDistributionConfigs::from_default_distribution(_distribution);
- _bucket_db_updater->storage_distribution_changed(new_configs);
- }
- } else {
- _stripe->enableNextDistribution();
+ if (_next_distribution) {
+ _distribution = _next_distribution;
+ _next_distribution = std::shared_ptr<lib::Distribution>();
+ auto new_configs = BucketSpaceDistributionConfigs::from_default_distribution(_distribution);
+ _bucket_db_updater->storage_distribution_changed(new_configs);
}
}
@@ -517,79 +333,51 @@ void
TopLevelDistributor::propagateDefaultDistribution(
std::shared_ptr<const lib::Distribution> distribution)
{
- // TODO STRIPE cannot directly access stripe when not in legacy mode!
- if (_use_legacy_mode) {
- _stripe->propagateDefaultDistribution(std::move(distribution));
- } else {
- // Should only be called at ctor time, at which point the pool is not yet running.
- assert(_stripe_pool.stripe_count() == 0);
- auto new_configs = BucketSpaceDistributionConfigs::from_default_distribution(std::move(distribution));
- for (auto& stripe : _stripes) {
- stripe->update_distribution_config(new_configs);
- }
+ // Should only be called at ctor time, at which point the pool is not yet running.
+ assert(_stripe_pool.stripe_count() == 0);
+ auto new_configs = BucketSpaceDistributionConfigs::from_default_distribution(std::move(distribution));
+ for (auto& stripe : _stripes) {
+ stripe->update_distribution_config(new_configs);
}
}
std::unordered_map<uint16_t, uint32_t>
TopLevelDistributor::getMinReplica() const
{
- if (_use_legacy_mode) {
- return _stripe->getMinReplica();
- } else {
- std::unordered_map<uint16_t, uint32_t> result;
- for (const auto& stripe : _stripes) {
- merge_min_replica_stats(result, stripe->getMinReplica());
- }
- return result;
+ std::unordered_map<uint16_t, uint32_t> result;
+ for (const auto& stripe : _stripes) {
+ merge_min_replica_stats(result, stripe->getMinReplica());
}
+ return result;
}
BucketSpacesStatsProvider::PerNodeBucketSpacesStats
TopLevelDistributor::getBucketSpacesStats() const
{
- if (_use_legacy_mode) {
- return _stripe->getBucketSpacesStats();
- } else {
- BucketSpacesStatsProvider::PerNodeBucketSpacesStats result;
- for (const auto& stripe : _stripes) {
- merge_per_node_bucket_spaces_stats(result, stripe->getBucketSpacesStats());
- }
- return result;
+ BucketSpacesStatsProvider::PerNodeBucketSpacesStats result;
+ for (const auto& stripe : _stripes) {
+ merge_per_node_bucket_spaces_stats(result, stripe->getBucketSpacesStats());
}
+ return result;
}
SimpleMaintenanceScanner::PendingMaintenanceStats
TopLevelDistributor::pending_maintenance_stats() const {
- if (_use_legacy_mode) {
- return _stripe->pending_maintenance_stats();
- } else {
- SimpleMaintenanceScanner::PendingMaintenanceStats result;
- for (const auto& stripe : _stripes) {
- result.merge(stripe->pending_maintenance_stats());
- }
- return result;
+ SimpleMaintenanceScanner::PendingMaintenanceStats result;
+ for (const auto& stripe : _stripes) {
+ result.merge(stripe->pending_maintenance_stats());
}
+ return result;
}
void
TopLevelDistributor::propagateInternalScanMetricsToExternal()
{
- if (_use_legacy_mode) {
- _stripe->propagateInternalScanMetricsToExternal();
- } else {
- for (auto &stripe : _stripes) {
- stripe->propagateInternalScanMetricsToExternal();
- }
- _total_metrics->aggregate();
- _ideal_state_total_metrics->aggregate();
+ for (auto &stripe : _stripes) {
+ stripe->propagateInternalScanMetricsToExternal();
}
-}
-
-void
-TopLevelDistributor::scanAllBuckets()
-{
- assert(_use_legacy_mode); // TODO STRIPE
- _stripe->scanAllBuckets();
+ _total_metrics->aggregate();
+ _ideal_state_total_metrics->aggregate();
}
void
@@ -604,7 +392,6 @@ TopLevelDistributor::dispatch_to_main_distributor_thread_queue(const std::shared
void
TopLevelDistributor::fetch_external_messages()
{
- assert(!_use_legacy_mode);
assert(_fetched_messages.empty());
_fetched_messages.swap(_message_queue);
}
@@ -612,7 +399,6 @@ TopLevelDistributor::fetch_external_messages()
void
TopLevelDistributor::process_fetched_external_messages()
{
- assert(!_use_legacy_mode);
for (auto& msg : _fetched_messages) {
MBUS_TRACE(msg->getTrace(), 9, "Distributor: Processing message in main thread");
if (!msg->callHandler(*_bucket_db_updater, msg)) {
@@ -627,36 +413,25 @@ TopLevelDistributor::process_fetched_external_messages()
}
framework::ThreadWaitInfo
-TopLevelDistributor::doCriticalTick(framework::ThreadIndex idx)
+TopLevelDistributor::doCriticalTick([[maybe_unused]] framework::ThreadIndex idx)
{
_tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
- if (!_use_legacy_mode) {
- enableNextDistribution();
- fetch_status_requests();
- fetch_external_messages();
- }
+ enableNextDistribution();
+ fetch_status_requests();
+ fetch_external_messages();
// Propagates any new configs down to stripe(s)
enable_next_config_if_changed();
- if (_use_legacy_mode) {
- _stripe->doCriticalTick(idx);
- _tickResult.merge(_stripe->_tickResult);
- }
return _tickResult;
}
framework::ThreadWaitInfo
-TopLevelDistributor::doNonCriticalTick(framework::ThreadIndex idx)
+TopLevelDistributor::doNonCriticalTick([[maybe_unused]] framework::ThreadIndex idx)
{
- if (_use_legacy_mode) {
- _stripe->doNonCriticalTick(idx);
- _tickResult = _stripe->_tickResult;
- } else {
- _tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
- handle_status_requests();
- process_fetched_external_messages();
- send_host_info_if_appropriate();
- _bucket_db_updater->resend_delayed_messages();
- }
+ _tickResult = framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN;
+ handle_status_requests();
+ process_fetched_external_messages();
+ send_host_info_if_appropriate();
+ _bucket_db_updater->resend_delayed_messages();
return _tickResult;
}
@@ -666,20 +441,13 @@ TopLevelDistributor::enable_next_config_if_changed()
// Only lazily trigger a config propagation and internal update if something has _actually changed_.
if (_component.internal_config_generation() != _current_internal_config_generation) {
_total_config = _component.total_distributor_config_sp();
- if (!_use_legacy_mode) {
+ {
auto guard = _stripe_accessor->rendezvous_and_hold_all();
guard->update_total_distributor_config(_component.total_distributor_config_sp());
- } else {
- _stripe->update_total_distributor_config(_component.total_distributor_config_sp());
}
_hostInfoReporter.enableReporting(config().getEnableHostInfoReporting());
_current_internal_config_generation = _component.internal_config_generation();
}
- if (_use_legacy_mode) {
- // TODO STRIPE remove these once tests are fixed to trigger reconfig properly
- _hostInfoReporter.enableReporting(getConfig().getEnableHostInfoReporting());
- _stripe->enableNextConfig(); // TODO STRIPE avoid redundant call
- }
}
void
@@ -688,7 +456,6 @@ TopLevelDistributor::notify_stripe_wants_to_send_host_info(uint16_t stripe_index
// TODO STRIPE assert(_done_initializing); (can't currently do due to some unit test restrictions; uncomment and find out)
LOG(debug, "Stripe %u has signalled an intent to send host info out-of-band", stripe_index);
std::lock_guard lock(_stripe_scan_notify_mutex);
- assert(!_use_legacy_mode);
assert(stripe_index < _stripe_scan_stats.size());
auto& stats = _stripe_scan_stats[stripe_index];
stats.wants_to_send_host_info = true;
@@ -735,7 +502,6 @@ TopLevelDistributor::send_host_info_if_appropriate()
void
TopLevelDistributor::on_cluster_state_bundle_activated(const lib::ClusterStateBundle& new_bundle)
{
- assert(!_use_legacy_mode);
lib::Node my_node(lib::NodeType::DISTRIBUTOR, getDistributorIndex());
if (!_done_initializing && (new_bundle.getBaselineClusterState()->getNodeState(my_node).getState() == lib::State::UP)) {
_done_initializing = true;
@@ -780,7 +546,6 @@ TopLevelDistributor::work_was_done() const noexcept
vespalib::string
TopLevelDistributor::getReportContentType(const framework::HttpUrlPath& path) const
{
- assert(!_use_legacy_mode);
if (path.hasAttribute("page")) {
if (path.getAttribute("page") == "buckets") {
return "text/html";
@@ -792,18 +557,10 @@ TopLevelDistributor::getReportContentType(const framework::HttpUrlPath& path) co
}
}
-std::string
-TopLevelDistributor::getActiveIdealStateOperations() const
-{
- assert(_use_legacy_mode);
- return _stripe->getActiveIdealStateOperations();
-}
-
bool
TopLevelDistributor::reportStatus(std::ostream& out,
const framework::HttpUrlPath& path) const
{
- assert(!_use_legacy_mode);
if (!path.hasAttribute("page") || path.getAttribute("page") == "buckets") {
framework::PartlyHtmlStatusReporter htmlReporter(*this);
htmlReporter.reportHtmlHeader(out, path);
@@ -816,7 +573,7 @@ TopLevelDistributor::reportStatus(std::ostream& out,
} else {
auto guard = _stripe_accessor->rendezvous_and_hold_all();
const auto& op_ctx = _component;
- for (const auto& space : op_ctx.bucket_space_repo()) {
+ for (const auto& space : op_ctx.bucket_space_states()) {
out << "<h2>" << document::FixedBucketSpaces::to_string(space.first) << " - " << space.first << "</h2>\n";
guard->report_bucket_db_status(space.first, out);
}
@@ -842,7 +599,6 @@ TopLevelDistributor::reportStatus(std::ostream& out,
bool
TopLevelDistributor::handleStatusRequest(const DelegatedStatusRequest& request) const
{
- assert(!_use_legacy_mode);
auto wrappedRequest = std::make_shared<DistributorStatus>(request);
{
framework::TickingLockGuard guard(_threadPool.freezeCriticalTicks());
diff --git a/storage/src/vespa/storage/distributor/top_level_distributor.h b/storage/src/vespa/storage/distributor/top_level_distributor.h
index 81a30accf01..420d0df08ed 100644
--- a/storage/src/vespa/storage/distributor/top_level_distributor.h
+++ b/storage/src/vespa/storage/distributor/top_level_distributor.h
@@ -101,8 +101,6 @@ public:
void storageDistributionChanged() override;
- bool handleReply(const std::shared_ptr<api::StorageReply>& reply);
-
// StatusReporter implementation
vespalib::string getReportContentType(const framework::HttpUrlPath&) const override;
bool reportStatus(std::ostream&, const framework::HttpUrlPath&) const override;
@@ -134,43 +132,12 @@ public:
private:
friend class DistributorStripeTestUtil;
- friend class DistributorTestUtil;
friend class TopLevelDistributorTestUtil;
- friend class LegacyBucketDBUpdaterTest;
friend class MetricUpdateHook;
friend struct DistributorStripeTest;
- friend struct LegacyDistributorTest;
friend struct TopLevelDistributorTest;
void setNodeStateUp();
- bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
-
- /**
- * Enables a new cluster state. Used by tests to bypass TopLevelBucketDBUpdater.
- */
- void enableClusterStateBundle(const lib::ClusterStateBundle& clusterStateBundle);
-
- // Accessors used by tests
- std::string getActiveIdealStateOperations() const;
- const lib::ClusterStateBundle& getClusterStateBundle() const;
- const DistributorConfiguration& getConfig() const;
- bool isInRecoveryMode() const noexcept;
- PendingMessageTracker& getPendingMessageTracker();
- const PendingMessageTracker& getPendingMessageTracker() const;
- DistributorBucketSpaceRepo& getBucketSpaceRepo() noexcept;
- const DistributorBucketSpaceRepo& getBucketSpaceRepo() const noexcept;
- DistributorBucketSpaceRepo& getReadOnlyBucketSpaceRepo() noexcept;
- const DistributorBucketSpaceRepo& getReadyOnlyBucketSpaceRepo() const noexcept;
- storage::distributor::DistributorStripeComponent& distributor_component() noexcept;
- std::chrono::steady_clock::duration db_memory_sample_interval() const noexcept;
-
- StripeBucketDBUpdater& bucket_db_updater();
- const StripeBucketDBUpdater& bucket_db_updater() const;
- IdealStateManager& ideal_state_manager();
- const IdealStateManager& ideal_state_manager() const;
- ExternalOperationHandler& external_operation_handler();
- const ExternalOperationHandler& external_operation_handler() const;
- BucketDBMetricUpdater& bucket_db_metric_updater() const noexcept;
/**
* Return a copy of the latest min replica data, see MinReplicaProvider.
@@ -185,7 +152,6 @@ private:
* Takes metric lock.
*/
void propagateInternalScanMetricsToExternal();
- void scanAllBuckets();
void enable_next_config_if_changed();
void fetch_status_requests();
void handle_status_requests();
@@ -217,15 +183,11 @@ private:
const NodeIdentity _node_identity;
DistributorComponentRegister& _comp_reg;
DoneInitializeHandler& _done_init_handler;
- const bool _use_legacy_mode;
bool _done_initializing;
- std::shared_ptr<DistributorMetricSet> _metrics;
std::shared_ptr<DistributorTotalMetrics> _total_metrics;
- std::shared_ptr<IdealStateMetricSet> _ideal_state_metrics;
std::shared_ptr<IdealStateTotalMetrics> _ideal_state_total_metrics;
ChainedMessageSender* _messageSender;
uint8_t _n_stripe_bits;
- std::unique_ptr<DistributorStripe> _stripe;
DistributorStripePool& _stripe_pool;
std::vector<std::unique_ptr<DistributorStripe>> _stripes;
std::unique_ptr<StripeAccessor> _stripe_accessor;
diff --git a/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp b/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
index 614d11aabfa..623de8d85d1 100644
--- a/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/searchenvironment.cpp
@@ -16,8 +16,8 @@ __thread SearchEnvironment::EnvMap * SearchEnvironment::_localEnvMap=0;
SearchEnvironment::Env::Env(const vespalib::string & muffens, const config::ConfigUri & configUri, Fast_NormalizeWordFolder & wf) :
_configId(configUri.getConfigId()),
_configurer(std::make_unique<config::SimpleConfigRetriever>(createKeySet(configUri.getConfigId()), configUri.getContext()), this),
- _vsmAdapter(new VSMAdapter(muffens, _configId, wf)),
- _rankManager(new RankManager(_vsmAdapter.get()))
+ _vsmAdapter(std::make_unique<VSMAdapter>(muffens, _configId, wf)),
+ _rankManager(std::make_unique<RankManager>(_vsmAdapter.get()))
{
_configurer.start();
diff --git a/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp b/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
index 5e1e95b4681..a1ec153d7da 100644
--- a/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/searchvisitor.cpp
@@ -180,7 +180,7 @@ SearchVisitor::SearchVisitor(StorageComponent& component,
_hitCount(0),
_hitsRejectedCount(0),
_query(),
- _queryResult(new documentapi::QueryResultMessage()),
+ _queryResult(std::make_unique<documentapi::QueryResultMessage>()),
_fieldSearcherMap(),
_docTypeMapping(),
_fieldSearchSpecMap(),
@@ -192,10 +192,10 @@ SearchVisitor::SearchVisitor(StorageComponent& component,
_groupingList(),
_attributeFields(),
_sortList(),
- _searchBuffer(new vsm::SearcherBuf()),
+ _searchBuffer(std::make_shared<vsm::SearcherBuf>()),
_tmpSortBuffer(256),
- _documentIdAttributeBacking(new search::SingleStringExtAttribute("[docid]") ),
- _rankAttributeBacking(new search::SingleFloatExtAttribute("[rank]") ),
+ _documentIdAttributeBacking(std::make_shared<search::SingleStringExtAttribute>("[docid]") ),
+ _rankAttributeBacking(std::make_shared<search::SingleFloatExtAttribute>("[rank]") ),
_documentIdAttribute(dynamic_cast<search::SingleStringExtAttribute &>(*_documentIdAttributeBacking)),
_rankAttribute(dynamic_cast<search::SingleFloatExtAttribute &>(*_rankAttributeBacking)),
_shouldFillRankAttribute(false),
@@ -680,8 +680,8 @@ SearchVisitor::setupScratchDocument(const StringFieldIdTMap & fieldsInQuery)
void
SearchVisitor::setupDocsumObjects()
{
- std::unique_ptr<DocsumFilter> docsumFilter(new DocsumFilter(_vsmAdapter->getDocsumTools(),
- _rankController.getRankProcessor()->getHitCollector()));
+ auto docsumFilter = std::make_unique<DocsumFilter>(_vsmAdapter->getDocsumTools(),
+ _rankController.getRankProcessor()->getHitCollector());
docsumFilter->init(_fieldSearchSpecMap.nameIdMap(), *_fieldPathMap);
docsumFilter->setSnippetModifiers(_snippetModifierManager.getModifiers());
_summaryGenerator.setFilter(std::move(docsumFilter));
@@ -815,7 +815,7 @@ SearchVisitor::setupGrouping(const std::vector<char> & groupingBlob)
uint32_t numGroupings(0);
is >> numGroupings;
for(size_t i(0); i < numGroupings; i++) {
- std::unique_ptr<Grouping> ag(new Grouping());
+ auto ag = std::make_unique<Grouping>();
ag->deserialize(is);
GroupingList::value_type groupingPtr(ag.release());
Grouping & grouping = *groupingPtr;
@@ -882,7 +882,7 @@ SearchVisitor::handleDocuments(const document::BucketId&,
const document::DocumentType* defaultDocType = _docTypeMapping.getDefaultDocumentType();
assert(defaultDocType);
for (const auto & entry : entries) {
- StorageDocument::UP document(new StorageDocument(entry->releaseDocument(), _fieldPathMap, highestFieldNo));
+ auto document = std::make_unique<StorageDocument>(entry->releaseDocument(), _fieldPathMap, highestFieldNo);
try {
if (defaultDocType != nullptr
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
index ec8adfba67d..769a2a54c95 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
@@ -280,14 +280,19 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient {
}
@Override
- public void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry) {
+ public void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry, Optional<String> reason) {
URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s/member/%s/decision", athenzRole.domain().getName(), athenzRole.roleName(), athenzUser.getFullName()));
MembershipEntity membership = new MembershipEntity.RoleMembershipEntity(athenzUser.getFullName(), true, athenzRole.roleName(), Long.toString(expiry.getEpochSecond()));
- HttpUriRequest request = RequestBuilder.put()
+
+ var requestBuilder = RequestBuilder.put()
.setUri(uri)
- .setEntity(toJsonStringEntity(membership))
- .build();
- execute(request, response -> readEntity(response, Void.class));
+ .setEntity(toJsonStringEntity(membership));
+
+ if (reason.filter(s -> !s.isBlank()).isPresent()) {
+ requestBuilder.addHeader("Y-Audit-Ref", reason.get());
+ }
+
+ execute(requestBuilder.build(), response -> readEntity(response, Void.class));
}
@Override
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
index 548b95ee4a4..b1c26923113 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
@@ -60,7 +60,7 @@ public interface ZmsClient extends AutoCloseable {
Map<AthenzUser, String> listPendingRoleApprovals(AthenzRole athenzRole);
- void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry);
+ void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry, Optional<String> reason);
List<AthenzIdentity> listMembers(AthenzRole athenzRole);
diff --git a/vespajlib/abi-spec.json b/vespajlib/abi-spec.json
index 199291ac276..e5d5b8ba5b6 100644
--- a/vespajlib/abi-spec.json
+++ b/vespajlib/abi-spec.json
@@ -658,6 +658,7 @@
"public static void logAndDie(java.lang.String, boolean)",
"public static void logAndDie(java.lang.String, java.lang.Throwable)",
"public static void logAndDie(java.lang.String, java.lang.Throwable, boolean)",
+ "public static void dumpHeap(java.lang.String, boolean)",
"public static void dumpThreads()"
],
"fields": []
diff --git a/vespajlib/src/main/java/com/yahoo/lang/MutableBoolean.java b/vespajlib/src/main/java/com/yahoo/lang/MutableBoolean.java
index 17501b17bd0..877620547ba 100644
--- a/vespajlib/src/main/java/com/yahoo/lang/MutableBoolean.java
+++ b/vespajlib/src/main/java/com/yahoo/lang/MutableBoolean.java
@@ -22,6 +22,12 @@ public class MutableBoolean {
public void orSet(boolean value) { this.value |= value; }
+ public boolean getAndSet(boolean newValue) {
+ boolean prev = value;
+ value = newValue;
+ return prev;
+ }
+
@Override
public String toString() { return Boolean.toString(value); }
diff --git a/vespajlib/src/main/java/com/yahoo/protect/Process.java b/vespajlib/src/main/java/com/yahoo/protect/Process.java
index 4d2fafd4665..f3674f665b2 100644
--- a/vespajlib/src/main/java/com/yahoo/protect/Process.java
+++ b/vespajlib/src/main/java/com/yahoo/protect/Process.java
@@ -1,7 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.protect;
+import com.sun.management.HotSpotDiagnosticMXBean;
+import javax.management.MBeanServer;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
@@ -70,6 +74,16 @@ public final class Process {
}
}
+ public static void dumpHeap(String filePath, boolean live) throws IOException {
+ log.log(Level.INFO, "Will dump the heap to '" + filePath + "', with the live = " + live);
+ getHotspotMXBean().dumpHeap(filePath, live);
+ }
+
+ private static HotSpotDiagnosticMXBean getHotspotMXBean() throws IOException {
+ MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+ return ManagementFactory.newPlatformMXBeanProxy(
+ server, "com.sun.management:type=HotSpotDiagnostic", HotSpotDiagnosticMXBean.class);
+ }
public static void dumpThreads() {
boolean alreadyDumpingThreads = busyDumpingThreads.getAndSet(true);