author     Arne H Juul <arnej@yahooinc.com>    2021-11-16 13:00:53 +0000
committer  Arne H Juul <arnej@yahooinc.com>    2021-11-16 13:00:53 +0000
commit     feeaa3eda037278bb4f483e2000606cd12afc582 (patch)
tree       39d0536cba71fecbaefec85a7a9f8f85c3940606
parent     159dc93308eccd375ea33f3bf6d3d6ab1f5813fc (diff)
parent     bf366caae591f98c0dd002574f3f7de465746fd6 (diff)
Merge branch 'master' into arnej/add-feature-flag-to-ignore-stack-sizes
-rw-r--r--  CMakeLists.txt | 3
-rw-r--r--  build_settings.cmake | 13
-rw-r--r--  client/go/auth/auth.go | 175
-rw-r--r--  client/go/auth/secrets.go | 24
-rw-r--r--  client/go/auth/token.go | 68
-rw-r--r--  client/go/cli/cli.go | 355
-rw-r--r--  client/go/cmd/config.go | 4
-rw-r--r--  client/go/cmd/helpers.go | 37
-rw-r--r--  client/go/cmd/login.go | 34
-rw-r--r--  client/go/go.mod | 15
-rw-r--r--  client/go/go.sum | 58
-rw-r--r--  client/go/util/spinner.go | 63
-rw-r--r--  client/go/vespa/deploy.go | 20
-rw-r--r--  client/go/vespa/target.go | 79
-rw-r--r--  client/go/vespa/target_test.go | 7
-rw-r--r--  client/pom.xml | 13
-rw-r--r--  config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java | 17
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java | 4
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java | 4
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java | 41
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java | 31
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java | 3
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java | 4
-rwxr-xr-x  configserver/src/main/sh/start-configserver | 1
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java | 3
-rwxr-xr-x  container-disc/src/main/sh/vespa-start-container-daemon.sh | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 23
-rw-r--r--  controller-server/src/main/resources/configdefinitions/vespa.hosted.controller.maven.repository.config.maven-repository.def | 2
-rw-r--r--  default_build_settings.cmake | 3
-rw-r--r--  dist/vespa.spec | 1
-rw-r--r--  filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDistributionConnectionPool.java | 43
-rw-r--r--  filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java | 32
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 6
-rw-r--r--  http-utils/pom.xml | 12
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java | 5
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java | 11
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java | 27
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/AbstractProducer.java | 68
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java | 2
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java | 20
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/TestProcessFactory.java | 6
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java | 3
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java | 35
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java | 30
-rw-r--r--  parent/pom.xml | 1
-rw-r--r--  searchcore/CMakeLists.txt | 2
-rw-r--r--  searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp | 67
-rw-r--r--  searchcore/src/tests/proton/docsummary/docsummary.cpp | 23
-rw-r--r--  searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp | 2
-rw-r--r--  searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp | 60
-rw-r--r--  searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp | 2
-rw-r--r--  searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp | 6
-rw-r--r--  searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp | 2
-rw-r--r--  searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp | 117
-rw-r--r--  searchcore/src/tests/proton/documentmetastore/lid_allocator/CMakeLists.txt | 10
-rw-r--r--  searchcore/src/tests/proton/documentmetastore/lid_allocator/lid_allocator_test.cpp | 162
-rw-r--r--  searchcore/src/tests/proton/documentmetastore/lid_state_vector/CMakeLists.txt | 10
-rw-r--r--  searchcore/src/tests/proton/documentmetastore/lid_state_vector/lid_state_vector_test.cpp | 173
-rw-r--r--  searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp | 2
-rw-r--r--  searchcore/src/tests/proton/index/indexmanager_test.cpp | 2
-rw-r--r--  searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp | 26
-rw-r--r--  searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp | 15
-rw-r--r--  searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h | 1
-rw-r--r--  searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp | 16
-rw-r--r--  searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h | 1
-rw-r--r--  searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h | 1
-rw-r--r--  searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.cpp | 18
-rw-r--r--  searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.h | 4
-rw-r--r--  searchcore/src/vespa/searchcore/proton/bucketdb/remove_batch_entry.h | 36
-rw-r--r--  searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp | 12
-rw-r--r--  searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp | 10
-rw-r--r--  searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.cpp | 70
-rw-r--r--  searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.h | 8
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/partial_result.cpp | 6
-rw-r--r--  searchcore/src/vespa/searchcore/proton/matching/result_processor.cpp | 6
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/documentdb.cpp | 14
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp | 5
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h | 2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp | 5
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp | 2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp | 2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp | 10
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h | 1
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp | 58
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h | 10
-rw-r--r--  searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h | 5
-rw-r--r--  searchcore/src/vespa/searchcore/proton/test/thread_utils.h | 2
-rw-r--r--  searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h | 4
-rw-r--r--  searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h | 5
-rw-r--r--  searchlib/src/tests/attribute/searchcontext/searchcontext_test.cpp | 34
-rw-r--r--  searchlib/src/tests/grouping/grouping_test.cpp | 2
-rw-r--r--  searchlib/src/tests/groupingengine/groupingengine_benchmark.cpp | 2
-rw-r--r--  searchlib/src/tests/groupingengine/groupingengine_test.cpp | 2
-rw-r--r--  searchlib/src/tests/hitcollector/hitcollector_test.cpp | 4
-rw-r--r--  searchlib/src/tests/memoryindex/field_index/field_index_test.cpp | 21
-rw-r--r--  searchlib/src/tests/sortresults/sorttest.cpp | 6
-rw-r--r--  searchlib/src/tests/sortspec/multilevelsort.cpp | 10
-rw-r--r--  searchlib/src/vespa/searchlib/aggregation/grouping.cpp | 4
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/loadedvalue.h | 3
-rw-r--r--  searchlib/src/vespa/searchlib/common/sortresults.cpp | 20
-rw-r--r--  searchlib/src/vespa/searchlib/fef/termfieldmatchdata.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp | 14
-rw-r--r--  searchlib/src/vespa/searchlib/memoryindex/field_index_remover.h | 1
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h | 4
-rw-r--r--  security-utils/pom.xml | 12
-rw-r--r--  staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp | 13
-rw-r--r--  staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp | 85
-rw-r--r--  staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h | 20
-rwxr-xr-x  standalone-container/src/main/sh/standalone-container.sh | 1
-rw-r--r--  storage/src/tests/distributor/CMakeLists.txt | 1
-rw-r--r--  storage/src/tests/distributor/blockingoperationstartertest.cpp | 3
-rw-r--r--  storage/src/tests/distributor/distributor_stripe_test.cpp | 17
-rw-r--r--  storage/src/tests/distributor/distributor_stripe_test_util.cpp | 9
-rw-r--r--  storage/src/tests/distributor/distributor_stripe_test_util.h | 2
-rw-r--r--  storage/src/tests/distributor/mergeoperationtest.cpp | 62
-rw-r--r--  storage/src/tests/distributor/mock_tickable_stripe.h | 4
-rw-r--r--  storage/src/tests/distributor/node_supported_features_repo_test.cpp | 52
-rw-r--r--  storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp | 53
-rw-r--r--  storage/src/tests/distributor/top_level_distributor_test_util.cpp | 6
-rw-r--r--  storage/src/tests/storageserver/mergethrottlertest.cpp | 58
-rw-r--r--  storage/src/vespa/storage/config/distributorconfiguration.cpp | 2
-rw-r--r--  storage/src/vespa/storage/config/distributorconfiguration.h | 7
-rw-r--r--  storage/src/vespa/storage/config/stor-distributormanager.def | 7
-rw-r--r--  storage/src/vespa/storage/distributor/CMakeLists.txt | 1
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_operation_context.h | 2
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe.cpp | 14
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe.h | 4
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe_component.cpp | 6
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe_component.h | 10
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe_interface.h | 2
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h | 2
-rw-r--r--  storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp | 8
-rw-r--r--  storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h | 2
-rw-r--r--  storage/src/vespa/storage/distributor/node_supported_features.h | 19
-rw-r--r--  storage/src/vespa/storage/distributor/node_supported_features_repo.cpp | 37
-rw-r--r--  storage/src/vespa/storage/distributor/node_supported_features_repo.h | 37
-rw-r--r--  storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp | 35
-rw-r--r--  storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h | 1
-rw-r--r--  storage/src/vespa/storage/distributor/pendingclusterstate.cpp | 35
-rw-r--r--  storage/src/vespa/storage/distributor/pendingclusterstate.h | 13
-rw-r--r--  storage/src/vespa/storage/distributor/stripe_access_guard.h | 4
-rw-r--r--  storage/src/vespa/storage/distributor/tickable_stripe.h | 4
-rw-r--r--  storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp | 8
-rw-r--r--  storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h | 2
-rw-r--r--  storage/src/vespa/storage/storageserver/mergethrottler.cpp | 90
-rw-r--r--  storage/src/vespa/storage/storageserver/mergethrottler.h | 36
-rw-r--r--  storageapi/src/tests/mbusprot/storageprotocoltest.cpp | 12
-rw-r--r--  storageapi/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto | 7
-rw-r--r--  storageapi/src/vespa/storageapi/mbusprot/protocolserialization7.cpp | 11
-rw-r--r--  storageapi/src/vespa/storageapi/message/bucket.cpp | 6
-rw-r--r--  storageapi/src/vespa/storageapi/message/bucket.h | 27
-rw-r--r--  vespa-feed-client-cli/pom.xml | 11
-rw-r--r--  vespa-feed-client/pom.xml | 11
-rw-r--r--  vespa-hadoop/pom.xml | 30
-rw-r--r--  vespa-http-client/pom.xml | 11
-rw-r--r--  vespamalloc/src/vespamalloc/malloc/allocchunk.h | 16
161 files changed, 2689 insertions, 790 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ea469733695..863ce8dd9de 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -150,8 +150,7 @@ add_subdirectory(vespajlib)
add_subdirectory(vespalib)
add_subdirectory(vespalog)
if(NOT CMAKE_HOST_SYSTEM_NAME STREQUAL "Darwin" AND
- NOT DEFINED VESPA_USE_SANITIZER AND
- CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ NOT DEFINED VESPA_USE_SANITIZER)
add_subdirectory(vespamalloc)
endif()
add_subdirectory(vsm)
diff --git a/build_settings.cmake b/build_settings.cmake
index a0b9ce5160a..6c3740f1bce 100644
--- a/build_settings.cmake
+++ b/build_settings.cmake
@@ -35,6 +35,7 @@ endif()
# Warnings that are specific to C++ compilation
# Note: this is not a union of C_WARN_OPTS, since CMAKE_CXX_FLAGS already includes CMAKE_C_FLAGS, which in turn includes C_WARN_OPTS transitively
+set(VESPA_ATOMIC_LIB "atomic")
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
set(CXX_SPECIFIC_WARN_OPTS "-Wnon-virtual-dtor -Wformat-security -Wno-overloaded-virtual")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-delete-null-pointer-checks -fsized-deallocation")
@@ -43,7 +44,6 @@ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" ST
set(VESPA_GCC_LIB "")
set(VESPA_STDCXX_FS_LIB "")
else()
- set(VESPA_ATOMIC_LIB "atomic")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
set(VESPA_GCC_LIB "gcc")
set(VESPA_STDCXX_FS_LIB "stdc++fs")
@@ -54,17 +54,6 @@ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" ST
endif()
else()
set(CXX_SPECIFIC_WARN_OPTS "-Wnoexcept -Wsuggest-override -Wnon-virtual-dtor -Wformat-security")
- if(VESPA_OS_DISTRO_COMBINED STREQUAL "centos 8" OR
- (VESPA_OS_DISTRO STREQUAL "rocky" AND
- VESPA_OS_DISTRO_VERSION VERSION_GREATER_EQUAL "8" AND
- VESPA_OS_DISTRO_VERSION VERSION_LESS "9") OR
- (VESPA_OS_DISTRO STREQUAL "rhel" AND
- VESPA_OS_DISTRO_VERSION VERSION_GREATER_EQUAL "8" AND
- VESPA_OS_DISTRO_VERSION VERSION_LESS "9"))
- set(VESPA_ATOMIC_LIB "")
- else()
- set(VESPA_ATOMIC_LIB "atomic")
- endif()
set(VESPA_GCC_LIB "gcc")
set(VESPA_STDCXX_FS_LIB "stdc++fs")
endif()
diff --git a/client/go/auth/auth.go b/client/go/auth/auth.go
new file mode 100644
index 00000000000..397e410924d
--- /dev/null
+++ b/client/go/auth/auth.go
@@ -0,0 +1,175 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package auth
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+const (
+ audiencePath = "/api/v2/"
+ waitThresholdInSeconds = 3
+ // SecretsNamespace is the namespace used to set/get values from the keychain
+ SecretsNamespace = "vespa-cli"
+)
+
+var requiredScopes = []string{"openid", "offline_access"}
+
+type Authenticator struct {
+ Audience string
+ ClientID string
+ DeviceCodeEndpoint string
+ OauthTokenEndpoint string
+}
+
+// SecretStore provides access to stored sensitive data.
+type SecretStore interface {
+ // Get gets the secret
+ Get(namespace, key string) (string, error)
+ // Delete removes the secret
+ Delete(namespace, key string) error
+}
+
+type Result struct {
+ Tenant string
+ Domain string
+ RefreshToken string
+ AccessToken string
+ ExpiresIn int64
+}
+
+type State struct {
+ DeviceCode string `json:"device_code"`
+ UserCode string `json:"user_code"`
+ VerificationURI string `json:"verification_uri_complete"`
+ ExpiresIn int `json:"expires_in"`
+ Interval int `json:"interval"`
+}
+
+// RequiredScopes returns the scopes used for login.
+func RequiredScopes() []string { return requiredScopes }
+
+func (s *State) IntervalDuration() time.Duration {
+ return time.Duration(s.Interval+waitThresholdInSeconds) * time.Second
+}
+
+// Start kicks off the device authentication flow
+// by requesting a device code from Auth0.
+// The returned state contains the URI for the next step of the flow.
+func (a *Authenticator) Start(ctx context.Context) (State, error) {
+ s, err := a.getDeviceCode(ctx)
+ if err != nil {
+ return State{}, fmt.Errorf("cannot get device code: %w", err)
+ }
+ return s, nil
+}
+
+// Wait waits until the user is logged in on the browser.
+func (a *Authenticator) Wait(ctx context.Context, state State) (Result, error) {
+ t := time.NewTicker(state.IntervalDuration())
+ for {
+ select {
+ case <-ctx.Done():
+ return Result{}, ctx.Err()
+ case <-t.C:
+ data := url.Values{
+ "client_id": {a.ClientID},
+ "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"},
+ "device_code": {state.DeviceCode},
+ }
+ r, err := http.PostForm(a.OauthTokenEndpoint, data)
+ if err != nil {
+ return Result{}, fmt.Errorf("cannot get device code: %w", err)
+ }
+ defer r.Body.Close()
+
+ var res struct {
+ AccessToken string `json:"access_token"`
+ IDToken string `json:"id_token"`
+ RefreshToken string `json:"refresh_token"`
+ Scope string `json:"scope"`
+ ExpiresIn int64 `json:"expires_in"`
+ TokenType string `json:"token_type"`
+ Error *string `json:"error,omitempty"`
+ ErrorDescription string `json:"error_description,omitempty"`
+ }
+
+ err = json.NewDecoder(r.Body).Decode(&res)
+ if err != nil {
+ return Result{}, fmt.Errorf("cannot decode response: %w", err)
+ }
+
+ if res.Error != nil {
+ if *res.Error == "authorization_pending" {
+ continue
+ }
+ return Result{}, errors.New(res.ErrorDescription)
+ }
+
+ ten, domain, err := parseTenant(res.AccessToken)
+ if err != nil {
+ return Result{}, fmt.Errorf("cannot parse tenant from the given access token: %w", err)
+ }
+
+ return Result{
+ RefreshToken: res.RefreshToken,
+ AccessToken: res.AccessToken,
+ ExpiresIn: res.ExpiresIn,
+ Tenant: ten,
+ Domain: domain,
+ }, nil
+ }
+ }
+}
+
+func (a *Authenticator) getDeviceCode(ctx context.Context) (State, error) {
+ data := url.Values{
+ "client_id": {a.ClientID},
+ "scope": {strings.Join(requiredScopes, " ")},
+ "audience": {a.Audience},
+ }
+ r, err := http.PostForm(a.DeviceCodeEndpoint, data)
+ if err != nil {
+ return State{}, fmt.Errorf("cannot get device code: %w", err)
+ }
+ defer r.Body.Close()
+ var res State
+ err = json.NewDecoder(r.Body).Decode(&res)
+ if err != nil {
+ return State{}, fmt.Errorf("cannot decode response: %w", err)
+ }
+ return res, nil
+}
+
+func parseTenant(accessToken string) (tenant, domain string, err error) {
+ parts := strings.Split(accessToken, ".")
+ v, err := base64.RawURLEncoding.DecodeString(parts[1])
+ if err != nil {
+ return "", "", err
+ }
+ var payload struct {
+ AUDs []string `json:"aud"`
+ }
+ if err := json.Unmarshal(v, &payload); err != nil {
+ return "", "", err
+ }
+ for _, aud := range payload.AUDs {
+ u, err := url.Parse(aud)
+ if err != nil {
+ return "", "", err
+ }
+ if u.Path == audiencePath {
+ parts := strings.Split(u.Host, ".")
+ return parts[0], u.Host, nil
+ }
+ }
+ return "", "", fmt.Errorf("audience not found for %s", audiencePath)
+}
diff --git a/client/go/auth/secrets.go b/client/go/auth/secrets.go
new file mode 100644
index 00000000000..e38d8c56595
--- /dev/null
+++ b/client/go/auth/secrets.go
@@ -0,0 +1,24 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package auth
+
+import (
+ "github.com/zalando/go-keyring"
+)
+
+type Keyring struct{}
+
+// Set sets the given key/value pair with the given namespace.
+func (k *Keyring) Set(namespace, key, value string) error {
+ return keyring.Set(namespace, key, value)
+}
+
+// Get gets a value for the given namespace and key.
+func (k *Keyring) Get(namespace, key string) (string, error) {
+ return keyring.Get(namespace, key)
+}
+
+// Delete deletes a value for the given namespace and key.
+func (k *Keyring) Delete(namespace, key string) error {
+ return keyring.Delete(namespace, key)
+}
diff --git a/client/go/auth/token.go b/client/go/auth/token.go
new file mode 100644
index 00000000000..e9b90b8994e
--- /dev/null
+++ b/client/go/auth/token.go
@@ -0,0 +1,68 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package auth
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+)
+
+type TokenResponse struct {
+ AccessToken string `json:"access_token"`
+ IDToken string `json:"id_token"`
+ TokenType string `json:"token_type"`
+ ExpiresIn int `json:"expires_in"`
+}
+
+type TokenRetriever struct {
+ Authenticator *Authenticator
+ Secrets SecretStore
+ Client *http.Client
+}
+
+// Delete deletes the given tenant from the secrets' storage.
+func (t *TokenRetriever) Delete(tenant string) error {
+ return t.Secrets.Delete(SecretsNamespace, tenant)
+}
+
+// Refresh gets a new access token from the provided refresh token.
+// The request uses the default client_id and endpoint for device authentication.
+func (t *TokenRetriever) Refresh(ctx context.Context, tenant string) (TokenResponse, error) {
+ // get stored refresh token:
+ refreshToken, err := t.Secrets.Get(SecretsNamespace, tenant)
+ if err != nil {
+ return TokenResponse{}, fmt.Errorf("cannot get the stored refresh token: %w", err)
+ }
+ if refreshToken == "" {
+ return TokenResponse{}, errors.New("cannot use the stored refresh token: the token is empty")
+ }
+ // get access token:
+ r, err := t.Client.PostForm(t.Authenticator.OauthTokenEndpoint, url.Values{
+ "grant_type": {"refresh_token"},
+ "client_id": {t.Authenticator.ClientID},
+ "refresh_token": {refreshToken},
+ })
+ if err != nil {
+ return TokenResponse{}, fmt.Errorf("cannot get a new access token from the refresh token: %w", err)
+ }
+
+ defer r.Body.Close()
+ if r.StatusCode != http.StatusOK {
+ b, _ := ioutil.ReadAll(r.Body)
+ bodyStr := string(b)
+ return TokenResponse{}, fmt.Errorf("cannot get a new access token from the refresh token: %s", bodyStr)
+ }
+
+ var res TokenResponse
+ err = json.NewDecoder(r.Body).Decode(&res)
+ if err != nil {
+ return TokenResponse{}, fmt.Errorf("cannot decode response: %w", err)
+ }
+
+ return res, nil
+}
diff --git a/client/go/cli/cli.go b/client/go/cli/cli.go
new file mode 100644
index 00000000000..e1dde387b89
--- /dev/null
+++ b/client/go/cli/cli.go
@@ -0,0 +1,355 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package cli
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "github.com/joeshaw/envdecode"
+ "github.com/pkg/browser"
+ "github.com/vespa-engine/vespa/client/go/util"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/lestrrat-go/jwx/jwt"
+ "github.com/vespa-engine/vespa/client/go/auth"
+)
+
+const accessTokenExpThreshold = 5 * time.Minute
+
+var errUnauthenticated = errors.New("not logged in. Try 'vespa login'")
+
+type config struct {
+ InstallID string `json:"install_id,omitempty"`
+ DefaultTenant string `json:"default_tenant"`
+ Tenants map[string]Tenant `json:"tenants"`
+}
+
+// Tenant is an auth0 Tenant.
+type Tenant struct {
+ Name string `json:"name"`
+ Domain string `json:"domain"`
+ AccessToken string `json:"access_token,omitempty"`
+ Scopes []string `json:"scopes,omitempty"`
+ ExpiresAt time.Time `json:"expires_at"`
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+}
+
+type Cli struct {
+ Authenticator *auth.Authenticator
+ tenant string
+ initOnce sync.Once
+ errOnce error
+ Path string
+ config config
+}
+
+// IsLoggedIn encodes the domain logic for determining whether we're
+// logged in. This might check our config storage, or just in memory.
+func (c *Cli) IsLoggedIn() bool {
+ // No need to check errors for initializing context.
+ _ = c.init()
+
+ if c.tenant == "" {
+ return false
+ }
+
+ // Parse the access token for the tenant.
+ t, err := jwt.ParseString(c.config.Tenants[c.tenant].AccessToken)
+ if err != nil {
+ return false
+ }
+
+ // Check if token is valid.
+ if err = jwt.Validate(t, jwt.WithIssuer("https://vespa-cd.auth0.com/")); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// default to vespa-cd.auth0.com
+var (
+ authCfg struct {
+ Audience string `env:"AUTH0_AUDIENCE,default=https://vespa-cd.auth0.com/api/v2/"`
+ ClientID string `env:"AUTH0_CLIENT_ID,default=4wYWA496zBP28SLiz0PuvCt8ltL11DZX"`
+ DeviceCodeEndpoint string `env:"AUTH0_DEVICE_CODE_ENDPOINT,default=https://vespa-cd.auth0.com/oauth/device/code"`
+ OauthTokenEndpoint string `env:"AUTH0_OAUTH_TOKEN_ENDPOINT,default=https://vespa-cd.auth0.com/oauth/token"`
+ }
+)
+
+func ContextWithCancel() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, os.Interrupt)
+ go func() {
+ <-ch
+ defer cancel()
+ os.Exit(0)
+ }()
+ return ctx
+}
+
+// GetCli will try to initialize the config context, as well as figure out if
+// there's a readily available tenant.
+func GetCli(configPath string) (*Cli, error) {
+ c := Cli{}
+ c.Path = configPath
+ if err := envdecode.StrictDecode(&authCfg); err != nil {
+ return nil, fmt.Errorf("could not decode env: %w", err)
+ }
+ c.Authenticator = &auth.Authenticator{
+ Audience: authCfg.Audience,
+ ClientID: authCfg.ClientID,
+ DeviceCodeEndpoint: authCfg.DeviceCodeEndpoint,
+ OauthTokenEndpoint: authCfg.OauthTokenEndpoint,
+ }
+ return &c, nil
+}
+
+// PrepareTenant loads the Tenant, refreshing its token if necessary.
+// The Tenant access token needs a refresh if:
+// 1. the Tenant scopes are different from the currently required scopes.
+// 2. the access token is expired.
+func (c *Cli) PrepareTenant(ctx context.Context) (Tenant, error) {
+ if err := c.init(); err != nil {
+ return Tenant{}, err
+ }
+ t, err := c.getTenant()
+ if err != nil {
+ return Tenant{}, err
+ }
+
+ if t.ClientID != "" && t.ClientSecret != "" {
+ return t, nil
+ }
+
+ if t.AccessToken == "" || scopesChanged(t) {
+ t, err = RunLogin(ctx, c, true)
+ if err != nil {
+ return Tenant{}, err
+ }
+ } else if isExpired(t.ExpiresAt, accessTokenExpThreshold) {
+ // check if the stored access token is expired:
+ // use the refresh token to get a new access token:
+ tr := &auth.TokenRetriever{
+ Authenticator: c.Authenticator,
+ Secrets: &auth.Keyring{},
+ Client: http.DefaultClient,
+ }
+
+ res, err := tr.Refresh(ctx, t.Domain)
+ if err != nil {
+ // ask and guide the user through the login process:
+ fmt.Println(fmt.Errorf("failed to renew access token, %s", err))
+ t, err = RunLogin(ctx, c, true)
+ if err != nil {
+ return Tenant{}, err
+ }
+ } else {
+ // persist the updated tenant with renewed access token
+ t.AccessToken = res.AccessToken
+ t.ExpiresAt = time.Now().Add(
+ time.Duration(res.ExpiresIn) * time.Second,
+ )
+
+ err = c.AddTenant(t)
+ if err != nil {
+ return Tenant{}, err
+ }
+ }
+ }
+
+ return t, nil
+}
+
+// isExpired is true if now() + a threshold is after the given date
+func isExpired(t time.Time, threshold time.Duration) bool {
+ return time.Now().Add(threshold).After(t)
+}
+
+// scopesChanged compares the Tenant scopes
+// with the currently required scopes.
+func scopesChanged(t Tenant) bool {
+ want := auth.RequiredScopes()
+ got := t.Scopes
+
+ sort.Strings(want)
+ sort.Strings(got)
+
+ if (want == nil) != (got == nil) {
+ return true
+ }
+
+ if len(want) != len(got) {
+ return true
+ }
+
+ for i := range t.Scopes {
+ if want[i] != got[i] {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (c *Cli) getTenant() (Tenant, error) {
+ if err := c.init(); err != nil {
+ return Tenant{}, err
+ }
+
+ t, ok := c.config.Tenants[c.tenant]
+ if !ok {
+ return Tenant{}, fmt.Errorf("unable to find tenant: %s; run 'vespa login' to configure a new tenant", c.tenant)
+ }
+
+ return t, nil
+}
+
+// AddTenant assigns an existing or new Tenant. This is expected to be called
+// after a login has completed.
+func (c *Cli) AddTenant(ten Tenant) error {
+ _ = c.init()
+
+ if c.config.DefaultTenant == "" {
+ c.config.DefaultTenant = ten.Domain
+ }
+
+ // If we're dealing with an empty file, we'll need to initialize this map.
+ if c.config.Tenants == nil {
+ c.config.Tenants = map[string]Tenant{}
+ }
+
+ c.config.Tenants[ten.Domain] = ten
+
+ if err := c.persistConfig(); err != nil {
+ return fmt.Errorf("unexpected error persisting config: %w", err)
+ }
+
+ return nil
+}
+
+func (c *Cli) persistConfig() error {
+ dir := filepath.Dir(c.Path)
+ if _, err := os.Stat(dir); os.IsNotExist(err) {
+ if err := os.MkdirAll(dir, 0700); err != nil {
+ return err
+ }
+ }
+
+ buf, err := json.MarshalIndent(c.config, "", " ")
+ if err != nil {
+ return err
+ }
+
+ if err := ioutil.WriteFile(c.Path, buf, 0600); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *Cli) init() error {
+ c.initOnce.Do(func() {
+ if c.errOnce = c.initContext(); c.errOnce != nil {
+ return
+ }
+ })
+ return c.errOnce
+}
+
+func (c *Cli) initContext() (err error) {
+ if _, err := os.Stat(c.Path); os.IsNotExist(err) {
+ return errUnauthenticated
+ }
+
+ var buf []byte
+ if buf, err = ioutil.ReadFile(c.Path); err != nil {
+ return err
+ }
+
+ if err := json.Unmarshal(buf, &c.config); err != nil {
+ return err
+ }
+
+ if c.tenant == "" && c.config.DefaultTenant == "" {
+ return errUnauthenticated
+ }
+
+ if c.tenant == "" {
+ c.tenant = c.config.DefaultTenant
+ }
+
+ return nil
+}
+
+// RunLogin runs the login flow, guiding the user through the process
+// by showing the login instructions and opening the browser.
+// Use `expired` when the login is triggered from another command's setup:
+// it only affects the messages shown.
+func RunLogin(ctx context.Context, cli *Cli, expired bool) (Tenant, error) {
+ if expired {
+ fmt.Println("Please sign in to re-authorize the CLI.")
+ }
+
+ state, err := cli.Authenticator.Start(ctx)
+ if err != nil {
+ return Tenant{}, fmt.Errorf("could not start the authentication process: %w", err)
+ }
+
+ fmt.Printf("Your Device Confirmation code is: %s\n\n", state.UserCode)
+ fmt.Println("Press Enter to open the browser to log in or ^C to quit...")
+ fmt.Scanln()
+
+ err = browser.OpenURL(state.VerificationURI)
+
+ if err != nil {
+ fmt.Printf("Couldn't open the URL, please do it manually: %s.", state.VerificationURI)
+ }
+
+ var res auth.Result
+ err = util.Spinner("Waiting for login to complete in browser", func() error {
+ res, err = cli.Authenticator.Wait(ctx, state)
+ return err
+ })
+
+ if err != nil {
+ return Tenant{}, fmt.Errorf("login error: %w", err)
+ }
+
+ fmt.Print("\n")
+ fmt.Println("Successfully logged in.")
+ fmt.Print("\n")
+
+ // store the refresh token
+ secretsStore := &auth.Keyring{}
+ err = secretsStore.Set(auth.SecretsNamespace, res.Domain, res.RefreshToken)
+ if err != nil {
+ // log the error but move on
+ fmt.Println("Could not store the refresh token locally, please expect to login again once your access token expired.")
+ }
+
+ t := Tenant{
+ Name: res.Tenant,
+ Domain: res.Domain,
+ AccessToken: res.AccessToken,
+ ExpiresAt: time.Now().Add(time.Duration(res.ExpiresIn) * time.Second),
+ Scopes: auth.RequiredScopes(),
+ }
+ err = cli.AddTenant(t)
+ if err != nil {
+ return Tenant{}, fmt.Errorf("could not add tenant to config: %w", err)
+ }
+
+ return t, nil
+}
diff --git a/client/go/cmd/config.go b/client/go/cmd/config.go
index d10f66c83c6..0b08a2dc28d 100644
--- a/client/go/cmd/config.go
+++ b/client/go/cmd/config.go
@@ -148,6 +148,10 @@ func (c *Config) ReadAPIKey(tenantName string) ([]byte, error) {
return ioutil.ReadFile(c.APIKeyPath(tenantName))
}
+func (c *Config) AuthConfigPath() string {
+ return filepath.Join(c.Home, "auth", "config.json")
+}
+
func (c *Config) ReadSessionID(app vespa.ApplicationID) (int64, error) {
sessionPath, err := c.applicationFilePath(app, "session_id")
if err != nil {
diff --git a/client/go/cmd/helpers.go b/client/go/cmd/helpers.go
index 4768290e33e..54d8798b71d 100644
--- a/client/go/cmd/helpers.go
+++ b/client/go/cmd/helpers.go
@@ -153,10 +153,17 @@ func getConsoleURL() string {
}
func getApiURL() string {
- if getSystem() == "publiccd" {
- return "https://api.vespa-external-cd.aws.oath.cloud:4443"
+ if vespa.Auth0AccessTokenEnabled() {
+ if getSystem() == "publiccd" {
+ return "https://api.vespa-external-cd.aws.oath.cloud:443"
+ }
+ return "https://api.vespa-external.aws.oath.cloud:443"
+ } else {
+ if getSystem() == "publiccd" {
+ return "https://api.vespa-external-cd.aws.oath.cloud:4443"
+ }
+ return "https://api.vespa-external.aws.oath.cloud:4443"
}
- return "https://api.vespa-external.aws.oath.cloud:4443"
}
func getTarget() vespa.Target {
@@ -174,9 +181,12 @@ func getTarget() vespa.Target {
fatalErr(err, "Could not load config")
return nil
}
- apiKey, err := ioutil.ReadFile(cfg.APIKeyPath(deployment.Application.Tenant))
- if err != nil {
- fatalErrHint(err, "Deployment to cloud requires an API key. Try 'vespa api-key'")
+ var apiKey []byte = nil
+ if !vespa.Auth0AccessTokenEnabled() {
+ apiKey, err = ioutil.ReadFile(cfg.APIKeyPath(deployment.Application.Tenant))
+ if err != nil {
+ fatalErrHint(err, "Deployment to cloud requires an API key. Try 'vespa api-key'")
+ }
}
privateKeyFile, err := cfg.PrivateKeyPath(deployment.Application)
if err != nil {
@@ -201,7 +211,8 @@ func getTarget() vespa.Target {
vespa.LogOptions{
Writer: stdout,
Level: vespa.LogLevel(logLevelArg),
- })
+ },
+ cfg.AuthConfigPath())
}
fatalErrHint(fmt.Errorf("Invalid target: %s", targetType), "Valid targets are 'local', 'cloud' or an URL")
return nil
@@ -232,11 +243,13 @@ func getDeploymentOpts(cfg *Config, pkg vespa.ApplicationPackage, target vespa.T
fatalErrHint(fmt.Errorf("Missing certificate in application package"), "Applications in Vespa Cloud require a certificate", "Try 'vespa cert'")
return opts
}
- var err error
- opts.APIKey, err = cfg.ReadAPIKey(deployment.Application.Tenant)
- if err != nil {
- fatalErrHint(err, "Deployment to cloud requires an API key. Try 'vespa api-key'")
- return opts
+ if !vespa.Auth0AccessTokenEnabled() {
+ var err error
+ opts.APIKey, err = cfg.ReadAPIKey(deployment.Application.Tenant)
+ if err != nil {
+ fatalErrHint(err, "Deployment to cloud requires an API key. Try 'vespa api-key'")
+ return opts
+ }
}
opts.Deployment = deployment
}
diff --git a/client/go/cmd/login.go b/client/go/cmd/login.go
new file mode 100644
index 00000000000..767d462b0be
--- /dev/null
+++ b/client/go/cmd/login.go
@@ -0,0 +1,34 @@
+package cmd
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/vespa-engine/vespa/client/go/cli"
+ "github.com/vespa-engine/vespa/client/go/vespa"
+)
+
+func init() {
+ if vespa.Auth0AccessTokenEnabled() {
+ rootCmd.AddCommand(loginCmd)
+ }
+}
+
+var loginCmd = &cobra.Command{
+ Use: "login",
+ Args: cobra.NoArgs,
+ Short: "Authenticate the Vespa CLI",
+ Example: "$ vespa login",
+ DisableAutoGenTag: true,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ctx := cmd.Context()
+ cfg, err := LoadConfig()
+ if err != nil {
+ return err
+ }
+ c, err := cli.GetCli(cfg.AuthConfigPath())
+ if err != nil {
+ return err
+ }
+ _, err = cli.RunLogin(ctx, c, false)
+ return err
+ },
+}
diff --git a/client/go/go.mod b/client/go/go.mod
index 27faff3fd0b..70eea958933 100644
--- a/client/go/go.mod
+++ b/client/go/go.mod
@@ -3,12 +3,23 @@ module github.com/vespa-engine/vespa/client/go
go 1.15
require (
+ github.com/briandowns/spinner v1.16.0
+ github.com/fatih/color v1.10.0 // indirect
+ github.com/joeshaw/envdecode v0.0.0-20200121155833-099f1fc765bd
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
+ github.com/lestrrat-go/jwx v1.2.9
github.com/logrusorgru/aurora/v3 v3.0.0
- github.com/mattn/go-colorable v0.0.9
- github.com/mattn/go-isatty v0.0.3
+ github.com/mattn/go-colorable v0.1.8
+ github.com/mattn/go-isatty v0.0.13
+ github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2
+ github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.2.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.8.1
+ github.com/stretchr/objx v0.1.1 // indirect
github.com/stretchr/testify v1.7.0
+ github.com/zalando/go-keyring v0.1.1
+ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect
+ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect
+ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
)
diff --git a/client/go/go.sum b/client/go/go.sum
index 0462d0575a1..59656af2b35 100644
--- a/client/go/go.sum
+++ b/client/go/go.sum
@@ -45,6 +45,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/briandowns/spinner v1.16.0 h1:DFmp6hEaIx2QXXuqSJmtfSBSAjRmpGiKG6ip2Wm/yOs=
+github.com/briandowns/spinner v1.16.0/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -57,9 +59,14 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/danieljoos/wincred v1.1.0 h1:3RNcEpBg4IhIChZdFRSdlQt1QjCp1sMAPIrOnm7Yf8g=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d h1:1iy2qD6JEhHKKhUOA9IWs7mjco7lnw2qx8FsRI2wirE=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -68,12 +75,18 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/goccy/go-json v0.7.10 h1:ulhbuNe1JqE68nMRXXTJRrUu0uhouf0VevLINxQq4Ec=
+github.com/goccy/go-json v0.7.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -165,6 +178,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/joeshaw/envdecode v0.0.0-20200121155833-099f1fc765bd h1:nIzoSW6OhhppWLm4yqBwZsKJlAayUu5FGozhrF3ETSM=
+github.com/joeshaw/envdecode v0.0.0-20200121155833-099f1fc765bd/go.mod h1:MEQrHur0g8VplbLOv5vXmDzacSaH9Z7XhcgsSh1xciU=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
@@ -180,14 +195,31 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=
+github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
+github.com/lestrrat-go/blackmagic v1.0.0 h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=
+github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
+github.com/lestrrat-go/httpcc v1.0.0 h1:FszVC6cKfDvBKcJv646+lkh4GydQg2Z29scgUfkOpYc=
+github.com/lestrrat-go/httpcc v1.0.0/go.mod h1:tGS/u00Vh5N6FHNkExqGGNId8e0Big+++0Gf8MBnAvE=
+github.com/lestrrat-go/iter v1.0.1 h1:q8faalr2dY6o8bV45uwrxq12bRa1ezKrB6oM9FUgN4A=
+github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
+github.com/lestrrat-go/jwx v1.2.9 h1:kS8kLI4oaBYJJ6u6rpbPI0tDYVCqo0P5u8vv1zoQ49U=
+github.com/lestrrat-go/jwx v1.2.9/go.mod h1:25DcLbNWArPA/Ew5CcBmewl32cJKxOk5cbepBsIJFzw=
+github.com/lestrrat-go/option v1.0.0 h1:WqAWL8kh8VcSoD6xjSH34/1m8yxluXQbDeKNfvFeEO4=
+github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/logrusorgru/aurora/v3 v3.0.0 h1:R6zcoZZbvVcGMvDCKo45A9U/lzYyzl5NfYIvznmDfE4=
github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc=
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
+github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -204,7 +236,11 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2 h1:acNfDZXmm28D2Yg/c3ALnZStzNaZMSagpbr96vY6Zjc=
+github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -235,6 +271,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -249,6 +287,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/zalando/go-keyring v0.1.1 h1:w2V9lcx/Uj4l+dzAf1m9s+DJ1O8ROkEHnynonHjTcYE=
+github.com/zalando/go-keyring v0.1.1/go.mod h1:OIC+OZ28XbmwFxU/Rp9V7eKzZjamBJwRzC8UFJH9+L8=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
@@ -269,6 +309,9 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201217014255-9d1352758620/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -367,6 +410,7 @@ golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -376,9 +420,11 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -403,8 +449,11 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -575,8 +624,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
diff --git a/client/go/util/spinner.go b/client/go/util/spinner.go
new file mode 100644
index 00000000000..1deb4296d28
--- /dev/null
+++ b/client/go/util/spinner.go
@@ -0,0 +1,63 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package util
+
+import (
+ "os"
+ "time"
+
+ "github.com/briandowns/spinner"
+ "github.com/pkg/errors"
+)
+
+const (
+ spinnerTextEllipsis = "..."
+ spinnerTextDone = "done"
+ spinnerTextFailed = "failed"
+ spinnerColor = "blue"
+)
+
+var messages = os.Stderr
+
+func Spinner(text string, fn func() error) error {
+ initialMsg := text + spinnerTextEllipsis + " "
+ doneMsg := initialMsg + spinnerTextDone + "\n"
+ failMsg := initialMsg + spinnerTextFailed + "\n"
+
+ return loading(initialMsg, doneMsg, failMsg, fn)
+}
+
+func loading(initialMsg, doneMsg, failMsg string, fn func() error) error {
+ done := make(chan struct{})
+ errc := make(chan error)
+ go func() {
+ defer close(done)
+
+ s := spinner.New(spinner.CharSets[11], 100*time.Millisecond, spinner.WithWriter(messages))
+ s.Prefix = initialMsg
+ s.FinalMSG = doneMsg
+ s.HideCursor = true
+ s.Writer = messages
+
+ if err := s.Color(spinnerColor); err != nil {
+ panic(Error(err, "failed setting spinner color"))
+ }
+
+ s.Start()
+ err := <-errc
+ if err != nil {
+ s.FinalMSG = failMsg
+ }
+
+ s.Stop()
+ }()
+
+ err := fn()
+ errc <- err
+ <-done
+ return err
+}
+
+func Error(e error, message string) error {
+ return errors.Wrap(e, message)
+}
diff --git a/client/go/vespa/deploy.go b/client/go/vespa/deploy.go
index 3718c7d813a..9c5fb3a12ac 100644
--- a/client/go/vespa/deploy.go
+++ b/client/go/vespa/deploy.go
@@ -327,12 +327,12 @@ func Submit(opts DeploymentOpts) error {
Header: make(http.Header),
}
request.Header.Set("Content-Type", writer.FormDataContentType())
- signer := NewRequestSigner(opts.Deployment.Application.SerializedForm(), opts.APIKey)
- if err := signer.SignRequest(request); err != nil {
+ serviceDescription := "Submit service"
+ sigKeyId := opts.Deployment.Application.SerializedForm()
+ if err := opts.Target.PrepareApiRequest(request, sigKeyId); err != nil {
return err
}
- serviceDescription := "Submit service"
- response, err := util.HttpDo(request, time.Minute*10, serviceDescription)
+ response, err := util.HttpDo(request, time.Minute*10, sigKeyId)
if err != nil {
return err
}
@@ -344,7 +344,7 @@ func checkDeploymentOpts(opts DeploymentOpts) error {
if !opts.ApplicationPackage.HasCertificate() {
return fmt.Errorf("%s: missing certificate in package", opts)
}
- if opts.APIKey == nil {
+ if !Auth0AccessTokenEnabled() && opts.APIKey == nil {
return fmt.Errorf("%s: missing api key", opts.String())
}
return nil
@@ -363,13 +363,11 @@ func uploadApplicationPackage(url *url.URL, opts DeploymentOpts) (int64, error)
Header: header,
Body: ioutil.NopCloser(zipReader),
}
- if opts.APIKey != nil {
- signer := NewRequestSigner(opts.Deployment.Application.SerializedForm(), opts.APIKey)
- if err := signer.SignRequest(request); err != nil {
- return 0, err
- }
- }
serviceDescription := "Deploy service"
+ sigKeyId := opts.Deployment.Application.SerializedForm()
+ if err := opts.Target.PrepareApiRequest(request, sigKeyId); err != nil {
+ return 0, err
+ }
response, err := util.HttpDo(request, time.Minute*10, serviceDescription)
if err != nil {
return 0, err
diff --git a/client/go/vespa/target.go b/client/go/vespa/target.go
index 8a09440f5cc..f497bf5b3cd 100644
--- a/client/go/vespa/target.go
+++ b/client/go/vespa/target.go
@@ -6,13 +6,16 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
+ "github.com/vespa-engine/vespa/client/go/cli"
"io"
"io/ioutil"
"math"
"net/http"
"net/url"
+ "os"
"sort"
"strconv"
+ "strings"
"time"
"github.com/vespa-engine/vespa/client/go/util"
@@ -35,6 +38,7 @@ type Service struct {
BaseURL string
Name string
TLSOptions TLSOptions
+ Target *Target
}
// Target represents a Vespa platform, running named Vespa services.
@@ -47,6 +51,8 @@ type Target interface {
// PrintLog writes the logs of this deployment using given options to control output.
PrintLog(options LogOptions) error
+
+ // PrepareApiRequest adds the authentication the target requires (an Auth0 access token or request signing with the API key) to an API request.
+ PrepareApiRequest(req *http.Request, sigKeyId string) error
}
// TLSOptions configures the certificate to use for service requests.
@@ -66,11 +72,21 @@ type LogOptions struct {
Level int
}
+func Auth0AccessTokenEnabled() bool {
+ v, present := os.LookupEnv("VESPA_CLI_OAUTH2_DEVICE_FLOW")
+ if !present {
+ return false
+ }
+ return strings.ToLower(v) == "true" || v == "1" || v == ""
+}
+
type customTarget struct {
targetType string
baseURL string
}
+func (t *customTarget) PrepareApiRequest(req *http.Request, sigKeyId string) error { return nil }
+
// Do sends request to this service. Any required authentication happens automatically.
func (s *Service) Do(request *http.Request, timeout time.Duration) (*http.Response, error) {
if s.TLSOptions.KeyPair.Certificate != nil {
@@ -192,8 +208,9 @@ type cloudTarget struct {
tlsOptions TLSOptions
logOptions LogOptions
- queryURL string
- documentURL string
+ queryURL string
+ documentURL string
+ authConfigPath string
}
func (t *cloudTarget) Type() string { return t.targetType }
@@ -221,6 +238,30 @@ func (t *cloudTarget) Service(name string, timeout time.Duration, runID int64) (
return nil, fmt.Errorf("unknown service: %s", name)
}
+func (t *cloudTarget) PrepareApiRequest(req *http.Request, sigKeyId string) error {
+ if Auth0AccessTokenEnabled() {
+ if err := t.addAuth0AccessToken(req); err != nil {
+ return err
+ }
+ } else if t.apiKey != nil {
+ signer := NewRequestSigner(sigKeyId, t.apiKey)
+ if err := signer.SignRequest(req); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *cloudTarget) addAuth0AccessToken(request *http.Request) error {
+ c, err := cli.GetCli(t.authConfigPath)
+ if err != nil {
+ return err
+ }
+ tenant, err := c.PrepareTenant(cli.ContextWithCancel())
+ if err != nil {
+ return err
+ }
+ request.Header.Set("Authorization", "Bearer "+tenant.AccessToken)
+ return nil
+}
+
func (t *cloudTarget) logsURL() string {
return fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/environment/%s/region/%s/logs",
t.apiURL,
@@ -233,7 +274,6 @@ func (t *cloudTarget) PrintLog(options LogOptions) error {
if err != nil {
return err
}
- signer := NewRequestSigner(t.deployment.Application.SerializedForm(), t.apiKey)
lastFrom := options.From
requestFunc := func() *http.Request {
fromMillis := lastFrom.Unix() * 1000
@@ -244,9 +284,7 @@ func (t *cloudTarget) PrintLog(options LogOptions) error {
q.Set("to", strconv.FormatInt(toMillis, 10))
}
req.URL.RawQuery = q.Encode()
- if err := signer.SignRequest(req); err != nil {
- panic(err)
- }
+ if err := t.PrepareApiRequest(req, t.deployment.Application.SerializedForm()); err != nil {
+ panic(err)
+ }
return req
}
logFunc := func(status int, response []byte) (bool, error) {
@@ -280,16 +318,15 @@ func (t *cloudTarget) PrintLog(options LogOptions) error {
}
func (t *cloudTarget) waitForEndpoints(timeout time.Duration, runID int64) error {
- signer := NewRequestSigner(t.deployment.Application.SerializedForm(), t.apiKey)
if runID > 0 {
- if err := t.waitForRun(signer, runID, timeout); err != nil {
+ if err := t.waitForRun(runID, timeout); err != nil {
return err
}
}
- return t.discoverEndpoints(signer, timeout)
+ return t.discoverEndpoints(timeout)
}
-func (t *cloudTarget) waitForRun(signer *RequestSigner, runID int64, timeout time.Duration) error {
+func (t *cloudTarget) waitForRun(runID int64, timeout time.Duration) error {
runURL := fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/job/%s-%s/run/%d",
t.apiURL,
t.deployment.Application.Tenant, t.deployment.Application.Application, t.deployment.Application.Instance,
@@ -303,7 +340,7 @@ func (t *cloudTarget) waitForRun(signer *RequestSigner, runID int64, timeout tim
q := req.URL.Query()
q.Set("after", strconv.FormatInt(lastID, 10))
req.URL.RawQuery = q.Encode()
- if err := signer.SignRequest(req); err != nil {
+ if err := t.PrepareApiRequest(req, t.deployment.Application.SerializedForm()); err != nil {
panic(err)
}
return req
@@ -353,7 +390,7 @@ func (t *cloudTarget) printLog(response jobResponse, last int64) int64 {
return response.LastID
}
-func (t *cloudTarget) discoverEndpoints(signer *RequestSigner, timeout time.Duration) error {
+func (t *cloudTarget) discoverEndpoints(timeout time.Duration) error {
deploymentURL := fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/environment/%s/region/%s",
t.apiURL,
t.deployment.Application.Tenant, t.deployment.Application.Application, t.deployment.Application.Instance,
@@ -362,7 +399,7 @@ func (t *cloudTarget) discoverEndpoints(signer *RequestSigner, timeout time.Dura
if err != nil {
return err
}
- if err := signer.SignRequest(req); err != nil {
+ if err := t.PrepareApiRequest(req, t.deployment.Application.SerializedForm()); err != nil {
return err
}
var endpointURL string
@@ -409,14 +446,16 @@ func CustomTarget(baseURL string) Target {
}
// CloudTarget creates a Target for the Vespa Cloud platform.
-func CloudTarget(apiURL string, deployment Deployment, apiKey []byte, tlsOptions TLSOptions, logOptions LogOptions) Target {
+func CloudTarget(apiURL string, deployment Deployment, apiKey []byte, tlsOptions TLSOptions, logOptions LogOptions,
+ authConfigPath string) Target {
return &cloudTarget{
- apiURL: apiURL,
- targetType: cloudTargetType,
- deployment: deployment,
- apiKey: apiKey,
- tlsOptions: tlsOptions,
- logOptions: logOptions,
+ apiURL: apiURL,
+ targetType: cloudTargetType,
+ deployment: deployment,
+ apiKey: apiKey,
+ tlsOptions: tlsOptions,
+ logOptions: logOptions,
+ authConfigPath: authConfigPath,
}
}
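Auth0AccessTokenEnabled above gates the new device-flow authentication on the VESPA_CLI_OAUTH2_DEVICE_FLOW environment variable. A standalone sketch of the same check, useful for seeing which values turn the flow on (set-but-empty, "1", or any casing of "true"):

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    // auth0Enabled mirrors the gate in target.go: unset means off; set to "", "1" or any
    // casing of "true" means on; any other value means off.
    func auth0Enabled() bool {
        v, present := os.LookupEnv("VESPA_CLI_OAUTH2_DEVICE_FLOW")
        if !present {
            return false
        }
        return strings.ToLower(v) == "true" || v == "1" || v == ""
    }

    func main() {
        os.Setenv("VESPA_CLI_OAUTH2_DEVICE_FLOW", "True")
        fmt.Println(auth0Enabled()) // true
        os.Setenv("VESPA_CLI_OAUTH2_DEVICE_FLOW", "no")
        fmt.Println(auth0Enabled()) // false
    }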
diff --git a/client/go/vespa/target_test.go b/client/go/vespa/target_test.go
index ed924059297..d4d23901513 100644
--- a/client/go/vespa/target_test.go
+++ b/client/go/vespa/target_test.go
@@ -143,7 +143,10 @@ func createCloudTarget(t *testing.T, url string, logWriter io.Writer) Target {
x509KeyPair, err := tls.X509KeyPair(kp.Certificate, kp.PrivateKey)
assert.Nil(t, err)
- apiKey, err := CreateAPIKey()
+ var apiKey []byte = nil
+ if !Auth0AccessTokenEnabled() {
+ apiKey, err = CreateAPIKey()
+ }
assert.Nil(t, err)
target := CloudTarget(
@@ -154,7 +157,7 @@ func createCloudTarget(t *testing.T, url string, logWriter io.Writer) Target {
},
apiKey,
TLSOptions{KeyPair: x509KeyPair},
- LogOptions{Writer: logWriter})
+ LogOptions{Writer: logWriter}, "")
if ct, ok := target.(*cloudTarget); ok {
ct.apiURL = url
} else {
diff --git a/client/pom.xml b/client/pom.xml
index ba153aed8f8..4abcdf9ac6c 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -16,12 +16,6 @@
<packaging>jar</packaging>
<version>7-SNAPSHOT</version>
- <properties>
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <!-- TODO: Remove when we no longer support JDK 8 clients -->
- <maven.compiler.release>8</maven.compiler.release>
- </properties>
-
<dependencies>
<dependency>
<groupId>com.google.code.gson</groupId>
@@ -64,6 +58,13 @@
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <configuration>
+ <release>${vespaClients.jdk.releaseVersion}</release>
+ </configuration>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<configuration>
<finalName>${project.artifactId}-jar-with-dependencies</finalName>
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
index 8f9e5edc337..8b9d1f34154 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
@@ -4,23 +4,20 @@ package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.config.FileReference;
import com.yahoo.jrt.DoubleArray;
-import com.yahoo.jrt.Int32Value;
import com.yahoo.jrt.Method;
import com.yahoo.jrt.Request;
import com.yahoo.jrt.StringArray;
import com.yahoo.jrt.StringValue;
import com.yahoo.jrt.Supervisor;
-import java.util.logging.Level;
import com.yahoo.vespa.filedistribution.FileDownloader;
-import com.yahoo.vespa.filedistribution.FileReferenceDownload;
import java.io.File;
-import java.util.Arrays;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -67,10 +64,6 @@ class FileDistributionRpcServer {
.methodDesc("download status for file references")
.returnDesc(0, "file references", "array of file references")
.returnDesc(1, "download status", "percentage downloaded of each file reference in above array"));
- supervisor.addMethod(new Method("filedistribution.setFileReferencesToDownload", "S", "i", this::setFileReferencesToDownload)
- .methodDesc("set which file references to download")
- .paramDesc(0, "file references", "file reference to download")
- .returnDesc(0, "ret", "0 if success, 1 otherwise"));
}
@@ -105,14 +98,6 @@ class FileDistributionRpcServer {
req.returnValues().add(new DoubleArray(downloadStatusArray));
}
- private void setFileReferencesToDownload(Request req) {
- log.log(Level.FINE, () -> "Received method call '" + req.methodName() + "' with parameters : " + req.parameters());
- Arrays.stream(req.parameters().get(0).asStringArray())
- .map(FileReference::new)
- .forEach(fileReference -> downloader.downloadIfNeeded(new FileReferenceDownload(fileReference)));
- req.returnValues().add(new Int32Value(0));
- }
-
private void downloadFile(Request req) {
FileReference fileReference = new FileReference(req.parameters().get(0).asString());
log.log(Level.FINE, () -> "getFile() called for file reference '" + fileReference.value() + "'");
diff --git a/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java b/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
index 14385cbc90e..f6708a1432c 100644
--- a/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
+++ b/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
@@ -95,11 +95,11 @@ public class JRTConnectionPool implements ConnectionPool {
return pickNewConnectionRandomly(getSources());
}
- private JRTConnection pickNewConnectionRandomly(List<JRTConnection> sources) {
+ protected JRTConnection pickNewConnectionRandomly(List<JRTConnection> sources) {
return sources.get(ThreadLocalRandom.current().nextInt(0, sources.size()));
}
- List<JRTConnection> getSources() {
+ protected List<JRTConnection> getSources() {
List<JRTConnection> ret;
synchronized (connections) {
ret = new ArrayList<>(connections.values());
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java
index a49e2ec76bb..605f5924e68 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionImpl.java
@@ -41,8 +41,8 @@ public class FileDistributionImpl implements FileDistribution, RequestWaiter {
return fileReferencesDir;
}
- // Notifies config proxy which file references it should start downloading. It's OK if the call does not succeed,
- // as downloading will then start synchronously when a service requests a file reference instead
+ // Notifies the client which file references it should start downloading. It's OK if the call does not succeed,
+ // as this is just a hint to the client to start downloading. Currently the only client is the config server.
private void startDownloadingFileReferences(String hostName, int port, Set<FileReference> fileReferences) {
Target target = supervisor.connect(new Spec(hostName, port));
Request request = new Request("filedistribution.setFileReferencesToDownload");
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
index f34beae9c46..f4801c5a7ea 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
@@ -16,11 +16,14 @@ import com.yahoo.vespa.config.JRTConnectionPool;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.filedistribution.CompressedFileReference;
import com.yahoo.vespa.filedistribution.EmptyFileReferenceData;
+import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
import com.yahoo.vespa.filedistribution.LazyFileReferenceData;
import com.yahoo.vespa.filedistribution.LazyTemporaryStorageFileReferenceData;
+import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.yolean.Exceptions;
import java.io.File;
@@ -30,6 +33,7 @@ import java.nio.file.Path;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadPoolExecutor;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -73,14 +77,15 @@ public class FileServer {
@SuppressWarnings("WeakerAccess") // Created by dependency injection
@Inject
- public FileServer(ConfigserverConfig configserverConfig) {
+ public FileServer(ConfigserverConfig configserverConfig, FlagSource flagSource) {
this(new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())),
- createFileDownloader(getOtherConfigServersInCluster(configserverConfig)));
+ createFileDownloader(getOtherConfigServersInCluster(configserverConfig),
+ Flags.USE_FILE_DISTRIBUTION_CONNECTION_POOL.bindTo(flagSource).value()));
}
// For testing only
public FileServer(File rootDir) {
- this(rootDir, createFileDownloader(List.of()));
+ this(rootDir, createFileDownloader(List.of(), true));
}
public FileServer(File rootDir, FileDownloader fileDownloader) {
@@ -94,10 +99,6 @@ public class FileServer {
return hasFile(new FileReference(fileReference));
}
- FileDirectory getRootDir() {
- return root;
- }
-
private boolean hasFile(FileReference reference) {
try {
return root.getFile(reference).exists();
@@ -107,6 +108,8 @@ public class FileServer {
return false;
}
+ FileDirectory getRootDir() { return root; }
+
void startFileServing(String fileName, Receiver target) {
FileReference reference = new FileReference(fileName);
File file = root.getFile(reference);
@@ -153,6 +156,8 @@ public class FileServer {
}
public void serveFile(String fileReference, boolean downloadFromOtherSourceIfNotFound, Request request, Receiver receiver) {
+ if (executor instanceof ThreadPoolExecutor)
+ log.log(Level.FINE, () -> "Active thread count is now " + ((ThreadPoolExecutor) executor).getActiveCount());
executor.execute(() -> serveFileInternal(fileReference, downloadFromOtherSourceIfNotFound, request, receiver));
}
@@ -191,32 +196,34 @@ public class FileServer {
FileReferenceDownload newDownload = new FileReferenceDownload(fileReference, false, fileReferenceDownload.client());
return downloader.getFile(newDownload).isPresent();
} else {
- log.log(Level.FINE, "File not found, will not download from another source since request came from another config server");
+ log.log(Level.FINE, "File not found, will not download from another source, since request came from another config server");
return false;
}
}
- public FileDownloader downloader() {
- return downloader;
- }
+ public FileDownloader downloader() { return downloader; }
public void close() {
downloader.close();
executor.shutdown();
}
- private static FileDownloader createFileDownloader(List<String> configServers) {
+ private static FileDownloader createFileDownloader(List<String> configServers, boolean useFileDistributionConnectionPool) {
Supervisor supervisor = new Supervisor(new Transport("filedistribution-pool")).setDropEmptyBuffers(true);
return new FileDownloader(configServers.isEmpty()
? FileDownloader.emptyConnectionPool()
- : getConnectionPool(configServers, supervisor),
+ : createConnectionPool(configServers, supervisor, useFileDistributionConnectionPool),
supervisor);
}
- private static ConnectionPool getConnectionPool(List<String> configServers, Supervisor supervisor) {
- return configServers.size() > 0
- ? new JRTConnectionPool(new ConfigSourceSet(configServers), supervisor)
- : FileDownloader.emptyConnectionPool();
+ private static ConnectionPool createConnectionPool(List<String> configServers, Supervisor supervisor, boolean useFileDistributionConnectionPool) {
+ ConfigSourceSet configSourceSet = new ConfigSourceSet(configServers);
+
+ if (configServers.size() == 0) return FileDownloader.emptyConnectionPool();
+
+ return useFileDistributionConnectionPool
+ ? new FileDistributionConnectionPool(configSourceSet, supervisor)
+ : new JRTConnectionPool(configSourceSet, supervisor);
}
}
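createConnectionPool above picks the pool implementation from the use-file-distribution-connection-pool feature flag. A rough Go sketch of the same selection pattern; the types here are hypothetical stand-ins for the Java classes, not the Vespa APIs:

    package filedist

    // ConnectionPool is a hypothetical stand-in for the Java ConnectionPool interface.
    type ConnectionPool interface {
        Current() string
    }

    type jrtPool struct{ servers []string }

    func (p *jrtPool) Current() string { return p.servers[0] } // sticky "current" source

    type fileDistributionPool struct{ servers []string }

    func (p *fileDistributionPool) Current() string { return p.servers[0] } // the real variant picks randomly

    // newConnectionPool mirrors the shape of FileServer.createConnectionPool: no servers means no pool,
    // otherwise the feature flag decides which implementation is used.
    func newConnectionPool(servers []string, useFileDistributionPool bool) ConnectionPool {
        if len(servers) == 0 {
            return nil
        }
        if useFileDistributionPool {
            return &fileDistributionPool{servers: servers}
        }
        return &jrtPool{servers: servers}
    }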
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
index d4d4a7fa7d3..08c300220df 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
@@ -16,7 +16,9 @@ import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
+import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import java.io.File;
import java.time.Duration;
@@ -40,6 +42,8 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
private final File downloadDirectory;
private final ConfigserverConfig configserverConfig;
private final Supervisor supervisor;
+ private final boolean useFileDistributionConnectionPool;
+
ApplicationPackageMaintainer(ApplicationRepository applicationRepository,
Curator curator,
@@ -49,7 +53,8 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
this.applicationRepository = applicationRepository;
this.configserverConfig = applicationRepository.configserverConfig();
this.supervisor = new Supervisor(new Transport("filedistribution-pool")).setDropEmptyBuffers(true);
- downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
+ this.downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
+ this.useFileDistributionConnectionPool = Flags.USE_FILE_DISTRIBUTION_CONNECTION_POOL.bindTo(flagSource).value();
}
@Override
@@ -61,25 +66,24 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
try (var fileDownloader = createFileDownloader()) {
for (var applicationId : applicationRepository.listApplications()) {
- log.fine(() -> "Verifying application package for " + applicationId);
+ log.finest(() -> "Verifying application package for " + applicationId);
Session session = applicationRepository.getActiveSession(applicationId);
if (session == null) continue; // App might be deleted after call to listApplications() or not activated yet (bootstrap phase)
- FileReference applicationPackage = session.getApplicationPackageReference();
- long sessionId = session.getSessionId();
- log.fine(() -> "Verifying application package file reference " + applicationPackage + " for session " + sessionId);
-
- if (applicationPackage != null) {
+ FileReference appFileReference = session.getApplicationPackageReference();
+ if (appFileReference != null) {
+ long sessionId = session.getSessionId();
attempts++;
- if (! fileReferenceExistsOnDisk(downloadDirectory, applicationPackage)) {
- log.fine(() -> "Downloading missing application package for application " + applicationId + " (session " + sessionId + ")");
+ if (! fileReferenceExistsOnDisk(downloadDirectory, appFileReference)) {
+ log.fine(() -> "Downloading application package for " + applicationId + " (session " + sessionId + ")");
- FileReferenceDownload download = new FileReferenceDownload(applicationPackage,
+ FileReferenceDownload download = new FileReferenceDownload(appFileReference,
false,
this.getClass().getSimpleName());
if (fileDownloader.getFile(download).isEmpty()) {
failures++;
- log.warning("Failed to download application package for application " + applicationId + " (session " + sessionId + ")");
+ log.warning("Failed to download application package (" + appFileReference + ")" +
+ " for " + applicationId + " (session " + sessionId + ")");
continue;
}
}
@@ -91,7 +95,10 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
}
private FileDownloader createFileDownloader() {
- return new FileDownloader(new JRTConnectionPool(new ConfigSourceSet(getOtherConfigServersInCluster(configserverConfig)), supervisor),
+ ConfigSourceSet configSourceSet = new ConfigSourceSet(getOtherConfigServersInCluster(configserverConfig));
+ return new FileDownloader(useFileDistributionConnectionPool
+ ? new FileDistributionConnectionPool(configSourceSet, supervisor)
+ : new JRTConnectionPool(configSourceSet, supervisor),
supervisor,
downloadDirectory);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index 5a0a9b1d796..ce592c3282a 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -242,7 +242,8 @@ public class SessionPreparer {
try {
this.preprocessedApplicationPackage = applicationPackage.preprocess(properties.zone(), logger);
} catch (IOException | RuntimeException e) {
- throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e);
+ throw new IllegalArgumentException("Error preprocessing application package for " + applicationId +
+ ", session " + sessionZooKeeperClient.sessionId(), e);
}
checkTimeout("preprocess");
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
index 11b2881bc34..0b34bd95f8e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
@@ -66,6 +66,7 @@ public class SessionZooKeeperClient {
private final Curator curator;
private final TenantName tenantName;
+ private final long sessionId;
private final Path sessionPath;
private final Path sessionStatusPath;
private final String serverId; // hostname
@@ -75,6 +76,7 @@ public class SessionZooKeeperClient {
public SessionZooKeeperClient(Curator curator, TenantName tenantName, long sessionId, String serverId, AddFileInterface fileManager, int maxNodeSize) {
this.curator = curator;
this.tenantName = tenantName;
+ this.sessionId = sessionId;
this.sessionPath = getSessionPath(tenantName, sessionId);
this.serverId = serverId;
this.sessionStatusPath = sessionPath.append(ZKApplication.SESSIONSTATE_ZK_SUBPATH);
@@ -106,6 +108,8 @@ public class SessionZooKeeperClient {
}
}
+ public long sessionId() { return sessionId; }
+
public CompletionWaiter createActiveWaiter() { return createCompletionWaiter(ACTIVE_BARRIER); }
CompletionWaiter createPrepareWaiter() { return createCompletionWaiter(PREPARE_BARRIER); }
diff --git a/configserver/src/main/sh/start-configserver b/configserver/src/main/sh/start-configserver
index 317af4b2fea..efee86be29f 100755
--- a/configserver/src/main/sh/start-configserver
+++ b/configserver/src/main/sh/start-configserver
@@ -177,6 +177,7 @@ vespa-run-as-vespa-user vespa-runserver -s configserver -r 30 -p $pidfile -- \
--add-opens=java.base/java.nio=ALL-UNNAMED \
--add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
+ --add-opens=java.base/sun.security.util=ALL-UNNAMED \
-Djava.io.tmpdir=${VESPA_HOME}/tmp \
-Djava.library.path=${VESPA_HOME}/lib64 \
-Djava.awt.headless=true \
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
index 29ec11bad26..f85ca37a351 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
@@ -10,6 +10,7 @@ import com.yahoo.net.HostName;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -117,7 +118,7 @@ public class FileServerTest {
private FileServer createFileServer(ConfigserverConfig.Builder configBuilder) throws IOException {
File fileReferencesDir = temporaryFolder.newFolder();
configBuilder.fileReferencesDir(fileReferencesDir.getAbsolutePath());
- return new FileServer(new ConfigserverConfig(configBuilder));
+ return new FileServer(new ConfigserverConfig(configBuilder), new InMemoryFlagSource());
}
private static class FileReceiver implements FileServer.Receiver {
diff --git a/container-disc/src/main/sh/vespa-start-container-daemon.sh b/container-disc/src/main/sh/vespa-start-container-daemon.sh
index ded38e9f7c9..d465edb3c39 100755
--- a/container-disc/src/main/sh/vespa-start-container-daemon.sh
+++ b/container-disc/src/main/sh/vespa-start-container-daemon.sh
@@ -271,6 +271,7 @@ exec $numactlcmd $envcmd java \
--add-opens=java.base/java.nio=ALL-UNNAMED \
--add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
+ --add-opens=java.base/sun.security.util=ALL-UNNAMED \
-Djava.io.tmpdir="${VESPA_HOME}/tmp" \
-Djava.library.path="${VESPA_HOME}/lib64" \
-Djava.awt.headless=true \
diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
index 99de33aca94..51c82eac264 100644
--- a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
@@ -23,6 +23,7 @@ import com.yahoo.search.query.ParameterParser;
import com.yahoo.search.result.ErrorMessage;
import com.yahoo.search.searchchain.Execution;
import com.yahoo.vespa.streamingvisitors.VdsStreamingSearcher;
+import com.yahoo.yolean.Exceptions;
import java.util.ArrayList;
import java.util.Collection;
@@ -312,8 +313,9 @@ public class ClusterSearcher extends Searcher {
mergedResult.mergeWith(result);
mergedResult.hits().addAll(result.hits().asUnorderedHits());
} catch (ExecutionException | InterruptedException e) {
- mergedResult.mergeWith(new Result(query,
- ErrorMessage.createInternalServerError("Unable to query restrict='" + query.getModel().getRestrict() + "'\n" + e)));
+ mergedResult.hits().addError(ErrorMessage.createInternalServerError("Failed querying '" +
+ query.getModel().getRestrict() + "': " +
+ Exceptions.toMessageString(e)));
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
index 2f9c8488668..e8a7f7729fb 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -423,10 +423,7 @@ public class ApplicationSerializer {
Optional<Instant> buildTime = SlimeUtils.optionalInstant(object.field(buildTimeField));
Optional<String> sourceUrl = SlimeUtils.optionalString(object.field(sourceUrlField));
Optional<String> commit = SlimeUtils.optionalString(object.field(commitField));
-
- // TODO (freva): Simplify once this has rolled out everywhere
- Inspector deployedDirectlyInspector = object.field(deployedDirectlyField);
- boolean deployedDirectly = deployedDirectlyInspector.valid() && deployedDirectlyInspector.asBool();
+ boolean deployedDirectly = object.field(deployedDirectlyField).asBool();
return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
index 6af3978d0cd..289d6c3a99d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
@@ -177,10 +177,7 @@ class RunSerializer {
Optional<Instant> buildTime = SlimeUtils.optionalInstant(versionObject.field(buildTimeField));
Optional<String> sourceUrl = SlimeUtils.optionalString(versionObject.field(sourceUrlField));
Optional<String> commit = SlimeUtils.optionalString(versionObject.field(commitField));
-
- // TODO (freva): Simplify once this has rolled out everywhere
- Inspector deployedDirectlyInspector = versionObject.field(deployedDirectlyField);
- boolean deployedDirectly = deployedDirectlyInspector.valid() && deployedDirectlyInspector.asBool();
+ boolean deployedDirectly = versionObject.field(deployedDirectlyField).asBool();
return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail,
compileVersion, buildTime, sourceUrl, commit, deployedDirectly);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index ef129dd76f7..5cd5a70e4a4 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -2042,19 +2042,20 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
return new MessageResponse("Deactivated " + id);
}
- /** Returns test config for indicated job, with production deployments of the default instance. */
+ /** Returns test config for the indicated job, with production deployments of the default instance if the given instance is not in the deployment spec. */
private HttpResponse testConfig(ApplicationId id, JobType type) {
- // TODO jonmv: Support non-default instances as well; requires API change in clients.
- ApplicationId defaultInstanceId = TenantAndApplicationId.from(id).defaultInstance();
+ Application application = controller.applications().requireApplication(TenantAndApplicationId.from(id));
+ ApplicationId prodInstanceId = application.deploymentSpec().instance(id.instance()).isPresent()
+ ? id : TenantAndApplicationId.from(id).defaultInstance();
HashSet<DeploymentId> deployments = controller.applications()
- .getInstance(defaultInstanceId).stream()
- .flatMap(instance -> instance.productionDeployments().keySet().stream())
- .map(zone -> new DeploymentId(defaultInstanceId, zone))
- .collect(Collectors.toCollection(HashSet::new));
- var testedZone = type.zone(controller.system());
-
- // If a production job is specified, the production deployment of the _default instance_ is the relevant one,
- // as user instances should not exist in prod. TODO jonmv: Remove this when multiple instances are supported (above).
+ .getInstance(prodInstanceId).stream()
+ .flatMap(instance -> instance.productionDeployments().keySet().stream())
+ .map(zone -> new DeploymentId(prodInstanceId, zone))
+ .collect(Collectors.toCollection(HashSet::new));
+ ZoneId testedZone = type.zone(controller.system());
+
+ // If a production job is specified, the production deployment of the orchestrated instance is the relevant one,
+ // as user instances should not exist in prod.
if ( ! type.isProduction())
deployments.add(new DeploymentId(id, testedZone));
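The rewritten testConfig above uses the requested instance when the deployment spec declares it and otherwise falls back to the default instance. A small sketch of that selection rule, with hypothetical names, for illustration only:

    package main

    import "fmt"

    // instanceForTestConfig returns the requested instance if the deployment spec declares it,
    // otherwise the default instance (hypothetical helper, not the controller API).
    func instanceForTestConfig(requested, defaultInstance string, declared map[string]bool) string {
        if declared[requested] {
            return requested
        }
        return defaultInstance
    }

    func main() {
        declared := map[string]bool{"default": true, "beta": true}
        fmt.Println(instanceForTestConfig("beta", "default", declared))    // beta
        fmt.Println(instanceForTestConfig("my-user", "default", declared)) // default
    }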
diff --git a/controller-server/src/main/resources/configdefinitions/vespa.hosted.controller.maven.repository.config.maven-repository.def b/controller-server/src/main/resources/configdefinitions/vespa.hosted.controller.maven.repository.config.maven-repository.def
index 900a5b854ce..2f32c0375ea 100644
--- a/controller-server/src/main/resources/configdefinitions/vespa.hosted.controller.maven.repository.config.maven-repository.def
+++ b/controller-server/src/main/resources/configdefinitions/vespa.hosted.controller.maven.repository.config.maven-repository.def
@@ -12,4 +12,4 @@ groupId string default=com.yahoo.vespa
# Artifact ID of the artifact to list versions for
#
-artifactId string default=tenant-base
+artifactId string default=cloud-tenant-base
diff --git a/default_build_settings.cmake b/default_build_settings.cmake
index d3ee77b887f..e482439dd7d 100644
--- a/default_build_settings.cmake
+++ b/default_build_settings.cmake
@@ -232,6 +232,7 @@ function(vespa_use_default_build_settings)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${DEFAULT_CMAKE_SHARED_LINKER_FLAGS}" PARENT_SCOPE)
endif()
if(NOT DEFINED DEFAULT_VESPA_CPU_ARCH_FLAGS)
+ message("-- CMAKE_SYSTEM_PROCESSOR = ${CMAKE_SYSTEM_PROCESSOR}")
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
if(VESPA_OS_DISTRO STREQUAL "fedora" AND "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
set(DEFAULT_VESPA_CPU_ARCH_FLAGS "-march=westmere -mtune=haswell")
@@ -239,6 +240,8 @@ function(vespa_use_default_build_settings)
else()
set(DEFAULT_VESPA_CPU_ARCH_FLAGS "-mtune=intel")
endif()
+ elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
+ set(DEFAULT_VESPA_CPU_ARCH_FLAGS "-march=armv8.2-a+fp16+rcpc+dotprod+crypto -mtune=neoverse-n1")
endif()
endif()
if(DEFINED DEFAULT_CMAKE_PREFIX_PATH)
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 81a180348c2..031213ac693 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -59,6 +59,7 @@ BuildRequires: python3-devel
%if 0%{?el8}
BuildRequires: gcc-toolset-10-gcc-c++
BuildRequires: gcc-toolset-10-binutils
+BuildRequires: gcc-toolset-10-libatomic-devel
%define _devtoolset_enable /opt/rh/gcc-toolset-10/enable
BuildRequires: maven
BuildRequires: pybind11-devel
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDistributionConnectionPool.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDistributionConnectionPool.java
new file mode 100644
index 00000000000..3a03e6a87d5
--- /dev/null
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDistributionConnectionPool.java
@@ -0,0 +1,43 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.filedistribution;
+
+import com.yahoo.config.subscription.ConfigSourceSet;
+import com.yahoo.jrt.Supervisor;
+import com.yahoo.vespa.config.Connection;
+import com.yahoo.vespa.config.JRTConnection;
+import com.yahoo.vespa.config.JRTConnectionPool;
+
+import java.util.List;
+
+
+/**
+ * A pool of JRT connections to a set of file distribution sources (one or more config servers).
+ * Used by file distribution clients, where the source that can serve a file reference might be
+ * different for each file reference (unlike config requests, where all requests should be served by the same source).
+ * A new connection is chosen randomly when calling {@link #switchConnection(Connection)}.
+ * Unlike JRTConnectionPool there is no state that holds the 'current' connection; a new connection is picked
+ * randomly whenever {@link #getCurrent()} is called.
+ *
+ * @author hmusum
+ */
+public class FileDistributionConnectionPool extends JRTConnectionPool {
+
+ public FileDistributionConnectionPool(ConfigSourceSet sourceSet, Supervisor supervisor) {
+ super(sourceSet, supervisor);
+ }
+
+ @Override
+ public synchronized JRTConnection getCurrent() {
+ return pickNewConnectionRandomly(getSources());
+ }
+
+ @Override
+ public synchronized JRTConnection switchConnection(Connection failingConnection) {
+ if (getSources().size() <= 1) return getCurrent();
+
+ List<JRTConnection> sourceCandidates = getSources();
+ sourceCandidates.remove(failingConnection);
+ return pickNewConnectionRandomly(sourceCandidates);
+ }
+
+}
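As the class comment above describes, the pool never keeps a sticky connection: getCurrent picks a source at random, and switchConnection picks randomly among the remaining sources when one fails. A Go sketch of that strategy under the same assumptions (at least one source; the failing one is reused if it is the only source):

    package filedist

    import "math/rand"

    // pickRandom assumes a non-empty source list, as the pool above does.
    func pickRandom(sources []string) string {
        return sources[rand.Intn(len(sources))]
    }

    // switchConnection drops the failing source and picks randomly among the rest;
    // with a single source there is nothing to switch to.
    func switchConnection(sources []string, failing string) string {
        if len(sources) <= 1 {
            return pickRandom(sources)
        }
        candidates := make([]string, 0, len(sources)-1)
        for _, s := range sources {
            if s != failing {
                candidates = append(candidates, s)
            }
        }
        if len(candidates) == 0 {
            return pickRandom(sources)
        }
        return pickRandom(candidates)
    }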
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
index 445106f4fe4..1bb6b7586f5 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
@@ -85,34 +85,42 @@ public class FileReferenceDownloader {
}
private boolean startDownloadRpc(FileReferenceDownload fileReferenceDownload, int retryCount) {
+ Request request = createRequest(fileReferenceDownload);
Connection connection = connectionPool.getCurrent();
- Request request = new Request("filedistribution.serveFile");
- String fileReference = fileReferenceDownload.fileReference().value();
- request.parameters().add(new StringValue(fileReference));
- request.parameters().add(new Int32Value(fileReferenceDownload.downloadFromOtherSourceIfNotFound() ? 0 : 1));
- double timeoutSecs = (double) rpcTimeout.getSeconds();
- timeoutSecs += retryCount * 10.0;
- connection.invokeSync(request, timeoutSecs);
+ connection.invokeSync(request, rpcTimeout(retryCount).getSeconds());
+
Level logLevel = (retryCount > 3 ? Level.INFO : Level.FINE);
+ FileReference fileReference = fileReferenceDownload.fileReference();
if (validateResponse(request)) {
log.log(Level.FINE, () -> "Request callback, OK. Req: " + request + "\nSpec: " + connection + ", retry count " + retryCount);
if (request.returnValues().get(0).asInt32() == 0) {
- log.log(Level.FINE, () -> "Found file reference '" + fileReference + "' available at " + connection.getAddress());
+ log.log(Level.FINE, () -> "Found '" + fileReference + "' available at " + connection.getAddress());
return true;
} else {
- log.log(logLevel, "File reference '" + fileReference + "' not found at " + connection.getAddress());
+ log.log(logLevel, "'" + fileReference + "' not found at " + connection.getAddress());
connectionPool.switchConnection(connection);
return false;
}
} else {
- log.log(logLevel, () -> "Downloading file " + fileReference + " from " + connection.getAddress() + " failed: " +
- request + ", error: " + request.errorMessage() + ", will use another config server for next request" +
- " (retry count " + retryCount + ", rpc timeout " + rpcTimeout.getSeconds() + ")");
+ log.log(logLevel, "Downloading " + fileReference + " from " + connection.getAddress() + " failed: " +
+ request + ", error: " + request.errorMessage() + ", will switch config server for next request" +
+ " (retry " + retryCount + ", rpc timeout " + rpcTimeout(retryCount) + ")");
connectionPool.switchConnection(connection);
return false;
}
}
+ private Request createRequest(FileReferenceDownload fileReferenceDownload) {
+ Request request = new Request("filedistribution.serveFile");
+ request.parameters().add(new StringValue(fileReferenceDownload.fileReference().value()));
+ request.parameters().add(new Int32Value(fileReferenceDownload.downloadFromOtherSourceIfNotFound() ? 0 : 1));
+ return request;
+ }
+
+ private Duration rpcTimeout(int retryCount) {
+ return Duration.ofSeconds(rpcTimeout.getSeconds()).plus(Duration.ofSeconds(retryCount * 10L));
+ }
+
private boolean validateResponse(Request request) {
if (request.isError()) {
return false;
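The new rpcTimeout above grows the RPC timeout linearly with the retry count (base timeout plus 10 seconds per retry). The same rule as a small Go sketch, with a hypothetical base value:

    package main

    import (
        "fmt"
        "time"
    )

    // rpcTimeout adds 10 seconds per retry on top of the base timeout, matching the Java helper above.
    func rpcTimeout(base time.Duration, retryCount int) time.Duration {
        return base + time.Duration(retryCount)*10*time.Second
    }

    func main() {
        base := 30 * time.Second // hypothetical base; the real value comes from the downloader's configuration
        for retry := 0; retry <= 3; retry++ {
            fmt.Println(retry, rpcTimeout(base, retry)) // 30s, 40s, 50s, 60s
        }
    }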
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 89c20165390..74e0ab2c3d9 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -370,6 +370,12 @@ public class Flags {
"Triggers restart, takes effect immediately",
ZONE_ID, APPLICATION_ID);
+ public static final UnboundBooleanFlag USE_FILE_DISTRIBUTION_CONNECTION_POOL = defineFeatureFlag(
+ "use-file-distribution-connection-pool", false,
+ List.of("hmusum"), "2021-11-16", "2021-12-16",
+ "Whether to use FileDistributionConnectionPool instead of JRTConnectionPool for file downloads",
+ "Takes effect on config server restart",
+ ZONE_ID);
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
diff --git a/http-utils/pom.xml b/http-utils/pom.xml
index e387e2c59e1..be62b7adb35 100644
--- a/http-utils/pom.xml
+++ b/http-utils/pom.xml
@@ -12,12 +12,6 @@
<packaging>jar</packaging>
<version>7-SNAPSHOT</version>
- <properties>
- <!-- vespa-http-client targets jdk8 and uses this library -->
- <!-- TODO remove once vespa-http-client no longer builds against jdk8 -->
- <maven.compiler.release>8</maven.compiler.release>
- </properties>
-
<dependencies>
<!-- provided -->
<dependency>
@@ -73,11 +67,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
- <jdkToolchain>
- <version>${java.version}</version>
- </jdkToolchain>
- <source>${java.version}</source>
- <target>${java.version}</target>
+ <release>${vespaClients.jdk.releaseVersion}</release>
<showDeprecation>true</showDeprecation>
<compilerArgs>
<arg>-Xlint:all</arg>
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java
index a3bce4c687e..cfa0452ebf9 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngine.java
@@ -6,6 +6,7 @@ import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import com.yahoo.vespa.hosted.node.admin.container.image.Image;
import com.yahoo.vespa.hosted.node.admin.nodeagent.ContainerData;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
+import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixUser;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import java.time.Duration;
@@ -40,8 +41,8 @@ public interface ContainerEngine {
/** Returns the network interface used by container in given context */
String networkInterface(NodeAgentContext context);
- /** Execute command inside container as root. Ignores non-zero exit code */
- CommandResult executeAsRoot(NodeAgentContext context, Duration timeout, String... command);
+ /** Execute command inside container as given user. Ignores non-zero exit code */
+ CommandResult execute(NodeAgentContext context, UnixUser user, Duration timeout, String... command);
/** Execute command inside the container's network namespace. Throws on non-zero exit code */
CommandResult executeInNetworkNamespace(NodeAgentContext context, String... command);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
index 3017773700a..8a66373c28b 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
@@ -7,6 +7,7 @@ import com.yahoo.vespa.hosted.node.admin.container.image.ContainerImageDownloade
import com.yahoo.vespa.hosted.node.admin.container.image.ContainerImagePruner;
import com.yahoo.vespa.hosted.node.admin.nodeagent.ContainerData;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
+import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixUser;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandLine;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
@@ -66,13 +67,13 @@ public class ContainerOperations {
}
/** Executes a command inside container identified by given context. Does NOT throw on non-zero exit code */
- public CommandResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) {
- return executeCommandInContainerAsRoot(context, CommandLine.DEFAULT_TIMEOUT.toSeconds(), command);
+ public CommandResult executeCommandInContainer(NodeAgentContext context, UnixUser user, String... command) {
+ return executeCommandInContainer(context, user, CommandLine.DEFAULT_TIMEOUT, command);
}
/** Execute command inside container identified by given context. Does NOT throw on non-zero exit code */
- public CommandResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) {
- return containerEngine.executeAsRoot(context, Duration.ofSeconds(timeoutSeconds), command);
+ public CommandResult executeCommandInContainer(NodeAgentContext context, UnixUser user, Duration timeout, String... command) {
+ return containerEngine.execute(context, user, timeout, command);
}
/** Execute command in inside containers network namespace, identified by given context. Throws on non-zero exit code */
@@ -142,7 +143,7 @@ public class ContainerOperations {
private String executeNodeCtlInContainer(NodeAgentContext context, String program) {
String[] command = new String[] {context.paths().underVespaHome("bin/vespa-nodectl").pathInContainer(), program};
- return executeCommandInContainerAsRoot(context, command).getOutput();
+ return executeCommandInContainer(context, context.users().vespa(), command).getOutput();
}
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
index c5c8d0e121d..60435082745 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
@@ -29,38 +29,25 @@ public class CoreCollector {
private static final Pattern CORE_GENERATOR_PATH_PATTERN = Pattern.compile("^Core was generated by `(?<path>.*?)'.$");
private static final Pattern EXECFN_PATH_PATTERN = Pattern.compile("^.* execfn: '(?<path>.*?)'");
private static final Pattern FROM_PATH_PATTERN = Pattern.compile("^.* from '(?<path>.*?)'");
- static final String GDB_PATH_RHEL7_DT9 = "/opt/rh/devtoolset-9/root/bin/gdb";
- static final String GDB_PATH_RHEL7_DT10 = "/opt/rh/devtoolset-10/root/bin/gdb";
static final String GDB_PATH_RHEL8 = "/opt/rh/gcc-toolset-10/root/bin/gdb";
static final Map<String, Object> JAVA_HEAP_DUMP_METADATA =
Map.of("bin_path", "java", "backtrace", List.of("Heap dump, no backtrace available"));
- private final ContainerOperations docker;
+ private final ContainerOperations container;
- public CoreCollector(ContainerOperations docker) {
- this.docker = docker;
+ public CoreCollector(ContainerOperations container) {
+ this.container = container;
}
String getGdbPath(NodeAgentContext context) {
- // TODO: Remove when we do not have any devtoolset-9 installs left
- String[] command_rhel7_dt9 = {"stat", GDB_PATH_RHEL7_DT9};
- if (docker.executeCommandInContainerAsRoot(context, command_rhel7_dt9).getExitCode() == 0) {
- return GDB_PATH_RHEL7_DT9;
- }
-
- String[] command_rhel7_dt10 = {"stat", GDB_PATH_RHEL7_DT10};
- if (docker.executeCommandInContainerAsRoot(context, command_rhel7_dt10).getExitCode() == 0) {
- return GDB_PATH_RHEL7_DT10;
- }
-
return GDB_PATH_RHEL8;
}
String readBinPathFallback(NodeAgentContext context, ContainerPath coredumpPath) {
String command = getGdbPath(context) + " -n -batch -core " + coredumpPath.pathInContainer() + " | grep \'^Core was generated by\'";
String[] wrappedCommand = {"/bin/sh", "-c", command};
- CommandResult result = docker.executeCommandInContainerAsRoot(context, wrappedCommand);
+ CommandResult result = container.executeCommandInContainer(context, context.users().root(), wrappedCommand);
Matcher matcher = CORE_GENERATOR_PATH_PATTERN.matcher(result.getOutput());
if (! matcher.find()) {
@@ -73,7 +60,7 @@ public class CoreCollector {
String readBinPath(NodeAgentContext context, ContainerPath coredumpPath) {
String[] command = {"file", coredumpPath.pathInContainer()};
try {
- CommandResult result = docker.executeCommandInContainerAsRoot(context, command);
+ CommandResult result = container.executeCommandInContainer(context, context.users().root(), command);
if (result.getExitCode() != 0) {
throw new ConvergenceException("file command failed with " + asString(result));
}
@@ -99,7 +86,7 @@ public class CoreCollector {
String threads = allThreads ? "thread apply all bt" : "bt";
String[] command = {getGdbPath(context), "-n", "-ex", threads, "-batch", binPath, coredumpPath.pathInContainer()};
- CommandResult result = docker.executeCommandInContainerAsRoot(context, command);
+ CommandResult result = container.executeCommandInContainer(context, context.users().root(), command);
if (result.getExitCode() != 0)
throw new ConvergenceException("Failed to read backtrace " + asString(result) + ", Command: " + Arrays.toString(command));
@@ -109,7 +96,7 @@ public class CoreCollector {
List<String> readJstack(NodeAgentContext context, ContainerPath coredumpPath, String binPath) {
String[] command = {"jhsdb", "jstack", "--exe", binPath, "--core", coredumpPath.pathInContainer()};
- CommandResult result = docker.executeCommandInContainerAsRoot(context, command);
+ CommandResult result = container.executeCommandInContainer(context, context.users().root(), command);
if (result.getExitCode() != 0)
throw new ConvergenceException("Failed to read jstack " + asString(result) + ", Command: " + Arrays.toString(command));
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/AbstractProducer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/AbstractProducer.java
deleted file mode 100644
index a1416d3274c..00000000000
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/AbstractProducer.java
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.node.admin.maintenance.servicedump;
-
-import com.yahoo.vespa.hosted.node.admin.container.ContainerOperations;
-import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
-import com.yahoo.vespa.hosted.node.admin.task.util.fs.ContainerPath;
-import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-import java.util.stream.Collectors;
-
-/**
- * @author bjorncs
- */
-abstract class AbstractProducer implements ArtifactProducer {
-
- private final Logger log = Logger.getLogger(getClass().getName());
-
- private final ContainerOperations container;
-
- protected AbstractProducer(ContainerOperations container) { this.container = container; }
-
- protected ContainerOperations container() { return container; }
-
- protected CommandResult executeCommand(NodeAgentContext ctx, List<String> command, boolean logOutput) throws IOException {
- CommandResult result = container.executeCommandInContainerAsRoot(ctx, command.toArray(new String[0]));
- String cmdString = command.stream().map(s -> "'" + s + "'").collect(Collectors.joining(" ", "\"", "\""));
- int exitCode = result.getExitCode();
- String output = result.getOutput().trim();
- String prefixedOutput = output.contains("\n")
- ? "\n" + output
- : (output.isEmpty() ? "<no output>" : output);
- if (exitCode > 0) {
- String errorMsg = logOutput
- ? String.format("Failed to execute %s (exited with code %d): %s", cmdString, exitCode, prefixedOutput)
- : String.format("Failed to execute %s (exited with code %d)", cmdString, exitCode);
- throw new IOException(errorMsg);
- } else {
- String logMsg = logOutput
- ? String.format("Executed command %s. Exited with code %d and output: %s", cmdString, exitCode, prefixedOutput)
- : String.format("Executed command %s. Exited with code %d.", cmdString, exitCode);
- ctx.log(log, logMsg);
- }
- return result;
- }
-
- protected int findVespaServicePid(NodeAgentContext ctx, String configId) throws IOException {
- ContainerPath findPidBinary = ctx.paths().underVespaHome("libexec/vespa/find-pid");
- CommandResult findPidResult = executeCommand(ctx, List.of(findPidBinary.pathInContainer(), configId), true);
- return Integer.parseInt(findPidResult.getOutput());
- }
-
- protected double duration(NodeAgentContext ctx, ServiceDumpReport.DumpOptions options, double defaultValue) {
- double duration = options != null && options.duration() != null && options.duration() > 0
- ? options.duration() : defaultValue;
- double maxDuration = 300;
- if (duration > maxDuration) {
- ctx.log(log, Level.WARNING,
- String.format("Specified duration %.3fs longer than max allowed (%.3fs)", duration, maxDuration));
- return maxDuration;
- }
- return duration;
- }
-
-}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java
index 86dc1ed983d..b30b8e22fc5 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImpl.java
@@ -201,7 +201,7 @@ public class VespaServiceDumperImpl implements VespaServiceDumper {
@Override
public CommandResult executeCommandInNode(List<String> command, boolean logOutput) {
- CommandResult result = container.executeCommandInContainerAsRoot(nodeAgentCtx, command.toArray(new String[0]));
+ CommandResult result = container.executeCommandInContainer(nodeAgentCtx, nodeAgentCtx.users().vespa(), command.toArray(new String[0]));
String cmdString = command.stream().map(s -> "'" + s + "'").collect(Collectors.joining(" ", "\"", "\""));
int exitCode = result.getExitCode();
String output = result.getOutput().trim();
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index 92aacf8827b..f184deab375 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -438,19 +438,19 @@ public class NodeAgentImpl implements NodeAgent {
void doConverge(NodeAgentContext context) {
NodeSpec node = context.node();
Optional<Container> container = getContainer(context);
- if (!node.equals(lastNode)) {
- logChangesToNodeSpec(context, lastNode, node);
- // Current reboot generation uninitialized or incremented from outside to cancel reboot
- if (currentRebootGeneration < node.currentRebootGeneration())
- currentRebootGeneration = node.currentRebootGeneration();
+ // Current reboot generation uninitialized or incremented from outside to cancel reboot
+ if (currentRebootGeneration < node.currentRebootGeneration())
+ currentRebootGeneration = node.currentRebootGeneration();
- // Either we have changed allocation status (restart gen. only available to allocated nodes), or
- // restart generation has been incremented from outside to cancel restart
- if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
- currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
- currentRestartGeneration = node.currentRestartGeneration();
+ // Either we have changed allocation status (restart gen. only available to allocated nodes), or
+ // restart generation has been incremented from outside to cancel restart
+ if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() ||
+ currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false))
+ currentRestartGeneration = node.currentRestartGeneration();
+ if (!node.equals(lastNode)) {
+ logChangesToNodeSpec(context, lastNode, node);
lastNode = node;
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/TestProcessFactory.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/TestProcessFactory.java
index 6bd9deb76ed..063dc7f1324 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/TestProcessFactory.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/process/TestProcessFactory.java
@@ -93,8 +93,10 @@ public class TestProcessFactory implements ProcessFactory {
String actualCommandLineString = commandLine.toString();
if (!Objects.equals(actualCommandLineString, expectedCommandLineString)) {
muteVerifyAllCommandsExecuted = true;
- throw new IllegalArgumentException("Expected command #" + commandSequenceNumber + " to be '" +
- expectedCommandLineString + "' but got '" + actualCommandLineString + "'");
+ throw new IllegalArgumentException("Expected command #" + commandSequenceNumber + " to be: \n" +
+ " \"" + expectedCommandLineString + "\"\n" +
+ "but got:\n" +
+ " \"" + actualCommandLineString + "\"");
}
return toReturn;
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java
index 3eab24a7a66..25cdff4b726 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerEngineMock.java
@@ -6,6 +6,7 @@ import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import com.yahoo.vespa.hosted.node.admin.container.image.Image;
import com.yahoo.vespa.hosted.node.admin.nodeagent.ContainerData;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
+import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixUser;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import java.time.Duration;
@@ -109,7 +110,7 @@ public class ContainerEngineMock implements ContainerEngine {
}
@Override
- public CommandResult executeAsRoot(NodeAgentContext context, Duration timeout, String... command) {
+ public CommandResult execute(NodeAgentContext context, UnixUser user, Duration timeout, String... command) {
return new CommandResult(null, 0, "");
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
index ed45768aec8..8ab6bce2b8c 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
@@ -12,8 +12,6 @@ import org.junit.Test;
import java.util.List;
import java.util.Map;
-import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL7_DT10;
-import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL7_DT9;
import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL8;
import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.JAVA_HEAP_DUMP_METADATA;
import static org.junit.Assert.assertEquals;
@@ -63,10 +61,8 @@ public class CoreCollectorTest {
"execfn: '/usr/bin/program', platform: 'x86_64");
assertEquals(TEST_BIN_PATH, coreCollector.readBinPath(context, TEST_CORE_PATH));
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
-
String fallbackResponse = "/response/from/fallback";
- mockExec(new String[]{"/bin/sh", "-c", GDB_PATH_RHEL7_DT9 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
+ mockExec(new String[]{"/bin/sh", "-c", GDB_PATH_RHEL8 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
"Core was generated by `/response/from/fallback'.");
mockExec(cmd,
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style");
@@ -78,11 +74,8 @@ public class CoreCollectorTest {
@Test
public void extractsBinaryPathUsingGdbTest() {
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "The stat output");
-
final String[] cmd = new String[]{"/bin/sh", "-c",
- GDB_PATH_RHEL7_DT10 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"};
+ GDB_PATH_RHEL8 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"};
mockExec(cmd, "Core was generated by `/usr/bin/program-from-gdb --identity foo/search/cluster.content_'.");
assertEquals("/usr/bin/program-from-gdb", coreCollector.readBinPathFallback(context, TEST_CORE_PATH));
@@ -93,34 +86,30 @@ public class CoreCollectorTest {
fail("Expected not to be able to get bin path");
} catch (RuntimeException e) {
assertEquals("Failed to extract binary path from GDB, result: exit status 1, output 'Error 123', command: " +
- "[/bin/sh, -c, /opt/rh/devtoolset-10/root/bin/gdb -n -batch -core /tmp/core.1234 | grep '^Core was generated by']", e.getMessage());
+ "[/bin/sh, -c, /opt/rh/gcc-toolset-10/root/bin/gdb -n -batch -core /tmp/core.1234 | grep '^Core was generated by']", e.getMessage());
}
}
@Test
public void extractsBacktraceUsingGdb() {
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
-
- mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false));
- mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
"", "Failure");
try {
coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false);
fail("Expected not to be able to read backtrace");
} catch (RuntimeException e) {
assertEquals("Failed to read backtrace exit status 1, output 'Failure', Command: " +
- "[" + GDB_PATH_RHEL7_DT9 + ", -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage());
+ "[" + GDB_PATH_RHEL8 + ", -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage());
}
}
@Test
public void extractsBacktraceFromAllThreadsUsingGdb() {
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
-
- mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "thread apply all bt", "-batch",
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "thread apply all bt", "-batch",
"/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, true));
@@ -131,8 +120,6 @@ public class CoreCollectorTest {
mockExec(new String[]{"file", TEST_CORE_PATH.pathInContainer()},
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " +
"'/usr/bin/program'");
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "", "stat: No such file or directory");
mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "thread apply all bt", "-batch",
@@ -151,8 +138,7 @@ public class CoreCollectorTest {
mockExec(new String[]{"file", TEST_CORE_PATH.pathInContainer()},
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " +
"'/usr/bin/program'");
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
- mockExec(new String[]{GDB_PATH_RHEL7_DT9 + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"},
+ mockExec(new String[]{GDB_PATH_RHEL8 + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"},
"", "Failure");
Map<String, Object> expectedData = Map.of("bin_path", TEST_BIN_PATH);
@@ -164,9 +150,6 @@ public class CoreCollectorTest {
mockExec(new String[]{"file", TEST_CORE_PATH.pathInContainer()},
"dump.core.5954: ELF 64-bit LSB core file x86-64, version 1 (SYSV), too many program header sections (33172)");
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
- mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "", "stat: No such file or directory");
-
mockExec(new String[]{"/bin/sh", "-c", GDB_PATH_RHEL8 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
"Core was generated by `" + JDK_PATH + " -Dconfig.id=default/container.11 -XX:+Pre'.");
@@ -194,7 +177,7 @@ public class CoreCollectorTest {
}
private void mockExec(NodeAgentContext context, String[] cmd, String output, String error) {
- when(docker.executeCommandInContainerAsRoot(context, cmd))
+ when(docker.executeCommandInContainer(context, context.users().root(), cmd))
.thenReturn(new CommandResult(null, error.isEmpty() ? 0 : 1, error.isEmpty() ? output : error));
}
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
index 68127231554..452efecefe1 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
@@ -1,7 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.maintenance.servicedump;
-import com.yahoo.yolean.concurrent.Sleeper;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState;
@@ -12,6 +11,7 @@ import com.yahoo.vespa.hosted.node.admin.maintenance.sync.SyncFileInfo;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContextImpl;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import com.yahoo.vespa.test.file.TestFileSystem;
+import com.yahoo.yolean.concurrent.Sleeper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
@@ -62,7 +62,7 @@ class VespaServiceDumperImplTest {
void invokes_perf_commands_when_generating_perf_report() {
// Setup mocks
ContainerOperations operations = mock(ContainerOperations.class);
- when(operations.executeCommandInContainerAsRoot(any(), any()))
+ when(operations.executeCommandInContainer(any(), any(), any()))
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, ""))
.thenReturn(new CommandResult(null, 0, ""));
@@ -78,13 +78,13 @@ class VespaServiceDumperImplTest {
.build();
reporter.processServiceDumpRequest(context);
- verify(operations).executeCommandInContainerAsRoot(
- context, "/opt/vespa/libexec/vespa/find-pid", "default/container.1");
- verify(operations).executeCommandInContainerAsRoot(
- context, "perf", "record", "-g", "--output=/opt/vespa/tmp/vespa-service-dump/perf-record.bin",
+ verify(operations).executeCommandInContainer(
+ context, context.users().vespa(), "/opt/vespa/libexec/vespa/find-pid", "default/container.1");
+ verify(operations).executeCommandInContainer(
+ context, context.users().vespa(), "perf", "record", "-g", "--output=/opt/vespa/tmp/vespa-service-dump/perf-record.bin",
"--pid=12345", "sleep", "45");
- verify(operations).executeCommandInContainerAsRoot(
- context, "bash", "-c", "perf report --input=/opt/vespa/tmp/vespa-service-dump/perf-record.bin" +
+ verify(operations).executeCommandInContainer(
+ context, context.users().vespa(), "bash", "-c", "perf report --input=/opt/vespa/tmp/vespa-service-dump/perf-record.bin" +
" > /opt/vespa/tmp/vespa-service-dump/perf-report.txt");
String expectedJson = "{\"createdMillis\":1600000000000,\"startedAt\":1600001000000,\"completedAt\":1600001000000," +
@@ -103,7 +103,7 @@ class VespaServiceDumperImplTest {
void invokes_jcmd_commands_when_creating_jfr_recording() {
// Setup mocks
ContainerOperations operations = mock(ContainerOperations.class);
- when(operations.executeCommandInContainerAsRoot(any(), any()))
+ when(operations.executeCommandInContainer(any(), any(), any()))
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, "ok"))
.thenReturn(new CommandResult(null, 0, "name=host-admin success"));
@@ -120,12 +120,12 @@ class VespaServiceDumperImplTest {
.build();
reporter.processServiceDumpRequest(context);
- verify(operations).executeCommandInContainerAsRoot(
- context, "/opt/vespa/libexec/vespa/find-pid", "default/container.1");
- verify(operations).executeCommandInContainerAsRoot(
- context, "jcmd", "12345", "JFR.start", "name=host-admin", "path-to-gc-roots=true", "settings=profile",
+ verify(operations).executeCommandInContainer(
+ context, context.users().vespa(), "/opt/vespa/libexec/vespa/find-pid", "default/container.1");
+ verify(operations).executeCommandInContainer(
+ context, context.users().vespa(), "jcmd", "12345", "JFR.start", "name=host-admin", "path-to-gc-roots=true", "settings=profile",
"filename=/opt/vespa/tmp/vespa-service-dump/recording.jfr", "duration=30s");
- verify(operations).executeCommandInContainerAsRoot(context, "jcmd", "12345", "JFR.check", "name=host-admin");
+ verify(operations).executeCommandInContainer(context, context.users().vespa(), "jcmd", "12345", "JFR.check", "name=host-admin");
String expectedJson = "{\"createdMillis\":1600000000000,\"startedAt\":1600001000000," +
"\"completedAt\":1600001000000," +
@@ -142,7 +142,7 @@ class VespaServiceDumperImplTest {
void handles_multiple_artifact_types() {
// Setup mocks
ContainerOperations operations = mock(ContainerOperations.class);
- when(operations.executeCommandInContainerAsRoot(any(), any()))
+ when(operations.executeCommandInContainer(any(), any(), any()))
// For perf report:
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, ""))
diff --git a/parent/pom.xml b/parent/pom.xml
index aea586f3949..c0ba936d5e0 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -922,6 +922,7 @@
<doclint>all</doclint>
<test.hide>true</test.hide>
+ <vespaClients.jdk.releaseVersion>8</vespaClients.jdk.releaseVersion>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
</properties>
diff --git a/searchcore/CMakeLists.txt b/searchcore/CMakeLists.txt
index 36c7e386445..e08b951d3f3 100644
--- a/searchcore/CMakeLists.txt
+++ b/searchcore/CMakeLists.txt
@@ -102,6 +102,8 @@ vespa_define_module(
src/tests/proton/documentdb/threading_service_config
src/tests/proton/documentmetastore
src/tests/proton/documentmetastore/lidreusedelayer
+ src/tests/proton/documentmetastore/lid_allocator
+ src/tests/proton/documentmetastore/lid_state_vector
src/tests/proton/feed_and_search
src/tests/proton/feedoperation
src/tests/proton/feedtoken
diff --git a/searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp b/searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp
index 94300d4abac..8f10f4b8045 100644
--- a/searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp
+++ b/searchcore/src/tests/proton/bucketdb/bucketdb/bucketdb_test.cpp
@@ -1,8 +1,11 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/document/base/documentid.h>
#include <vespa/searchcore/proton/bucketdb/bucket_db_explorer.h>
#include <vespa/searchcore/proton/bucketdb/bucketdb.h>
+#include <vespa/searchcore/proton/bucketdb/remove_batch_entry.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/stllike/asciistream.h>
+#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/log/log.h>
@@ -29,6 +32,24 @@ constexpr uint32_t DOCSIZE_2(10000u);
typedef BucketInfo::ReadyState RS;
typedef SubDbType SDT;
+namespace {
+
+constexpr uint32_t bucket_bits = 16;
+
+uint32_t num_buckets() { return (1u << bucket_bits); }
+
+BucketId make_bucket_id(uint32_t n) {
+ return BucketId(bucket_bits, n & (num_buckets() - 1));
+}
+
+GlobalId make_gid(uint32_t n, uint32_t i)
+{
+ DocumentId id(vespalib::make_string("id::test:n=%u:%u", n & (num_buckets() - 1), i));
+ return id.getGlobalId();
+}
+
+}
+
void
assertDocCount(uint32_t ready,
uint32_t notReady,
@@ -70,12 +91,20 @@ struct Fixture
Fixture()
: _db()
{}
+ void add(const GlobalId &gid, const Timestamp &timestamp, uint32_t docSize, SubDbType subDbType) {
+ BucketId bucket(bucket_bits, gid.convertToBucketId().getRawId());
+ _db.add(gid, bucket, timestamp, docSize, subDbType);
+ }
const BucketState &add(const Timestamp &timestamp, uint32_t docSize, SubDbType subDbType) {
return _db.add(GID_1, BUCKET_1, timestamp, docSize, subDbType);
}
const BucketState &add(const Timestamp &timestamp, SubDbType subDbType) {
return add(timestamp, DOCSIZE_1, subDbType);
}
+ void remove(const GlobalId& gid, const Timestamp &timestamp, uint32_t docSize, SubDbType subDbType) {
+ BucketId bucket(bucket_bits, gid.convertToBucketId().getRawId());
+ _db.remove(gid, bucket, timestamp, docSize, subDbType);
+ }
BucketState remove(const Timestamp &timestamp, uint32_t docSize, SubDbType subDbType) {
_db.remove(GID_1, BUCKET_1, timestamp, docSize, subDbType);
return get();
@@ -83,8 +112,14 @@ struct Fixture
BucketState remove(const Timestamp &timestamp, SubDbType subDbType) {
return remove(timestamp, DOCSIZE_1, subDbType);
}
+ void remove_batch(const std::vector<RemoveBatchEntry> &removed, SubDbType sub_db_type) {
+ _db.remove_batch(removed, sub_db_type);
+ }
+ BucketState get(BucketId bucket_id) const {
+ return _db.get(bucket_id);
+ }
BucketState get() const {
- return _db.get(BUCKET_1);
+ return get(BUCKET_1);
}
BucketChecksum getChecksum(const Timestamp &timestamp, uint32_t docSize, SubDbType subDbType) {
BucketDB db;
@@ -181,6 +216,36 @@ TEST_F("require that bucket checksum ignores document sizes", Fixture)
EXPECT_EQUAL(state1.getChecksum(), state2.getChecksum());
}
+TEST_F("require that remove batch works", Fixture)
+{
+ f.add(make_gid(4, 1), Timestamp(10), 100, SDT::READY);
+ f.add(make_gid(4, 2), Timestamp(11), 104, SDT::READY);
+ f.add(make_gid(4, 3), Timestamp(12), 102, SDT::READY);
+ f.add(make_gid(5, 4), Timestamp(13), 200, SDT::READY);
+ f.add(make_gid(5, 5), Timestamp(14), 270, SDT::READY);
+ f.add(make_gid(5, 6), Timestamp(15), 1000, SDT::READY);
+ auto state1 = f.get(make_bucket_id(4));
+ EXPECT_EQUAL(306u, state1.getReadyDocSizes());
+ EXPECT_EQUAL(3u, state1.getReadyCount());
+ auto state2 = f.get(make_bucket_id(5));
+ EXPECT_EQUAL(1470u, state2.getReadyDocSizes());
+ EXPECT_EQUAL(3u, state2.getReadyCount());
+ std::vector<RemoveBatchEntry> removed;
+ removed.emplace_back(make_gid(4, 1), make_bucket_id(4), Timestamp(10), 100);
+ removed.emplace_back(make_gid(4, 3), make_bucket_id(4), Timestamp(12), 102);
+ removed.emplace_back(make_gid(5, 5), make_bucket_id(5), Timestamp(14), 270);
+ f.remove_batch(removed, SDT::READY);
+ auto state3 = f.get(make_bucket_id(4));
+ EXPECT_EQUAL(104u, state3.getReadyDocSizes());
+ EXPECT_EQUAL(1u, state3.getReadyCount());
+ auto state4 = f.get(make_bucket_id(5));
+ EXPECT_EQUAL(1200u, state4.getReadyDocSizes());
+ EXPECT_EQUAL(2u, state4.getReadyCount());
+ f.remove(make_gid(4, 2), Timestamp(11), 104, SDT::READY);
+ f.remove(make_gid(5, 4), Timestamp(13), 200, SDT::READY);
+ f.remove(make_gid(5, 6), Timestamp(15), 1000, SDT::READY);
+}
+
TEST("require that bucket db can be explored")
{
BucketDBOwner db;
diff --git a/searchcore/src/tests/proton/docsummary/docsummary.cpp b/searchcore/src/tests/proton/docsummary/docsummary.cpp
index 1808aed5d0b..d190d39ff80 100644
--- a/searchcore/src/tests/proton/docsummary/docsummary.cpp
+++ b/searchcore/src/tests/proton/docsummary/docsummary.cpp
@@ -65,6 +65,7 @@ using vespa::config::search::core::ProtonConfig;
using vespa::config::content::core::BucketspacesConfig;
using vespalib::eval::TensorSpec;
using vespalib::eval::SimpleValue;
+using vespalib::GateCallback;
using vespalib::Slime;
using namespace vespalib::slime;
@@ -253,8 +254,11 @@ public:
dms.commit(CommitParam(0u));
uint64_t serialNum = _ddb->getFeedHandler().inc_serial_num();
_aw->put(serialNum, doc, lid, std::shared_ptr<IDestructorCallback>());
- _aw->forceCommit(serialNum, std::shared_ptr<IDestructorCallback>());
- _ddb->getReadySubDB()->getAttributeManager()->getAttributeFieldWriter().sync_all();
+ {
+ vespalib::Gate gate;
+ _aw->forceCommit(serialNum, std::make_shared<GateCallback>(gate));
+ gate.await();
+ }
_sa->put(serialNum, lid, doc);
const GlobalId &gid = docId.getGlobalId();
BucketId bucketId(gid.convertToBucketId());
@@ -700,13 +704,18 @@ TEST("requireThatAttributesAreUsed")
search::AttributeVector *bjAttr = attributeManager->getWritableAttribute("bj");
auto bjTensorAttr = dynamic_cast<search::tensor::TensorAttribute *>(bjAttr);
- attributeFieldWriter.execute(attributeFieldWriter.getExecutorIdFromName(bjAttr->getNamePrefix()),
- [&]() {
- bjTensorAttr->setTensor(3, *make_tensor(TensorSpec("tensor(x{},y{})")
+ vespalib::Gate gate;
+ {
+ auto on_write_done = std::make_shared<GateCallback>(gate);
+ attributeFieldWriter.execute(attributeFieldWriter.getExecutorIdFromName(bjAttr->getNamePrefix()),
+ [&, on_write_done]() {
+ (void) on_write_done;
+ bjTensorAttr->setTensor(3, *make_tensor(TensorSpec("tensor(x{},y{})")
.add({{"x", "a"}, {"y", "b"}}, 4)));
- bjTensorAttr->commit();
+ bjTensorAttr->commit();
});
- attributeFieldWriter.sync_all();
+ }
+ gate.await();
DocsumReply::UP rep2 = dc._ddb->getDocsums(req);
TEST_DO(assertTensor(make_tensor(TensorSpec("tensor(x{},y{})")
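
The docsummary hunks above replace blanket sync_all() waits with a Gate that is released when the commit's GateCallback is destroyed, i.e. when the asynchronous write is done with it. Below is a minimal standard-C++ analogue of that wait-on-destruction idiom; the Gate/GateCallback names only mirror the vespalib types used in the hunk, nothing here is the real implementation.

#include <condition_variable>
#include <memory>
#include <mutex>
#include <thread>

class Gate {
    std::mutex _lock;
    std::condition_variable _cond;
    bool _done = false;
public:
    void countDown() {
        std::lock_guard<std::mutex> guard(_lock);
        _done = true;
        _cond.notify_all();
    }
    void await() {
        std::unique_lock<std::mutex> guard(_lock);
        _cond.wait(guard, [this] { return _done; });
    }
};

struct GateCallback {                  // counts the gate down when destroyed
    Gate &gate;
    explicit GateCallback(Gate &g) : gate(g) {}
    ~GateCallback() { gate.countDown(); }
};

int main() {
    Gate gate;
    std::thread worker;
    {
        auto done = std::make_shared<GateCallback>(gate);
        worker = std::thread([done] { /* asynchronous work would hold 'done' */ });
    }                                  // our reference to 'done' is released here
    gate.await();                      // returns once the worker drops its copy
    worker.join();
}

The key property, and the reason the tests no longer need sync_all(), is that the waiter blocks exactly until every shared reference to the callback is gone.
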
diff --git a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp
index e20c4268f88..b2acc8703f3 100644
--- a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp
@@ -287,7 +287,7 @@ struct FastAccessFixture
vespalib::mkdir(BASE_DIR);
}
~FastAccessFixture() {
- _writeService.sync();
+ _writeService.sync_all_executors();
}
};
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
index 60d5f57ac8c..091292b3151 100644
--- a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
@@ -305,7 +305,7 @@ struct FixtureBase
_bucketDBHandler(*_bucketDB),
_ctx(_writeService, _bucketDB, _bucketDBHandler),
_baseSchema(),
- _snapshot(new MyConfigSnapshot(_baseSchema, Traits::ConfigDir::dir())),
+ _snapshot(std::make_unique<MyConfigSnapshot>(_baseSchema, Traits::ConfigDir::dir())),
_baseDir(BASE_DIR + "/" + SUB_NAME, BASE_DIR),
_subDb(_cfg._cfg, _ctx._ctx),
_tmpFeedView()
@@ -313,9 +313,9 @@ struct FixtureBase
init();
}
~FixtureBase() {
- _writeService.sync();
+ _writeService.sync_all_executors();
_writeService.master().execute(makeLambdaTask([this]() { _subDb.close(); }));
- _writeService.sync();
+ _writeService.sync_all_executors();
}
template <typename FunctionType>
void runInMaster(FunctionType func) {
@@ -557,6 +557,60 @@ TEST_F("require that attribute manager can be reconfigured", SearchableFixture)
requireThatAttributeManagerCanBeReconfigured(f);
}
+TEST_F("require that subdb reflect retirement", FastAccessFixture)
+{
+ search::CompactionStrategy cfg(0.1, 0.3);
+
+ EXPECT_FALSE(f._subDb.isNodeRetired());
+ auto unretired_cfg = f._subDb.computeCompactionStrategy(cfg);
+ EXPECT_TRUE(cfg == unretired_cfg);
+
+ auto calc = std::make_shared<proton::test::BucketStateCalculator>();
+ calc->setNodeRetired(true);
+ f._subDb.setBucketStateCalculator(calc);
+ EXPECT_TRUE(f._subDb.isNodeRetired());
+ auto retired_cfg = f._subDb.computeCompactionStrategy(cfg);
+ EXPECT_TRUE(cfg != retired_cfg);
+ EXPECT_TRUE(search::CompactionStrategy(0.5, 0.5) == retired_cfg);
+
+ calc->setNodeRetired(false);
+ f._subDb.setBucketStateCalculator(calc);
+ EXPECT_FALSE(f._subDb.isNodeRetired());
+ unretired_cfg = f._subDb.computeCompactionStrategy(cfg);
+ EXPECT_TRUE(cfg == unretired_cfg);
+}
+
+TEST_F("require that attribute compaction config reflect retirement", FastAccessFixture) {
+ search::CompactionStrategy default_cfg(0.05, 0.2);
+ search::CompactionStrategy retired_cfg(0.5, 0.5);
+
+ auto guard = f._subDb.getAttributeManager()->getAttribute("attr1");
+ EXPECT_EQUAL(default_cfg, (*guard)->getConfig().getCompactionStrategy());
+ EXPECT_EQUAL(default_cfg, dynamic_cast<const proton::DocumentMetaStore &>(f._subDb.getDocumentMetaStoreContext().get()).getConfig().getCompactionStrategy());
+
+ auto calc = std::make_shared<proton::test::BucketStateCalculator>();
+ calc->setNodeRetired(true);
+ f._subDb.setBucketStateCalculator(calc);
+ f._writeService.sync_all_executors();
+ guard = f._subDb.getAttributeManager()->getAttribute("attr1");
+ EXPECT_EQUAL(retired_cfg, (*guard)->getConfig().getCompactionStrategy());
+ EXPECT_EQUAL(retired_cfg, dynamic_cast<const proton::DocumentMetaStore &>(f._subDb.getDocumentMetaStoreContext().get()).getConfig().getCompactionStrategy());
+
+ f.basicReconfig(10);
+ f._writeService.sync_all_executors();
+ guard = f._subDb.getAttributeManager()->getAttribute("attr1");
+ EXPECT_EQUAL(retired_cfg, (*guard)->getConfig().getCompactionStrategy());
+ EXPECT_EQUAL(retired_cfg, dynamic_cast<const proton::DocumentMetaStore &>(f._subDb.getDocumentMetaStoreContext().get()).getConfig().getCompactionStrategy());
+
+ calc->setNodeRetired(false);
+ f._subDb.setBucketStateCalculator(calc);
+ f._writeService.sync_all_executors();
+ guard = f._subDb.getAttributeManager()->getAttribute("attr1");
+ EXPECT_EQUAL(default_cfg, (*guard)->getConfig().getCompactionStrategy());
+ EXPECT_EQUAL(default_cfg, dynamic_cast<const proton::DocumentMetaStore &>(f._subDb.getDocumentMetaStoreContext().get()).getConfig().getCompactionStrategy());
+
+}
+
template <typename Fixture>
void
requireThatReconfiguredAttributesAreAccessibleViaFeedView(Fixture &f)
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
index 5384a985af0..977c899ab11 100644
--- a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
@@ -455,7 +455,7 @@ struct FeedHandlerFixture
}
~FeedHandlerFixture() {
- writeService.sync();
+ writeService.sync_all_executors();
}
template <class FunctionType>
inline void runAsMaster(FunctionType &&function) {
diff --git a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
index 97faa81b48a..e53468e0dd4 100644
--- a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
@@ -516,11 +516,11 @@ struct FixtureBase
}
void syncIndex() {
- _writeService.sync();
+ _writeService.sync_all_executors();
}
void sync() {
- _writeServiceReal.sync();
+ _writeServiceReal.sync_all_executors();
}
const test::DocumentMetaStoreObserver &metaStoreObserver() {
@@ -701,7 +701,7 @@ FixtureBase::FixtureBase()
}
FixtureBase::~FixtureBase() {
- _writeServiceReal.sync();
+ _writeServiceReal.sync_all_executors();
}
void
diff --git a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp
index 5146a16272a..eb398b9ee48 100644
--- a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp
@@ -246,7 +246,7 @@ struct FixtureBase {
void force_commit() {
runInMaster([this] () { static_cast<IFeedView&>(*feedview).forceCommit(serial_num); });
- writeService.sync();
+ writeService.sync_all_executors();
}
};
diff --git a/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
index d201b02a61d..579633bfaa2 100644
--- a/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
+++ b/searchcore/src/tests/proton/documentmetastore/documentmetastore_test.cpp
@@ -378,123 +378,6 @@ TEST(DocumentMetaStore, generation_handling_is_working)
EXPECT_EQ(3u, gh.getCurrentGeneration());
}
-TEST(DocumentMetaStoreTest, basic_free_list_is_working)
-{
- GenerationHolder genHold;
- LidStateVector freeLids(100, 100, genHold, true, false);
- LidHoldList list;
- EXPECT_TRUE(freeLids.empty());
- EXPECT_EQ(0u, freeLids.count());
- EXPECT_EQ(0u, list.size());
-
- list.add(10, 10);
- EXPECT_TRUE(freeLids.empty());
- EXPECT_EQ(0u, freeLids.count());
- EXPECT_EQ(1u, list.size());
-
- list.add(20, 20);
- list.add(30, 30);
- EXPECT_TRUE(freeLids.empty());
- EXPECT_EQ(0u, freeLids.count());
- EXPECT_EQ(3u, list.size());
-
- list.trimHoldLists(20, freeLids);
- EXPECT_FALSE(freeLids.empty());
- EXPECT_EQ(1u, freeLids.count());
-
- EXPECT_EQ(10u, freeLids.getLowest());
- freeLids.clearBit(10);
- EXPECT_TRUE(freeLids.empty());
- EXPECT_EQ(0u, freeLids.count());
- EXPECT_EQ(2u, list.size());
-
- list.trimHoldLists(31, freeLids);
- EXPECT_FALSE(freeLids.empty());
- EXPECT_EQ(2u, freeLids.count());
-
- EXPECT_EQ(20u, freeLids.getLowest());
- freeLids.clearBit(20);
- EXPECT_FALSE(freeLids.empty());
- EXPECT_EQ(1u, freeLids.count());
- EXPECT_EQ(0u, list.size());
-
- EXPECT_EQ(30u, freeLids.getLowest());
- freeLids.clearBit(30);
- EXPECT_TRUE(freeLids.empty());
- EXPECT_EQ(0u, list.size());
- EXPECT_EQ(0u, freeLids.count());
-}
-
-void
-assertLidStateVector(const std::vector<uint32_t> &expLids, uint32_t lowest, uint32_t highest,
- const LidStateVector &actLids)
-{
- if (!expLids.empty()) {
- EXPECT_EQ(expLids.size(), actLids.count());
- uint32_t trueBit = 0;
- for (auto i : expLids) {
- EXPECT_TRUE(actLids.testBit(i));
- trueBit = actLids.getNextTrueBit(trueBit);
- EXPECT_EQ(i, trueBit);
- ++trueBit;
- }
- trueBit = actLids.getNextTrueBit(trueBit);
- EXPECT_EQ(actLids.size(), trueBit);
- EXPECT_EQ(lowest, actLids.getLowest());
- EXPECT_EQ(highest, actLids.getHighest());
- } else {
- EXPECT_TRUE(actLids.empty());
- }
-}
-
-TEST(DocumentMetaStoreTest, lid_state_vector_resizing_is_working)
-{
- GenerationHolder genHold;
- LidStateVector lids(1000, 1000, genHold, true, true);
- lids.setBit(3);
- lids.setBit(150);
- lids.setBit(270);
- lids.setBit(310);
- lids.setBit(440);
- lids.setBit(780);
- lids.setBit(930);
- assertLidStateVector({3,150,270,310,440,780,930}, 3, 930, lids);
-
- lids.resizeVector(1500, 1500);
- assertLidStateVector({3,150,270,310,440,780,930}, 3, 930, lids);
- lids.clearBit(3);
- assertLidStateVector({150,270,310,440,780,930}, 150, 930, lids);
- lids.clearBit(150);
- assertLidStateVector({270,310,440,780,930}, 270, 930, lids);
- lids.setBit(170);
- assertLidStateVector({170,270,310,440,780,930}, 170, 930, lids);
- lids.setBit(1490);
- assertLidStateVector({170,270,310,440,780,930,1490}, 170, 1490, lids);
-
- lids.resizeVector(2000, 2000);
- assertLidStateVector({170,270,310,440,780,930,1490}, 170, 1490, lids);
- lids.clearBit(170);
- assertLidStateVector({270,310,440,780,930,1490}, 270, 1490, lids);
- lids.clearBit(270);
- assertLidStateVector({310,440,780,930,1490}, 310, 1490, lids);
- lids.setBit(1990);
- assertLidStateVector({310,440,780,930,1490,1990}, 310, 1990, lids);
- lids.clearBit(310);
- assertLidStateVector({440,780,930,1490,1990}, 440, 1990, lids);
- lids.clearBit(440);
- assertLidStateVector({780,930,1490,1990}, 780, 1990, lids);
- lids.clearBit(780);
- assertLidStateVector({930,1490,1990}, 930, 1990, lids);
- lids.clearBit(930);
- assertLidStateVector({1490,1990}, 1490, 1990, lids);
- lids.clearBit(1490);
- assertLidStateVector({1990}, 1990, 1990, lids);
- lids.clearBit(1990);
- assertLidStateVector({}, 0, 0, lids);
-
- genHold.clearHoldLists();
-}
-
TEST(DocumentMetaStoreTest, lid_and_gid_space_is_reused)
{
auto dms = std::make_shared<DocumentMetaStore>(createBucketDB());
diff --git a/searchcore/src/tests/proton/documentmetastore/lid_allocator/CMakeLists.txt b/searchcore/src/tests/proton/documentmetastore/lid_allocator/CMakeLists.txt
new file mode 100644
index 00000000000..8fc5c23239f
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lid_allocator/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_lid_allocator_test_app TEST
+ SOURCES
+ lid_allocator_test.cpp
+ DEPENDS
+ searchcore_documentmetastore
+ GTest::GTest
+)
+vespa_add_test(NAME searchcore_lid_allocator_test_app COMMAND searchcore_lid_allocator_test_app)
+
diff --git a/searchcore/src/tests/proton/documentmetastore/lid_allocator/lid_allocator_test.cpp b/searchcore/src/tests/proton/documentmetastore/lid_allocator/lid_allocator_test.cpp
new file mode 100644
index 00000000000..b0f1220c768
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lid_allocator/lid_allocator_test.cpp
@@ -0,0 +1,162 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchcore/proton/documentmetastore/lid_allocator.h>
+#include <vespa/vespalib/util/generationholder.h>
+#include <vespa/vespalib/util/time.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <iostream>
+
+using vespalib::GenerationHolder;
+using vespalib::Timer;
+
+namespace proton {
+
+using documentmetastore::LidAllocator;
+
+class LidAllocatorTest : public ::testing::Test
+{
+protected:
+ GenerationHolder _gen_hold;
+ LidAllocator _allocator;
+
+ LidAllocatorTest()
+ : ::testing::Test(),
+ _gen_hold(),
+ _allocator(100, 100, _gen_hold)
+ {
+ }
+
+ ~LidAllocatorTest()
+ {
+ _gen_hold.clearHoldLists();
+ }
+
+ uint32_t get_size() { return _allocator.getActiveLids().size(); }
+
+ void construct_free_list() {
+ _allocator.constructFreeList(_allocator.getActiveLids().size());
+ _allocator.setFreeListConstructed();
+ }
+
+ void register_lids(const std::vector<uint32_t>& lids) {
+ for (uint32_t lid : lids) {
+ _allocator.registerLid(lid);
+ }
+ }
+
+ std::vector<uint32_t> alloc_lids(uint32_t count) {
+ std::vector<uint32_t> result;
+ for (uint32_t i = 0; i < count; ++i) {
+ result.emplace_back(_allocator.getFreeLid(get_size()));
+ }
+ return result;
+ }
+
+ void activate_lids(const std::vector<uint32_t>& lids, bool active) {
+ for (uint32_t lid : lids) {
+ _allocator.updateActiveLids(lid, active);
+ }
+ }
+
+ void unregister_lids(const std::vector<uint32_t>& lids) {
+ _allocator.unregister_lids(lids);
+ }
+
+ void hold_lids(const std::vector<uint32_t>& lids) {
+ _allocator.holdLids(lids, get_size(), 0);
+ }
+
+ void trim_hold_lists() {
+ _allocator.trimHoldLists(1);
+ }
+
+ std::vector<uint32_t> get_valid_lids() {
+ std::vector<uint32_t> result;
+ auto size = get_size();
+ for (uint32_t lid = 1; lid < size; ++lid) {
+ if (_allocator.validLid(lid)) {
+ result.emplace_back(lid);
+ }
+ }
+ return result;
+ }
+
+ std::vector<uint32_t> get_active_lids() {
+ std::vector<uint32_t> result;
+ auto active_lids = _allocator.getActiveLids();
+ uint32_t lid = active_lids.getNextTrueBit(1);
+ while (lid < active_lids.size()) {
+ if (active_lids.testBit(lid)) {
+ result.emplace_back(lid);
+ }
+ lid = active_lids.getNextTrueBit(lid + 1);
+ }
+ return result;
+ }
+
+ void
+ assert_valid_lids(const std::vector<uint32_t>& exp_lids) {
+ EXPECT_EQ(exp_lids, get_valid_lids());
+ }
+
+ void
+ assert_active_lids(const std::vector<uint32_t>& exp_lids) {
+ EXPECT_EQ(exp_lids, get_active_lids());
+ }
+
+};
+
+TEST_F(LidAllocatorTest, unregister_lids)
+{
+ register_lids({ 1, 2, 3, 4, 5, 6 });
+ activate_lids({ 4, 5, 6 }, true);
+ assert_valid_lids({1, 2, 3, 4, 5, 6});
+ assert_active_lids({4, 5, 6});
+ construct_free_list();
+ unregister_lids({1, 3, 5});
+ assert_valid_lids({2, 4, 6});
+ assert_active_lids({4, 6});
+ hold_lids({1, 3, 5});
+ trim_hold_lists();
+ EXPECT_EQ((std::vector<uint32_t>{1, 3, 5, 7, 8}), alloc_lids(5));
+}
+
+class LidAllocatorPerformanceTest : public LidAllocatorTest,
+ public testing::WithParamInterface<bool>
+{
+};
+
+TEST_P(LidAllocatorPerformanceTest, unregister_lids_performance)
+{
+ constexpr uint32_t test_size = 1000000;
+ _allocator.ensureSpace(test_size + 1, test_size + 1);
+ std::vector<std::vector<uint32_t>> buckets;
+ buckets.resize(1000);
+ auto reserve_size = (test_size + (buckets.size() - 1)) / buckets.size();
+ for (auto& bucket : buckets) {
+ bucket.reserve(reserve_size);
+ }
+ for (uint32_t i = 0; i < test_size; ++i) {
+ _allocator.registerLid(i + 1);
+ buckets[i % buckets.size()].emplace_back(i + 1);
+ }
+ construct_free_list();
+ Timer timer;
+ for (auto& bucket: buckets) {
+ if (GetParam()) {
+ unregister_lids(bucket);
+ } else {
+ for (auto lid : bucket) {
+ _allocator.unregisterLid(lid);
+ }
+ }
+ }
+ auto rate = test_size / vespalib::to_s(timer.elapsed());
+ std::cout << "Unregister rate: " << std::fixed << rate << std::endl;
+}
+
+VESPA_GTEST_INSTANTIATE_TEST_SUITE_P(LidAllocatorParameterizedPerformanceTest, LidAllocatorPerformanceTest, testing::Values(false, true));
+
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
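
The unregister_lids test above pins down the reuse order: lids 1, 3 and 5 are unregistered, put on hold, trimmed back onto the free list, and then handed out lowest-first before the fresh lids 7 and 8. A small stand-alone sketch of that lowest-lid-first behaviour, using a min-heap as a stand-in for the real free-lid bit vector:

#include <cassert>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <queue>
#include <vector>

// Released lids are recycled lowest-first before brand-new lids are handed out.
class FreeLids {
    std::priority_queue<uint32_t, std::vector<uint32_t>, std::greater<uint32_t>> _free;
    uint32_t _next_fresh;              // next never-used lid (lid 0 is reserved)
public:
    explicit FreeLids(uint32_t first_fresh) : _free(), _next_fresh(first_fresh) {}
    void release(uint32_t lid) { _free.push(lid); }
    uint32_t alloc() {
        if (!_free.empty()) {
            uint32_t lid = _free.top();
            _free.pop();
            return lid;
        }
        return _next_fresh++;
    }
};

int main() {
    FreeLids lids(7);                  // lids 1..6 were registered, 1/3/5 freed
    for (uint32_t lid : {1u, 3u, 5u}) {
        lids.release(lid);
    }
    for (uint32_t expected : {1u, 3u, 5u, 7u, 8u}) {
        assert(lids.alloc() == expected);   // matches the test's {1, 3, 5, 7, 8}
    }
}
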
diff --git a/searchcore/src/tests/proton/documentmetastore/lid_state_vector/CMakeLists.txt b/searchcore/src/tests/proton/documentmetastore/lid_state_vector/CMakeLists.txt
new file mode 100644
index 00000000000..652e83a6b79
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lid_state_vector/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_lid_state_vector_test_app TEST
+ SOURCES
+ lid_state_vector_test.cpp
+ DEPENDS
+ searchcore_documentmetastore
+ GTest::GTest
+)
+vespa_add_test(NAME searchcore_lid_state_vector_test_app COMMAND searchcore_lid_state_vector_test_app)
+
diff --git a/searchcore/src/tests/proton/documentmetastore/lid_state_vector/lid_state_vector_test.cpp b/searchcore/src/tests/proton/documentmetastore/lid_state_vector/lid_state_vector_test.cpp
new file mode 100644
index 00000000000..ab45cca0971
--- /dev/null
+++ b/searchcore/src/tests/proton/documentmetastore/lid_state_vector/lid_state_vector_test.cpp
@@ -0,0 +1,173 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchcore/proton/documentmetastore/lidstatevector.h>
+#include <vespa/searchcore/proton/documentmetastore/lid_hold_list.h>
+#include <vespa/vespalib/util/generationholder.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using vespalib::GenerationHolder;
+
+namespace proton {
+
+class LidStateVectorTest : public ::testing::Test
+{
+protected:
+ GenerationHolder _gen_hold;
+
+ LidStateVectorTest()
+ : ::testing::Test(),
+ _gen_hold()
+ {
+ }
+
+ ~LidStateVectorTest()
+ {
+ _gen_hold.clearHoldLists();
+ }
+
+};
+
+
+TEST_F(LidStateVectorTest, basic_free_list_is_working)
+{
+ LidStateVector freeLids(100, 100, _gen_hold, true, false);
+ LidHoldList list;
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQ(0u, freeLids.count());
+ EXPECT_EQ(0u, list.size());
+
+ list.add(10, 10);
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQ(0u, freeLids.count());
+ EXPECT_EQ(1u, list.size());
+
+ list.add(20, 20);
+ list.add(30, 30);
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQ(0u, freeLids.count());
+ EXPECT_EQ(3u, list.size());
+
+ list.trimHoldLists(20, freeLids);
+ EXPECT_FALSE(freeLids.empty());
+ EXPECT_EQ(1u, freeLids.count());
+
+ EXPECT_EQ(10u, freeLids.getLowest());
+ freeLids.clearBit(10);
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQ(0u, freeLids.count());
+ EXPECT_EQ(2u, list.size());
+
+ list.trimHoldLists(31, freeLids);
+ EXPECT_FALSE(freeLids.empty());
+ EXPECT_EQ(2u, freeLids.count());
+
+ EXPECT_EQ(20u, freeLids.getLowest());
+ freeLids.clearBit(20);
+ EXPECT_FALSE(freeLids.empty());
+ EXPECT_EQ(1u, freeLids.count());
+ EXPECT_EQ(0u, list.size());
+
+ EXPECT_EQ(30u, freeLids.getLowest());
+ freeLids.clearBit(30);
+ EXPECT_TRUE(freeLids.empty());
+ EXPECT_EQ(0u, list.size());
+ EXPECT_EQ(0u, freeLids.count());
+}
+
+void
+assertLidStateVector(const std::vector<uint32_t> &expLids, uint32_t lowest, uint32_t highest,
+ const LidStateVector &actLids)
+{
+ if (!expLids.empty()) {
+ EXPECT_EQ(expLids.size(), actLids.count());
+ uint32_t trueBit = 0;
+ for (auto i : expLids) {
+ EXPECT_TRUE(actLids.testBit(i));
+ trueBit = actLids.getNextTrueBit(trueBit);
+ EXPECT_EQ(i, trueBit);
+ ++trueBit;
+ }
+ trueBit = actLids.getNextTrueBit(trueBit);
+ EXPECT_EQ(actLids.size(), trueBit);
+ } else {
+ EXPECT_TRUE(actLids.empty());
+ }
+ EXPECT_EQ(lowest, actLids.getLowest());
+ EXPECT_EQ(highest, actLids.getHighest());
+}
+
+TEST_F(LidStateVectorTest, lid_state_vector_resizing_is_working)
+{
+ LidStateVector lids(1000, 1000, _gen_hold, true, true);
+ lids.setBit(3);
+ lids.setBit(150);
+ lids.setBit(270);
+ lids.setBit(310);
+ lids.setBit(440);
+ lids.setBit(780);
+ lids.setBit(930);
+ assertLidStateVector({3,150,270,310,440,780,930}, 3, 930, lids);
+
+ lids.resizeVector(1500, 1500);
+ assertLidStateVector({3,150,270,310,440,780,930}, 3, 930, lids);
+ lids.clearBit(3);
+ assertLidStateVector({150,270,310,440,780,930}, 150, 930, lids);
+ lids.clearBit(150);
+ assertLidStateVector({270,310,440,780,930}, 270, 930, lids);
+ lids.setBit(170);
+ assertLidStateVector({170,270,310,440,780,930}, 170, 930, lids);
+ lids.setBit(1490);
+ assertLidStateVector({170,270,310,440,780,930,1490}, 170, 1490, lids);
+
+ lids.resizeVector(2000, 2000);
+ assertLidStateVector({170,270,310,440,780,930,1490}, 170, 1490, lids);
+ lids.clearBit(170);
+ assertLidStateVector({270,310,440,780,930,1490}, 270, 1490, lids);
+ lids.clearBit(270);
+ assertLidStateVector({310,440,780,930,1490}, 310, 1490, lids);
+ lids.setBit(1990);
+ assertLidStateVector({310,440,780,930,1490,1990}, 310, 1990, lids);
+ lids.clearBit(310);
+ assertLidStateVector({440,780,930,1490,1990}, 440, 1990, lids);
+ lids.clearBit(440);
+ assertLidStateVector({780,930,1490,1990}, 780, 1990, lids);
+ lids.clearBit(780);
+ assertLidStateVector({930,1490,1990}, 930, 1990, lids);
+ lids.clearBit(930);
+ assertLidStateVector({1490,1990}, 1490, 1990, lids);
+ lids.clearBit(1490);
+ assertLidStateVector({1990}, 1990, 1990, lids);
+ lids.clearBit(1990);
+ assertLidStateVector({}, 2000, 0, lids);
+}
+
+TEST_F(LidStateVectorTest, set_bits)
+{
+ LidStateVector lids(1000, 1000, _gen_hold, true, true);
+ EXPECT_EQ(100, lids.assert_not_set_bits({ 10, 40, 100 }));
+ assertLidStateVector({}, 1000, 0, lids);
+ EXPECT_EQ(100, lids.set_bits({ 10, 40, 100 }));
+ assertLidStateVector({ 10, 40, 100 }, 10, 100, lids);
+}
+
+TEST_F(LidStateVectorTest, clear_bits)
+{
+ LidStateVector lids(1000, 1000, _gen_hold, true, true);
+ lids.set_bits({ 10, 40, 100 });
+ lids.clear_bits({ 10, 100 });
+ assertLidStateVector({ 40 }, 40, 40, lids);
+}
+
+TEST_F(LidStateVectorTest, consider_clear_bits)
+{
+ LidStateVector lids(1000, 1000, _gen_hold, true, true);
+ lids.set_bits({ 40 });
+ lids.consider_clear_bits({ 10, 100 });
+ assertLidStateVector({ 40 }, 40, 40, lids);
+ lids.consider_clear_bits({ 10, 40, 100 });
+ assertLidStateVector({}, 1000, 0, lids);
+}
+
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
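
The new set_bits/clear_bits/consider_clear_bits tests above fix the batch semantics: set_bits marks every given lid and returns the highest one, clear_bits clears unconditionally, and consider_clear_bits only touches bits that are currently set. A simplified sketch over a plain std::vector<bool>; the real LidStateVector additionally maintains count and lowest/highest tracking, which is where the distinction between the two clear variants matters.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Marks each lid and reports the highest one, matching the
// "EXPECT_EQ(100, lids.set_bits({ 10, 40, 100 }))" expectation above.
uint32_t set_bits(std::vector<bool> &bits, const std::vector<uint32_t> &lids) {
    uint32_t highest = 0;
    for (uint32_t lid : lids) {
        bits[lid] = true;
        highest = std::max(highest, lid);
    }
    return highest;
}

// Clears unconditionally.
void clear_bits(std::vector<bool> &bits, const std::vector<uint32_t> &lids) {
    for (uint32_t lid : lids) {
        bits[lid] = false;
    }
}

// Only clears bits that are actually set.
void consider_clear_bits(std::vector<bool> &bits, const std::vector<uint32_t> &lids) {
    for (uint32_t lid : lids) {
        if (bits[lid]) {
            bits[lid] = false;
        }
    }
}

int main() {
    std::vector<bool> bits(1000, false);
    assert(set_bits(bits, {10, 40, 100}) == 100);
    clear_bits(bits, {10, 100});
    assert(bits[40] && !bits[10] && !bits[100]);
    consider_clear_bits(bits, {10, 40, 100});
    assert(!bits[40]);
}
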
diff --git a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp
index c49ec67f220..9162972a4cb 100644
--- a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp
+++ b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp
@@ -185,7 +185,7 @@ public:
runInMaster([&] () { cycleLids(_lidReuseDelayer->getReuseLids()); });
}
- void sync() { _writeService.sync(); }
+ void sync() { _writeService.sync_all_executors(); }
};
diff --git a/searchcore/src/tests/proton/index/indexmanager_test.cpp b/searchcore/src/tests/proton/index/indexmanager_test.cpp
index 4c442d38443..d34e2ae667e 100644
--- a/searchcore/src/tests/proton/index/indexmanager_test.cpp
+++ b/searchcore/src/tests/proton/index/indexmanager_test.cpp
@@ -128,7 +128,7 @@ struct IndexManagerTest : public ::testing::Test {
{
removeTestData();
vespalib::mkdir(index_dir, false);
- _writeService.sync();
+ _writeService.sync_all_executors();
resetIndexManager();
}
diff --git a/searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp b/searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp
index 139288f6b6f..1fadd3993ff 100644
--- a/searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp
+++ b/searchcore/src/tests/proton/matching/partial_result/partial_result_test.cpp
@@ -23,7 +23,7 @@ void checkMerge(const std::vector<double> &a, const std::vector<double> &b,
EXPECT_EQUAL(a.size() + b.size(), res_a.totalHits());
ASSERT_EQUAL(expect.size(), res_a.size());
for (size_t i = 0; i < expect.size(); ++i) {
- EXPECT_EQUAL(expect[i], res_a.hit(i)._rankValue);
+ EXPECT_EQUAL(expect[i], res_a.hit(i).getRank());
}
}
@@ -70,10 +70,10 @@ TEST("require that partial results can be created without sort data") {
res.totalHits(1000);
EXPECT_EQUAL(1000u, res.totalHits());
ASSERT_EQUAL(2u, res.size());
- EXPECT_EQUAL(1u, res.hit(0)._docId);
- EXPECT_EQUAL(10.0, res.hit(0)._rankValue);
- EXPECT_EQUAL(2u, res.hit(1)._docId);
- EXPECT_EQUAL(5.0, res.hit(1)._rankValue);
+ EXPECT_EQUAL(1u, res.hit(0).getDocId());
+ EXPECT_EQUAL(10.0, res.hit(0).getRank());
+ EXPECT_EQUAL(2u, res.hit(1).getDocId());
+ EXPECT_EQUAL(5.0, res.hit(1).getRank());
}
TEST("require that partial results can be created with sort data") {
@@ -90,12 +90,12 @@ TEST("require that partial results can be created with sort data") {
res.totalHits(1000);
EXPECT_EQUAL(1000u, res.totalHits());
ASSERT_EQUAL(2u, res.size());
- EXPECT_EQUAL(1u, res.hit(0)._docId);
- EXPECT_EQUAL(10.0, res.hit(0)._rankValue);
+ EXPECT_EQUAL(1u, res.hit(0).getDocId());
+ EXPECT_EQUAL(10.0, res.hit(0).getRank());
EXPECT_EQUAL(str1.data(), res.sortData(0).first);
EXPECT_EQUAL(str1.size(), res.sortData(0).second);
- EXPECT_EQUAL(2u, res.hit(1)._docId);
- EXPECT_EQUAL(5.0, res.hit(1)._rankValue);
+ EXPECT_EQUAL(2u, res.hit(1).getDocId());
+ EXPECT_EQUAL(5.0, res.hit(1).getRank());
EXPECT_EQUAL(str2.data(), res.sortData(1).first);
EXPECT_EQUAL(str2.size(), res.sortData(1).second);
}
@@ -133,10 +133,10 @@ TEST("require that lower docid is preferred when sorting on rank") {
res_c.add(search::RankedHit(1, 1.0));
res_a.merge(res_b);
ASSERT_EQUAL(1u, res_a.size());
- EXPECT_EQUAL(2u, res_a.hit(0)._docId);
+ EXPECT_EQUAL(2u, res_a.hit(0).getDocId());
res_a.merge(res_c);
ASSERT_EQUAL(1u, res_a.size());
- EXPECT_EQUAL(1u, res_a.hit(0)._docId);
+ EXPECT_EQUAL(1u, res_a.hit(0).getDocId());
}
TEST("require that lower docid is preferred when using sortspec") {
@@ -149,10 +149,10 @@ TEST("require that lower docid is preferred when using sortspec") {
res_c.add(search::RankedHit(1, 1.0), PartialResult::SortRef(foo.data(), foo.size()));
res_a.merge(res_b);
ASSERT_EQUAL(1u, res_a.size());
- EXPECT_EQUAL(2u, res_a.hit(0)._docId);
+ EXPECT_EQUAL(2u, res_a.hit(0).getDocId());
res_a.merge(res_c);
ASSERT_EQUAL(1u, res_a.size());
- EXPECT_EQUAL(1u, res_a.hit(0)._docId);
+ EXPECT_EQUAL(1u, res_a.hit(0).getDocId());
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp
index 4b6e67212e1..032be9e1dc8 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.cpp
@@ -575,6 +575,21 @@ AttributeManager::asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor>
{
for (const auto &attr : _attributes) {
if (attr.second.isExtra()) {
+ // We must skip extra attributes as they must be handled in other threads. (DocumentMetaStore)
+ continue;
+ }
+ AttributeVector::SP attrsp = attr.second.getAttribute();
+ _attributeFieldWriter.execute(_attributeFieldWriter.getExecutorIdFromName(attrsp->getNamePrefix()),
+ [attrsp, func]() { (*func)(*attrsp); });
+ }
+}
+
+void
+AttributeManager::asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const
+{
+ for (const auto &attr : _attributes) {
+ if (attr.second.isExtra()) {
+ // We must skip extra attributes as they must be handled in other threads. (DocumentMetaStore)
continue;
}
AttributeVector::SP attrsp = attr.second.getAttribute();
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h
index 64f0418c299..e2b9550435d 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h
+++ b/searchcore/src/vespa/searchcore/proton/attribute/attributemanager.h
@@ -178,6 +178,7 @@ public:
const std::vector<search::AttributeVector *> &getWritableAttributes() const override;
void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor> func) const override;
+ void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const override;
void asyncForAttribute(const vespalib::string &name, std::unique_ptr<IAttributeFunctor> func) const override;
ExclusiveAttributeReadAccessor::UP getExclusiveReadAccessor(const vespalib::string &name) const override;
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp b/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp
index 07bc1c638b5..c7ab83ae590 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.cpp
@@ -206,6 +206,22 @@ FilterAttributeManager::asyncForEachAttribute(std::shared_ptr<IConstAttributeFun
}
void
+FilterAttributeManager::asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const
+{
+ // Run by document db master thread
+ std::vector<AttributeGuard> completeList;
+ _mgr->getAttributeList(completeList);
+ vespalib::ISequencedTaskExecutor &attributeFieldWriter = getAttributeFieldWriter();
+ for (auto &guard : completeList) {
+ search::AttributeVector::SP attrsp = guard.getSP();
+ // Name must be extracted in document db master thread or attribute
+ // writer thread
+ attributeFieldWriter.execute(attributeFieldWriter.getExecutorIdFromName(attrsp->getNamePrefix()),
+ [attrsp, func]() { (*func)(*attrsp); });
+ }
+}
+
+void
FilterAttributeManager::asyncForAttribute(const vespalib::string &name, std::unique_ptr<IAttributeFunctor> func) const {
AttributeGuard::UP attr = _mgr->getAttribute(name);
if (!attr) { return; }
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h b/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h
index 9d09aef8faf..1ae5f452218 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h
+++ b/searchcore/src/vespa/searchcore/proton/attribute/filter_attribute_manager.h
@@ -52,6 +52,7 @@ public:
search::AttributeVector * getWritableAttribute(const vespalib::string &name) const override;
const std::vector<search::AttributeVector *> & getWritableAttributes() const override;
void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor> func) const override;
+ void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const override;
ExclusiveAttributeReadAccessor::UP getExclusiveReadAccessor(const vespalib::string &name) const override;
void setImportedAttributes(std::unique_ptr<ImportedAttributesRepo> attributes) override;
const ImportedAttributesRepo *getImportedAttributes() const override;
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h b/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h
index 65796fd4c74..d55cd45d014 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h
+++ b/searchcore/src/vespa/searchcore/proton/attribute/i_attribute_manager.h
@@ -98,6 +98,7 @@ struct IAttributeManager : public search::IAttributeManager
virtual const std::vector<search::AttributeVector *> &getWritableAttributes() const = 0;
virtual void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor> func) const = 0;
+ virtual void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor> func) const = 0;
virtual ExclusiveAttributeReadAccessor::UP getExclusiveReadAccessor(const vespalib::string &name) const = 0;
diff --git a/searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.cpp b/searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.cpp
index 998da6b5789..c7a3103af22 100644
--- a/searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.cpp
@@ -1,14 +1,18 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "bucketdb.h"
+#include "remove_batch_entry.h"
#include <cassert>
#include <algorithm>
+#include <optional>
using document::GlobalId;
using storage::spi::BucketChecksum;
namespace proton {
+using bucketdb::RemoveBatchEntry;
+
BucketDB::BucketDB()
: _map(),
_cachedBucketId(),
@@ -65,6 +69,20 @@ BucketDB::remove(const GlobalId &gid,
}
void
+BucketDB::remove_batch(const std::vector<RemoveBatchEntry> &removed, SubDbType sub_db_type)
+{
+ std::optional<BucketId> prev_bucket_id;
+ BucketState* state = nullptr;
+ for (auto &entry : removed) {
+ if (!prev_bucket_id.has_value() || prev_bucket_id.value() != entry.get_bucket_id()) {
+ state = &_map[entry.get_bucket_id()];
+ prev_bucket_id = entry.get_bucket_id();
+ }
+ state->remove(entry.get_gid(), entry.get_timestamp(), entry.get_doc_size(), sub_db_type);
+ }
+}
+
+void
BucketDB::modify(const GlobalId &gid,
const BucketId &oldBucketId, const Timestamp &oldTimestamp, uint32_t oldDocSize,
const BucketId &newBucketId, const Timestamp &newTimestamp, uint32_t newDocSize,
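
remove_batch above walks the removal entries in order and only re-resolves the bucket state when the bucket id changes, so consecutive removals from the same bucket share a single map lookup under the bucket DB guard. A simplified, self-contained sketch of that loop; the Entry and State types stand in for RemoveBatchEntry and BucketState, and the numbers in main mirror the bucketdb_test expectations.

#include <cassert>
#include <cstdint>
#include <map>
#include <optional>
#include <vector>

struct Entry { uint32_t bucket; uint32_t doc_size; };
struct State { uint32_t count; uint32_t size; };

// One pass over the removed entries; the bucket state is looked up only when
// the bucket id changes, so runs of entries from the same bucket reuse it.
void remove_batch(std::map<uint32_t, State> &db, const std::vector<Entry> &removed) {
    std::optional<uint32_t> prev_bucket;
    State *state = nullptr;
    for (const auto &entry : removed) {
        if (!prev_bucket.has_value() || *prev_bucket != entry.bucket) {
            state = &db[entry.bucket];
            prev_bucket = entry.bucket;
        }
        --state->count;
        state->size -= entry.doc_size;
    }
}

int main() {
    std::map<uint32_t, State> db{{4, {3, 306}}, {5, {3, 1470}}};
    remove_batch(db, {{4, 100}, {4, 102}, {5, 270}});
    assert(db[4].count == 1 && db[4].size == 104);   // as in the remove batch test
    assert(db[5].count == 2 && db[5].size == 1200);
}
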
diff --git a/searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.h b/searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.h
index 1723609e48e..2ea7594bde1 100644
--- a/searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.h
+++ b/searchcore/src/vespa/searchcore/proton/bucketdb/bucketdb.h
@@ -7,6 +7,8 @@
#include <vespa/persistence/spi/result.h>
#include <map>
+namespace proton::bucketdb { class RemoveBatchEntry; }
+
namespace proton {
class BucketDB
@@ -42,6 +44,8 @@ public:
const BucketId &bucketId, const Timestamp &timestamp, uint32_t docSize,
SubDbType subDbType);
+ void remove_batch(const std::vector<bucketdb::RemoveBatchEntry> &removed, SubDbType sub_db_type);
+
void modify(const GlobalId &gid,
const BucketId &oldBucketId, const Timestamp &oldTimestamp, uint32_t oldDocSize,
const BucketId &newBucketId, const Timestamp &newTimestamp, uint32_t newDocSize,
diff --git a/searchcore/src/vespa/searchcore/proton/bucketdb/remove_batch_entry.h b/searchcore/src/vespa/searchcore/proton/bucketdb/remove_batch_entry.h
new file mode 100644
index 00000000000..1ab1adb1add
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/proton/bucketdb/remove_batch_entry.h
@@ -0,0 +1,36 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/document/base/globalid.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <persistence/spi/types.h>
+
+
+namespace proton::bucketdb {
+
+/*
+ * Class containing metadata for a single document being removed from
+ * the bucket DB.
+ */
+class RemoveBatchEntry {
+ document::GlobalId _gid;
+ document::BucketId _bucket_id;
+ storage::spi::Timestamp _timestamp;
+ uint32_t _doc_size;
+public:
+ RemoveBatchEntry(const document::GlobalId& gid, const document::BucketId& bucket_id, const storage::spi::Timestamp& timestamp, uint32_t doc_size) noexcept
+ : _gid(gid),
+ _bucket_id(bucket_id),
+ _timestamp(timestamp),
+ _doc_size(doc_size)
+ {
+ }
+
+ const document::GlobalId& get_gid() const noexcept { return _gid; }
+ const document::BucketId& get_bucket_id() const noexcept { return _bucket_id; }
+ const storage::spi::Timestamp& get_timestamp() const noexcept { return _timestamp; }
+ uint32_t get_doc_size() const noexcept { return _doc_size; }
+};
+
+}
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp
index 3d7e1c1c774..13d4a39c8b1 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp
@@ -9,6 +9,7 @@
#include <vespa/persistence/spi/bucket_limits.h>
#include <vespa/searchcore/proton/bucketdb/bucketsessionbase.h>
#include <vespa/searchcore/proton/bucketdb/joinbucketssession.h>
+#include <vespa/searchcore/proton/bucketdb/remove_batch_entry.h>
#include <vespa/searchcore/proton/bucketdb/splitbucketsession.h>
#include <vespa/searchlib/attribute/load_utils.h>
#include <vespa/searchlib/attribute/readerbase.h>
@@ -30,6 +31,7 @@ LOG_SETUP(".proton.documentmetastore");
using document::BucketId;
using document::GlobalId;
using proton::bucketdb::BucketState;
+using proton::bucketdb::RemoveBatchEntry;
using proton::documentmetastore::GidToLidMapKey;
using search::AttributeVector;
using search::FileReader;
@@ -681,13 +683,15 @@ DocumentMetaStore::removeBatch(const std::vector<DocId> &lidsToRemove, const uin
remove_batch_internal_btree(removed);
_lidAlloc.unregister_lids(lidsToRemove);
{
- bucketdb::Guard bucketGuard = _bucketDB->takeGuard();
- // TODO: add remove_batch() method to BucketDB
+ std::vector<RemoveBatchEntry> bdb_removed;
+ bdb_removed.reserve(removed.size());
for (const auto& lid_and_meta : removed) {
auto& meta = lid_and_meta.second;
- bucketGuard->remove(meta.getGid(), meta.getBucketId().stripUnused(),
- meta.getTimestamp(), meta.getDocSize(), _subDbType);
+ bdb_removed.emplace_back(meta.getGid(), meta.getBucketId().stripUnused(),
+ meta.getTimestamp(), meta.getDocSize());
}
+ bucketdb::Guard bucketGuard = _bucketDB->takeGuard();
+ bucketGuard->remove_batch(bdb_removed, _subDbType);
}
incGeneration();
if (_op_listener) {
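
The removeBatch() change collects all RemoveBatchEntry data first and only then takes the bucket DB guard, so the lock is held for a single remove_batch() call rather than once per document. A sketch of that lock-scope pattern, assuming a toy Db type in place of the real bucket DB and guard:

    #include <mutex>
    #include <vector>

    struct Item { int id; };

    struct Db {
        std::mutex mutex;
        std::vector<int> removed;
        void remove_batch(const std::vector<Item>& items) {
            for (const auto& it : items) removed.push_back(it.id);
        }
    };

    void remove_all(Db& db, const std::vector<Item>& metas) {
        std::vector<Item> batch;
        batch.reserve(metas.size());      // build the whole batch without the lock
        for (const auto& m : metas) batch.push_back(m);
        std::lock_guard guard(db.mutex);  // guard held only for one batched call
        db.remove_batch(batch);
    }
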
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp
index fc5ecfee48b..467d12581c2 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/lid_allocator.cpp
@@ -82,9 +82,15 @@ LidAllocator::unregisterLid(DocId lid)
void
LidAllocator::unregister_lids(const std::vector<DocId>& lids)
{
- for (auto lid : lids) {
- unregisterLid(lid);
+ if (lids.empty()) {
+ return;
}
+ auto high = isFreeListConstructed() ? _pendingHoldLids.set_bits(lids) : _pendingHoldLids.assert_not_set_bits(lids);
+ assert(high < _usedLids.size());
+ _usedLids.clear_bits(lids);
+ assert(high < _activeLids.size());
+ _activeLids.consider_clear_bits(lids);
+ _numActiveLids = _activeLids.count();
}
void
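
unregister_lids() now processes the whole lid batch per bit vector: one pass marks pending holds, one clears the used bits, one clears the active bits, and the active count is recomputed once at the end. A sketch of the batch-update idea, assuming std::vector<bool> as a stand-in for LidStateVector:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct LidSets {
        std::vector<bool> used;
        std::vector<bool> active;
        uint32_t num_active = 0;

        void unregister_lids(const std::vector<uint32_t>& lids) {
            if (lids.empty()) {
                return;
            }
            for (uint32_t lid : lids) used[lid] = false;    // one pass over the batch
            for (uint32_t lid : lids) active[lid] = false;  // clearing an unset bit is a no-op
            num_active = static_cast<uint32_t>(std::count(active.begin(), active.end(), true));
        }
    };
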
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.cpp
index 49e8d3eb23a..7309f7a518c 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.cpp
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.cpp
@@ -90,6 +90,50 @@ LidStateVector::setBit(unsigned int idx)
_bv.setBitAndMaintainCount(idx);
}
+template <bool do_set>
+uint32_t
+LidStateVector::assert_is_not_set_then_set_bits_helper(const std::vector<uint32_t>& idxs)
+{
+ uint32_t size = _bv.size();
+ uint32_t high = 0;
+ uint32_t low = size;
+ for (auto idx : idxs) {
+ assert(idx < size);
+ if (idx > high) {
+ high = idx;
+ }
+ assert(!_bv.testBit(idx));
+ if (do_set) {
+ if (idx < low) {
+ low = idx;
+ }
+ _bv.setBitAndMaintainCount(idx);
+ }
+ }
+ if (do_set) {
+ if (_trackLowest && low < _lowest) {
+ _lowest = low;
+ }
+ if (_trackHighest && high > _highest) {
+ _highest = high;
+ }
+ }
+ return high;
+}
+
+uint32_t
+LidStateVector::assert_not_set_bits(const std::vector<uint32_t>& idxs)
+{
+ return assert_is_not_set_then_set_bits_helper<false>(idxs);
+}
+
+uint32_t
+LidStateVector::set_bits(const std::vector<uint32_t>& idxs)
+{
+ return assert_is_not_set_then_set_bits_helper<true>(idxs);
+}
+
+
void
LidStateVector::clearBit(unsigned int idx)
{
@@ -100,4 +144,30 @@ LidStateVector::clearBit(unsigned int idx)
maybeUpdateHighest();
}
+template <bool do_assert>
+void
+LidStateVector::assert_is_set_then_clear_bits_helper(const std::vector<uint32_t>& idxs)
+{
+ for (auto idx : idxs) {
+ if (do_assert) {
+ assert(_bv.testBit(idx));
+ }
+ _bv.clearBitAndMaintainCount(idx);
+ }
+ maybeUpdateLowest();
+ maybeUpdateHighest();
+}
+
+void
+LidStateVector::consider_clear_bits(const std::vector<uint32_t>& idxs)
+{
+ assert_is_set_then_clear_bits_helper<false>(idxs);
+}
+
+void
+LidStateVector::clear_bits(const std::vector<uint32_t>& idxs)
+{
+ assert_is_set_then_clear_bits_helper<true>(idxs);
+}
+
} // namespace proton
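
The new batch operations share their bodies through helpers templated on a bool, so the assert-only and the assert-and-set variants are two instantiations of one loop with the branch resolved at compile time. A sketch of that pattern, assuming a trivial bit container in place of LidStateVector:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    class Bits {
        std::vector<bool> _bv;
    public:
        explicit Bits(size_t n) : _bv(n, false) {}

        template <bool do_set>
        uint32_t check_then_maybe_set(const std::vector<uint32_t>& idxs) {
            uint32_t high = 0;
            for (uint32_t idx : idxs) {
                assert(idx < _bv.size());
                assert(!_bv[idx]);            // every index must currently be unset
                if (idx > high) high = idx;
                if (do_set) _bv[idx] = true;  // compile-time constant branch
            }
            return high;
        }

        uint32_t assert_not_set(const std::vector<uint32_t>& idxs) { return check_then_maybe_set<false>(idxs); }
        uint32_t set_bits(const std::vector<uint32_t>& idxs)       { return check_then_maybe_set<true>(idxs); }
    };
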
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.h b/searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.h
index be47676716b..74851635124 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.h
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/lidstatevector.h
@@ -25,6 +25,10 @@ class LidStateVector
if (_trackHighest && _highest != 0 && !_bv.testBit(_highest))
updateHighest();
}
+ template <bool do_set>
+ uint32_t assert_is_not_set_then_set_bits_helper(const std::vector<uint32_t>& idxs);
+ template <bool do_assert>
+ void assert_is_set_then_clear_bits_helper(const std::vector<uint32_t>& idxs);
public:
LidStateVector(unsigned int newSize, unsigned int newCapacity,
@@ -35,7 +39,11 @@ public:
void resizeVector(uint32_t newSize, uint32_t newCapacity);
void setBit(unsigned int idx);
+ uint32_t assert_not_set_bits(const std::vector<uint32_t>& idxs);
+ uint32_t set_bits(const std::vector<uint32_t>& idxs);
void clearBit(unsigned int idx);
+ void consider_clear_bits(const std::vector<uint32_t>& idxs);
+ void clear_bits(const std::vector<uint32_t>& idxs);
bool testBit(unsigned int idx) const { return _bv.testBit(idx); }
unsigned int size() const { return _bv.size(); }
unsigned int byteSize() const {
diff --git a/searchcore/src/vespa/searchcore/proton/matching/partial_result.cpp b/searchcore/src/vespa/searchcore/proton/matching/partial_result.cpp
index 6ae97a125ad..432752d69d0 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/partial_result.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/partial_result.cpp
@@ -7,10 +7,10 @@ namespace proton::matching {
namespace {
bool before(const search::RankedHit &a, const search::RankedHit &b) {
- if (a._rankValue != b._rankValue) {
- return (a._rankValue > b._rankValue);
+ if (a.getRank() != b.getRank()) {
+ return (a.getRank() > b.getRank());
}
- return (a._docId < b._docId);
+ return (a.getDocId() < b.getDocId());
}
void mergeHits(size_t maxHits,
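
This and the following hunks replace direct reads of RankedHit's public members with the getDocId()/getRank() accessors; the ordering logic is unchanged. A sketch of the comparator with a simplified stand-in hit type:

    #include <cstdint>

    struct SimpleHit {                 // illustrative stand-in for search::RankedHit
        uint32_t _docId;
        double   _rankValue;
        uint32_t getDocId() const { return _docId; }
        double   getRank()  const { return _rankValue; }
    };

    // Higher rank first; ties broken by ascending doc id.
    inline bool before(const SimpleHit& a, const SimpleHit& b) {
        if (a.getRank() != b.getRank()) {
            return a.getRank() > b.getRank();
        }
        return a.getDocId() < b.getDocId();
    }
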
diff --git a/searchcore/src/vespa/searchcore/proton/matching/result_processor.cpp b/searchcore/src/vespa/searchcore/proton/matching/result_processor.cpp
index 341bd3bb855..f332ca5ec26 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/result_processor.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/result_processor.cpp
@@ -108,7 +108,7 @@ ResultProcessor::extract_docid_ordering(const PartialResult &result) const
std::vector<std::pair<uint32_t,uint32_t>> list;
list.reserve(est_size);
for (size_t i = _offset; i < result.size(); ++i) {
- list.emplace_back(result.hit(i)._docId, list.size());
+ list.emplace_back(result.hit(i).getDocId(), list.size());
}
std::sort(list.begin(), list.end(), [](const auto &a, const auto &b){ return (a.first < b.first); });
return list;
@@ -142,11 +142,11 @@ ResultProcessor::makeReply(PartialResultUP full_result)
for (size_t i = 0; i < hitcnt; ++i) {
search::engine::SearchReply::Hit &dst = r.hits[i];
const search::RankedHit &src = result.hit(hitOffset + i);
- uint32_t docId = src._docId;
+ uint32_t docId = src.getDocId();
if (metaStore.getGidEvenIfMoved(docId, gid)) {
dst.gid = gid;
}
- dst.metric = src._rankValue;
+ dst.metric = src.getRank();
LOG(debug, "convertLidToGid: hit[%zu]: lid(%u) -> gid(%s)", i, docId, dst.gid.toString().c_str());
}
if (result.hasSortData() && (hitcnt > 0)) {
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index 645c9b15f07..ccfdb3b9b36 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -378,7 +378,7 @@ DocumentDB::enterOnlineState()
// Called by executor thread
// Ensure that all replayed operations are committed to memory structures
_feedView.get()->forceCommit(CommitParam(_feedHandler->getSerialNum()));
- _writeService.sync();
+ _writeService.sync_all_executors();
(void) _state.enterOnlineState();
// Consider delayed pruning of transaction log and config history
@@ -467,7 +467,7 @@ DocumentDB::applyConfig(DocumentDBConfig::SP configSnapshot, SerialNum serialNum
// Flush changes to attributes and memory index, cf. visibilityDelay
_feedView.get()->forceCommit(CommitParam(elidedConfigSave ? serialNum : serialNum - 1),
std::make_shared<vespalib::KeepAlive<FeedHandler::CommitResult>>(std::move(commit_result)));
- _writeService.sync();
+ _writeService.sync_all_executors();
}
if (params.shouldMaintenanceControllerChange()) {
_maintenanceController.killJobs();
@@ -575,15 +575,15 @@ DocumentDB::close()
// Caller should have removed document DB from feed router.
_refCount.waitForZeroRefCount();
- _writeService.sync();
+ _writeService.sync_all_executors();
// The attributes in the ready sub db is also the total set of attributes.
DocumentDBTaggedMetrics &metrics = getMetrics();
_metricsWireService.cleanAttributes(metrics.ready.attributes);
_metricsWireService.cleanAttributes(metrics.notReady.attributes);
- _writeService.sync();
+ _writeService.sync_all_executors();
masterExecute([this] () { closeSubDBs(); } );
- _writeService.sync();
+ _writeService.sync_all_executors();
// What about queued tasks ?
_writeService.shutdown();
_maintenanceController.kill();
@@ -920,7 +920,7 @@ DocumentDB::syncFeedView()
IFeedView::SP newFeedView(_subDBs.getFeedView());
_maintenanceController.killJobs();
- _writeService.sync();
+ _writeService.sync_all_executors();
_feedView.set(newFeedView);
_feedHandler->setActiveFeedView(newFeedView.get());
@@ -994,7 +994,7 @@ void
DocumentDB::stopMaintenance()
{
_maintenanceController.stop();
- _writeService.sync();
+ _writeService.sync_all_executors();
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp
index d35aaf9f909..0e9ba7a24c8 100644
--- a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp
@@ -88,13 +88,12 @@ ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor & sh
ExecutorThreadingService::~ExecutorThreadingService() = default;
-vespalib::Syncable &
-ExecutorThreadingService::sync() {
+void
+ExecutorThreadingService::sync_all_executors() {
// We have multiple patterns where task A posts to B which post back to A
for (size_t i = 0; i < 2; i++) {
syncOnce();
}
- return *this;
}
void
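
sync_all_executors() runs syncOnce() twice because a task on one executor can post follow-up work to another executor that posts back to the first, so a single round can leave work behind. A toy sketch of why two rounds drain one such ping-pong, assuming trivial inline queues instead of Vespa's thread pools:

    #include <deque>
    #include <functional>

    struct ToyExecutor {
        std::deque<std::function<void()>> queue;
        void execute(std::function<void()> task) { queue.push_back(std::move(task)); }
        void sync() {                        // run everything queued so far
            std::deque<std::function<void()>> pending;
            pending.swap(queue);
            for (auto& task : pending) task();
        }
    };

    void sync_all(ToyExecutor& a, ToyExecutor& b) {
        for (int i = 0; i < 2; ++i) {        // two rounds, as in sync_all_executors()
            a.sync();
            b.sync();
        }
    }

    int main() {
        ToyExecutor a, b;
        a.execute([&] { b.execute([&] { a.execute([] { /* final step */ }); }); });
        sync_all(a, b);                      // both queues are empty afterwards
    }
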
diff --git a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h
index 51da27586f7..611cf64aa9c 100644
--- a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h
+++ b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h
@@ -49,7 +49,7 @@ public:
/**
* Implements vespalib::Syncable
*/
- vespalib::Syncable &sync() override;
+ void sync_all_executors() override;
void shutdown();
diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp
index 5bd9ba64bae..8451f3268b8 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp
@@ -10,13 +10,11 @@
#include <vespa/searchcore/proton/attribute/attribute_collection_spec_factory.h>
#include <vespa/searchcore/proton/attribute/attribute_factory.h>
#include <vespa/searchcore/proton/attribute/attribute_manager_initializer.h>
-#include <vespa/searchcore/proton/attribute/attribute_populator.h>
#include <vespa/searchcore/proton/attribute/filter_attribute_manager.h>
#include <vespa/searchcore/proton/attribute/sequential_attributes_initializer.h>
#include <vespa/searchcore/proton/common/alloc_config.h>
#include <vespa/searchcore/proton/matching/sessionmanager.h>
#include <vespa/searchcore/proton/reprocessing/attribute_reprocessing_initializer.h>
-#include <vespa/searchcore/proton/reprocessing/document_reprocessing_handler.h>
#include <vespa/searchcore/proton/reprocessing/reprocess_documents_task.h>
#include <vespa/searchlib/docstore/document_store_visitor_progress.h>
#include <vespa/log/log.h>
@@ -275,6 +273,9 @@ FastAccessDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot, const
reconfigureAttributeMetrics(*newMgr, *oldMgr);
}
_iFeedView.set(_fastAccessFeedView.get());
+ if (isNodeRetired()) {
+ reconfigureAttributesConsideringNodeState();
+ }
}
return tasks;
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp b/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp
index ce7f1d70195..db2bb7ed2cb 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_feed_view.cpp
@@ -73,7 +73,7 @@ void
FastAccessFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op)
{
// Drain pending PutDoneContext and ForceCommitContext objects
- _writeService.sync();
+ _writeService.sync_all_executors();
_docIdLimit.set(op.getLidLimit());
getAttributeWriter()->compactLidSpace(op.getLidLimit(), op.getSerialNum());
Parent::handleCompactLidSpace(op);
diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
index af746f9debb..c9294150f16 100644
--- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
@@ -303,7 +303,7 @@ void
FeedHandler::performEof()
{
assert(_writeService.master().isCurrentThread());
- _writeService.sync();
+ _writeService.sync_all_executors();
LOG(debug, "Visiting done for transaction log domain '%s', eof received", _tlsMgr.getDomainName().c_str());
// Replay must be complete
if (_replay_end_serial_num != _serialNum) {
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp
index 3322722a642..bf3589457f9 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp
@@ -5,7 +5,6 @@
#include "document_subdb_initializer.h"
#include "reconfig_params.h"
#include "i_document_subdb_owner.h"
-#include "ibucketstatecalculator.h"
#include <vespa/searchcore/proton/attribute/attribute_writer.h>
#include <vespa/searchcore/proton/common/alloc_config.h>
#include <vespa/searchcore/proton/flushengine/threadedflushtarget.h>
@@ -43,8 +42,7 @@ SearchableDocSubDB::SearchableDocSubDB(const Config &cfg, const Context &ctx)
getSubDbName(), ctx._fastUpdCtx._storeOnlyCtx._owner.getDistributionKey()),
_warmupExecutor(ctx._warmupExecutor),
_realGidToLidChangeHandler(std::make_shared<GidToLidChangeHandler>()),
- _flushConfig(),
- _nodeRetired(false)
+ _flushConfig()
{
_gidToLidChangeHandler = _realGidToLidChangeHandler;
}
@@ -177,14 +175,14 @@ SearchableDocSubDB::applyFlushConfig(const DocumentDBFlushConfig &flushConfig)
void
SearchableDocSubDB::propagateFlushConfig()
{
- uint32_t maxFlushed = _nodeRetired ? _flushConfig.getMaxFlushedRetired() : _flushConfig.getMaxFlushed();
+ uint32_t maxFlushed = isNodeRetired() ? _flushConfig.getMaxFlushedRetired() : _flushConfig.getMaxFlushed();
_indexMgr->setMaxFlushed(maxFlushed);
}
void
SearchableDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &calc)
{
- _nodeRetired = calc->nodeRetired();
+ FastAccessDocSubDB::setBucketStateCalculator(calc);
propagateFlushConfig();
}
@@ -246,7 +244,7 @@ SearchableDocSubDB::reconfigure(std::unique_ptr<Configure> configure)
{
assert(_writeService.master().isCurrentThread());
- _writeService.sync();
+ _writeService.sync_all_executors();
// Everything should be quiet now.
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h
index c310aeb2a2b..2e7aac0a8d3 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h
@@ -81,7 +81,6 @@ private:
vespalib::SyncableThreadExecutor &_warmupExecutor;
std::shared_ptr<GidToLidChangeHandler> _realGidToLidChangeHandler;
DocumentDBFlushConfig _flushConfig;
- bool _nodeRetired;
// Note: lifetime of indexManager must be handled by caller.
std::shared_ptr<initializer::InitializerTask>
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
index 7ab60270411..97e55c37aff 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
@@ -1,5 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "storeonlydocsubdb.h"
#include "docstorevalidator.h"
#include "document_subdb_initializer.h"
#include "document_subdb_initializer_result.h"
@@ -7,7 +8,7 @@
#include "i_document_subdb_owner.h"
#include "minimal_document_retriever.h"
#include "reconfig_params.h"
-#include "storeonlydocsubdb.h"
+#include "ibucketstatecalculator.h"
#include <vespa/searchcore/proton/attribute/attribute_writer.h>
#include <vespa/searchcore/proton/bucketdb/ibucketdbhandlerinitializer.h>
#include <vespa/searchcore/proton/common/alloc_config.h>
@@ -111,6 +112,8 @@ StoreOnlyDocSubDB::StoreOnlyDocSubDB(const Config &cfg, const Context &ctx)
_dmsFlushTarget(),
_dmsShrinkTarget(),
_pendingLidsForCommit(std::make_shared<PendingLidTracker>()),
+ _nodeRetired(false),
+ _lastConfiguredCompactionStrategy(),
_subDbId(cfg._subDbId),
_subDbType(cfg._subDbType),
_fileHeaderContext(ctx._fileHeaderContext, _docTypeName, _baseDir),
@@ -280,6 +283,7 @@ StoreOnlyDocSubDB::setupDocumentMetaStore(DocumentMetaStoreInitializerResult::SP
_dmsShrinkTarget = std::make_shared<ShrinkLidSpaceFlushTarget>("documentmetastore.shrink", Type::GC,
Component::ATTRIBUTE, _flushedDocumentMetaStoreSerialNum,
_dmsFlushTarget->getLastFlushTime(), dms);
+ _lastConfiguredCompactionStrategy = dms->getConfig().getCompactionStrategy();
}
DocumentSubDbInitializer::UP
@@ -413,22 +417,68 @@ StoreOnlyDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot, const
return IReprocessingTask::List();
}
+namespace {
+
+constexpr double RETIRED_DEAD_RATIO = 0.5;
+
+struct UpdateConfig : public search::attribute::IAttributeFunctor {
+ UpdateConfig(search::CompactionStrategy compactionStrategy) noexcept
+ : _compactionStrategy(compactionStrategy)
+ {}
+ void operator()(search::attribute::IAttributeVector &iAttributeVector) override {
+ auto attributeVector = dynamic_cast<search::AttributeVector *>(&iAttributeVector);
+ if (attributeVector != nullptr) {
+ auto cfg = attributeVector->getConfig();
+ cfg.setCompactionStrategy(_compactionStrategy);
+ attributeVector->update_config(cfg);
+ }
+ }
+ search::CompactionStrategy _compactionStrategy;
+};
+
+}
+
+search::CompactionStrategy
+StoreOnlyDocSubDB::computeCompactionStrategy(search::CompactionStrategy strategy) const {
+ return isNodeRetired()
+ ? search::CompactionStrategy(RETIRED_DEAD_RATIO, RETIRED_DEAD_RATIO)
+ : strategy;
+}
+
void
StoreOnlyDocSubDB::reconfigure(const search::LogDocumentStore::Config & config, const AllocStrategy& alloc_strategy)
{
+ _lastConfiguredCompactionStrategy = alloc_strategy.get_compaction_strategy();
auto cfg = _dms->getConfig();
GrowStrategy grow = alloc_strategy.get_grow_strategy();
// Amortize memory spike cost over N docs
grow.setDocsGrowDelta(grow.getDocsGrowDelta() + alloc_strategy.get_amortize_count());
cfg.setGrowStrategy(grow);
- cfg.setCompactionStrategy(alloc_strategy.get_compaction_strategy());
+ cfg.setCompactionStrategy(computeCompactionStrategy(alloc_strategy.get_compaction_strategy()));
_dms->update_config(cfg); // Update grow and compaction config
_rSummaryMgr->reconfigure(config);
}
void
-StoreOnlyDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> &)
-{
+StoreOnlyDocSubDB::setBucketStateCalculator(const std::shared_ptr<IBucketStateCalculator> & calc) {
+ bool wasNodeRetired = isNodeRetired();
+ _nodeRetired = calc->nodeRetired();
+ if (wasNodeRetired != isNodeRetired()) {
+ search::CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy);
+ auto cfg = _dms->getConfig();
+ cfg.setCompactionStrategy(compactionStrategy);
+ _dms->update_config(cfg);
+ reconfigureAttributesConsideringNodeState();
+ }
+}
+
+void
+StoreOnlyDocSubDB::reconfigureAttributesConsideringNodeState() {
+ search::CompactionStrategy compactionStrategy = computeCompactionStrategy(_lastConfiguredCompactionStrategy);
+ auto attrMan = getAttributeManager();
+ if (attrMan) {
+ attrMan->asyncForEachAttribute(std::make_shared<UpdateConfig>(compactionStrategy));
+ }
}
proton::IAttributeManager::SP
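
computeCompactionStrategy() keeps the configured strategy while the node is active but substitutes a much more aggressive one (dead ratio 0.5 for both bytes and address space) once the node is retired, and setBucketStateCalculator() re-applies it to the document meta store and all attributes whenever the retired state flips. A sketch of the selection logic, assuming a simplified stand-in for search::CompactionStrategy:

    struct CompactionStrategy {
        double max_dead_bytes_ratio;
        double max_dead_address_space_ratio;
    };

    constexpr double RETIRED_DEAD_RATIO = 0.5;

    struct SubDb {
        bool node_retired = false;
        CompactionStrategy last_configured{0.2, 0.2};

        CompactionStrategy compute_compaction_strategy() const {
            return node_retired
                ? CompactionStrategy{RETIRED_DEAD_RATIO, RETIRED_DEAD_RATIO}
                : last_configured;
        }

        // A retired-state transition changes the effective strategy even though
        // the configured one did not, so the caller must push it out again.
        bool set_node_retired(bool retired) {
            bool changed = (node_retired != retired);
            node_retired = retired;
            return changed;
        }
    };
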
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
index 50e5ffba0e2..7051722f605 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
@@ -152,6 +152,8 @@ private:
DocumentMetaStoreFlushTarget::SP _dmsFlushTarget;
std::shared_ptr<ShrinkLidSpaceFlushTarget> _dmsShrinkTarget;
std::shared_ptr<PendingLidTrackerBase> _pendingLidsForCommit;
+ bool _nodeRetired;
+ search::CompactionStrategy _lastConfiguredCompactionStrategy;
IFlushTargetList getFlushTargets() override;
protected:
@@ -180,9 +182,8 @@ protected:
StoreOnlyFeedView::Context getStoreOnlyFeedViewContext(const DocumentDBConfig &configSnapshot);
StoreOnlyFeedView::PersistentParams getFeedViewPersistentParams();
vespalib::string getSubDbName() const;
-
- void reconfigure(const search::LogDocumentStore::Config & protonConfig,
- const AllocStrategy& alloc_strategy);
+ void reconfigure(const search::LogDocumentStore::Config & protonConfig, const AllocStrategy& alloc_strategy);
+ void reconfigureAttributesConsideringNodeState();
public:
StoreOnlyDocSubDB(const Config &cfg, const Context &ctx);
~StoreOnlyDocSubDB() override;
@@ -233,6 +234,9 @@ public:
std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override;
void tearDownReferences(IDocumentDBReferenceResolver &resolver) override;
PendingLidTrackerBase & getUncommittedLidsTracker() override { return *_pendingLidsForCommit; }
+ search::CompactionStrategy computeCompactionStrategy(search::CompactionStrategy strategy) const;
+ bool isNodeRetired() const { return _nodeRetired; }
+
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h b/searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h
index deb6639c855..abc8eb679dd 100644
--- a/searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h
+++ b/searchcore/src/vespa/searchcore/proton/test/mock_attribute_manager.h
@@ -86,8 +86,9 @@ public:
const std::vector<search::AttributeVector *> &getWritableAttributes() const override {
return _writables;
}
- void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor>) const override {
- }
+ void asyncForEachAttribute(std::shared_ptr<IConstAttributeFunctor>) const override { }
+ void asyncForEachAttribute(std::shared_ptr<IAttributeFunctor>) const override { }
+
ExclusiveAttributeReadAccessor::UP getExclusiveReadAccessor(const vespalib::string &) const override {
return ExclusiveAttributeReadAccessor::UP();
}
diff --git a/searchcore/src/vespa/searchcore/proton/test/thread_utils.h b/searchcore/src/vespa/searchcore/proton/test/thread_utils.h
index 84b7c12cba6..6b08eecf61f 100644
--- a/searchcore/src/vespa/searchcore/proton/test/thread_utils.h
+++ b/searchcore/src/vespa/searchcore/proton/test/thread_utils.h
@@ -14,7 +14,7 @@ void
runInMaster(searchcorespi::index::IThreadingService &writeService, FunctionType func)
{
writeService.master().execute(vespalib::makeLambdaTask(std::move(func)));
- writeService.sync();
+ writeService.sync_all_executors();
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h b/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h
index 94b62962f04..f1213d51296 100644
--- a/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h
+++ b/searchcore/src/vespa/searchcore/proton/test/threading_service_observer.h
@@ -43,8 +43,8 @@ public:
return _attributeFieldWriter;
}
- vespalib::Syncable &sync() override {
- return _service.sync();
+ void sync_all_executors() override {
+ _service.sync_all_executors();
}
searchcorespi::index::IThreadService &master() override {
diff --git a/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h b/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h
index be8c9ef7d86..f30aec94d53 100644
--- a/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h
+++ b/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h
@@ -2,7 +2,6 @@
#pragma once
#include "i_thread_service.h"
-#include <vespa/vespalib/util/syncable.h>
namespace vespalib { class ISequencedTaskExecutor; }
namespace searchcorespi::index {
@@ -57,13 +56,15 @@ namespace searchcorespi::index {
* TODO: * indexFieldInverter and indexFieldWriter can be collapsed to one. Both need sequencing,
* but they sequence on different things so efficiency will be the same and just depends on #threads
*/
-struct IThreadingService : public vespalib::Syncable
+struct IThreadingService
{
IThreadingService(const IThreadingService &) = delete;
IThreadingService & operator = (const IThreadingService &) = delete;
IThreadingService() = default;
virtual ~IThreadingService() = default;
+ virtual void sync_all_executors() = 0;
+
virtual IThreadService &master() = 0;
virtual IThreadService &index() = 0;
virtual IThreadService &summary() = 0;
diff --git a/searchlib/src/tests/attribute/searchcontext/searchcontext_test.cpp b/searchlib/src/tests/attribute/searchcontext/searchcontext_test.cpp
index 4076194542a..de54386e4af 100644
--- a/searchlib/src/tests/attribute/searchcontext/searchcontext_test.cpp
+++ b/searchlib/src/tests/attribute/searchcontext/searchcontext_test.cpp
@@ -487,7 +487,7 @@ SearchContextTest::checkResultSet(const ResultSet & rs, const DocSet & expected,
ASSERT_TRUE(array != nullptr);
uint32_t i = 0;
for (auto iter = expected.begin(); iter != expected.end(); ++iter, ++i) {
- EXPECT_TRUE(array[i]._docId == *iter);
+ EXPECT_TRUE(array[i].getDocId() == *iter);
}
}
}
@@ -1517,10 +1517,10 @@ SearchContextTest::requireThatSearchIsWorkingAfterClearDoc(const vespalib::strin
EXPECT_EQUAL(4u, rs->getNumHits());
ASSERT_TRUE(4u == rs->getNumHits());
const RankedHit * array = rs->getArray();
- EXPECT_EQUAL(1u, array[0]._docId);
- EXPECT_EQUAL(2u, array[1]._docId);
- EXPECT_EQUAL(3u, array[2]._docId);
- EXPECT_EQUAL(4u, array[3]._docId);
+ EXPECT_EQUAL(1u, array[0].getDocId());
+ EXPECT_EQUAL(2u, array[1].getDocId());
+ EXPECT_EQUAL(3u, array[2].getDocId());
+ EXPECT_EQUAL(4u, array[3].getDocId());
}
a->clearDoc(1);
a->clearDoc(3);
@@ -1529,8 +1529,8 @@ SearchContextTest::requireThatSearchIsWorkingAfterClearDoc(const vespalib::strin
ResultSetPtr rs = performSearch(v, term);
EXPECT_EQUAL(2u, rs->getNumHits());
const RankedHit * array = rs->getArray();
- EXPECT_EQUAL(2u, array[0]._docId);
- EXPECT_EQUAL(4u, array[1]._docId);
+ EXPECT_EQUAL(2u, array[0].getDocId());
+ EXPECT_EQUAL(4u, array[1].getDocId());
}
}
@@ -1578,9 +1578,9 @@ SearchContextTest::requireThatSearchIsWorkingAfterLoadAndClearDoc(const vespalib
const RankedHit * array = rs->getArray();
for (uint32_t i = 0; i < 14; ++i) {
if (i < 5) {
- EXPECT_EQUAL(i + 1, array[i]._docId);
+ EXPECT_EQUAL(i + 1, array[i].getDocId());
} else
- EXPECT_EQUAL(i + 2, array[i]._docId);
+ EXPECT_EQUAL(i + 2, array[i].getDocId());
}
}
ValueType buf;
@@ -1682,15 +1682,15 @@ SearchContextTest::requireThatFlagAttributeIsWorkingWhenNewDocsAreAdded()
{
ResultSetPtr rs = performSearch(fa, "<24");
EXPECT_EQUAL(2u, rs->getNumHits());
- EXPECT_EQUAL(1u, rs->getArray()[0]._docId);
- EXPECT_EQUAL(2u, rs->getArray()[1]._docId);
+ EXPECT_EQUAL(1u, rs->getArray()[0].getDocId());
+ EXPECT_EQUAL(2u, rs->getArray()[1].getDocId());
}
{
ResultSetPtr rs = performSearch(fa, "24");
EXPECT_EQUAL(3u, rs->getNumHits());
- EXPECT_EQUAL(1u, rs->getArray()[0]._docId);
- EXPECT_EQUAL(2u, rs->getArray()[1]._docId);
- EXPECT_EQUAL(4u, rs->getArray()[2]._docId);
+ EXPECT_EQUAL(1u, rs->getArray()[0].getDocId());
+ EXPECT_EQUAL(2u, rs->getArray()[1].getDocId());
+ EXPECT_EQUAL(4u, rs->getArray()[2].getDocId());
}
}
{
@@ -1717,15 +1717,15 @@ SearchContextTest::requireThatFlagAttributeIsWorkingWhenNewDocsAreAdded()
EXPECT_EQUAL(exp50.size(), rs1->getNumHits());
EXPECT_EQUAL(exp50.size(), rs2->getNumHits());
for (size_t j = 0; j < exp50.size(); ++j) {
- EXPECT_EQUAL(exp50[j], rs1->getArray()[j]._docId);
- EXPECT_EQUAL(exp50[j], rs2->getArray()[j]._docId);
+ EXPECT_EQUAL(exp50[j], rs1->getArray()[j].getDocId());
+ EXPECT_EQUAL(exp50[j], rs2->getArray()[j].getDocId());
}
}
{
ResultSetPtr rs = performSearch(fa, "60");
EXPECT_EQUAL(exp60.size(), rs->getNumHits());
for (size_t j = 0; j < exp60.size(); ++j) {
- EXPECT_EQUAL(exp60[j], rs->getArray()[j]._docId);
+ EXPECT_EQUAL(exp60[j], rs->getArray()[j].getDocId());
}
}
}
diff --git a/searchlib/src/tests/grouping/grouping_test.cpp b/searchlib/src/tests/grouping/grouping_test.cpp
index ef4930de8ce..2eab66cb3b7 100644
--- a/searchlib/src/tests/grouping/grouping_test.cpp
+++ b/searchlib/src/tests/grouping/grouping_test.cpp
@@ -105,7 +105,7 @@ public:
hit._rankValue = rank;
_hits.push_back(hit);
for (uint32_t pos = (_hits.size() - 1);
- pos > 0 && (_hits[pos]._rankValue > _hits[pos - 1]._rankValue);
+ pos > 0 && (_hits[pos].getRank() > _hits[pos - 1].getRank());
--pos)
{
std::swap(_hits[pos], _hits[pos - 1]);
diff --git a/searchlib/src/tests/groupingengine/groupingengine_benchmark.cpp b/searchlib/src/tests/groupingengine/groupingengine_benchmark.cpp
index 66fa359f1a3..e82079073e7 100644
--- a/searchlib/src/tests/groupingengine/groupingengine_benchmark.cpp
+++ b/searchlib/src/tests/groupingengine/groupingengine_benchmark.cpp
@@ -88,7 +88,7 @@ public:
hit._rankValue = rank;
_hits.push_back(hit);
for (uint32_t pos = (_hits.size() - 1);
- pos > 0 && (_hits[pos]._rankValue > _hits[pos - 1]._rankValue);
+ pos > 0 && (_hits[pos].getRank() > _hits[pos - 1].getRank());
--pos)
{
std::swap(_hits[pos], _hits[pos - 1]);
diff --git a/searchlib/src/tests/groupingengine/groupingengine_test.cpp b/searchlib/src/tests/groupingengine/groupingengine_test.cpp
index a0179c36c23..d54b68388e4 100644
--- a/searchlib/src/tests/groupingengine/groupingengine_test.cpp
+++ b/searchlib/src/tests/groupingengine/groupingengine_test.cpp
@@ -87,7 +87,7 @@ public:
hit._rankValue = rank;
_hits.push_back(hit);
for (uint32_t pos = (_hits.size() - 1);
- pos > 0 && (_hits[pos]._rankValue > _hits[pos - 1]._rankValue);
+ pos > 0 && (_hits[pos].getRank() > _hits[pos - 1].getRank());
--pos)
{
std::swap(_hits[pos], _hits[pos - 1]);
diff --git a/searchlib/src/tests/hitcollector/hitcollector_test.cpp b/searchlib/src/tests/hitcollector/hitcollector_test.cpp
index 617e0e85824..ed68c47ea23 100644
--- a/searchlib/src/tests/hitcollector/hitcollector_test.cpp
+++ b/searchlib/src/tests/hitcollector/hitcollector_test.cpp
@@ -70,8 +70,8 @@ void checkResult(const ResultSet & rs, const std::vector<RankedHit> & exp)
ASSERT_EQUAL(rs.getArrayUsed(), exp.size());
for (uint32_t i = 0; i < exp.size(); ++i) {
- EXPECT_EQUAL(rh[i]._docId, exp[i]._docId);
- EXPECT_EQUAL(rh[i]._rankValue + 1.0, exp[i]._rankValue + 1.0);
+ EXPECT_EQUAL(rh[i].getDocId(), exp[i].getDocId());
+ EXPECT_EQUAL(rh[i].getRank() + 1.0, exp[i].getRank() + 1.0);
}
} else {
ASSERT_TRUE(rs.getArray() == nullptr);
diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
index ca30fe2d35e..a9b7bbd4603 100644
--- a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
@@ -478,14 +478,21 @@ getFeatureStoreMemStats(const FieldIndexCollection &fieldIndexes)
void
myCommit(FieldIndexCollection &fieldIndexes, ISequencedTaskExecutor &pushThreads)
{
- uint32_t fieldId = 0;
- for (auto &fieldIndex : fieldIndexes.getFieldIndexes()) {
- pushThreads.execute(fieldId,
- [fieldIndex(fieldIndex.get())]()
- { fieldIndex->commit(); });
- ++fieldId;
+ vespalib::Gate gate;
+ {
+ auto gate_callback = std::make_shared<vespalib::GateCallback>(gate);
+ uint32_t fieldId = 0;
+ for (auto &fieldIndex : fieldIndexes.getFieldIndexes()) {
+ pushThreads.execute(fieldId,
+ [fieldIndex(fieldIndex.get()), gate_callback]()
+ {
+ (void) gate_callback;
+ fieldIndex->commit();
+ });
+ ++fieldId;
+ }
}
- pushThreads.sync_all();
+ gate.await();
}
void
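
myCommit() now gives every queued commit task a copy of a shared GateCallback and waits on the gate instead of calling sync_all(); the gate opens only when the last task releases its copy. A standalone sketch of that completion pattern, assuming std::promise plus a shared_ptr custom deleter in place of vespalib::Gate/GateCallback:

    #include <future>
    #include <memory>
    #include <thread>
    #include <vector>

    int main() {
        std::promise<void> gate;
        auto done = gate.get_future();
        std::vector<std::thread> workers;
        {
            // The deleter fires exactly once, when the last copy is destroyed.
            std::shared_ptr<void> gate_callback(nullptr, [&gate](void*) { gate.set_value(); });
            for (int i = 0; i < 4; ++i) {
                workers.emplace_back([gate_callback] {
                    (void) gate_callback;    // hold the gate closed until this task finishes
                    /* per-field commit work would run here */
                });
            }
        }                                    // the local copy is released here
        done.wait();                         // returns after every worker released its copy
        for (auto& w : workers) w.join();
    }
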
diff --git a/searchlib/src/tests/sortresults/sorttest.cpp b/searchlib/src/tests/sortresults/sorttest.cpp
index cd892800ca5..bbd6d0b72ce 100644
--- a/searchlib/src/tests/sortresults/sorttest.cpp
+++ b/searchlib/src/tests/sortresults/sorttest.cpp
@@ -41,17 +41,17 @@ test_sort(unsigned int caseNum, unsigned int n, unsigned int ntop)
}
FastS_SortResults(array, n, ntop);
- minmax = array[ntop - 1]._rankValue;
+ minmax = array[ntop - 1].getRank();
for(i = 0; i < n; i++) {
if (i < ntop && i > 0
- && array[i]._rankValue > array[i - 1]._rankValue) {
+ && array[i].getRank() > array[i - 1].getRank()) {
printf("ERROR: rank(%d) > rank(%d)\n",
i, i - 1);
ok = false;
break;
}
if (i >= ntop &&
- array[i]._rankValue > minmax) {
+ array[i].getRank() > minmax) {
printf("ERROR: rank(%d) > rank(%d)\n",
i, ntop - 1);
ok = false;
diff --git a/searchlib/src/tests/sortspec/multilevelsort.cpp b/searchlib/src/tests/sortspec/multilevelsort.cpp
index f438fce0e7f..576e1d1336c 100644
--- a/searchlib/src/tests/sortspec/multilevelsort.cpp
+++ b/searchlib/src/tests/sortspec/multilevelsort.cpp
@@ -275,21 +275,21 @@ MultilevelSortTest::sortAndCheck(const std::vector<Spec> &spec, uint32_t num,
for (uint32_t j = 0; j < spec.size(); ++j) {
int cmp = 0;
if (spec[j]._type == RANK) {
- if (hits[i]._rankValue < hits[i+1]._rankValue) {
+ if (hits[i].getRank() < hits[i+1].getRank()) {
cmp = -1;
- } else if (hits[i]._rankValue > hits[i+1]._rankValue) {
+ } else if (hits[i].getRank() > hits[i+1].getRank()) {
cmp = 1;
}
} else if (spec[j]._type == DOCID) {
- if (hits[i]._docId < hits[i+1]._docId) {
+ if (hits[i].getDocId() < hits[i+1].getDocId()) {
cmp = -1;
- } else if (hits[i]._docId > hits[i+1]._docId) {
+ } else if (hits[i].getDocId() > hits[i+1].getDocId()) {
cmp = 1;
}
} else {
AttributeVector *av = vec[spec[j]._name].get();
cmp = compare(av, spec[j]._type,
- hits[i]._docId, hits[i+1]._docId);
+ hits[i].getDocId(), hits[i+1].getDocId());
}
if (spec[j]._asc) {
EXPECT_TRUE(cmp <= 0);
diff --git a/searchlib/src/vespa/searchlib/aggregation/grouping.cpp b/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
index 68098d6c35a..f373b5fc0b3 100644
--- a/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
+++ b/searchlib/src/vespa/searchlib/aggregation/grouping.cpp
@@ -205,13 +205,13 @@ void Grouping::postProcess()
void Grouping::aggregateWithoutClock(const RankedHit * rankedHit, unsigned int len) {
for(unsigned int i(0); i < len; i++) {
- aggregate(rankedHit[i]._docId, rankedHit[i]._rankValue);
+ aggregate(rankedHit[i].getDocId(), rankedHit[i].getRank());
}
}
void Grouping::aggregateWithClock(const RankedHit * rankedHit, unsigned int len) {
for(unsigned int i(0); (i < len) && !hasExpired(); i++) {
- aggregate(rankedHit[i]._docId, rankedHit[i]._rankValue);
+ aggregate(rankedHit[i].getDocId(), rankedHit[i].getRank());
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h b/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h
index b31e726b103..fdf9ab624ad 100644
--- a/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h
+++ b/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h
@@ -29,7 +29,7 @@ public:
uint64_t
operator()(const LoadedEnumAttribute &v)
{
- return (static_cast<uint64_t>(v._enum) << 32) | v._docId;
+ return (static_cast<uint64_t>(v._enum) << 32) | v.getDocId();
}
};
diff --git a/searchlib/src/vespa/searchlib/attribute/loadedvalue.h b/searchlib/src/vespa/searchlib/attribute/loadedvalue.h
index 701ccdc902c..b8f938838d2 100644
--- a/searchlib/src/vespa/searchlib/attribute/loadedvalue.h
+++ b/searchlib/src/vespa/searchlib/attribute/loadedvalue.h
@@ -103,9 +103,10 @@ public:
T _value;
uint32_t _eidx;
};
+
uint32_t _docId;
uint32_t _idx;
- vespalib::datastore::EntryRef _pidx;
+ vespalib::datastore::EntryRef _pidx;
private:
int32_t _weight;
Value _value;
diff --git a/searchlib/src/vespa/searchlib/common/sortresults.cpp b/searchlib/src/vespa/searchlib/common/sortresults.cpp
index 7a54de708d0..7510ae162ce 100644
--- a/searchlib/src/vespa/searchlib/common/sortresults.cpp
+++ b/searchlib/src/vespa/searchlib/common/sortresults.cpp
@@ -51,7 +51,7 @@ FastS_insertion_sort(RankedHit a[], uint32_t n)
for (i=1; i<n ; i++) {
swap = a[i];
j = i;
- while (R(swap._rankValue) > R(a[j-1]._rankValue)) {
+ while (R(swap.getRank()) > R(a[j-1].getRank())) {
a[j] = a[j-1];
if (!(--j)) break;;
}
@@ -74,13 +74,13 @@ FastS_radixsort(RankedHit a[], uint32_t n, uint32_t ntop)
memset(cnt, 0, 256*sizeof(uint32_t));
// Count occurrences [NB: will fail with n < 3]
for(i = 0; i < n - 3; i += 4) {
- cnt[(R(a[i]._rankValue) >> SHIFT) & 0xFF]++;
- cnt[(R(a[i + 1]._rankValue) >> SHIFT) & 0xFF]++;
- cnt[(R(a[i + 2]._rankValue) >> SHIFT) & 0xFF]++;
- cnt[(R(a[i + 3]._rankValue) >> SHIFT) & 0xFF]++;
+ cnt[(R(a[i].getRank()) >> SHIFT) & 0xFF]++;
+ cnt[(R(a[i + 1].getRank()) >> SHIFT) & 0xFF]++;
+ cnt[(R(a[i + 2].getRank()) >> SHIFT) & 0xFF]++;
+ cnt[(R(a[i + 3].getRank()) >> SHIFT) & 0xFF]++;
}
for(; i < n; i++)
- cnt[(R(a[i]._rankValue) >> SHIFT) & 0xFF]++;
+ cnt[(R(a[i].getRank()) >> SHIFT) & 0xFF]++;
// Accumulate cnt positions
sorted = (cnt[0]==n);
@@ -109,14 +109,14 @@ FastS_radixsort(RankedHit a[], uint32_t n, uint32_t ntop)
// Grab first element to move
j = ptr[i];
swap = a[j];
- k = (R(swap._rankValue) >> SHIFT) & 0xFF;
+ k = (R(swap.getRank()) >> SHIFT) & 0xFF;
// Swap into correct class until cycle completed
if (i!=k) {
do {
temp = a[ptr[k]];
a[ptr[k]++] = swap;
- k = (R((swap = temp)._rankValue) >> SHIFT) & 0xFF;
+ k = (R((swap = temp).getRank()) >> SHIFT) & 0xFF;
remain--;
} while (i!=k);
// Place last element in cycle
@@ -265,11 +265,11 @@ FastS_SortSpec::initSortData(const RankedHit *hits, uint32_t n)
written = sizeof(hits->_docId) + sizeof(_partitionId);
break;
case ASC_RANK:
- serializeForSort<convertForSort<search::HitRank, true> >(hits[i]._rankValue, mySortData);
+ serializeForSort<convertForSort<search::HitRank, true> >(hits[i].getRank(), mySortData);
written = sizeof(hits->_rankValue);
break;
case DESC_RANK:
- serializeForSort<convertForSort<search::HitRank, false> >(hits[i]._rankValue, mySortData);
+ serializeForSort<convertForSort<search::HitRank, false> >(hits[i].getRank(), mySortData);
written = sizeof(hits->_rankValue);
break;
case ASC_VECTOR:
diff --git a/searchlib/src/vespa/searchlib/fef/termfieldmatchdata.cpp b/searchlib/src/vespa/searchlib/fef/termfieldmatchdata.cpp
index 77c2aa3072a..ea278ebf607 100644
--- a/searchlib/src/vespa/searchlib/fef/termfieldmatchdata.cpp
+++ b/searchlib/src/vespa/searchlib/fef/termfieldmatchdata.cpp
@@ -18,7 +18,7 @@ TermFieldMatchData::TermFieldMatchData() :
}
TermFieldMatchData::TermFieldMatchData(const TermFieldMatchData & rhs) :
- _docId(rhs._docId),
+ _docId(rhs.getDocId()),
_fieldId(rhs._fieldId),
_flags(rhs._flags),
_sz(0),
diff --git a/searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp b/searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp
index 20fd333442b..8183cb005fe 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/document_inverter_context.cpp
@@ -12,19 +12,23 @@ using index::SchemaIndexFields;
namespace {
template <typename Context>
-void make_contexts(const SchemaIndexFields& schema_index_fields, ISequencedTaskExecutor& executor, std::vector<Context>& contexts)
+void make_contexts(const index::Schema& schema, const SchemaIndexFields& schema_index_fields, ISequencedTaskExecutor& executor, std::vector<Context>& contexts)
{
using ExecutorId = ISequencedTaskExecutor::ExecutorId;
using IdMapping = std::vector<std::tuple<ExecutorId, bool, uint32_t>>;
IdMapping map;
for (uint32_t field_id : schema_index_fields._textFields) {
// TODO: Add bias when sharing sequenced task executor between document types
- map.emplace_back(executor.getExecutorId(field_id), false, field_id);
+ auto& name = schema.getIndexField(field_id).getName();
+ auto id = executor.getExecutorIdFromName(name);
+ map.emplace_back(id, false, field_id);
}
uint32_t uri_field_id = 0;
for (auto& uri_field : schema_index_fields._uriFields) {
// TODO: Add bias when sharing sequenced task executor between document types
- map.emplace_back(executor.getExecutorId(uri_field._all), true, uri_field_id);
+ auto& name = schema.getIndexField(uri_field._all).getName();
+ auto id = executor.getExecutorIdFromName(name);
+ map.emplace_back(id, true, uri_field_id);
++uri_field_id;
}
std::sort(map.begin(), map.end());
@@ -140,8 +144,8 @@ DocumentInverterContext::~DocumentInverterContext() = default;
void
DocumentInverterContext::setup_contexts()
{
- make_contexts(_schema_index_fields, _invert_threads, _invert_contexts);
- make_contexts(_schema_index_fields, _push_threads, _push_contexts);
+ make_contexts(_schema, _schema_index_fields, _invert_threads, _invert_contexts);
+ make_contexts(_schema, _schema_index_fields, _push_threads, _push_contexts);
if (&_invert_threads == &_push_threads) {
uint32_t bias = _schema_index_fields._textFields.size() + _schema_index_fields._uriFields.size();
switch_to_alternate_ids(_push_threads, _push_contexts, bias);
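
make_contexts() now asks the executor for an id derived from the field name (getExecutorIdFromName) instead of hashing the numeric field id, which is what makes the per-field distribution deterministic in the updated tests below. A sketch of the observable property only, assuming a toy name-to-executor map rather than the executor's real hashing and perfect-key machinery:

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    class NameToExecutor {
        uint32_t _num_executors;
        uint32_t _next = 0;
        std::unordered_map<std::string, uint32_t> _assigned;
    public:
        explicit NameToExecutor(uint32_t n) : _num_executors(n) {}
        // The first time a name is seen it takes the next executor round-robin;
        // the same name always maps to the same executor afterwards.
        uint32_t executor_id_from_name(const std::string& name) {
            auto [it, inserted] = _assigned.try_emplace(name, _next % _num_executors);
            if (inserted) {
                ++_next;
            }
            return it->second;
        }
    };
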
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_remover.h b/searchlib/src/vespa/searchlib/memoryindex/field_index_remover.h
index f8328d15289..429eea038c9 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index_remover.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_remover.h
@@ -21,6 +21,7 @@ private:
struct WordFieldDocTuple {
vespalib::datastore::EntryRef _wordRef;
uint32_t _docId;
+
WordFieldDocTuple() noexcept :
_wordRef(0),
_docId(0)
diff --git a/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp b/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
index b2b1d49bae9..3293019e538 100644
--- a/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/hitcollector.cpp
@@ -195,7 +195,7 @@ mergeHitsIntoResultSet(const std::vector<HitCollector::Hit> &hits, ResultSet &re
uint32_t rhCur(0);
uint32_t rhEnd(result.getArrayUsed());
for (const auto &hit : hits) {
- while (rhCur != rhEnd && result[rhCur]._docId != hit.first) {
+ while (rhCur != rhEnd && result[rhCur].getDocId() != hit.first) {
// just set the iterators right
++rhCur;
}
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h
index a94f6087a0d..d0a75930ed5 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h
@@ -77,8 +77,8 @@ public:
bool operator<(const PendingOp &rhs) const {
if (_wordIdx != rhs._wordIdx)
return _wordIdx < rhs._wordIdx;
- if (_docId != rhs._docId)
- return _docId < rhs._docId;
+ if (_docId != rhs.getDocId())
+ return _docId < rhs.getDocId();
return _seq < rhs._seq;
}
};
diff --git a/security-utils/pom.xml b/security-utils/pom.xml
index 4e33e31c8c4..b7c7c110ad8 100644
--- a/security-utils/pom.xml
+++ b/security-utils/pom.xml
@@ -12,12 +12,6 @@
<packaging>bundle</packaging>
<version>7-SNAPSHOT</version>
- <properties>
- <!-- vespa-http-client targets jdk8 and uses this library -->
- <!-- TODO remove once vespa-http-client no longer builds against jdk8 -->
- <maven.compiler.release>8</maven.compiler.release>
- </properties>
-
<dependencies>
<!-- provided -->
<dependency>
@@ -73,11 +67,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
- <jdkToolchain>
- <version>${java.version}</version>
- </jdkToolchain>
- <source>${java.version}</source>
- <target>${java.version}</target>
+ <release>${vespaClients.jdk.releaseVersion}</release>
<showDeprecation>true</showDeprecation>
<compilerArgs>
<arg>-Xlint:all</arg>
diff --git a/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp b/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp
index 647bcf9bcee..ef7f8bfb0f6 100644
--- a/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp
+++ b/staging_vespalib/src/tests/sequencedtaskexecutor/sequencedtaskexecutor_test.cpp
@@ -242,11 +242,16 @@ TEST("require that you get correct number of executors") {
TEST("require that you distribute well") {
auto seven = SequencedTaskExecutor::create(sequenced_executor, 7);
const SequencedTaskExecutor & seq = dynamic_cast<const SequencedTaskExecutor &>(*seven);
+ const uint32_t NUM_EXACT = 8 * seven->getNumExecutors();
EXPECT_EQUAL(7u, seven->getNumExecutors());
EXPECT_EQUAL(97u, seq.getComponentHashSize());
EXPECT_EQUAL(0u, seq.getComponentEffectiveHashSize());
for (uint32_t id=0; id < 1000; id++) {
- EXPECT_EQUAL((id%97)%7, seven->getExecutorId(id).getId());
+ if (id < NUM_EXACT) {
+ EXPECT_EQUAL(id % seven->getNumExecutors(), seven->getExecutorId(id).getId());
+ } else {
+ EXPECT_EQUAL(((id - NUM_EXACT) % 97) % seven->getNumExecutors(), seven->getExecutorId(id).getId());
+ }
}
EXPECT_EQUAL(97u, seq.getComponentHashSize());
EXPECT_EQUAL(97u, seq.getComponentEffectiveHashSize());
@@ -264,7 +269,7 @@ TEST("require that similar names get perfect distribution with 4 executors") {
EXPECT_EQUAL(3u, four->getExecutorIdFromName("f8").getId());
}
-TEST("require that similar names gets 7/8 unique ids with 8 executors") {
+TEST("require that similar names get perfect distribution with 8 executors") {
auto four = SequencedTaskExecutor::create(sequenced_executor, 8);
EXPECT_EQUAL(0u, four->getExecutorIdFromName("f1").getId());
EXPECT_EQUAL(1u, four->getExecutorIdFromName("f2").getId());
@@ -272,8 +277,8 @@ TEST("require that similar names gets 7/8 unique ids with 8 executors") {
EXPECT_EQUAL(3u, four->getExecutorIdFromName("f4").getId());
EXPECT_EQUAL(4u, four->getExecutorIdFromName("f5").getId());
EXPECT_EQUAL(5u, four->getExecutorIdFromName("f6").getId());
- EXPECT_EQUAL(2u, four->getExecutorIdFromName("f7").getId());
- EXPECT_EQUAL(6u, four->getExecutorIdFromName("f8").getId());
+ EXPECT_EQUAL(6u, four->getExecutorIdFromName("f7").getId());
+ EXPECT_EQUAL(7u, four->getExecutorIdFromName("f8").getId());
}
TEST("Test creation of different types") {
diff --git a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
index f92e1655e7d..954a63978f3 100644
--- a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
+++ b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.cpp
@@ -14,6 +14,8 @@ namespace {
constexpr uint32_t stackSize = 128_Ki;
constexpr uint8_t MAGIC = 255;
+constexpr uint32_t NUM_PERFECT_PER_EXECUTOR = 8;
+constexpr uint16_t INVALID_KEY = 0x8000;
bool
isLazy(const std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>> & executors) {
@@ -25,6 +27,16 @@ isLazy(const std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>> & ex
return true;
}
+ssize_t
+find(uint16_t key, const uint16_t values[], size_t numValues) {
+ for (size_t i(0); i < numValues; i++) {
+ if (key == values[i]) return i;
+ if (INVALID_KEY == values[i]) return -1;
+ }
+ return -1;
+}
+
+
}
std::unique_ptr<ISequencedTaskExecutor>
@@ -35,14 +47,14 @@ SequencedTaskExecutor::create(vespalib::Runnable::init_fun_t func, uint32_t thre
size_t num_strands = std::min(taskLimit, threads*32);
return std::make_unique<AdaptiveSequencedExecutor>(num_strands, threads, kindOfWatermark, taskLimit);
} else {
- auto executors = std::make_unique<std::vector<std::unique_ptr<SyncableThreadExecutor>>>();
- executors->reserve(threads);
+ auto executors = std::vector<std::unique_ptr<SyncableThreadExecutor>>();
+ executors.reserve(threads);
for (uint32_t id = 0; id < threads; ++id) {
if (optimize == OptimizeFor::THROUGHPUT) {
uint32_t watermark = kindOfWatermark == 0 ? taskLimit / 2 : kindOfWatermark;
- executors->push_back(std::make_unique<SingleExecutor>(func, taskLimit, watermark, reactionTime));
+ executors.push_back(std::make_unique<SingleExecutor>(func, taskLimit, watermark, reactionTime));
} else {
- executors->push_back(std::make_unique<BlockingThreadStackExecutor>(1, stackSize, taskLimit, func));
+ executors.push_back(std::make_unique<BlockingThreadStackExecutor>(1, stackSize, taskLimit, func));
}
}
return std::unique_ptr<ISequencedTaskExecutor>(new SequencedTaskExecutor(std::move(executors)));
@@ -54,21 +66,26 @@ SequencedTaskExecutor::~SequencedTaskExecutor()
sync_all();
}
-SequencedTaskExecutor::SequencedTaskExecutor(std::unique_ptr<std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>>> executors)
- : ISequencedTaskExecutor(executors->size()),
+SequencedTaskExecutor::SequencedTaskExecutor(std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>> executors)
+ : ISequencedTaskExecutor(executors.size()),
_executors(std::move(executors)),
- _lazyExecutors(isLazy(*_executors)),
- _component2Id(vespalib::hashtable_base::getModuloStl(getNumExecutors()*8), MAGIC),
+ _lazyExecutors(isLazy(_executors)),
+ _component2IdPerfect(std::make_unique<PerfectKeyT[]>(getNumExecutors()*NUM_PERFECT_PER_EXECUTOR)),
+ _component2IdImperfect(vespalib::hashtable_base::getModuloStl(getNumExecutors()*NUM_PERFECT_PER_EXECUTOR), MAGIC),
_mutex(),
_nextId(0)
{
assert(getNumExecutors() < 256);
+
+ for (size_t i(0); i < getNumExecutors() * NUM_PERFECT_PER_EXECUTOR; i++) {
+ _component2IdPerfect[i] = INVALID_KEY;
+ }
}
void
SequencedTaskExecutor::setTaskLimit(uint32_t taskLimit)
{
- for (const auto &executor : *_executors) {
+ for (const auto &executor : _executors) {
executor->setTaskLimit(taskLimit);
}
}
@@ -76,15 +93,15 @@ SequencedTaskExecutor::setTaskLimit(uint32_t taskLimit)
void
SequencedTaskExecutor::executeTask(ExecutorId id, vespalib::Executor::Task::UP task)
{
- assert(id.getId() < _executors->size());
- auto rejectedTask = (*_executors)[id.getId()]->execute(std::move(task));
+ assert(id.getId() < _executors.size());
+ auto rejectedTask = _executors[id.getId()]->execute(std::move(task));
assert(!rejectedTask);
}
void
SequencedTaskExecutor::sync_all() {
wakeup();
- for (auto &executor : *_executors) {
+ for (auto &executor : _executors) {
executor->sync();
}
}
@@ -92,7 +109,7 @@ SequencedTaskExecutor::sync_all() {
void
SequencedTaskExecutor::wakeup() {
if (_lazyExecutors) {
- for (auto &executor : *_executors) {
+ for (auto &executor : _executors) {
//Enforce parallel wakeup of napping executors.
executor->wakeup();
}
@@ -103,7 +120,7 @@ ExecutorStats
SequencedTaskExecutor::getStats()
{
ExecutorStats accumulatedStats;
- for (auto &executor :* _executors) {
+ for (auto &executor : _executors) {
accumulatedStats.aggregate(executor->getStats());
}
return accumulatedStats;
@@ -111,15 +128,41 @@ SequencedTaskExecutor::getStats()
ISequencedTaskExecutor::ExecutorId
SequencedTaskExecutor::getExecutorId(uint64_t componentId) const {
- uint32_t shrunkId = componentId % _component2Id.size();
- uint8_t executorId = _component2Id[shrunkId];
+ auto id = getExecutorIdPerfect(componentId);
+ return id ? id.value() : getExecutorIdImPerfect(componentId);
+}
+
+std::optional<ISequencedTaskExecutor::ExecutorId>
+SequencedTaskExecutor::getExecutorIdPerfect(uint64_t componentId) const {
+ PerfectKeyT key = componentId & 0x7fff;
+ ssize_t pos = find(key, _component2IdPerfect.get(), getNumExecutors() * NUM_PERFECT_PER_EXECUTOR);
+ if (pos < 0) {
+ std::unique_lock guard(_mutex);
+ pos = find(key, _component2IdPerfect.get(), getNumExecutors() * NUM_PERFECT_PER_EXECUTOR);
+ if (pos < 0) {
+ pos = find(INVALID_KEY, _component2IdPerfect.get(), getNumExecutors() * NUM_PERFECT_PER_EXECUTOR);
+ if (pos >= 0) {
+ _component2IdPerfect[pos] = key;
+ } else {
+ // There was a race for the last spots
+ return std::optional<ISequencedTaskExecutor::ExecutorId>();
+ }
+ }
+ }
+ return std::optional<ISequencedTaskExecutor::ExecutorId>(ExecutorId(pos % getNumExecutors()));
+}
+
+ISequencedTaskExecutor::ExecutorId
+SequencedTaskExecutor::getExecutorIdImPerfect(uint64_t componentId) const {
+ uint32_t shrunkId = componentId % _component2IdImperfect.size();
+ uint8_t executorId = _component2IdImperfect[shrunkId];
if (executorId == MAGIC) {
std::lock_guard guard(_mutex);
- if (_component2Id[shrunkId] == MAGIC) {
- _component2Id[shrunkId] = _nextId % getNumExecutors();
+ if (_component2IdImperfect[shrunkId] == MAGIC) {
+ _component2IdImperfect[shrunkId] = _nextId % getNumExecutors();
_nextId++;
}
- executorId = _component2Id[shrunkId];
+ executorId = _component2IdImperfect[shrunkId];
}
return ExecutorId(executorId);
}
@@ -127,10 +170,10 @@ SequencedTaskExecutor::getExecutorId(uint64_t componentId) const {
const vespalib::SyncableThreadExecutor*
SequencedTaskExecutor::first_executor() const
{
- if (_executors->empty()) {
+ if (_executors.empty()) {
return nullptr;
}
- return _executors->front().get();
+ return _executors.front().get();
}
} // namespace search
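
getExecutorId() now tries a small fixed table of "perfect" keys first, scanned without taking the lock, and only falls back to the old modulo hash table when that table is full; registration re-checks under the mutex before claiming a free slot. A simplified sketch of that lookup-then-register step, assuming a std::vector where the real code uses a raw array and performing the same unlocked first scan:

    #include <cstdint>
    #include <mutex>
    #include <optional>
    #include <vector>

    class PerfectMap {
        static constexpr uint16_t INVALID_KEY = 0x8000;
        std::vector<uint16_t> _slots;   // slot index % num_executors is the executor id
        uint32_t _num_executors;
        mutable std::mutex _mutex;

        static long find(uint16_t key, const std::vector<uint16_t>& slots) {
            for (size_t i = 0; i < slots.size(); ++i) {
                if (slots[i] == key) return static_cast<long>(i);
                if (slots[i] == INVALID_KEY) return -1;   // keys are packed from the front
            }
            return -1;
        }
    public:
        PerfectMap(uint32_t num_executors, uint32_t slots_per_executor)
            : _slots(num_executors * slots_per_executor, INVALID_KEY),
              _num_executors(num_executors) {}

        std::optional<uint32_t> executor_id(uint64_t component_id) {
            uint16_t key = component_id & 0x7fff;
            long pos = find(key, _slots);
            if (pos < 0) {
                std::lock_guard guard(_mutex);
                pos = find(key, _slots);                  // re-check under the lock
                if (pos < 0) {
                    pos = find(INVALID_KEY, _slots);      // first free slot, if any
                    if (pos < 0) return std::nullopt;     // table full: caller falls back
                    _slots[pos] = key;
                }
            }
            return static_cast<uint32_t>(pos) % _num_executors;
        }
    };
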
diff --git a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h
index 245d6d29780..06e7fa65ac2 100644
--- a/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h
+++ b/staging_vespalib/src/vespa/vespalib/util/sequencedtaskexecutor.h
@@ -38,18 +38,22 @@ public:
/**
* For testing only
*/
- uint32_t getComponentHashSize() const { return _component2Id.size(); }
+ uint32_t getComponentHashSize() const { return _component2IdImperfect.size(); }
uint32_t getComponentEffectiveHashSize() const { return _nextId; }
const vespalib::SyncableThreadExecutor* first_executor() const;
private:
- explicit SequencedTaskExecutor(std::unique_ptr<std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>>> executor);
-
- std::unique_ptr<std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>>> _executors;
- const bool _lazyExecutors;
- mutable std::vector<uint8_t> _component2Id;
- mutable std::mutex _mutex;
- mutable uint32_t _nextId;
+ explicit SequencedTaskExecutor(std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>> executor);
+ std::optional<ExecutorId> getExecutorIdPerfect(uint64_t componentId) const;
+ ExecutorId getExecutorIdImPerfect(uint64_t componentId) const;
+
+ std::vector<std::unique_ptr<vespalib::SyncableThreadExecutor>> _executors;
+ using PerfectKeyT = uint16_t;
+ const bool _lazyExecutors;
+ mutable std::unique_ptr<PerfectKeyT[]> _component2IdPerfect;
+ mutable std::vector<uint8_t> _component2IdImperfect;
+ mutable std::mutex _mutex;
+ mutable uint32_t _nextId;
};
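
For reference, the executor-id scheme above is two-level: getExecutorIdPerfect() probes a small fixed-size array of 16-bit keys without taking the lock, re-checks and claims a free INVALID_KEY slot under the mutex, and signals exhaustion with an empty optional so that getExecutorId() can fall back to the modulo-hashed imperfect table. Below is a minimal standalone sketch of that lookup pattern; the class and member names are illustrative and not part of the patch.

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <optional>
#include <vector>

class PerfectKeyTable {
public:
    static constexpr uint16_t INVALID_KEY = 0xffff;
    explicit PerfectKeyTable(size_t slots) : _keys(slots, INVALID_KEY) {}

    // Look up key; claim a free slot under the lock if it is not present yet.
    // Returns std::nullopt when every slot is taken, i.e. the caller must fall
    // back to the imperfect (modulo-hashed) path.
    std::optional<size_t> lookup_or_claim(uint16_t key) {
        if (auto pos = find(key)) return pos;        // optimistic unlocked probe
        std::lock_guard<std::mutex> guard(_mutex);
        if (auto pos = find(key)) return pos;        // re-check under the lock
        if (auto free_pos = find(INVALID_KEY)) {     // claim the first free slot
            _keys[*free_pos] = key;
            return free_pos;
        }
        return std::nullopt;                         // lost the race for the last slots
    }

private:
    std::optional<size_t> find(uint16_t key) const {
        for (size_t i = 0; i < _keys.size(); ++i) {
            if (_keys[i] == key) return i;
        }
        return std::nullopt;
    }

    std::vector<uint16_t> _keys;
    std::mutex _mutex;
};

In the patch itself the returned slot is additionally folded back onto a worker via pos % getNumExecutors(), which is how several perfect keys per executor (NUM_PERFECT_PER_EXECUTOR) map to the same thread.
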
diff --git a/standalone-container/src/main/sh/standalone-container.sh b/standalone-container/src/main/sh/standalone-container.sh
index 9e888bdfea2..9edea41ac8b 100755
--- a/standalone-container/src/main/sh/standalone-container.sh
+++ b/standalone-container/src/main/sh/standalone-container.sh
@@ -169,6 +169,7 @@ StartCommand() {
--add-opens=java.base/java.nio=ALL-UNNAMED \
--add-opens=java.base/jdk.internal.loader=ALL-UNNAMED \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
+ --add-opens=java.base/sun.security.util=ALL-UNNAMED \
-Djava.library.path="$VESPA_HOME/lib64" \
-Djava.awt.headless=true \
-Dsun.rmi.dgc.client.gcInterval=3600000 \
diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt
index 7348cfc328b..bee7650aebd 100644
--- a/storage/src/tests/distributor/CMakeLists.txt
+++ b/storage/src/tests/distributor/CMakeLists.txt
@@ -25,6 +25,7 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST
mergelimitertest.cpp
mergeoperationtest.cpp
multi_thread_stripe_access_guard_test.cpp
+ node_supported_features_repo_test.cpp
nodeinfotest.cpp
nodemaintenancestatstrackertest.cpp
operation_sequencer_test.cpp
diff --git a/storage/src/tests/distributor/blockingoperationstartertest.cpp b/storage/src/tests/distributor/blockingoperationstartertest.cpp
index 15aada37c9b..7c97c962a97 100644
--- a/storage/src/tests/distributor/blockingoperationstartertest.cpp
+++ b/storage/src/tests/distributor/blockingoperationstartertest.cpp
@@ -100,6 +100,9 @@ struct FakeDistributorStripeOperationContext : public DistributorStripeOperation
const BucketGcTimeCalculator::BucketIdHasher& bucket_id_hasher() const override {
abort();
}
+ const NodeSupportedFeaturesRepo& node_supported_features_repo() const noexcept override {
+ abort();
+ }
};
struct BlockingOperationStarterTest : Test {
diff --git a/storage/src/tests/distributor/distributor_stripe_test.cpp b/storage/src/tests/distributor/distributor_stripe_test.cpp
index 902dd6454f1..8c2ebc983fa 100644
--- a/storage/src/tests/distributor/distributor_stripe_test.cpp
+++ b/storage/src/tests/distributor/distributor_stripe_test.cpp
@@ -185,6 +185,12 @@ struct DistributorStripeTest : Test, DistributorStripeTestUtil {
configure_stripe(builder);
}
+ void configure_use_unordered_merge_chaining(bool use_unordered) {
+ ConfigBuilder builder;
+ builder.useUnorderedMergeChaining = use_unordered;
+ configure_stripe(builder);
+ }
+
bool scheduler_has_implicitly_clear_priority_on_schedule_set() const noexcept {
return _stripe->_scheduler->implicitly_clear_priority_on_schedule();
}
@@ -982,4 +988,15 @@ TEST_F(DistributorStripeTest, closing_aborts_gets_started_outside_stripe_thread)
EXPECT_EQ(api::ReturnCode::ABORTED, _sender.reply(0)->getResult().getResult());
}
+TEST_F(DistributorStripeTest, use_unordered_merge_chaining_config_is_propagated_to_internal_config)
+{
+ setup_stripe(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
+
+ configure_use_unordered_merge_chaining(true);
+ EXPECT_TRUE(getConfig().use_unordered_merge_chaining());
+
+ configure_use_unordered_merge_chaining(false);
+ EXPECT_FALSE(getConfig().use_unordered_merge_chaining());
+}
+
}
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.cpp b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
index c5c51e64e68..b96b2dda1cb 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.cpp
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
@@ -9,8 +9,10 @@
#include <vespa/storage/distributor/distributor_stripe_component.h>
#include <vespa/storage/distributor/distributormetricsset.h>
#include <vespa/storage/distributor/ideal_state_total_metrics.h>
+#include <vespa/storage/distributor/node_supported_features_repo.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/text/stringtokenizer.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
using document::test::makeBucketSpace;
using document::test::makeDocumentBucket;
@@ -526,6 +528,13 @@ DistributorStripeTestUtil::db_memory_sample_interval() const noexcept {
return _stripe->db_memory_sample_interval();
}
+void
+DistributorStripeTestUtil::set_node_supported_features(uint16_t node, const NodeSupportedFeatures& features) {
+ vespalib::hash_map<uint16_t, NodeSupportedFeatures> new_features;
+ new_features[node] = features;
+ _stripe->update_node_supported_features_repo(_stripe->node_supported_features_repo().make_union_of(new_features));
+}
+
const lib::Distribution&
DistributorStripeTestUtil::getDistribution() const {
return getBucketSpaceRepo().get(makeBucketSpace()).getDistribution();
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.h b/storage/src/tests/distributor/distributor_stripe_test_util.h
index b1e90821e3b..3226c16aba3 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.h
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.h
@@ -26,6 +26,7 @@ class DocumentSelectionParser;
class ExternalOperationHandler;
class IdealStateManager;
class IdealStateMetricSet;
+class NodeSupportedFeatures;
class Operation;
class StripeBucketDBUpdater;
@@ -150,6 +151,7 @@ public:
[[nodiscard]] const PendingMessageTracker& pending_message_tracker() const noexcept;
[[nodiscard]] PendingMessageTracker& pending_message_tracker() noexcept;
[[nodiscard]] std::chrono::steady_clock::duration db_memory_sample_interval() const noexcept;
+ void set_node_supported_features(uint16_t node, const NodeSupportedFeatures& features);
const lib::Distribution& getDistribution() const;
diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp
index 65ee5254193..54bd06c98e0 100644
--- a/storage/src/tests/distributor/mergeoperationtest.cpp
+++ b/storage/src/tests/distributor/mergeoperationtest.cpp
@@ -1,5 +1,5 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <tests/common/dummystoragelink.h>
+
#include <tests/distributor/distributor_stripe_test_util.h>
#include <vespa/document/test/make_bucket_space.h>
#include <vespa/document/test/make_document_bucket.h>
@@ -12,6 +12,7 @@
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/text/stringtokenizer.h>
+#include <charconv>
using document::test::makeDocumentBucket;
using document::test::makeBucketSpace;
@@ -37,6 +38,7 @@ struct MergeOperationTest : Test, DistributorStripeTestUtil {
}
std::shared_ptr<MergeOperation> setup_minimal_merge_op();
+ std::shared_ptr<MergeOperation> setup_simple_merge_op(const std::vector<uint16_t>& nodes);
std::shared_ptr<MergeOperation> setup_simple_merge_op();
void assert_simple_merge_bucket_command();
void assert_simple_delete_bucket_command();
@@ -47,13 +49,13 @@ std::shared_ptr<MergeOperation>
MergeOperationTest::setup_minimal_merge_op()
{
document::BucketId bucket_id(16, 1);
- auto op = std::make_shared<MergeOperation>(BucketAndNodes(makeDocumentBucket(bucket_id), toVector<uint16_t>(0, 1, 2)));
+ auto op = std::make_shared<MergeOperation>(BucketAndNodes(makeDocumentBucket(bucket_id), {0, 1, 2}));
op->setIdealStateManager(&getIdealStateManager());
return op;
}
std::shared_ptr<MergeOperation>
-MergeOperationTest::setup_simple_merge_op()
+MergeOperationTest::setup_simple_merge_op(const std::vector<uint16_t>& nodes)
{
getClock().setAbsoluteTimeInSeconds(10);
@@ -64,12 +66,18 @@ MergeOperationTest::setup_simple_merge_op()
enable_cluster_state("distributor:1 storage:3");
- auto op = std::make_shared<MergeOperation>(BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)), toVector<uint16_t>(0, 1, 2)));
+ auto op = std::make_shared<MergeOperation>(BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)), nodes));
op->setIdealStateManager(&getIdealStateManager());
op->start(_sender, framework::MilliSecTime(0));
return op;
}
+std::shared_ptr<MergeOperation>
+MergeOperationTest::setup_simple_merge_op()
+{
+ return setup_simple_merge_op({0, 1, 2});
+}
+
void
MergeOperationTest::assert_simple_merge_bucket_command()
{
@@ -150,8 +158,10 @@ std::string getNodeList(std::string state, uint32_t redundancy, std::string exis
num.erase(pos);
trusted = true;
}
- bucketDB[i] = BucketCopy(0, atoi(num.c_str()),
- api::BucketInfo(1, 2, 3));
+ uint16_t node;
+ [[maybe_unused]] auto [ptr, ec] = std::from_chars(num.data(), num.data() + num.size(), node);
+ assert(ec == std::errc{});
+ bucketDB[i] = BucketCopy(0, node, api::BucketInfo(1, 2, 3));
bucketDB[i].setTrusted(trusted);
}
std::vector<MergeMetaData> nodes(st.size());
@@ -553,4 +563,44 @@ TEST_F(MergeOperationTest, on_throttled_updates_metrics)
EXPECT_EQ(1, metrics->throttled.getValue());
}
+TEST_F(MergeOperationTest, unordered_merges_only_sent_iff_config_enabled_and_all_nodes_support_feature) {
+ setup_stripe(Redundancy(4), NodeCount(4), "distributor:1 storage:4");
+ NodeSupportedFeatures with_unordered;
+ with_unordered.unordered_merge_chaining = true;
+
+ set_node_supported_features(1, with_unordered);
+ set_node_supported_features(2, with_unordered);
+
+ auto config = make_config();
+ config->set_use_unordered_merge_chaining(true);
+ configure_stripe(std::move(config));
+
+ // Only nodes {1, 2} support unordered merging; merges should be ordered (sent to lowest index node 1).
+ setup_simple_merge_op({1, 2, 3}); // Note: these will be re-ordered in ideal state order internally
+ ASSERT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000000, "
+ "cluster state version: 0, nodes: [2, 1, 3], chain: [], "
+ "reasons to start: ) => 1",
+ _sender.getLastCommand(true));
+
+ // All involved nodes support unordered merging; merges should be unordered (sent to ideal node 2)
+ setup_simple_merge_op({1, 2});
+ ASSERT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000001, "
+ "cluster state version: 0, nodes: [2, 1], chain: [] (unordered forwarding), "
+ "reasons to start: ) => 2",
+ _sender.getLastCommand(true));
+
+ _sender.clear();
+
+ config = make_config();
+ config->set_use_unordered_merge_chaining(false);
+ configure_stripe(std::move(config));
+
+ // If config is not enabled, should send ordered even if nodes support the feature.
+ setup_simple_merge_op({2, 1});
+ ASSERT_EQ("MergeBucketCommand(BucketId(0x4000000000000001), to time 10000002, "
+ "cluster state version: 0, nodes: [2, 1], chain: [], "
+ "reasons to start: ) => 1",
+ _sender.getLastCommand(true));
+}
+
} // storage::distributor
diff --git a/storage/src/tests/distributor/mock_tickable_stripe.h b/storage/src/tests/distributor/mock_tickable_stripe.h
index 38fc0c599a2..ec2f978c029 100644
--- a/storage/src/tests/distributor/mock_tickable_stripe.h
+++ b/storage/src/tests/distributor/mock_tickable_stripe.h
@@ -33,6 +33,10 @@ struct MockTickableStripe : TickableStripe {
void update_read_snapshot_after_activation(const lib::ClusterStateBundle&) override { abort(); }
void clear_read_only_bucket_repo_databases() override { abort(); }
+ void update_node_supported_features_repo(std::shared_ptr<const NodeSupportedFeaturesRepo>) override {
+ abort();
+ }
+
void report_bucket_db_status(document::BucketSpace, std::ostream&) const override { abort(); }
StripeAccessGuard::PendingOperationStats pending_operation_stats() const override { abort(); }
void report_single_bucket_requests(vespalib::xml::XmlOutputStream&) const override { abort(); }
diff --git a/storage/src/tests/distributor/node_supported_features_repo_test.cpp b/storage/src/tests/distributor/node_supported_features_repo_test.cpp
new file mode 100644
index 00000000000..990e0fc50a3
--- /dev/null
+++ b/storage/src/tests/distributor/node_supported_features_repo_test.cpp
@@ -0,0 +1,52 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/storage/distributor/node_supported_features_repo.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace ::testing;
+
+namespace storage::distributor {
+
+struct NodeSupportedFeaturesRepoTest : Test {
+ using FeatureMap = vespalib::hash_map<uint16_t, NodeSupportedFeatures>;
+ NodeSupportedFeaturesRepo _repo;
+
+ static NodeSupportedFeatures set_features() noexcept {
+ NodeSupportedFeatures f;
+ f.unordered_merge_chaining = true;
+ return f;
+ }
+
+ static NodeSupportedFeatures unset_features() noexcept {
+ return {};
+ }
+};
+
+TEST_F(NodeSupportedFeaturesRepoTest, feature_set_is_empty_by_default) {
+ EXPECT_EQ(_repo.node_supported_features(0), unset_features());
+ EXPECT_EQ(_repo.node_supported_features(12345), unset_features());
+}
+
+TEST_F(NodeSupportedFeaturesRepoTest, make_union_of_can_add_new_feature_mapping) {
+ FeatureMap fm;
+ fm[1] = set_features();
+ fm[60] = set_features();
+ auto new_repo = _repo.make_union_of(fm);
+ EXPECT_EQ(new_repo->node_supported_features(0), unset_features());
+ EXPECT_EQ(new_repo->node_supported_features(1), set_features());
+ EXPECT_EQ(new_repo->node_supported_features(60), set_features());
+}
+
+TEST_F(NodeSupportedFeaturesRepoTest, make_union_of_updates_existing_feature_mappings) {
+ FeatureMap fm;
+ fm[1] = set_features();
+ fm[60] = set_features();
+ auto new_repo = _repo.make_union_of(fm);
+ fm[1] = unset_features();
+ new_repo = new_repo->make_union_of(fm);
+ EXPECT_EQ(new_repo->node_supported_features(1), unset_features());
+ EXPECT_EQ(new_repo->node_supported_features(60), set_features());
+}
+
+}
diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
index fe8a607c9ae..3ed5e9f4a8d 100644
--- a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
+++ b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
@@ -4,6 +4,7 @@
#include <vespa/storage/distributor/top_level_bucket_db_updater.h>
#include <vespa/storage/distributor/bucket_space_distribution_context.h>
#include <vespa/storage/distributor/distributormetricsset.h>
+#include <vespa/storage/distributor/node_supported_features_repo.h>
#include <vespa/storage/distributor/pending_bucket_space_db_transition.h>
#include <vespa/storage/distributor/outdated_nodes_map.h>
#include <vespa/storage/storageutil/distributorstatecache.h>
@@ -119,6 +120,21 @@ public:
invalid_bucket_count));
}
+ void fake_bucket_reply(const lib::ClusterState &state,
+ const api::StorageCommand &cmd,
+ uint32_t bucket_count,
+ const std::function<void(api::RequestBucketInfoReply&)>& reply_decorator)
+ {
+ ASSERT_EQ(cmd.getType(), MessageType::REQUESTBUCKETINFO);
+ const api::StorageMessageAddress& address(*cmd.getAddress());
+ auto reply = make_fake_bucket_reply(state,
+ dynamic_cast<const RequestBucketInfoCommand &>(cmd),
+ address.getIndex(),
+ bucket_count, 0);
+ reply_decorator(*reply);
+ bucket_db_updater().onRequestBucketInfoReply(reply);
+ }
+
void send_fake_reply_for_single_bucket_request(
const api::RequestBucketInfoCommand& rbi)
{
@@ -232,7 +248,7 @@ public:
}
}
- api::StorageMessageAddress storage_address(uint16_t node) {
+ static api::StorageMessageAddress storage_address(uint16_t node) {
static vespalib::string _storage("storage");
return api::StorageMessageAddress(&_storage, lib::NodeType::STORAGE, node);
}
@@ -1299,7 +1315,7 @@ TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
for (uint32_t i = 0; i < 3; ++i) {
- nodes.push_back(api::MergeBucketCommand::Node(i));
+ nodes.emplace_back(i);
}
api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
@@ -2662,4 +2678,37 @@ TEST_F(BucketDBUpdaterSnapshotTest, snapshot_is_unroutable_if_stale_reads_disabl
EXPECT_FALSE(def_rs.is_routable());
}
+TEST_F(BucketDBUpdaterSnapshotTest, node_feature_sets_are_aggregated_from_nodes_and_propagated_to_stripes) {
+ lib::ClusterState state("distributor:1 storage:3");
+ set_cluster_state(state);
+ uint32_t expected_msgs = message_count(3), dummy_buckets_to_return = 1;
+
+ // Known feature sets are initially empty.
+ auto stripes = distributor_stripes();
+ for (auto* s : stripes) {
+ for (uint16_t i : {0, 1, 2}) {
+ EXPECT_FALSE(s->node_supported_features_repo().node_supported_features(i).unordered_merge_chaining);
+ }
+ }
+
+ ASSERT_EQ(expected_msgs, _sender.commands().size());
+ for (uint32_t i = 0; i < _sender.commands().size(); i++) {
+ ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(state, *_sender.command(i),
+ dummy_buckets_to_return, [i](auto& reply) noexcept {
+ // Pretend nodes 1 and 2 are on a shiny version with unordered merge chaining supported.
+ // Node 0 does not support the fanciness.
+ if (i > 0) {
+ reply.supported_node_features().unordered_merge_chaining = true;
+ }
+ }));
+ }
+
+ // Node features should be propagated to all stripes
+ for (auto* s : stripes) {
+ EXPECT_FALSE(s->node_supported_features_repo().node_supported_features(0).unordered_merge_chaining);
+ EXPECT_TRUE(s->node_supported_features_repo().node_supported_features(1).unordered_merge_chaining);
+ EXPECT_TRUE(s->node_supported_features_repo().node_supported_features(2).unordered_merge_chaining);
+ }
+}
+
}
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.cpp b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
index 636a09d1f6e..2a61141865a 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.cpp
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
@@ -115,13 +115,13 @@ TopLevelDistributorTestUtil::handle_top_level_message(const std::shared_ptr<api:
void
TopLevelDistributorTestUtil::close()
{
- _component.reset(0);
- if (_distributor.get()) {
+ _component.reset();
+ if (_distributor) {
_stripe_pool->stop_and_join(); // Must be tagged as stopped prior to onClose
_distributor->onClose();
}
_sender.clear();
- _node.reset(0);
+ _node.reset();
_config = getStandardConfig(false);
}
diff --git a/storage/src/tests/storageserver/mergethrottlertest.cpp b/storage/src/tests/storageserver/mergethrottlertest.cpp
index e8f8e425af4..0f844ab6b4f 100644
--- a/storage/src/tests/storageserver/mergethrottlertest.cpp
+++ b/storage/src/tests/storageserver/mergethrottlertest.cpp
@@ -52,15 +52,18 @@ struct MergeBuilder {
~MergeBuilder();
MergeBuilder& nodes(uint16_t n0) {
+ _nodes.clear();
_nodes.push_back(n0);
return *this;
}
MergeBuilder& nodes(uint16_t n0, uint16_t n1) {
+ _nodes.clear();
_nodes.push_back(n0);
_nodes.push_back(n1);
return *this;
}
MergeBuilder& nodes(uint16_t n0, uint16_t n1, uint16_t n2) {
+ _nodes.clear();
_nodes.push_back(n0);
_nodes.push_back(n1);
_nodes.push_back(n2);
@@ -146,7 +149,8 @@ struct MergeThrottlerTest : Test {
api::ReturnCode::Result expectedResultCode);
void fill_throttler_queue_with_n_commands(uint16_t throttler_index, size_t queued_count);
- void receive_chained_merge_with_full_queue(bool disable_queue_limits);
+ void fill_up_throttler_active_window_and_queue(uint16_t node_idx);
+ void receive_chained_merge_with_full_queue(bool disable_queue_limits, bool unordered_fwd = false);
std::shared_ptr<api::MergeBucketCommand> peek_throttler_queue_top(size_t throttler_idx) {
auto& queue = _throttlers[throttler_idx]->getMergeQueue();
@@ -1197,7 +1201,7 @@ TEST_F(MergeThrottlerTest, busy_returned_on_full_queue_for_merges_sent_from_dist
}
void
-MergeThrottlerTest::receive_chained_merge_with_full_queue(bool disable_queue_limits)
+MergeThrottlerTest::receive_chained_merge_with_full_queue(bool disable_queue_limits, bool unordered_fwd)
{
// Note: uses node with index 1 to not be the first node in chain
_throttlers[1]->set_disable_queue_limits_for_chained_merges(disable_queue_limits);
@@ -1218,10 +1222,15 @@ MergeThrottlerTest::receive_chained_merge_with_full_queue(bool disable_queue_lim
// Send down another merge with non-empty chain. It should _not_ be busy bounced
// (if limits disabled) as it has already been accepted into another node's merge window.
{
- std::vector<MergeBucketCommand::Node> nodes({{0}, {1}, {2}});
+ std::vector<MergeBucketCommand::Node> nodes({{2}, {1}, {0}});
auto cmd = std::make_shared<MergeBucketCommand>(
makeDocumentBucket(BucketId(32, 0xf000baaa)), nodes, 1234, 1);
- cmd->setChain(std::vector<uint16_t>({0})); // Forwarded from node 0
+ if (!unordered_fwd) {
+ cmd->setChain(std::vector<uint16_t>({0})); // Forwarded from node 0
+ } else {
+ cmd->setChain(std::vector<uint16_t>({2})); // Forwarded from node 2, i.e. _not_ the lowest index
+ }
+ cmd->set_use_unordered_forwarding(unordered_fwd);
_topLinks[1]->sendDown(cmd);
}
}
@@ -1249,11 +1258,34 @@ TEST_F(MergeThrottlerTest, forwarded_merge_has_higher_pri_when_chain_limits_disa
EXPECT_FALSE(highest_pri_merge->getChain().empty()); // Should be the forwarded merge
}
+TEST_F(MergeThrottlerTest, forwarded_unordered_merge_is_directly_accepted_into_active_window) {
+ // Unordered forwarding is orthogonal to disabled chain limits config, so we implicitly test that too.
+ ASSERT_NO_FATAL_FAILURE(receive_chained_merge_with_full_queue(true, true));
+
+ // Unordered merge is immediately forwarded to the next node
+ _topLinks[1]->waitForMessage(MessageType::MERGEBUCKET, _messageWaitTime);
+ auto fwd = std::dynamic_pointer_cast<api::MergeBucketCommand>(
+ _topLinks[1]->getAndRemoveMessage(MessageType::MERGEBUCKET));
+ ASSERT_TRUE(fwd);
+ EXPECT_TRUE(fwd->use_unordered_forwarding());
+ EXPECT_EQ(fwd->getChain(), std::vector<uint16_t>({2, 1}));
+}
+
+TEST_F(MergeThrottlerTest, non_forwarded_unordered_merge_is_enqueued_if_active_window_full)
+{
+ fill_throttler_queue_with_n_commands(1, 0); // Fill active window entirely
+ {
+ std::vector<MergeBucketCommand::Node> nodes({{1}, {2}, {0}});
+ auto cmd = std::make_shared<MergeBucketCommand>(
+ makeDocumentBucket(BucketId(32, 0xf000baaa)), nodes, 1234, 1);
+ cmd->set_use_unordered_forwarding(true);
+ _topLinks[1]->sendDown(cmd);
+ }
+ waitUntilMergeQueueIs(*_throttlers[1], 1, _messageWaitTime); // Should be in queue, not active window
+}
+
TEST_F(MergeThrottlerTest, broken_cycle) {
- std::vector<MergeBucketCommand::Node> nodes;
- nodes.push_back(1);
- nodes.push_back(0);
- nodes.push_back(2);
+ std::vector<MergeBucketCommand::Node> nodes({1, 0, 2});
{
std::vector<uint16_t> chain;
chain.push_back(0);
@@ -1268,10 +1300,7 @@ TEST_F(MergeThrottlerTest, broken_cycle) {
// Send cycled merge which will be executed
{
- std::vector<uint16_t> chain;
- chain.push_back(0);
- chain.push_back(1);
- chain.push_back(2);
+ std::vector<uint16_t> chain({0, 1, 2});
auto cmd = std::make_shared<MergeBucketCommand>(
makeDocumentBucket(BucketId(32, 0xfeef00)), nodes, 1234, 1, chain);
_topLinks[1]->sendDown(cmd);
@@ -1425,9 +1454,10 @@ TEST_F(MergeThrottlerTest, source_only_merges_are_not_affected_by_backpressure)
void MergeThrottlerTest::fill_throttler_queue_with_n_commands(uint16_t throttler_index, size_t queued_count) {
size_t max_pending = _throttlers[throttler_index]->getThrottlePolicy().getMaxPendingCount();
for (size_t i = 0; i < max_pending + queued_count; ++i) {
- _topLinks[throttler_index]->sendDown(MergeBuilder(document::BucketId(16, i)).create());
+ _topLinks[throttler_index]->sendDown(MergeBuilder(document::BucketId(16, i))
+ .nodes(throttler_index, throttler_index + 1)
+ .create());
}
-
// Wait till we have max_pending merge forwards and queued_count enqueued.
_topLinks[throttler_index]->waitForMessages(max_pending, _messageWaitTime);
waitUntilMergeQueueIs(*_throttlers[throttler_index], queued_count, _messageWaitTime);
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.cpp b/storage/src/vespa/storage/config/distributorconfiguration.cpp
index a23d00ee6a3..8a40899165f 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.cpp
+++ b/storage/src/vespa/storage/config/distributorconfiguration.cpp
@@ -50,6 +50,7 @@ DistributorConfiguration::DistributorConfiguration(StorageComponent& component)
_prioritize_global_bucket_merges(true),
_enable_revert(true),
_implicitly_clear_priority_on_schedule(false),
+ _use_unordered_merge_chaining(false),
_minimumReplicaCountingMode(ReplicaCountingMode::TRUSTED)
{
}
@@ -171,6 +172,7 @@ DistributorConfiguration::configure(const vespa::config::content::core::StorDist
_max_activation_inhibited_out_of_sync_groups = config.maxActivationInhibitedOutOfSyncGroups;
_enable_revert = config.enableRevert;
_implicitly_clear_priority_on_schedule = config.implicitlyClearBucketPriorityOnSchedule;
+ _use_unordered_merge_chaining = config.useUnorderedMergeChaining;
_minimumReplicaCountingMode = config.minimumReplicaCountingMode;
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.h b/storage/src/vespa/storage/config/distributorconfiguration.h
index 7b4e082d1ed..ea1aca17116 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.h
+++ b/storage/src/vespa/storage/config/distributorconfiguration.h
@@ -267,6 +267,12 @@ public:
[[nodiscard]] bool implicitly_clear_priority_on_schedule() const noexcept {
return _implicitly_clear_priority_on_schedule;
}
+ void set_use_unordered_merge_chaining(bool unordered) noexcept {
+ _use_unordered_merge_chaining = unordered;
+ }
+ [[nodiscard]] bool use_unordered_merge_chaining() const noexcept {
+ return _use_unordered_merge_chaining;
+ }
uint32_t num_distributor_stripes() const noexcept { return _num_distributor_stripes; }
@@ -324,6 +330,7 @@ private:
bool _prioritize_global_bucket_merges;
bool _enable_revert;
bool _implicitly_clear_priority_on_schedule;
+ bool _use_unordered_merge_chaining;
DistrConfig::MinimumReplicaCountingMode _minimumReplicaCountingMode;
diff --git a/storage/src/vespa/storage/config/stor-distributormanager.def b/storage/src/vespa/storage/config/stor-distributormanager.def
index 8a9fdf74802..8021075faa3 100644
--- a/storage/src/vespa/storage/config/stor-distributormanager.def
+++ b/storage/src/vespa/storage/config/stor-distributormanager.def
@@ -286,3 +286,10 @@ num_distributor_stripes int default=0 restart
## bucket due to being blocked by concurrent operations. This avoids potential head-of-line
## blocking of later buckets in the priority database.
implicitly_clear_bucket_priority_on_schedule bool default=false
+
+## Enables sending merges that are forwarded between content nodes in ideal state node key
+## order, instead of strictly increasing node key order (which is the default).
+## Even if this config is set to true, unordered merges will only be sent if _all_ nodes
+## involved in a given merge have previously reported (as part of bucket info fetching)
+## that they support the unordered merge feature.
+use_unordered_merge_chaining bool default=false
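
Put differently, the rule documented above is a single predicate on the distributor: the flag must be enabled and every node taking part in the merge must have advertised support. A condensed, hypothetical restatement of that rule follows (the authoritative check is MergeOperation::all_involved_nodes_support_unordered_merge_chaining() further down in this patch):

#include <cstdint>
#include <functional>
#include <vector>

struct Features { bool unordered_merge_chaining = false; };

// Illustrative only; mirrors the rule described in the config comment above.
bool may_send_unordered_merge(bool use_unordered_merge_chaining_config,
                              const std::vector<uint16_t>& merge_nodes,
                              const std::function<Features(uint16_t)>& features_for_node) {
    if (!use_unordered_merge_chaining_config) {
        return false; // config off: keep strictly increasing node key order
    }
    for (uint16_t node : merge_nodes) {
        if (!features_for_node(node).unordered_merge_chaining) {
            return false; // a single unsupporting node forces ordered chaining
        }
    }
    return true;
}
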
diff --git a/storage/src/vespa/storage/distributor/CMakeLists.txt b/storage/src/vespa/storage/distributor/CMakeLists.txt
index 52171406ebf..470bfb69abb 100644
--- a/storage/src/vespa/storage/distributor/CMakeLists.txt
+++ b/storage/src/vespa/storage/distributor/CMakeLists.txt
@@ -32,6 +32,7 @@ vespa_add_library(storage_distributor
messagetracker.cpp
min_replica_provider.cpp
multi_threaded_stripe_access_guard.cpp
+ node_supported_features_repo.cpp
nodeinfo.cpp
operation_routing_snapshot.cpp
operation_sequencer.cpp
diff --git a/storage/src/vespa/storage/distributor/distributor_operation_context.h b/storage/src/vespa/storage/distributor/distributor_operation_context.h
index 934c5e364d8..bceb4ed1377 100644
--- a/storage/src/vespa/storage/distributor/distributor_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_operation_context.h
@@ -17,7 +17,7 @@ class DistributorBucketSpaceRepo;
*/
class DistributorOperationContext {
public:
- virtual ~DistributorOperationContext() {}
+ virtual ~DistributorOperationContext() = default;
virtual api::Timestamp generate_unique_timestamp() = 0;
virtual const BucketSpaceStateMap& bucket_space_states() const noexcept = 0;
virtual BucketSpaceStateMap& bucket_space_states() noexcept = 0;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index 9f565686216..50c70306d92 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -6,6 +6,7 @@
#include "distributor_stripe.h"
#include "distributormetricsset.h"
#include "idealstatemetricsset.h"
+#include "node_supported_features_repo.h"
#include "operation_sequencer.h"
#include "ownership_transfer_safe_time_point_calculator.h"
#include "storage_node_up_states.h"
@@ -68,6 +69,7 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_recoveryTimeStarted(_component.getClock()),
_tickResult(framework::ThreadWaitInfo::NO_MORE_CRITICAL_WORK_KNOWN),
_bucketIdHasher(std::make_unique<BucketGcTimeCalculator::BucketIdIdentityHasher>()),
+ _node_supported_features_repo(std::make_shared<const NodeSupportedFeaturesRepo>()),
_metricLock(),
_maintenanceStats(),
_bucketSpacesStats(),
@@ -872,6 +874,12 @@ DistributorStripe::clear_read_only_bucket_repo_databases()
}
void
+DistributorStripe::update_node_supported_features_repo(std::shared_ptr<const NodeSupportedFeaturesRepo> features_repo)
+{
+ _node_supported_features_repo = std::move(features_repo);
+}
+
+void
DistributorStripe::report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const
{
ideal_state_manager().dump_bucket_space_db_status(bucket_space, out);
@@ -889,4 +897,10 @@ DistributorStripe::report_delayed_single_bucket_requests(vespalib::xml::XmlOutpu
bucket_db_updater().report_delayed_single_bucket_requests(xos);
}
+const NodeSupportedFeaturesRepo&
+DistributorStripe::node_supported_features_repo() const noexcept
+{
+ return *_node_supported_features_repo;
+}
+
}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index 5ba682d46e3..ce6a2071efd 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -160,6 +160,8 @@ public:
return *_bucketIdHasher;
}
+ const NodeSupportedFeaturesRepo& node_supported_features_repo() const noexcept override;
+
StripeBucketDBUpdater& bucket_db_updater() { return _bucketDBUpdater; }
const StripeBucketDBUpdater& bucket_db_updater() const { return _bucketDBUpdater; }
IdealStateManager& ideal_state_manager() { return _idealStateManager; }
@@ -283,6 +285,7 @@ private:
void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) override;
void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) override;
void clear_read_only_bucket_repo_databases() override;
+ void update_node_supported_features_repo(std::shared_ptr<const NodeSupportedFeaturesRepo> features_repo) override;
void report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const override;
void report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const override;
void report_delayed_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const override;
@@ -338,6 +341,7 @@ private:
framework::ThreadWaitInfo _tickResult;
BucketDBMetricUpdater _bucketDBMetricUpdater;
std::unique_ptr<BucketGcTimeCalculator::BucketIdHasher> _bucketIdHasher;
+ std::shared_ptr<const NodeSupportedFeaturesRepo> _node_supported_features_repo;
mutable std::mutex _metricLock;
/**
* Maintenance stats for last completed database scan iteration.
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
index f2d2afb8fee..aa0a2289727 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
@@ -277,6 +277,12 @@ DistributorStripeComponent::storage_node_is_up(document::BucketSpace bucket_spac
return ns.getState().oneOf(storage_node_up_states());
}
+const NodeSupportedFeaturesRepo&
+DistributorStripeComponent::node_supported_features_repo() const noexcept
+{
+ return _distributor.node_supported_features_repo();
+}
+
std::unique_ptr<document::select::Node>
DistributorStripeComponent::parse_selection(const vespalib::string& selection) const
{
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.h b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
index b274e21ac7c..5bcf9eec76d 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
@@ -70,7 +70,7 @@ public:
*/
void update_bucket_database(const document::Bucket& bucket,
const BucketCopy& changed_node,
- uint32_t update_flags = 0) override {
+ uint32_t update_flags) override {
update_bucket_database(bucket,
toVector<BucketCopy>(changed_node),
update_flags);
@@ -79,9 +79,9 @@ public:
/**
* Adds the given copies to the bucket database.
*/
- virtual void update_bucket_database(const document::Bucket& bucket,
- const std::vector<BucketCopy>& changed_nodes,
- uint32_t update_flags = 0) override;
+ void update_bucket_database(const document::Bucket& bucket,
+ const std::vector<BucketCopy>& changed_nodes,
+ uint32_t update_flags) override;
/**
* Removes a copy from the given bucket from the bucket database.
@@ -165,6 +165,8 @@ public:
return getDistributor().getBucketIdHasher();
}
+ const NodeSupportedFeaturesRepo& node_supported_features_repo() const noexcept override;
+
// Implements DocumentSelectionParser
std::unique_ptr<document::select::Node> parse_selection(const vespalib::string& selection) const override;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
index 4f39dd7e5bc..dfed59499c6 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
@@ -16,6 +16,7 @@ namespace storage {
namespace storage::distributor {
class DistributorMetricSet;
+class NodeSupportedFeaturesRepo;
class PendingMessageTracker;
/**
@@ -61,6 +62,7 @@ public:
virtual const DistributorConfiguration& getConfig() const = 0;
virtual ChainedMessageSender& getMessageSender() = 0;
virtual const BucketGcTimeCalculator::BucketIdHasher& getBucketIdHasher() const = 0;
+ virtual const NodeSupportedFeaturesRepo& node_supported_features_repo() const noexcept = 0;
};
}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
index 5919261ab43..d6f4e5694f6 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
@@ -16,6 +16,7 @@ namespace storage::lib { class ClusterStateBundle; }
namespace storage::distributor {
class PendingMessageTracker;
+class NodeSupportedFeaturesRepo;
/**
* Interface with functionality that is used when handling distributor stripe operations.
@@ -57,6 +58,7 @@ public:
virtual const lib::ClusterStateBundle& cluster_state_bundle() const = 0;
virtual bool storage_node_is_up(document::BucketSpace bucket_space, uint32_t node_index) const = 0;
virtual const BucketGcTimeCalculator::BucketIdHasher& bucket_id_hasher() const = 0;
+ virtual const NodeSupportedFeaturesRepo& node_supported_features_repo() const noexcept = 0;
};
}
diff --git a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
index 1a44b79ac3a..b00e4ce3cba 100644
--- a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
+++ b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
@@ -132,6 +132,14 @@ void MultiThreadedStripeAccessGuard::clear_read_only_bucket_repo_databases() {
});
}
+void MultiThreadedStripeAccessGuard::update_node_supported_features_repo(
+ std::shared_ptr<const NodeSupportedFeaturesRepo> features_repo)
+{
+ for_each_stripe([&](TickableStripe& stripe) {
+ stripe.update_node_supported_features_repo(features_repo);
+ });
+}
+
void MultiThreadedStripeAccessGuard::report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
for_each_stripe([&](TickableStripe& stripe) {
stripe.report_bucket_db_status(bucket_space, out);
diff --git a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
index 53799fa338b..c52a01fdded 100644
--- a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
+++ b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
@@ -54,6 +54,8 @@ public:
void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) override;
void clear_read_only_bucket_repo_databases() override;
+ void update_node_supported_features_repo(std::shared_ptr<const NodeSupportedFeaturesRepo> features_repo) override;
+
void report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const override;
PendingOperationStats pending_operation_stats() const override;
void report_single_bucket_requests(vespalib::xml::XmlOutputStream& xos) const override;
diff --git a/storage/src/vespa/storage/distributor/node_supported_features.h b/storage/src/vespa/storage/distributor/node_supported_features.h
new file mode 100644
index 00000000000..fb9cc68e970
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/node_supported_features.h
@@ -0,0 +1,19 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace storage::distributor {
+
+/**
+ * Collection of distinct features supported by a particular content node.
+ *
+ * Communicated to a distributor via bucket info exchanges. All features
+ * are initially expected to be unsupported.
+ */
+struct NodeSupportedFeatures {
+ bool unordered_merge_chaining = false;
+
+ bool operator==(const NodeSupportedFeatures&) const noexcept = default;
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/node_supported_features_repo.cpp b/storage/src/vespa/storage/distributor/node_supported_features_repo.cpp
new file mode 100644
index 00000000000..e125f360cec
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/node_supported_features_repo.cpp
@@ -0,0 +1,37 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "node_supported_features_repo.h"
+#include <vespa/vespalib/stllike/hash_map.hpp>
+
+namespace storage::distributor {
+
+NodeSupportedFeaturesRepo::NodeSupportedFeaturesRepo() = default;
+
+NodeSupportedFeaturesRepo::NodeSupportedFeaturesRepo(
+ vespalib::hash_map<uint16_t, NodeSupportedFeatures> features,
+ PrivateCtorTag)
+ : _node_features(std::move(features))
+{}
+
+NodeSupportedFeaturesRepo::~NodeSupportedFeaturesRepo() = default;
+
+const NodeSupportedFeatures&
+NodeSupportedFeaturesRepo::node_supported_features(uint16_t node_idx) const noexcept
+{
+ static const NodeSupportedFeatures default_features;
+ const auto iter = _node_features.find(node_idx);
+ return (iter != _node_features.end() ? iter->second : default_features);
+}
+
+std::shared_ptr<const NodeSupportedFeaturesRepo>
+NodeSupportedFeaturesRepo::make_union_of(const vespalib::hash_map<uint16_t, NodeSupportedFeatures>& node_features) const
+{
+ auto new_features = _node_features; // Must be by copy.
+ // We always let the _new_ features update any existing mapping.
+ for (const auto& nf : node_features) {
+ new_features[nf.first] = nf.second;
+ }
+ return std::make_shared<NodeSupportedFeaturesRepo>(std::move(new_features), PrivateCtorTag{});
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/node_supported_features_repo.h b/storage/src/vespa/storage/distributor/node_supported_features_repo.h
new file mode 100644
index 00000000000..cc40c27b8e2
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/node_supported_features_repo.h
@@ -0,0 +1,37 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "node_supported_features.h"
+#include <vespa/vespalib/stllike/hash_map.h>
+#include <memory>
+
+namespace storage::distributor {
+
+/**
+ * Repo of known mappings from node distribution key to feature set supported by
+ * the content node with the given distribution key.
+ *
+ * Entirely immutable; copy-on-write via make_union_of().
+ */
+class NodeSupportedFeaturesRepo {
+ const vespalib::hash_map<uint16_t, NodeSupportedFeatures> _node_features;
+ struct PrivateCtorTag {};
+public:
+ NodeSupportedFeaturesRepo();
+
+ NodeSupportedFeaturesRepo(vespalib::hash_map<uint16_t, NodeSupportedFeatures> features, PrivateCtorTag);
+ ~NodeSupportedFeaturesRepo();
+
+ // Returns supported node features for node with distribution key node_idx, or a default feature set
+ // with all features unset if node has no known mapping.
+ [[nodiscard]] const NodeSupportedFeatures& node_supported_features(uint16_t node_idx) const noexcept;
+
+ // Returns a new repo instance containing the union key->features set of self and node_features.
+ // If there is a duplicate mapping between the two, the features in node_features take precedence
+ // and will be stored in the new repo.
+ [[nodiscard]] std::shared_ptr<const NodeSupportedFeaturesRepo>
+ make_union_of(const vespalib::hash_map<uint16_t, NodeSupportedFeatures>& node_features) const;
+};
+
+}
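
A hypothetical usage sketch for the repo declared above, not part of the patch, showing the copy-on-write growth via make_union_of() and the all-unset default returned for unknown nodes:

#include <vespa/storage/distributor/node_supported_features_repo.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <cassert>

void example_usage() {
    using storage::distributor::NodeSupportedFeatures;
    using storage::distributor::NodeSupportedFeaturesRepo;

    NodeSupportedFeaturesRepo repo; // empty repo: every node maps to default (all-unset) features
    assert(!repo.node_supported_features(7).unordered_merge_chaining);

    vespalib::hash_map<uint16_t, NodeSupportedFeatures> gathered;
    NodeSupportedFeatures f;
    f.unordered_merge_chaining = true;
    gathered[7] = f;

    // make_union_of() leaves the original repo untouched and returns a new immutable snapshot.
    auto updated = repo.make_union_of(gathered);
    assert(updated->node_supported_features(7).unordered_merge_chaining);
    assert(!repo.node_supported_features(7).unordered_merge_chaining);
}
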
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
index f951a880e5d..d220a71966f 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
@@ -4,6 +4,7 @@
#include <vespa/storage/distributor/idealstatemanager.h>
#include <vespa/storage/distributor/idealstatemetricsset.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
+#include <vespa/storage/distributor/node_supported_features_repo.h>
#include <vespa/storage/distributor/pendingmessagetracker.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
@@ -137,9 +138,8 @@ MergeOperation::onStart(DistributorStripeMessageSender& sender)
getBucketId(),
_limiter,
nodes);
- for (uint32_t i=0; i<nodes.size(); ++i) {
- _mnodes.push_back(api::MergeBucketCommand::Node(
- nodes[i]._nodeIndex, nodes[i]._sourceOnly));
+ for (const auto& node : nodes) {
+ _mnodes.emplace_back(node._nodeIndex, node._sourceOnly);
}
if (_mnodes.size() > 1) {
@@ -148,11 +148,16 @@ MergeOperation::onStart(DistributorStripeMessageSender& sender)
_mnodes,
_manager->operation_context().generate_unique_timestamp(),
clusterState.getVersion());
-
- // Due to merge forwarding/chaining semantics, we must always send
- // the merge command to the lowest indexed storage node involved in
- // the merge in order to avoid deadlocks.
- std::sort(_mnodes.begin(), _mnodes.end(), NodeIndexComparator());
+ const bool may_send_unordered = (_manager->operation_context().distributor_config().use_unordered_merge_chaining()
+ && all_involved_nodes_support_unordered_merge_chaining());
+ if (!may_send_unordered) {
+ // Due to merge forwarding/chaining semantics, we must always send
+ // the merge command to the lowest indexed storage node involved in
+ // the merge in order to avoid deadlocks.
+ std::sort(_mnodes.begin(), _mnodes.end(), NodeIndexComparator());
+ } else {
+ msg->set_use_unordered_forwarding(true);
+ }
LOG(debug, "Sending %s to storage node %u", msg->toString().c_str(),
_mnodes[0].index);
@@ -262,7 +267,7 @@ void
MergeOperation::onReceive(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply> & msg)
{
- if (_removeOperation.get()) {
+ if (_removeOperation) {
if (_removeOperation->onReceiveInternal(msg)) {
_ok = _removeOperation->ok();
if (!_ok) {
@@ -277,7 +282,7 @@ MergeOperation::onReceive(DistributorStripeMessageSender& sender,
return;
}
- api::MergeBucketReply& reply(dynamic_cast<api::MergeBucketReply&>(*msg));
+ auto& reply = dynamic_cast<api::MergeBucketReply&>(*msg);
LOG(debug,
"Merge operation for bucket %s finished",
getBucketId().toString().c_str());
@@ -367,6 +372,16 @@ bool MergeOperation::is_global_bucket_merge() const noexcept {
return getBucket().getBucketSpace() == document::FixedBucketSpaces::global_space();
}
+bool MergeOperation::all_involved_nodes_support_unordered_merge_chaining() const noexcept {
+ const auto& features_repo = _manager->operation_context().node_supported_features_repo();
+ for (uint16_t node : getNodes()) {
+ if (!features_repo.node_supported_features(node).unordered_merge_chaining) {
+ return false;
+ }
+ }
+ return true;
+}
+
MergeBucketMetricSet*
MergeOperation::get_merge_metrics()
{
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
index 832c0f99681..014bae842fa 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
@@ -64,6 +64,7 @@ private:
void deleteSourceOnlyNodes(const BucketDatabase::Entry& currentState,
DistributorStripeMessageSender& sender);
bool is_global_bucket_merge() const noexcept;
+ bool all_involved_nodes_support_unordered_merge_chaining() const noexcept;
MergeBucketMetricSet* get_merge_metrics();
};
diff --git a/storage/src/vespa/storage/distributor/pendingclusterstate.cpp b/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
index 1c1c9f4a431..8183b013668 100644
--- a/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
+++ b/storage/src/vespa/storage/distributor/pendingclusterstate.cpp
@@ -9,6 +9,7 @@
#include <vespa/storageframework/defaultimplementation/clock/realclock.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/util/xmlstream.hpp>
+#include <vespa/vespalib/stllike/hash_map.hpp>
#include <climits>
#include <vespa/log/bufferedlogger.h>
@@ -44,7 +45,8 @@ PendingClusterState::PendingClusterState(
_clusterStateVersion(_cmd->getClusterStateBundle().getVersion()),
_isVersionedTransition(true),
_bucketOwnershipTransfer(false),
- _pendingTransitions()
+ _pendingTransitions(),
+ _node_features()
{
logConstructionInformation();
initializeBucketSpaceTransitions(false, outdatedNodesMap);
@@ -67,7 +69,8 @@ PendingClusterState::PendingClusterState(
_clusterStateVersion(0),
_isVersionedTransition(false),
_bucketOwnershipTransfer(true),
- _pendingTransitions()
+ _pendingTransitions(),
+ _node_features()
{
logConstructionInformation();
initializeBucketSpaceTransitions(true, OutdatedNodesMap());
@@ -287,6 +290,9 @@ PendingClusterState::onRequestBucketInfoReply(const std::shared_ptr<api::Request
auto transitionIter = _pendingTransitions.find(bucketSpaceAndNode.bucketSpace);
assert(transitionIter != _pendingTransitions.end());
transitionIter->second->onRequestBucketInfoReply(*reply, bucketSpaceAndNode.node);
+
+ update_node_supported_features_from_reply(iter->second.node, *reply);
+
_sentMessages.erase(iter);
return true;
@@ -304,21 +310,6 @@ PendingClusterState::resendDelayedMessages() {
}
}
-std::string
-PendingClusterState::requestNodesToString() const
-{
- std::ostringstream ost;
- for (uint32_t i = 0; i < _requestedNodes.size(); ++i) {
- if (_requestedNodes[i]) {
- if (ost.str().length() > 0) {
- ost << ",";
- }
- ost << i;
- }
- }
- return ost.str();
-}
-
void
PendingClusterState::merge_into_bucket_databases(StripeAccessGuard& guard)
{
@@ -366,4 +357,14 @@ PendingClusterState::getPrevClusterStateBundleString() const {
return _prevClusterStateBundle.getBaselineClusterState()->toString();
}
+void
+PendingClusterState::update_node_supported_features_from_reply(uint16_t node, const api::RequestBucketInfoReply& reply)
+{
+ const auto& src_feat = reply.supported_node_features();
+ NodeSupportedFeatures dest_feat;
+ dest_feat.unordered_merge_chaining = src_feat.unordered_merge_chaining;
+    // Will be overwritten once per bucket-space reply; that is fine, since the feature set is independent of bucket space.
+ _node_features.insert(std::make_pair(node, dest_feat));
+}
+
}
diff --git a/storage/src/vespa/storage/distributor/pendingclusterstate.h b/storage/src/vespa/storage/distributor/pendingclusterstate.h
index 0d07730d9ee..1a2f8901b47 100644
--- a/storage/src/vespa/storage/distributor/pendingclusterstate.h
+++ b/storage/src/vespa/storage/distributor/pendingclusterstate.h
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include "node_supported_features.h"
#include "pending_bucket_space_db_transition_entry.h"
#include "clusterinformation.h"
#include <vespa/storage/common/storagelink.h>
@@ -9,6 +10,7 @@
#include <vespa/storageframework/generic/clock/clock.h>
#include <vespa/vdslib/state/cluster_state_bundle.h>
#include <vespa/vespalib/util/xmlserializable.h>
+#include <vespa/vespalib/stllike/hash_map.h>
#include "outdated_nodes_map.h"
#include <unordered_map>
#include <deque>
@@ -151,9 +153,14 @@ public:
// Get pending transition for a specific bucket space. Only used by unit test.
PendingBucketSpaceDbTransition &getPendingBucketSpaceDbTransition(document::BucketSpace bucketSpace);
+ // May be a subset of the nodes in the cluster, depending on how many nodes were consulted
+ // as part of the pending cluster state. Caller must take care to aggregate features.
+ const vespalib::hash_map<uint16_t, NodeSupportedFeatures>& gathered_node_supported_features() const noexcept {
+ return _node_features;
+ }
+
void printXml(vespalib::XmlOutputStream&) const override;
Summary getSummary() const;
- std::string requestNodesToString() const;
private:
// With 100ms resend timeout, this requires a particular node to have failed
@@ -170,7 +177,7 @@ private:
DistributorMessageSender& sender,
const BucketSpaceStateMap& bucket_space_states,
const std::shared_ptr<api::SetSystemStateCommand>& newStateCmd,
- const OutdatedNodesMap &outdatedNodesMap,
+ const OutdatedNodesMap& outdatedNodesMap,
api::Timestamp creationTimestamp);
/**
@@ -213,6 +220,7 @@ private:
std::string getNewClusterStateBundleString() const;
std::string getPrevClusterStateBundleString() const;
void update_reply_failure_statistics(const api::ReturnCode& result, const BucketSpaceAndNode& source);
+ void update_node_supported_features_from_reply(uint16_t node, const api::RequestBucketInfoReply& reply);
std::shared_ptr<api::SetSystemStateCommand> _cmd;
@@ -233,6 +241,7 @@ private:
bool _isVersionedTransition;
bool _bucketOwnershipTransfer;
std::unordered_map<document::BucketSpace, std::unique_ptr<PendingBucketSpaceDbTransition>, document::BucketSpace::hash> _pendingTransitions;
+ vespalib::hash_map<uint16_t, NodeSupportedFeatures> _node_features;
};
}
diff --git a/storage/src/vespa/storage/distributor/stripe_access_guard.h b/storage/src/vespa/storage/distributor/stripe_access_guard.h
index bfc53c0ed82..2ed40cfcf2e 100644
--- a/storage/src/vespa/storage/distributor/stripe_access_guard.h
+++ b/storage/src/vespa/storage/distributor/stripe_access_guard.h
@@ -20,6 +20,8 @@ namespace vespalib::xml { class XmlOutputStream; }
namespace storage::distributor {
+class NodeSupportedFeaturesRepo;
+
/**
* A stripe access guard guarantees that the holder of a guard can access underlying
* stripes via it in a thread safe manner. In particular, while any access guard is
@@ -57,6 +59,8 @@ public:
virtual void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) = 0;
virtual void clear_read_only_bucket_repo_databases() = 0;
+ virtual void update_node_supported_features_repo(std::shared_ptr<const NodeSupportedFeaturesRepo> features_repo) = 0;
+
struct PendingOperationStats {
size_t external_load_operations;
size_t maintenance_operations;
diff --git a/storage/src/vespa/storage/distributor/tickable_stripe.h b/storage/src/vespa/storage/distributor/tickable_stripe.h
index d58b1e2e6aa..e458043ac64 100644
--- a/storage/src/vespa/storage/distributor/tickable_stripe.h
+++ b/storage/src/vespa/storage/distributor/tickable_stripe.h
@@ -15,6 +15,8 @@ namespace vespalib::xml { class XmlOutputStream; }
namespace storage::distributor {
+class NodeSupportedFeaturesRepo;
+
/**
* A tickable stripe is the minimal binding glue between the stripe's worker thread and
* the actual implementation. Primarily allows for easier testing without having to
@@ -58,6 +60,8 @@ public:
virtual void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) = 0;
virtual void update_read_snapshot_after_activation(const lib::ClusterStateBundle& activated_state) = 0;
virtual void clear_read_only_bucket_repo_databases() = 0;
+ virtual void update_node_supported_features_repo(std::shared_ptr<const NodeSupportedFeaturesRepo> features_repo) = 0;
+
// Functions used for state reporting
virtual void report_bucket_db_status(document::BucketSpace bucket_space, std::ostream& out) const = 0;
virtual StripeAccessGuard::PendingOperationStats pending_operation_stats() const = 0;
diff --git a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp
index 8fc6d7576c9..613f0f6ce09 100644
--- a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp
+++ b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.cpp
@@ -7,6 +7,7 @@
#include "top_level_distributor.h"
#include "distributor_bucket_space.h"
#include "distributormetricsset.h"
+#include "node_supported_features_repo.h"
#include "simpleclusterinformation.h"
#include "stripe_access_guard.h"
#include <vespa/document/bucket/fixed_bucket_spaces.h>
@@ -47,11 +48,12 @@ TopLevelBucketDBUpdater::TopLevelBucketDBUpdater(const DistributorNodeContext& n
_chained_sender(chained_sender),
_outdated_nodes_map(),
_transition_timer(_node_ctx.clock()),
+ _node_supported_features_repo(std::make_shared<const NodeSupportedFeaturesRepo>()),
_stale_reads_enabled(false)
{
// FIXME STRIPE top-level Distributor needs a proper way to track the current cluster state bundle!
propagate_active_state_bundle_internally(true); // We're just starting up so assume ownership transfer.
- bootstrap_distribution_config(bootstrap_distribution);
+ bootstrap_distribution_config(std::move(bootstrap_distribution));
}
TopLevelBucketDBUpdater::~TopLevelBucketDBUpdater() = default;
@@ -393,6 +395,10 @@ TopLevelBucketDBUpdater::activate_pending_cluster_state(StripeAccessGuard& guard
guard.notify_distribution_change_enabled();
}
+ _node_supported_features_repo = _node_supported_features_repo->make_union_of(
+ _pending_cluster_state->gathered_node_supported_features());
+ guard.update_node_supported_features_repo(_node_supported_features_repo);
+
guard.update_read_snapshot_after_activation(_pending_cluster_state->getNewClusterStateBundle());
_pending_cluster_state.reset();
_outdated_nodes_map.clear();
diff --git a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h
index f35991c20f3..b1065e708a4 100644
--- a/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h
+++ b/storage/src/vespa/storage/distributor/top_level_bucket_db_updater.h
@@ -30,6 +30,7 @@ struct BucketSpaceDistributionConfigs;
class BucketSpaceDistributionContext;
class ClusterStateBundleActivationListener;
class DistributorInterface;
+class NodeSupportedFeaturesRepo;
class StripeAccessor;
class StripeAccessGuard;
@@ -122,6 +123,7 @@ private:
ChainedMessageSender& _chained_sender;
OutdatedNodesMap _outdated_nodes_map;
framework::MilliSecTimer _transition_timer;
+ std::shared_ptr<const NodeSupportedFeaturesRepo> _node_supported_features_repo;
std::atomic<bool> _stale_reads_enabled;
};
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.cpp b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
index 05e50492206..bc2f54e5a50 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.cpp
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
@@ -113,30 +113,40 @@ MergeThrottler::MergeOperationMetrics::MergeOperationMetrics(const std::string&
}
MergeThrottler::MergeOperationMetrics::~MergeOperationMetrics() = default;
-MergeThrottler::MergeNodeSequence::MergeNodeSequence(
- const api::MergeBucketCommand& cmd,
- uint16_t thisIndex)
+MergeThrottler::MergeNodeSequence::MergeNodeSequence(const api::MergeBucketCommand& cmd, uint16_t thisIndex)
: _cmd(cmd),
_sortedNodes(cmd.getNodes()),
- _sortedIndex(std::numeric_limits<std::size_t>::max()),
- _thisIndex(thisIndex)
+ _sortedIndex(UINT16_MAX),
+ _unordered_index(UINT16_MAX),
+ _thisIndex(thisIndex),
+ _use_unordered_forwarding(cmd.use_unordered_forwarding())
{
// Sort the node vector so that we can find out if we're the
// last node in the chain or if we should forward the merge
std::sort(_sortedNodes.begin(), _sortedNodes.end(), NodeComparator());
- assert(!_sortedNodes.empty());
- for (std::size_t i = 0; i < _sortedNodes.size(); ++i) {
+ assert(!_sortedNodes.empty() && (_sortedNodes.size() < UINT16_MAX));
+ for (uint16_t i = 0; i < static_cast<uint16_t>(_sortedNodes.size()); ++i) {
if (_sortedNodes[i].index == _thisIndex) {
_sortedIndex = i;
break;
}
}
+ const auto& nodes = unordered_nodes();
+ for (uint16_t i = 0; i < static_cast<uint16_t>(nodes.size()); ++i) {
+ if (nodes[i].index == _thisIndex) {
+ _unordered_index = i;
+ break;
+ }
+ }
}
uint16_t
MergeThrottler::MergeNodeSequence::getNextNodeInChain() const
{
assert(_cmd.getChain().size() < _sortedNodes.size());
+ if (_use_unordered_forwarding) {
+ return unordered_nodes()[_cmd.getChain().size() + 1].index;
+ }
// assert(_sortedNodes[_cmd.getChain().size()].index == _thisIndex);
if (_sortedNodes[_cmd.getChain().size()].index != _thisIndex) {
// Some added paranoia output
@@ -153,7 +163,11 @@ MergeThrottler::MergeNodeSequence::isChainCompleted() const
{
if (_cmd.getChain().size() != _sortedNodes.size()) return false;
- for (std::size_t i = 0; i < _cmd.getChain().size(); ++i) {
+ if (_use_unordered_forwarding) {
+ return true; // Expect chain to be correct if size matches node sequence size. TODO can't we always do this?
+ }
+
+ for (size_t i = 0; i < _cmd.getChain().size(); ++i) {
if (_cmd.getChain()[i] != _sortedNodes[i].index) {
return false;
}
@@ -162,10 +176,10 @@ MergeThrottler::MergeNodeSequence::isChainCompleted() const
}
bool
-MergeThrottler::MergeNodeSequence::chainContainsIndex(uint16_t idx) const
+MergeThrottler::MergeNodeSequence::chain_contains_this_node() const noexcept
{
- for (std::size_t i = 0; i < _cmd.getChain().size(); ++i) {
- if (_cmd.getChain()[i] == idx) {
+ for (size_t i = 0; i < _cmd.getChain().size(); ++i) {
+ if (_cmd.getChain()[i] == _thisIndex) {
return true;
}
}
@@ -358,6 +372,7 @@ MergeThrottler::forwardCommandToNode(
fwdMerge->setSourceIndex(mergeCmd.getSourceIndex());
fwdMerge->setPriority(mergeCmd.getPriority());
fwdMerge->setTimeout(mergeCmd.getTimeout());
+ fwdMerge->set_use_unordered_forwarding(mergeCmd.use_unordered_forwarding());
msgGuard.sendUp(fwdMerge);
}
@@ -374,7 +389,7 @@ api::StorageMessage::SP
MergeThrottler::getNextQueuedMerge()
{
if (_queue.empty()) {
- return api::StorageMessage::SP();
+ return {};
}
auto iter = _queue.begin();
@@ -385,7 +400,7 @@ MergeThrottler::getNextQueuedMerge()
}
void
-MergeThrottler::enqueueMerge(
+MergeThrottler::enqueue_merge_for_later_processing(
const api::StorageMessage::SP& msg,
MessageGuard& msgGuard)
{
@@ -395,9 +410,10 @@ MergeThrottler::enqueueMerge(
if (!validateNewMerge(mergeCmd, nodeSeq, msgGuard)) {
return;
}
- const bool is_forwarded_merge = _disable_queue_limits_for_chained_merges && !mergeCmd.getChain().empty();
+ // TODO remove once unordered merges are default, since forwarded unordered merges are never enqueued
+ const bool is_forwarded_merge = _disable_queue_limits_for_chained_merges && !mergeCmd.from_distributor();
_queue.emplace(msg, _queueSequence++, is_forwarded_merge);
- _metrics->queueSize.set(_queue.size());
+ _metrics->queueSize.set(static_cast<int64_t>(_queue.size()));
}
bool
@@ -682,11 +698,30 @@ bool MergeThrottler::backpressure_mode_active() const {
return backpressure_mode_active_no_lock();
}
-bool MergeThrottler::allow_merge_with_queue_full(const api::MergeBucketCommand& cmd) const noexcept {
- // We let any merge through that has already passed through at least one other node's merge
- // window, as that has already taken up a logical resource slot on all those nodes. Busy-bouncing
- // a merge at that point would undo a great amount of thumb-twiddling and waiting.
- return (_disable_queue_limits_for_chained_merges && !cmd.getChain().empty());
+bool MergeThrottler::allow_merge_despite_full_window(const api::MergeBucketCommand& cmd) noexcept {
+ // We cannot let forwarded unordered merges fall into the queue, as that might lead to a deadlock.
+ // See comment in may_allow_into_queue() for rationale.
+ return (cmd.use_unordered_forwarding() && !cmd.from_distributor());
+}
+
+bool MergeThrottler::may_allow_into_queue(const api::MergeBucketCommand& cmd) const noexcept {
+ // We cannot let forwarded unordered merges fall into the queue, as that might lead to a deadlock.
+ // Consider the following scenario, with two nodes C0 and C1, each with a low window size of 1 (low
+ // limit chosen for demonstration purposes, but is entirely generalizable):
+ // 1. Node 0 receives merge M_x for nodes [0, 1], places in active window, forwards to node 1
+ // 2. Node 1 receives merge M_y for nodes [1, 0], places in active window, forwards to node 0
+ // 3. Node 0 receives merge M_y from node 1. Active window is full, so places in queue
+ // 4. Node 1 receives merge M_x from node 0. Active window is full, so places in queue
+ // 5. Neither M_x nor M_y will ever complete since they're waiting for resources that cannot be
+ // freed up before they themselves complete. Classic deadlock(tm).
+ //
+ // We do, however, allow enqueueing unordered merges that come straight from the distributor, as
+ // those cannot cause a deadlock at that point in time.
+ if (cmd.use_unordered_forwarding()) {
+ return cmd.from_distributor();
+ }
+ return ((_queue.size() < _maxQueueSize)
+ || (_disable_queue_limits_for_chained_merges && !cmd.from_distributor()));
}
// Must be run from worker thread
@@ -716,10 +751,10 @@ MergeThrottler::handleMessageDown(
if (isMergeAlreadyKnown(msg)) {
processCycledMergeCommand(msg, msgGuard);
- } else if (canProcessNewMerge()) {
+ } else if (canProcessNewMerge() || allow_merge_despite_full_window(mergeCmd)) {
processNewMergeCommand(msg, msgGuard);
- } else if ((_queue.size() < _maxQueueSize) || allow_merge_with_queue_full(mergeCmd)) {
- enqueueMerge(msg, msgGuard); // Queue for later processing
+ } else if (may_allow_into_queue(mergeCmd)) {
+ enqueue_merge_for_later_processing(msg, msgGuard);
} else {
// No more room at the inn. Return BUSY so that the
// distributor will wait a bit before retrying
@@ -773,7 +808,7 @@ MergeThrottler::validateNewMerge(
<< _component.getIndex()
<< ", which is not in its forwarding chain";
LOG(error, "%s", oss.str().data());
- } else if (mergeCmd.getChain().size() >= nodeSeq.getSortedNodes().size()) {
+ } else if (mergeCmd.getChain().size() >= nodeSeq.unordered_nodes().size()) {
// Chain is full but we haven't seen the merge! This means
// the node has probably gone down with a merge it previously
// forwarded only now coming back to haunt it.
@@ -781,7 +816,7 @@ MergeThrottler::validateNewMerge(
<< " is not in node's internal state, but has a "
<< "full chain, meaning it cannot be forwarded.";
LOG(debug, "%s", oss.str().data());
- } else if (nodeSeq.chainContainsIndex(nodeSeq.getThisNodeIndex())) {
+ } else if (nodeSeq.chain_contains_this_node()) {
oss << mergeCmd.toString()
<< " is not in node's internal state, but contains "
<< "this node in its non-full chain. This should not happen!";
@@ -831,7 +866,9 @@ MergeThrottler::processNewMergeCommand(
// If chain is empty and this node is not the lowest
// index in the nodeset, immediately execute. Required for
// backwards compatibility with older distributor versions.
- if (mergeCmd.getChain().empty()
+ // TODO remove this
+ if (mergeCmd.from_distributor()
+ && !mergeCmd.use_unordered_forwarding()
&& (nodeSeq.getSortedNodes()[0].index != _component.getIndex()))
{
LOG(debug, "%s has empty chain and was sent to node that "
@@ -1039,7 +1076,6 @@ bool
MergeThrottler::onSetSystemState(
const std::shared_ptr<api::SetSystemStateCommand>& stateCmd)
{
-
LOG(debug,
"New cluster state arrived with version %u, flushing "
"all outdated queued merges",
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.h b/storage/src/vespa/storage/storageserver/mergethrottler.h
index da301172a3a..c115d36ad89 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.h
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.h
@@ -161,7 +161,7 @@ private:
ActiveMergeMap _merges;
MergePriorityQueue _queue;
- std::size_t _maxQueueSize;
+ size_t _maxQueueSize;
mbus::StaticThrottlePolicy::UP _throttlePolicy;
uint64_t _queueSequence; // TODO: move into a stable priority queue class
mutable std::mutex _messageLock;
@@ -220,7 +220,7 @@ public:
std::mutex& getStateLock() { return _stateLock; }
Metrics& getMetrics() { return *_metrics; }
- std::size_t getMaxQueueSize() const { return _maxQueueSize; }
+ size_t getMaxQueueSize() const { return _maxQueueSize; }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
void reportHtmlStatus(std::ostream&, const framework::HttpUrlPath&) const override;
private:
@@ -230,17 +230,18 @@ private:
struct MergeNodeSequence {
const api::MergeBucketCommand& _cmd;
std::vector<api::MergeBucketCommand::Node> _sortedNodes;
- std::size_t _sortedIndex; // Index of current storage node in the sorted node sequence
+ uint16_t _sortedIndex; // Index of current storage node in the sorted node sequence
+ uint16_t _unordered_index;
const uint16_t _thisIndex; // Index of the current storage node
+ bool _use_unordered_forwarding;
MergeNodeSequence(const api::MergeBucketCommand& cmd, uint16_t thisIndex);
- std::size_t getSortedIndex() const { return _sortedIndex; }
const std::vector<api::MergeBucketCommand::Node>& getSortedNodes() const {
return _sortedNodes;
}
bool isIndexUnknown() const {
- return (_sortedIndex == std::numeric_limits<std::size_t>::max());
+ return (_sortedIndex == UINT16_MAX);
}
/**
* This node is the merge executor if it's the first element in the
@@ -252,11 +253,17 @@ private:
uint16_t getExecutorNodeIndex() const{
return _cmd.getNodes()[0].index;
}
- bool isLastNode() const {
- return (_sortedIndex == _sortedNodes.size() - 1);
+ const std::vector<api::MergeBucketCommand::Node>& unordered_nodes() const noexcept {
+ return _cmd.getNodes();
}
- bool chainContainsIndex(uint16_t idx) const;
- uint16_t getThisNodeIndex() const { return _thisIndex; }
+ [[nodiscard]] bool isLastNode() const {
+ if (!_use_unordered_forwarding) {
+ return (_sortedIndex == _sortedNodes.size() - 1);
+ } else {
+ return (_unordered_index == (unordered_nodes().size() - 1));
+ }
+ }
+ [[nodiscard]] bool chain_contains_this_node() const noexcept;
/**
* Gets node to forward to in strictly increasing order.
*/
@@ -339,7 +346,7 @@ private:
* @return Highest priority waiting merge or null SP if queue is empty
*/
api::StorageMessage::SP getNextQueuedMerge();
- void enqueueMerge(const api::StorageMessage::SP& msg, MessageGuard& msgGuard);
+ void enqueue_merge_for_later_processing(const api::StorageMessage::SP& msg, MessageGuard& msgGuard);
/**
* @return true if throttle policy says at least one additional
@@ -347,12 +354,13 @@ private:
*/
bool canProcessNewMerge() const;
- bool merge_is_backpressure_throttled(const api::MergeBucketCommand& cmd) const;
+ [[nodiscard]] bool merge_is_backpressure_throttled(const api::MergeBucketCommand& cmd) const;
void bounce_backpressure_throttled_merge(const api::MergeBucketCommand& cmd, MessageGuard& guard);
- bool merge_has_this_node_as_source_only_node(const api::MergeBucketCommand& cmd) const;
- bool backpressure_mode_active_no_lock() const;
+ [[nodiscard]] bool merge_has_this_node_as_source_only_node(const api::MergeBucketCommand& cmd) const;
+ [[nodiscard]] bool backpressure_mode_active_no_lock() const;
void backpressure_bounce_all_queued_merges(MessageGuard& guard);
- bool allow_merge_with_queue_full(const api::MergeBucketCommand& cmd) const noexcept;
+ [[nodiscard]] static bool allow_merge_despite_full_window(const api::MergeBucketCommand& cmd) noexcept;
+ [[nodiscard]] bool may_allow_into_queue(const api::MergeBucketCommand& cmd) const noexcept;
void sendReply(const api::MergeBucketCommand& cmd,
const api::ReturnCode& result,
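
MergeNodeSequence now tracks this node's position in two orderings: the legacy index-sorted sequence and the distributor-provided (unordered) sequence, and next-hop/last-node questions are answered against whichever ordering the command selects. A simplified sketch of that bookkeeping, assuming plain node indexes instead of MergeBucketCommand::Node:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-in for the two forwarding orders: legacy chaining walks the
// nodes sorted by index, unordered chaining walks them in the order the
// distributor listed them.
struct ForwardingPlan {
    std::vector<uint16_t> unordered; // distributor-provided order
    std::vector<uint16_t> sorted;    // same nodes, ascending by index

    explicit ForwardingPlan(std::vector<uint16_t> nodes)
        : unordered(std::move(nodes)),
          sorted(unordered)
    {
        std::sort(sorted.begin(), sorted.end());
    }

    const std::vector<uint16_t>& order(bool use_unordered) const {
        return use_unordered ? unordered : sorted;
    }

    // chain_len nodes have already handled the merge, so the current node sits
    // at position chain_len and the next hop is the node at chain_len + 1.
    uint16_t next_node(bool use_unordered, std::size_t chain_len) const {
        const auto& seq = order(use_unordered);
        assert(chain_len + 1 < seq.size());
        return seq[chain_len + 1];
    }

    bool is_last_node(bool use_unordered, uint16_t this_node) const {
        const auto& seq = order(use_unordered);
        return !seq.empty() && seq.back() == this_node;
    }
};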
diff --git a/storageapi/src/tests/mbusprot/storageprotocoltest.cpp b/storageapi/src/tests/mbusprot/storageprotocoltest.cpp
index a6021a7cfd2..6a00ddc8d8e 100644
--- a/storageapi/src/tests/mbusprot/storageprotocoltest.cpp
+++ b/storageapi/src/tests/mbusprot/storageprotocoltest.cpp
@@ -410,6 +410,12 @@ TEST_P(StorageProtocolTest, request_bucket_info) {
// "Last modified" not counted by operator== for some reason. Testing
// separately until we can figure out if this is by design or not.
EXPECT_EQ(lastMod, entries[0]._info.getLastModified());
+
+ if (GetParam().getMajor() >= 7) {
+ EXPECT_TRUE(reply2->supported_node_features().unordered_merge_chaining);
+ } else {
+ EXPECT_FALSE(reply2->supported_node_features().unordered_merge_chaining);
+ }
}
}
@@ -471,12 +477,18 @@ TEST_P(StorageProtocolTest, merge_bucket) {
chain.push_back(14);
auto cmd = std::make_shared<MergeBucketCommand>(_bucket, nodes, Timestamp(1234), 567, chain);
+ cmd->set_use_unordered_forwarding(true);
auto cmd2 = copyCommand(cmd);
EXPECT_EQ(_bucket, cmd2->getBucket());
EXPECT_EQ(nodes, cmd2->getNodes());
EXPECT_EQ(Timestamp(1234), cmd2->getMaxTimestamp());
EXPECT_EQ(uint32_t(567), cmd2->getClusterStateVersion());
EXPECT_EQ(chain, cmd2->getChain());
+ if (GetParam().getMajor() >= 7) {
+ EXPECT_EQ(cmd2->use_unordered_forwarding(), cmd->use_unordered_forwarding());
+ } else {
+ EXPECT_FALSE(cmd2->use_unordered_forwarding());
+ }
auto reply = std::make_shared<MergeBucketReply>(*cmd);
auto reply2 = copyReply(reply);
diff --git a/storageapi/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto b/storageapi/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto
index 34d67fdc00c..7f7ab1d7c0b 100644
--- a/storageapi/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto
+++ b/storageapi/src/vespa/storageapi/mbusprot/protobuf/maintenance.proto
@@ -38,6 +38,7 @@ message MergeBucketRequest {
uint64 max_timestamp = 3;
repeated MergeNode nodes = 4;
repeated uint32 node_chain = 5;
+ bool unordered_forwarding = 6;
}
message MergeBucketResponse {
@@ -108,8 +109,14 @@ message BucketAndBucketInfo {
BucketInfo bucket_info = 2;
}
+message SupportedNodeFeatures {
+ bool unordered_merge_chaining = 1;
+}
+
message RequestBucketInfoResponse {
repeated BucketAndBucketInfo bucket_infos = 1;
+ // Only present for full bucket info fetches (not for explicit buckets)
+ SupportedNodeFeatures supported_node_features = 2;
}
message NotifyBucketChangeRequest {
diff --git a/storageapi/src/vespa/storageapi/mbusprot/protocolserialization7.cpp b/storageapi/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
index bb4cb6e24a3..8425294cbbd 100644
--- a/storageapi/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
+++ b/storageapi/src/vespa/storageapi/mbusprot/protocolserialization7.cpp
@@ -766,6 +766,7 @@ void ProtocolSerialization7::onEncode(GBBuf& buf, const api::MergeBucketCommand&
set_merge_nodes(*req.mutable_nodes(), msg.getNodes());
req.set_max_timestamp(msg.getMaxTimestamp());
req.set_cluster_state_version(msg.getClusterStateVersion());
+ req.set_unordered_forwarding(msg.use_unordered_forwarding());
for (uint16_t chain_node : msg.getChain()) {
req.add_node_chain(chain_node);
}
@@ -787,6 +788,7 @@ api::StorageCommand::UP ProtocolSerialization7::onDecodeMergeBucketCommand(BBuf&
chain.emplace_back(node);
}
cmd->setChain(std::move(chain));
+ cmd->set_use_unordered_forwarding(req.unordered_forwarding());
return cmd;
});
}
@@ -999,6 +1001,10 @@ void ProtocolSerialization7::onEncode(GBBuf& buf, const api::RequestBucketInfoRe
bucket_and_info->set_raw_bucket_id(entry._bucketId.getRawId());
set_bucket_info(*bucket_and_info->mutable_bucket_info(), entry._info);
}
+ // We mark features as available at protocol level. Only included for full bucket fetch responses.
+ if (msg.full_bucket_fetch()) {
+ res.mutable_supported_node_features()->set_unordered_merge_chaining(true);
+ }
});
}
@@ -1035,6 +1041,11 @@ api::StorageReply::UP ProtocolSerialization7::onDecodeRequestBucketInfoReply(con
dest_entries[i]._bucketId = document::BucketId(proto_entry.raw_bucket_id());
dest_entries[i]._info = get_bucket_info(proto_entry.bucket_info());
}
+ if (res.has_supported_node_features()) {
+ const auto& src_features = res.supported_node_features();
+ auto& dest_features = reply->supported_node_features();
+ dest_features.unordered_merge_chaining = src_features.unordered_merge_chaining();
+ }
return reply;
});
}
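
On the wire, the feature block is only attached to full bucket-info responses, and the decoder copies it only when the sub-message is present, so replies from older nodes (or explicit-bucket fetches) simply decay to "no features supported". A small decode-side sketch of that presence handling; std::optional stands in for the generated protobuf has_supported_node_features()/supported_node_features() pair, and the helper names are assumptions.

#include <cstdint>
#include <optional>
#include <unordered_map>

struct NodeSupportedFeatures {
    bool unordered_merge_chaining = false;
};

// Absence of the feature block must decay to "nothing supported" rather than
// being an error, since old nodes never send it and explicit-bucket fetches
// deliberately omit it.
NodeSupportedFeatures
features_from_wire(const std::optional<NodeSupportedFeatures>& wire_block) {
    return wire_block.value_or(NodeSupportedFeatures{});
}

// Gathering per-node feature bits while merging full bucket-info replies; the
// resulting map is what the bucket DB updater would union into its immutable repo.
void note_reply(std::unordered_map<uint16_t, NodeSupportedFeatures>& gathered,
                uint16_t node_index,
                const std::optional<NodeSupportedFeatures>& wire_block) {
    if (wire_block) {
        gathered[node_index] = *wire_block;
    }
}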
diff --git a/storageapi/src/vespa/storageapi/message/bucket.cpp b/storageapi/src/vespa/storageapi/message/bucket.cpp
index 360db5ea3d7..04a40fbc885 100644
--- a/storageapi/src/vespa/storageapi/message/bucket.cpp
+++ b/storageapi/src/vespa/storageapi/message/bucket.cpp
@@ -107,7 +107,8 @@ MergeBucketCommand::MergeBucketCommand(
_nodes(nodes),
_maxTimestamp(maxTimestamp),
_clusterStateVersion(clusterStateVersion),
- _chain(chain)
+ _chain(chain),
+ _use_unordered_forwarding(false)
{}
MergeBucketCommand::~MergeBucketCommand() = default;
@@ -128,6 +129,9 @@ MergeBucketCommand::print(std::ostream& out, bool verbose, const std::string& in
out << _chain[i];
}
out << "]";
+ if (_use_unordered_forwarding) {
+ out << " (unordered forwarding)";
+ }
out << ", reasons to start: " << _reason;
out << ")";
if (verbose) {
diff --git a/storageapi/src/vespa/storageapi/message/bucket.h b/storageapi/src/vespa/storageapi/message/bucket.h
index c24ed55d7a8..5fd79ffffea 100644
--- a/storageapi/src/vespa/storageapi/message/bucket.h
+++ b/storageapi/src/vespa/storageapi/message/bucket.h
@@ -118,6 +118,7 @@ private:
Timestamp _maxTimestamp;
uint32_t _clusterStateVersion;
std::vector<uint16_t> _chain;
+ bool _use_unordered_forwarding;
public:
MergeBucketCommand(const document::Bucket &bucket,
@@ -133,6 +134,11 @@ public:
uint32_t getClusterStateVersion() const { return _clusterStateVersion; }
void setClusterStateVersion(uint32_t version) { _clusterStateVersion = version; }
void setChain(const std::vector<uint16_t>& chain) { _chain = chain; }
+ void set_use_unordered_forwarding(bool unordered_forwarding) noexcept {
+ _use_unordered_forwarding = unordered_forwarding;
+ }
+ [[nodiscard]] bool use_unordered_forwarding() const noexcept { return _use_unordered_forwarding; }
+ [[nodiscard]] bool from_distributor() const noexcept { return _chain.empty(); }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
DECLARE_STORAGECOMMAND(MergeBucketCommand, onMergeBucket)
};
@@ -385,19 +391,30 @@ public:
: _bucketId(id), _info(info) {}
friend std::ostream& operator<<(std::ostream& os, const Entry&);
};
- typedef vespalib::Array<Entry> EntryVector;
+ struct SupportedNodeFeatures {
+ bool unordered_merge_chaining = false;
+ };
+ using EntryVector = vespalib::Array<Entry>;
private:
- EntryVector _buckets;
- bool _full_bucket_fetch;
- document::BucketId _super_bucket_id;
+ EntryVector _buckets;
+ bool _full_bucket_fetch;
+ document::BucketId _super_bucket_id;
+ SupportedNodeFeatures _supported_node_features;
public:
explicit RequestBucketInfoReply(const RequestBucketInfoCommand& cmd);
- ~RequestBucketInfoReply();
+ ~RequestBucketInfoReply() override;
const EntryVector & getBucketInfo() const { return _buckets; }
EntryVector & getBucketInfo() { return _buckets; }
[[nodiscard]] bool full_bucket_fetch() const noexcept { return _full_bucket_fetch; }
+ // Only contains useful information if full_bucket_fetch() == true
+ [[nodiscard]] const SupportedNodeFeatures& supported_node_features() const noexcept {
+ return _supported_node_features;
+ }
+ [[nodiscard]] SupportedNodeFeatures& supported_node_features() noexcept {
+ return _supported_node_features;
+ }
const document::BucketId& super_bucket_id() const { return _super_bucket_id; }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
DECLARE_STORAGEREPLY(RequestBucketInfoReply, onRequestBucketInfoReply)
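
With from_distributor() defined as "chain is still empty" and the unordered flag carried on the command itself, a sender can opt in per merge. The helper below is hypothetical and not part of this patch; it shows how the flag would only be set when every participating node advertises unordered_merge_chaining, since one legacy node still expects the sorted chain order.

#include <cstdint>
#include <vector>

// FeaturesRepoT is any type exposing features_for(node).unordered_merge_chaining,
// e.g. the repo sketch earlier in this patch description.
template <typename FeaturesRepoT>
bool all_nodes_support_unordered(const std::vector<uint16_t>& node_indexes,
                                 const FeaturesRepoT& features_repo) {
    for (uint16_t node : node_indexes) {
        if (!features_repo.features_for(node).unordered_merge_chaining) {
            return false; // one legacy node forces the sorted-chain fallback
        }
    }
    return true;
}

// Possible call site (set_use_unordered_forwarding() is the accessor added by
// this patch; the surrounding names are assumptions):
//   cmd->set_use_unordered_forwarding(all_nodes_support_unordered(node_indexes, *features_repo));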
diff --git a/vespa-feed-client-cli/pom.xml b/vespa-feed-client-cli/pom.xml
index 930d31beb6c..aff625fe3a4 100644
--- a/vespa-feed-client-cli/pom.xml
+++ b/vespa-feed-client-cli/pom.xml
@@ -12,11 +12,6 @@
<packaging>jar</packaging>
<version>7-SNAPSHOT</version>
- <properties>
- <!-- Used by internal properties that are still using JDK8-->
- <maven.compiler.release>8</maven.compiler.release>
- </properties>
-
<dependencies>
<!-- compile scope -->
<dependency>
@@ -56,11 +51,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
- <jdkToolchain>
- <version>${java.version}</version>
- </jdkToolchain>
- <source>${java.version}</source>
- <target>${java.version}</target>
+ <release>${vespaClients.jdk.releaseVersion}</release>
<showDeprecation>true</showDeprecation>
<compilerArgs>
<arg>-Xlint:all</arg>
diff --git a/vespa-feed-client/pom.xml b/vespa-feed-client/pom.xml
index cf2da78c4a9..68c9e4b4b7c 100644
--- a/vespa-feed-client/pom.xml
+++ b/vespa-feed-client/pom.xml
@@ -12,11 +12,6 @@
<packaging>jar</packaging>
<version>7-SNAPSHOT</version>
- <properties>
- <!-- Used by internal properties that are still using JDK8-->
- <maven.compiler.release>8</maven.compiler.release>
- </properties>
-
<dependencies>
<!-- compile scope -->
<dependency>
@@ -54,11 +49,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
- <jdkToolchain>
- <version>${java.version}</version>
- </jdkToolchain>
- <source>${java.version}</source>
- <target>${java.version}</target>
+ <release>${vespaClients.jdk.releaseVersion}</release>
<showDeprecation>true</showDeprecation>
<compilerArgs>
<arg>-Xlint:all</arg>
diff --git a/vespa-hadoop/pom.xml b/vespa-hadoop/pom.xml
index b93e1d8dace..8c65470abea 100644
--- a/vespa-hadoop/pom.xml
+++ b/vespa-hadoop/pom.xml
@@ -19,32 +19,8 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<hadoop.version>2.8.0</hadoop.version>
<pig.version>0.14.0</pig.version>
- <!-- This is a client jar and should be compilable with jdk8 -->
- <maven.compiler.release>8</maven.compiler.release>
</properties>
- <!-- This is a client jar and should be compilable with jdk8 -->
- <profiles>
- <profile>
- <id>jdk11</id>
- <activation>
- <jdk>11</jdk>
- </activation>
- <properties>
- <java.version>11</java.version>
- </properties>
- </profile>
- <profile>
- <id>jdk1.8</id>
- <activation>
- <jdk>1.8</jdk>
- </activation>
- <properties>
- <java.version>8</java.version>
- </properties>
- </profile>
- </profiles>
-
<dependencies>
<!-- Hadoop dependencies -->
<dependency>
@@ -208,11 +184,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
- <jdkToolchain>
- <version>${java.version}</version>
- </jdkToolchain>
- <source>${java.version}</source>
- <target>${java.version}</target>
+ <release>${vespaClients.jdk.releaseVersion}</release>
</configuration>
</plugin>
</plugins>
diff --git a/vespa-http-client/pom.xml b/vespa-http-client/pom.xml
index fa73dd1bd74..eefb07d4ece 100644
--- a/vespa-http-client/pom.xml
+++ b/vespa-http-client/pom.xml
@@ -14,11 +14,6 @@
<name>${project.artifactId}</name>
<description>Independent external feeding API towards Vespa.</description>
- <properties>
- <!-- This is a client jar and should be compilable with jdk8 -->
- <maven.compiler.release>8</maven.compiler.release>
- </properties>
-
<dependencies>
<!-- NOTE: Adding dependencies here may break clients because this is used outside an OSGi container with
@@ -155,11 +150,7 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
- <jdkToolchain>
- <version>${java.version}</version>
- </jdkToolchain>
- <source>${java.version}</source>
- <target>${java.version}</target>
+ <release>${vespaClients.jdk.releaseVersion}</release>
<showDeprecation>true</showDeprecation>
<compilerArgs>
<arg>-Xlint:all</arg>
diff --git a/vespamalloc/src/vespamalloc/malloc/allocchunk.h b/vespamalloc/src/vespamalloc/malloc/allocchunk.h
index efacb0a2085..7df8e12b470 100644
--- a/vespamalloc/src/vespamalloc/malloc/allocchunk.h
+++ b/vespamalloc/src/vespamalloc/malloc/allocchunk.h
@@ -16,9 +16,9 @@ namespace vespamalloc {
* but requires the double-word compare-and-swap instruction.
* Very early Amd K7/8 CPUs are lacking this and will fail (Illegal Instruction).
**/
-struct TaggedPtr {
- TaggedPtr() noexcept : _ptr(nullptr), _tag(0) { }
- TaggedPtr(void *h, size_t t) noexcept : _ptr(h), _tag(t) {}
+struct TaggedPtrT {
+ TaggedPtrT() noexcept : _ptr(nullptr), _tag(0) { }
+ TaggedPtrT(void *h, size_t t) noexcept : _ptr(h), _tag(t) {}
void *_ptr;
size_t _tag;
@@ -61,16 +61,20 @@ struct AtomicTaggedPtr {
void *_ptr;
size_t _tag;
} __attribute__ ((aligned (16)));
+
+using TaggedPtr = AtomicTaggedPtr;
+
#else
- using AtomicTaggedPtr = TaggedPtr;
+ using TaggedPtr = TaggedPtrT;
+ using AtomicTaggedPtr = std::atomic<TaggedPtr>;
#endif
class AFListBase
{
public:
- using HeadPtr = std::conditional<std::atomic<TaggedPtr>::is_always_lock_free, TaggedPtr, AtomicTaggedPtr>::type;
- using AtomicHeadPtr = std::conditional<std::atomic<TaggedPtr>::is_always_lock_free, std::atomic<TaggedPtr>, AtomicTaggedPtr>::type;
+ using HeadPtr = TaggedPtr;
+ using AtomicHeadPtr = std::atomic<TaggedPtr>;
AFListBase() : _next(nullptr) { }
void setNext(AFListBase * csl) { _next = csl; }
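
The rename separates the plain 16-byte tagged pointer from its atomic wrapper so HeadPtr/AtomicHeadPtr no longer need the std::conditional selection. The tag exists for ABA protection on the freelist head, which is only lock-free when the platform offers a double-word compare-and-swap. A standalone sketch of such a tag-bumping push, independent of the vespamalloc types:

#include <atomic>
#include <cstddef>

struct Node { Node* next = nullptr; };

// 16-byte head: pointer plus generation tag. The tag is bumped on every push so
// a stale compare-exchange cannot succeed even if the same pointer value comes
// back (the ABA problem). std::atomic<Tagged> is only lock-free with a 16-byte
// CAS (cmpxchg16b on x86-64); otherwise the runtime falls back to a lock.
struct Tagged {
    Node*       ptr = nullptr;
    std::size_t tag = 0;
};

void push(std::atomic<Tagged>& head, Node* node)
{
    Tagged old_head = head.load(std::memory_order_acquire);
    Tagged new_head;
    do {
        node->next   = old_head.ptr;
        new_head.ptr = node;
        new_head.tag = old_head.tag + 1;
    } while (!head.compare_exchange_weak(old_head, new_head,
                                         std::memory_order_release,
                                         std::memory_order_acquire));
}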