aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--client/go/internal/admin/vespa-wrapper/logfmt/internal_names.txt1
-rw-r--r--cloud-tenant-base-dependencies-enforcer/pom.xml2
-rw-r--r--config-model/src/main/java/com/yahoo/schema/expressiontransforms/InputRecorder.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java40
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/TelegrafTest.java148
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/JRTConnection.java4
-rw-r--r--container-core/src/main/java/com/yahoo/container/core/config/ApplicationBundleLoader.java6
-rw-r--r--container-core/src/main/java/com/yahoo/container/core/config/HandlersConfigurerDi.java4
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java76
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java2
-rw-r--r--container-core/src/main/java/com/yahoo/metrics/DistributorMetrics.java142
-rw-r--r--container-core/src/main/java/com/yahoo/metrics/StorageMetrics.java163
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java14
-rw-r--r--dist/vespa.spec4
-rw-r--r--metrics-proxy/CMakeLists.txt5
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/telegraf/Telegraf.java111
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/telegraf/TelegrafRegistry.java33
-rw-r--r--metrics-proxy/src/main/resources/configdefinitions/telegraf.def22
-rw-r--r--metrics-proxy/src/main/resources/templates/telegraf.conf.vm44
-rw-r--r--metrics-proxy/src/main/sh/start-telegraf.sh104
-rw-r--r--metrics-proxy/src/main/sh/stop-telegraf.sh81
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/telegraf/TelegrafTest.java44
-rw-r--r--metrics-proxy/src/test/resources/telegraf-config-with-two-cloudwatch-plugins.txt46
-rw-r--r--model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java23
-rw-r--r--model-integration/src/main/java/ai/vespa/rankingexpression/importer/onnx/TypeConverter.java36
-rw-r--r--model-integration/src/main/protobuf/onnx.proto6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java24
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java18
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java37
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java28
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java63
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java3
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java78
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java37
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java11
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java2
-rw-r--r--parent/pom.xml2
-rw-r--r--screwdriver.yaml1
-rw-r--r--searchlib/src/tests/attribute/attribute_test.cpp32
-rw-r--r--searchlib/src/tests/attribute/raw_attribute/raw_attribute_test.cpp44
-rw-r--r--searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp16
-rw-r--r--searchlib/src/tests/predicate/document_features_store_test.cpp14
-rw-r--r--searchlib/src/vespa/searchlib/attribute/CMakeLists.txt4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attributevector.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumattribute.hpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumattributesaver.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.h9
-rw-r--r--searchlib/src/vespa/searchlib/attribute/enumstore.hpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/i_enum_store.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multienumattributesaver.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multienumattributesaver.h2
-rw-r--r--searchlib/src/vespa/searchlib/attribute/raw_buffer_store.h1
-rw-r--r--searchlib/src/vespa/searchlib/attribute/raw_buffer_store_reader.cpp34
-rw-r--r--searchlib/src/vespa/searchlib/attribute/raw_buffer_store_reader.h29
-rw-r--r--searchlib/src/vespa/searchlib/attribute/raw_buffer_store_writer.cpp33
-rw-r--r--searchlib/src/vespa/searchlib/attribute/raw_buffer_store_writer.h26
-rw-r--r--searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h4
-rw-r--r--searchlib/src/vespa/searchlib/attribute/single_raw_attribute.cpp27
-rw-r--r--searchlib/src/vespa/searchlib/attribute/single_raw_attribute.h9
-rw-r--r--searchlib/src/vespa/searchlib/attribute/single_raw_attribute_loader.cpp51
-rw-r--r--searchlib/src/vespa/searchlib/attribute/single_raw_attribute_loader.h36
-rw-r--r--searchlib/src/vespa/searchlib/attribute/single_raw_attribute_saver.cpp42
-rw-r--r--searchlib/src/vespa/searchlib/attribute/single_raw_attribute_saver.h32
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp13
-rw-r--r--searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.h2
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/compression.h4
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/feature_store.h8
-rw-r--r--searchsummary/src/tests/docsummary/attributedfw/attributedfw_test.cpp36
-rw-r--r--searchsummary/src/vespa/searchsummary/docsummary/attributedfw.cpp5
-rw-r--r--searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.cpp19
-rw-r--r--searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.h3
-rw-r--r--vespa-dependencies-enforcer/allowed-maven-dependencies.txt28
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java8
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java15
-rw-r--r--vespalib/src/tests/btree/btree_test.cpp5
-rw-r--r--vespalib/src/tests/datastore/array_store/array_store_test.cpp10
-rw-r--r--vespalib/src/tests/datastore/datastore/datastore_test.cpp33
-rw-r--r--vespalib/src/tests/datastore/unique_store/unique_store_test.cpp9
-rw-r--r--vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp6
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodeallocator.h2
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreenodestore.h2
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreestore.h255
-rw-r--r--vespalib/src/vespa/vespalib/btree/btreestore.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/array_store.h16
-rw-r--r--vespalib/src/vespa/vespalib/datastore/array_store.hpp8
-rw-r--r--vespalib/src/vespa/vespalib/datastore/bufferstate.h32
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastore.h2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.cpp79
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.h31
-rw-r--r--vespalib/src/vespa/vespalib/datastore/entryref.h4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store.h5
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp8
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h6
-rw-r--r--zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java17
-rw-r--r--zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java2
107 files changed, 1320 insertions, 1335 deletions
diff --git a/client/go/internal/admin/vespa-wrapper/logfmt/internal_names.txt b/client/go/internal/admin/vespa-wrapper/logfmt/internal_names.txt
index cc554546fcc..41ef2bce528 100644
--- a/client/go/internal/admin/vespa-wrapper/logfmt/internal_names.txt
+++ b/client/go/internal/admin/vespa-wrapper/logfmt/internal_names.txt
@@ -41,7 +41,6 @@ Container.ai.vespa.metricsproxy.metric.model.prometheus.Test
Container.ai.vespa.metricsproxy.node.Test
Container.ai.vespa.metricsproxy.rpc.Test
Container.ai.vespa.metricsproxy.service.Test
-Container.ai.vespa.metricsproxy.telegraf.Test
Container.ai.vespa.modelintegration.evaluator.Test
Container.ai.vespa.models.evaluation.Test
Container.ai.vespa.models.handler.Test
diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml
index 14b39867348..daf3683ad26 100644
--- a/cloud-tenant-base-dependencies-enforcer/pom.xml
+++ b/cloud-tenant-base-dependencies-enforcer/pom.xml
@@ -44,7 +44,7 @@
<javax.servlet-api.version>3.1.0</javax.servlet-api.version>
<javax.ws.rs-api.version>2.0.1</javax.ws.rs-api.version>
<jaxb.version>2.3.0</jaxb.version>
- <jetty.version>11.0.13</jetty.version>
+ <jetty.version>11.0.14</jetty.version>
<org.lz4.version>1.8.0</org.lz4.version>
<org.json.version>20220320</org.json.version> <!-- TODO: Remove on Vespa 9 -->
<slf4j.version>1.7.32</slf4j.version> <!-- WARNING: when updated, also update c.y.v.tenant:base pom -->
diff --git a/config-model/src/main/java/com/yahoo/schema/expressiontransforms/InputRecorder.java b/config-model/src/main/java/com/yahoo/schema/expressiontransforms/InputRecorder.java
index 515745cb6bc..7124628be0c 100644
--- a/config-model/src/main/java/com/yahoo/schema/expressiontransforms/InputRecorder.java
+++ b/config-model/src/main/java/com/yahoo/schema/expressiontransforms/InputRecorder.java
@@ -13,6 +13,7 @@ import com.yahoo.searchlib.rankingexpression.rule.ReferenceNode;
import com.yahoo.searchlib.rankingexpression.transform.ExpressionTransformer;
import java.io.StringReader;
+import java.util.HashSet;
import java.util.Set;
/**
@@ -23,6 +24,7 @@ import java.util.Set;
public class InputRecorder extends ExpressionTransformer<RankProfileTransformContext> {
private final Set<String> neededInputs;
+ private final Set<String> handled = new HashSet<>();
public InputRecorder(Set<String> target) {
this.neededInputs = target;
@@ -52,9 +54,13 @@ public class InputRecorder extends ExpressionTransformer<RankProfileTransformCon
simpleFunctionOrIdentifier = true;
}
if (simpleFunctionOrIdentifier) {
+ if (handled.contains(name)) {
+ return;
+ }
var f = context.rankProfile().getFunctions().get(name);
if (f != null && f.function().arguments().size() == 0) {
transform(f.function().getBody().getRoot(), context);
+ handled.add(name);
return;
}
neededInputs.add(feature.toString());
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
index 5a1c3d87e5e..ec543dba6fb 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
@@ -20,9 +20,6 @@ import ai.vespa.metricsproxy.metric.dimensions.PublicDimensions;
import ai.vespa.metricsproxy.rpc.RpcServer;
import ai.vespa.metricsproxy.service.ConfigSentinelClient;
import ai.vespa.metricsproxy.service.SystemPollerProvider;
-import ai.vespa.metricsproxy.telegraf.Telegraf;
-import ai.vespa.metricsproxy.telegraf.TelegrafConfig;
-import ai.vespa.metricsproxy.telegraf.TelegrafRegistry;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.TreeConfigProducer;
import com.yahoo.config.provision.ApplicationId;
@@ -67,7 +64,6 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC
ApplicationDimensionsConfig.Producer,
ConsumersConfig.Producer,
MonitoringConfig.Producer,
- TelegrafConfig.Producer,
MetricsNodesConfig.Producer
{
public static final Logger log = Logger.getLogger(MetricsProxyContainerCluster.class.getName());
@@ -124,8 +120,6 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC
addHttpHandler(ApplicationMetricsHandler.class, ApplicationMetricsHandler.METRICS_V1_PATH);
addMetricsProxyComponent(ApplicationMetricsRetriever.class);
-
- addTelegrafComponents();
}
private void addHttpHandler(Class<? extends ThreadedHttpRequestHandler> clazz, String bindingPath) {
@@ -142,15 +136,6 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC
return metricsHandler;
}
- private void addTelegrafComponents() {
- getAdmin().ifPresent(admin -> {
- if (admin.getUserMetrics().usesExternalMetricSystems()) {
- addMetricsProxyComponent(Telegraf.class);
- addMetricsProxyComponent(TelegrafRegistry.class);
- }
- });
- }
-
@Override
protected void doPrepare(DeployState deployState) { }
@@ -180,31 +165,6 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC
}
}
- @Override
- public void getConfig(TelegrafConfig.Builder builder) {
- builder.isHostedVespa(isHostedVespa());
-
- var userConsumers = getUserMetricsConsumers();
- for (var consumer : userConsumers.values()) {
- for (var cloudWatch : consumer.cloudWatches()) {
- var cloudWatchBuilder = new TelegrafConfig.CloudWatch.Builder();
- cloudWatchBuilder
- .region(cloudWatch.region())
- .namespace(cloudWatch.namespace())
- .consumer(cloudWatch.consumer());
-
- cloudWatch.hostedAuth().ifPresent(hostedAuth -> cloudWatchBuilder
- .accessKeyName(hostedAuth.accessKeyName)
- .secretKeyName(hostedAuth.secretKeyName));
-
- cloudWatch.sharedCredentials().ifPresent(sharedCredentials -> {
- cloudWatchBuilder.file(sharedCredentials.file);
- sharedCredentials.profile.ifPresent(cloudWatchBuilder::profile);
- });
- builder.cloudWatch(cloudWatchBuilder);
- }
- }
- }
protected boolean messageBusEnabled() { return false; }
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/TelegrafTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/TelegrafTest.java
deleted file mode 100644
index 5a6a65b5a82..00000000000
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/TelegrafTest.java
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.model.admin.metricsproxy;
-
-import ai.vespa.metricsproxy.telegraf.Telegraf;
-import ai.vespa.metricsproxy.telegraf.TelegrafConfig;
-import ai.vespa.metricsproxy.telegraf.TelegrafRegistry;
-import com.yahoo.component.ComponentId;
-import com.yahoo.vespa.model.VespaModel;
-import org.junit.jupiter.api.Test;
-
-import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.CLUSTER_CONFIG_ID;
-import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.hosted;
-import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.self_hosted;
-import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getModel;
-import static org.junit.jupiter.api.Assertions.*;
-
-/**
- * @author gjoranv
- */
-public class TelegrafTest {
-
- @Test
- void telegraf_components_are_set_up_when_cloudwatch_is_configured() {
- String services = servicesWithCloudwatch();
- VespaModel hostedModel = getModel(services, hosted);
-
- var clusterComponents = hostedModel.getAdmin().getMetricsProxyCluster().getComponentsMap();
- assertTrue(clusterComponents.containsKey(ComponentId.fromString(Telegraf.class.getName())));
- assertTrue(clusterComponents.containsKey(ComponentId.fromString(TelegrafRegistry.class.getName())));
- }
-
- @Test
- void telegraf_components_are_not_set_up_when_no_external_systems_are_added_in_services() {
- String services = String.join("\n",
- "<services>",
- " <admin version='2.0'>",
- " <adminserver hostalias='node1'/>",
- " <metrics>",
- " <consumer id='foo' />",
- " </metrics>",
- " </admin>",
- "</services>");
- VespaModel hostedModel = getModel(services, hosted);
-
- var clusterComponents = hostedModel.getAdmin().getMetricsProxyCluster().getComponentsMap();
- assertFalse(clusterComponents.containsKey(ComponentId.fromString(Telegraf.class.getName())));
- assertFalse(clusterComponents.containsKey(ComponentId.fromString(TelegrafRegistry.class.getName())));
- }
-
- @Test
- void telegraf_config_is_generated_for_cloudwatch_in_services() {
- String services = servicesWithCloudwatch();
- VespaModel hostedModel = getModel(services, hosted);
- TelegrafConfig config = hostedModel.getConfig(TelegrafConfig.class, CLUSTER_CONFIG_ID);
- assertTrue(config.isHostedVespa());
-
- var cloudWatch0 = config.cloudWatch(0);
- assertEquals("cloudwatch-consumer", cloudWatch0.consumer());
- assertEquals("us-east-1", cloudWatch0.region());
- assertEquals("my-namespace", cloudWatch0.namespace());
- assertEquals("my-access-key", cloudWatch0.accessKeyName());
- assertEquals("my-secret-key", cloudWatch0.secretKeyName());
- assertEquals("default", cloudWatch0.profile());
- }
-
- private String servicesWithCloudwatch() {
- return String.join("\n",
- "<services>",
- " <admin version='2.0'>",
- " <adminserver hostalias='node1'/>",
- " <metrics>",
- " <consumer id='cloudwatch-consumer'>",
- " <metric id='my-metric'/>",
- " <cloudwatch region='us-east-1' namespace='my-namespace' >",
- " <credentials access-key-name='my-access-key' ",
- " secret-key-name='my-secret-key' />",
- " </cloudwatch>",
- " </consumer>",
- " </metrics>",
- " </admin>",
- "</services>"
- );
- }
-
- @Test
- void multiple_cloudwatches_are_allowed_for_the_same_consumer() {
- String services = String.join("\n",
- "<services>",
- " <admin version='2.0'>",
- " <adminserver hostalias='node1'/>",
- " <metrics>",
- " <consumer id='cloudwatch-consumer'>",
- " <metric id='my-metric'/>",
- " <cloudwatch region='us-east-1' namespace='namespace-1' >",
- " <credentials access-key-name='access-key-1' ",
- " secret-key-name='secret-key-1' />",
- " </cloudwatch>",
- " <cloudwatch region='us-east-1' namespace='namespace-2' >",
- " <shared-credentials profile='profile-2' />",
- " </cloudwatch>",
- " </consumer>",
- " </metrics>",
- " </admin>",
- "</services>"
- );
- VespaModel hostedModel = getModel(services, hosted);
- TelegrafConfig config = hostedModel.getConfig(TelegrafConfig.class, CLUSTER_CONFIG_ID);
-
- var cloudWatch0 = config.cloudWatch(0);
- assertEquals("cloudwatch-consumer", cloudWatch0.consumer());
- assertEquals("us-east-1", cloudWatch0.region());
- assertEquals("namespace-1", cloudWatch0.namespace());
- assertEquals("access-key-1", cloudWatch0.accessKeyName());
- assertEquals("secret-key-1", cloudWatch0.secretKeyName());
- assertEquals("default", cloudWatch0.profile());
-
- var cloudWatch1 = config.cloudWatch(1);
- assertEquals("cloudwatch-consumer", cloudWatch1.consumer());
- assertEquals("us-east-1", cloudWatch1.region());
- assertEquals("namespace-2", cloudWatch1.namespace());
- assertEquals("", cloudWatch1.accessKeyName());
- assertEquals("", cloudWatch1.secretKeyName());
- assertEquals("profile-2", cloudWatch1.profile());
- }
-
- @Test
- void profile_named_default_is_used_when_no_profile_is_given_in_shared_credentials() {
- String services = String.join("\n",
- "<services>",
- " <admin version='2.0'>",
- " <adminserver hostalias='node1'/>",
- " <metrics>",
- " <consumer id='cloudwatch-consumer'>",
- " <metric id='my-metric'/>",
- " <cloudwatch region='us-east-1' namespace='foo' >",
- " <shared-credentials file='/path/to/file' />",
- " </cloudwatch>",
- " </consumer>",
- " </metrics>",
- " </admin>",
- "</services>"
- );
- VespaModel model = getModel(services, self_hosted);
- TelegrafConfig config = model.getConfig(TelegrafConfig.class, CLUSTER_CONFIG_ID);
- assertEquals("default", config.cloudWatch(0).profile());
- }
-
-}
diff --git a/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java b/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java
index f2a104696c1..1d4f7d37fe5 100644
--- a/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java
+++ b/config/src/main/java/com/yahoo/vespa/config/JRTConnection.java
@@ -6,7 +6,6 @@ import com.yahoo.jrt.RequestWaiter;
import com.yahoo.jrt.Spec;
import com.yahoo.jrt.Supervisor;
import com.yahoo.jrt.Target;
-
import java.time.Duration;
import java.util.Objects;
import java.util.logging.Level;
@@ -52,7 +51,8 @@ public class JRTConnection implements Connection {
*/
public synchronized Target getTarget() {
if (target == null || !target.isValid()) {
- logger.log(Level.INFO, "Connecting to " + address);
+ if (target != null) // Don't log the first time
+ logger.log(Level.INFO, "Connecting to " + address);
target = supervisor.connect(new Spec(address));
}
return target;
diff --git a/container-core/src/main/java/com/yahoo/container/core/config/ApplicationBundleLoader.java b/container-core/src/main/java/com/yahoo/container/core/config/ApplicationBundleLoader.java
index 66604966c8a..27fca37268e 100644
--- a/container-core/src/main/java/com/yahoo/container/core/config/ApplicationBundleLoader.java
+++ b/container-core/src/main/java/com/yahoo/container/core/config/ApplicationBundleLoader.java
@@ -70,7 +70,8 @@ public class ApplicationBundleLoader {
bundlesFromNewGeneration = installBundles(newFileReferences);
BundleStarter.startBundles(activeBundles.values());
- log.fine(installedBundlesMessage());
+
+ if (obsoleteBundles.size() > 0 || newFileReferences.size() > 0) log.info(installedBundlesMessage());
readyForNewBundles = false;
}
@@ -96,7 +97,8 @@ public class ApplicationBundleLoader {
*/
private Set<Bundle> commitBundles() {
var bundlesToUninstall = new LinkedHashSet<>(obsoleteBundles.values());
- log.info("Bundles to be uninstalled from previous generation: " + bundlesToUninstall);
+ if (bundlesToUninstall.size() > 0)
+ log.info("Bundles to be uninstalled from previous generation: " + bundlesToUninstall);
bundlesFromNewGeneration = Map.of();
obsoleteBundles = Map.of();
diff --git a/container-core/src/main/java/com/yahoo/container/core/config/HandlersConfigurerDi.java b/container-core/src/main/java/com/yahoo/container/core/config/HandlersConfigurerDi.java
index 06dce96e70a..925ea9ec702 100644
--- a/container-core/src/main/java/com/yahoo/container/core/config/HandlersConfigurerDi.java
+++ b/container-core/src/main/java/com/yahoo/container/core/config/HandlersConfigurerDi.java
@@ -15,8 +15,8 @@ import com.yahoo.container.di.componentgraph.core.ComponentGraph;
import com.yahoo.container.di.config.SubscriberFactory;
import com.yahoo.container.logging.AccessLog;
import com.yahoo.filedistribution.fileacquirer.FileAcquirer;
-import com.yahoo.jdisc.application.OsgiFramework;
import com.yahoo.jdisc.application.BsnVersion;
+import com.yahoo.jdisc.application.OsgiFramework;
import com.yahoo.jdisc.handler.RequestHandler;
import com.yahoo.jdisc.service.ClientProvider;
import com.yahoo.jdisc.service.ServerProvider;
@@ -104,7 +104,7 @@ public class HandlersConfigurerDi {
@Override
public void useApplicationBundles(Collection<FileReference> bundles, long generation) {
- log.info("Installing bundles for application generation " + generation);
+ if (! bundles.isEmpty()) log.info("Installing bundles for application generation " + generation);
applicationBundleLoader.useBundles(new ArrayList<>(bundles));
}
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java b/container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java
index d608977a7ff..bacceac9d76 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/LoggingRequestHandler.java
@@ -3,7 +3,6 @@ package com.yahoo.container.jdisc;
import com.yahoo.component.annotation.Inject;
import com.yahoo.container.handler.Timing;
-import com.yahoo.container.logging.AccessLog;
import com.yahoo.container.logging.AccessLogEntry;
import com.yahoo.jdisc.Metric;
import com.yahoo.jdisc.Response;
@@ -102,54 +101,54 @@ public abstract class LoggingRequestHandler extends ThreadedHttpRequestHandler {
private void logTimes(long startTime, String sourceIP,
long renderStartTime, long commitStartTime, long endTime,
- String req, String normalizedQuery, Timing t) {
+ String req, ExtendedResponse response) {
// note: intentionally only taking time since request was received
long totalTime = endTime - startTime;
- long timeoutInterval = Long.MAX_VALUE;
- long requestOverhead = 0;
- long summaryStartTime = 0;
+ long timeoutInterval;
+ long requestOverhead;
+ long summaryStartTime;
+ Timing t = response.getTiming();
if (t != null) {
timeoutInterval = t.getTimeout();
long queryStartTime = t.getQueryStartTime();
- if (queryStartTime > 0) {
- requestOverhead = queryStartTime - startTime;
- }
+ requestOverhead = (queryStartTime > 0) ? queryStartTime - startTime : 0;
summaryStartTime = t.getSummaryStartTime();
- }
-
- if (totalTime <= timeoutInterval) {
- return;
- }
-
- StringBuilder b = new StringBuilder();
- b.append(normalizedQuery);
- b.append(" from ").append(sourceIP).append(". ");
-
- if (requestOverhead > 0) {
- b.append("Time from HTTP connection open to request reception ");
- b.append(requestOverhead).append(" ms. ");
- }
- if (summaryStartTime != 0) {
- b.append("Request time: ");
- b.append(summaryStartTime - startTime).append(" ms. ");
- b.append("Summary fetch time: ");
- b.append(renderStartTime - summaryStartTime).append(" ms. ");
} else {
- long spentSearching = renderStartTime - startTime;
- b.append("Processing time: ").append(spentSearching).append(" ms. ");
+ requestOverhead = 0;
+ summaryStartTime = 0;
+ timeoutInterval = Long.MAX_VALUE;
}
- b.append("Result rendering/transfer: ");
- b.append(commitStartTime - renderStartTime).append(" ms. ");
- b.append("End transaction: ");
- b.append(endTime - commitStartTime).append(" ms. ");
- b.append("Total: ").append(totalTime).append(" ms. ");
- b.append("Timeout: ").append(timeoutInterval).append(" ms. ");
- b.append("Request string: ").append(req);
+ if (totalTime <= timeoutInterval) return;
- log.log(Level.WARNING, "Slow execution. " + b);
+ log.log(Level.FINE, () -> {
+ StringBuilder b = new StringBuilder();
+ b.append(response.getParsedQuery());
+ b.append(" from ").append(sourceIP).append(". ");
+ if (requestOverhead > 0) {
+ b.append("Time from HTTP connection open to request reception ");
+ b.append(requestOverhead).append(" ms. ");
+ }
+ if (summaryStartTime != 0) {
+ b.append("Request time: ");
+ b.append(summaryStartTime - startTime).append(" ms. ");
+ b.append("Summary fetch time: ");
+ b.append(renderStartTime - summaryStartTime).append(" ms. ");
+ } else {
+ long spentSearching = renderStartTime - startTime;
+ b.append("Processing time: ").append(spentSearching).append(" ms. ");
+ }
+ b.append("Result rendering/transfer: ");
+ b.append(commitStartTime - renderStartTime).append(" ms. ");
+ b.append("End transaction: ");
+ b.append(endTime - commitStartTime).append(" ms. ");
+ b.append("Total: ").append(totalTime).append(" ms. ");
+ b.append("Timeout: ").append(timeoutInterval).append(" ms. ");
+ b.append("Request string: ").append(req);
+ return b.toString();
+ });
}
private static class NullResponse extends ExtendedResponse {
@@ -224,8 +223,7 @@ public abstract class LoggingRequestHandler extends ThreadedHttpRequestHandler {
commitStartTime,
endTime,
getUri(jdiscRequest),
- extendedResponse.getParsedQuery(),
- extendedResponse.getTiming());
+ extendedResponse);
Optional<AccessLogEntry> jdiscRequestAccessLogEntry =
AccessLoggingRequestHandler.getAccessLogEntry(jdiscRequest);
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java
index 333305cf604..649f0908748 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/Janitor.java
@@ -25,7 +25,7 @@ public class Janitor extends AbstractComponent {
@Inject
public Janitor() {
int threadPoolSize = Math.max(2, Runtime.getRuntime().availableProcessors()/4);
- log.info("Creating janitor executor with " + threadPoolSize + " threads");
+ log.fine("Creating janitor executor with " + threadPoolSize + " threads");
this.executor = Executors.newFixedThreadPool(threadPoolSize, new DaemonThreadFactory("jdisc-janitor-"));
}
diff --git a/container-core/src/main/java/com/yahoo/metrics/DistributorMetrics.java b/container-core/src/main/java/com/yahoo/metrics/DistributorMetrics.java
index 7a88607c7f6..82e9aff74a8 100644
--- a/container-core/src/main/java/com/yahoo/metrics/DistributorMetrics.java
+++ b/container-core/src/main/java/com/yahoo/metrics/DistributorMetrics.java
@@ -21,6 +21,8 @@ public enum DistributorMetrics implements VespaMetrics {
VDS_IDEALSTATE_DELETE_BUCKET_DONE_OK("vds.idealstate.delete_bucket.done_ok", Unit.OPERATION, "The number of operations successfully performed"),
VDS_IDEALSTATE_DELETE_BUCKET_DONE_FAILED("vds.idealstate.delete_bucket.done_failed", Unit.OPERATION, "The number of operations that failed"),
VDS_IDEALSTATE_DELETE_BUCKET_PENDING("vds.idealstate.delete_bucket.pending", Unit.OPERATION, "The number of operations pending"),
+ VDS_IDEALSTATE_DELETE_BUCKET_BLOCKED("vds.idealstate.delete_bucket.blocked", Unit.OPERATION, "The number of operations blocked by blocking operation starter"),
+ VDS_IDEALSTATE_DELETE_BUCKET_THROTTLED("vds.idealstate.delete_bucket.throttled", Unit.OPERATION, "The number of operations throttled by throttling operation starter"),
VDS_IDEALSTATE_MERGE_BUCKET_DONE_OK("vds.idealstate.merge_bucket.done_ok", Unit.OPERATION, "The number of operations successfully performed"),
VDS_IDEALSTATE_MERGE_BUCKET_DONE_FAILED("vds.idealstate.merge_bucket.done_failed", Unit.OPERATION, "The number of operations that failed"),
VDS_IDEALSTATE_MERGE_BUCKET_PENDING("vds.idealstate.merge_bucket.pending", Unit.OPERATION, "The number of operations pending"),
@@ -32,13 +34,19 @@ public enum DistributorMetrics implements VespaMetrics {
VDS_IDEALSTATE_SPLIT_BUCKET_DONE_OK("vds.idealstate.split_bucket.done_ok", Unit.OPERATION, "The number of operations successfully performed"),
VDS_IDEALSTATE_SPLIT_BUCKET_DONE_FAILED("vds.idealstate.split_bucket.done_failed", Unit.OPERATION, "The number of operations that failed"),
VDS_IDEALSTATE_SPLIT_BUCKET_PENDING("vds.idealstate.split_bucket.pending", Unit.OPERATION, "The number of operations pending"),
+ VDS_IDEALSTATE_SPLIT_BUCKET_BLOCKED("vds.idealstate.split_bucket.blocked", Unit.OPERATION, "The number of operations blocked by blocking operation starter"),
+ VDS_IDEALSTATE_SPLIT_BUCKET_THROTTLED("vds.idealstate.split_bucket.throttled", Unit.OPERATION, "The number of operations throttled by throttling operation starter"),
VDS_IDEALSTATE_JOIN_BUCKET_DONE_OK("vds.idealstate.join_bucket.done_ok", Unit.OPERATION, "The number of operations successfully performed"),
VDS_IDEALSTATE_JOIN_BUCKET_DONE_FAILED("vds.idealstate.join_bucket.done_failed", Unit.OPERATION, "The number of operations that failed"),
VDS_IDEALSTATE_JOIN_BUCKET_PENDING("vds.idealstate.join_bucket.pending", Unit.OPERATION, "The number of operations pending"),
+ VDS_IDEALSTATE_JOIN_BUCKET_BLOCKED("vds.idealstate.join_bucket.blocked", Unit.OPERATION, "The number of operations blocked by blocking operation starter"),
+ VDS_IDEALSTATE_JOIN_BUCKET_THROTTLED("vds.idealstate.join_bucket.throttled", Unit.OPERATION, "The number of operations throttled by throttling operation starter"),
VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_OK("vds.idealstate.garbage_collection.done_ok", Unit.OPERATION, "The number of operations successfully performed"),
VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_FAILED("vds.idealstate.garbage_collection.done_failed", Unit.OPERATION, "The number of operations that failed"),
VDS_IDEALSTATE_GARBAGE_COLLECTION_PENDING("vds.idealstate.garbage_collection.pending", Unit.OPERATION, "The number of operations pending"),
VDS_IDEALSTATE_GARBAGE_COLLECTION_DOCUMENTS_REMOVED("vds.idealstate.garbage_collection.documents_removed", Unit.DOCUMENT, "Number of documents removed by GC operations"),
+ VDS_IDEALSTATE_GARBAGE_COLLECTION_BLOCKED("vds.idealstate.garbage_collection.blocked", Unit.OPERATION, "The number of operations blocked by blocking operation starter"),
+ VDS_IDEALSTATE_GARBAGE_COLLECTION_THROTTLED("vds.idealstate.garbage_collection.throttled", Unit.OPERATION, "The number of operations throttled by throttling operation starter"),
VDS_DISTRIBUTOR_PUTS_LATENCY("vds.distributor.puts.latency", Unit.MILLISECOND, "The latency of put operations"),
VDS_DISTRIBUTOR_PUTS_OK("vds.distributor.puts.ok", Unit.OPERATION, "The number of successful put operations performed"),
@@ -60,6 +68,14 @@ public enum DistributorMetrics implements VespaMetrics {
VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTFOUND("vds.distributor.removes.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
VDS_DISTRIBUTOR_REMOVES_FAILURES_TEST_AND_SET_FAILED("vds.distributor.removes.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
VDS_DISTRIBUTOR_REMOVES_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.removes.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_REMOVES_FAILURES_BUSY("vds.distributor.removes.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_REMOVES_FAILURES_INCONSISTENT_BUCKET("vds.distributor.removes.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTCONNECTED("vds.distributor.removes.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTREADY("vds.distributor.removes.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_REMOVES_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.removes.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_REMOVES_FAILURES_STORAGEFAILURE("vds.distributor.removes.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_REMOVES_FAILURES_TIMEOUT("vds.distributor.removes.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_REMOVES_FAILURES_WRONGDISTRIBUTOR("vds.distributor.removes.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
VDS_DISTRIBUTOR_UPDATES_LATENCY("vds.distributor.updates.latency", Unit.MILLISECOND, "The latency of update operations"),
VDS_DISTRIBUTOR_UPDATES_OK("vds.distributor.updates.ok", Unit.OPERATION, "The number of successful updates operations performed"),
VDS_DISTRIBUTOR_UPDATES_FAILURES_TOTAL("vds.distributor.updates.failures.total", Unit.OPERATION, "Sum of all failures"),
@@ -67,12 +83,43 @@ public enum DistributorMetrics implements VespaMetrics {
VDS_DISTRIBUTOR_UPDATES_FAILURES_TEST_AND_SET_FAILED("vds.distributor.updates.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
VDS_DISTRIBUTOR_UPDATES_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.updates.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
VDS_DISTRIBUTOR_UPDATES_DIVERGING_TIMESTAMP_UPDATES("vds.distributor.updates.diverging_timestamp_updates", Unit.OPERATION, "Number of updates that report they were performed against divergent version timestamps on different replicas"),
+ VDS_DISTRIBUTOR_UPDATES_FAILURES_BUSY("vds.distributor.updates.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_UPDATES_FAILURES_INCONSISTENT_BUCKET("vds.distributor.updates.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTCONNECTED("vds.distributor.updates.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTREADY("vds.distributor.updates.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_UPDATES_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.updates.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_UPDATES_FAILURES_STORAGEFAILURE("vds.distributor.updates.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_UPDATES_FAILURES_TIMEOUT("vds.distributor.updates.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_UPDATES_FAILURES_WRONGDISTRIBUTOR("vds.distributor.updates.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
+ VDS_DISTRIBUTOR_UPDATES_FAST_PATH_RESTARTS("vds.distributor.updates.fast_path_restarts", Unit.OPERATION, "Number of safe path (write repair) updates that were restarted as fast path updates because all replicas returned documents with the same timestamp in the initial read phase"),
VDS_DISTRIBUTOR_REMOVELOCATIONS_OK("vds.distributor.removelocations.ok", Unit.OPERATION, "The number of successful removelocations operations performed"),
VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TOTAL("vds.distributor.removelocations.failures.total", Unit.OPERATION, "Sum of all failures"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_BUSY("vds.distributor.removelocations.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.removelocations.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_INCONSISTENT_BUCKET("vds.distributor.removelocations.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_NOTCONNECTED("vds.distributor.removelocations.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_NOTFOUND("vds.distributor.removelocations.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_NOTREADY("vds.distributor.removelocations.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.removelocations.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_STORAGEFAILURE("vds.distributor.removelocations.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TEST_AND_SET_FAILED("vds.distributor.removelocations.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TIMEOUT("vds.distributor.removelocations.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_WRONGDISTRIBUTOR("vds.distributor.removelocations.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
+ VDS_DISTRIBUTOR_REMOVELOCATIONS_LATENCY("vds.distributor.removelocations.latency", Unit.MILLISECOND, "The average latency of removelocations operations"),
VDS_DISTRIBUTOR_GETS_LATENCY("vds.distributor.gets.latency", Unit.MILLISECOND, "The average latency of gets operations"),
VDS_DISTRIBUTOR_GETS_OK("vds.distributor.gets.ok", Unit.OPERATION, "The number of successful gets operations performed"),
VDS_DISTRIBUTOR_GETS_FAILURES_TOTAL("vds.distributor.gets.failures.total", Unit.OPERATION, "Sum of all failures"),
VDS_DISTRIBUTOR_GETS_FAILURES_NOTFOUND("vds.distributor.gets.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_BUSY("vds.distributor.gets.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.gets.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_INCONSISTENT_BUCKET("vds.distributor.gets.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_NOTCONNECTED("vds.distributor.gets.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_NOTREADY("vds.distributor.gets.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.gets.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_STORAGEFAILURE("vds.distributor.gets.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_TEST_AND_SET_FAILED("vds.distributor.gets.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_TIMEOUT("vds.distributor.gets.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_GETS_FAILURES_WRONGDISTRIBUTOR("vds.distributor.gets.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
VDS_DISTRIBUTOR_VISITOR_LATENCY("vds.distributor.visitor.latency", Unit.MILLISECOND, "The average latency of visitor operations"),
VDS_DISTRIBUTOR_VISITOR_OK("vds.distributor.visitor.ok", Unit.OPERATION, "The number of successful visitor operations performed"),
VDS_DISTRIBUTOR_VISITOR_FAILURES_TOTAL("vds.distributor.visitor.failures.total", Unit.OPERATION, "Sum of all failures"),
@@ -85,10 +132,105 @@ public enum DistributorMetrics implements VespaMetrics {
VDS_DISTRIBUTOR_VISITOR_FAILURES_BUSY("vds.distributor.visitor.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
VDS_DISTRIBUTOR_VISITOR_FAILURES_INCONSISTENT_BUCKET("vds.distributor.visitor.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTFOUND("vds.distributor.visitor.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
+ VDS_DISTRIBUTOR_VISITOR_BYTES_PER_VISITOR("vds.distributor.visitor.bytes_per_visitor", Unit.BYTE, "The number of bytes visited on content nodes as part of a single client visitor command"),
+ VDS_DISTRIBUTOR_VISITOR_DOCS_PER_VISITOR("vds.distributor.visitor.docs_per_visitor", Unit.DOCUMENT, "The number of documents visited on content nodes as part of a single client visitor command"),
+ VDS_DISTRIBUTOR_VISITOR_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.visitor.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_VISITOR_FAILURES_TEST_AND_SET_FAILED("vds.distributor.visitor.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
VDS_DISTRIBUTOR_DOCSSTORED("vds.distributor.docsstored", Unit.DOCUMENT, "Number of documents stored in all buckets controlled by this distributor"),
VDS_DISTRIBUTOR_BYTESSTORED("vds.distributor.bytesstored", Unit.BYTE, "Number of bytes stored in all buckets controlled by this distributor"),
+ METRICMANAGER_PERIODICHOOKLATENCY("metricmanager.periodichooklatency", Unit.MILLISECOND, "Time in ms used to update a single periodic hook"),
+ METRICMANAGER_RESETLATENCY("metricmanager.resetlatency", Unit.MILLISECOND, "Time in ms used to reset all metrics."),
+ METRICMANAGER_SLEEPTIME("metricmanager.sleeptime", Unit.MILLISECOND, "Time in ms worker thread is sleeping"),
+ METRICMANAGER_SNAPSHOTHOOKLATENCY("metricmanager.snapshothooklatency", Unit.MILLISECOND, "Time in ms used to update a single snapshot hook"),
+ METRICMANAGER_SNAPSHOTLATENCY("metricmanager.snapshotlatency", Unit.MILLISECOND, "Time in ms used to take a snapshot"),
+ VDS_DISTRIBUTOR_ACTIVATE_CLUSTER_STATE_PROCESSING_TIME("vds.distributor.activate_cluster_state_processing_time", Unit.MILLISECOND, "Elapsed time where the distributor thread is blocked on merging pending bucket info into its bucket database upon activating a cluster state"),
+ VDS_DISTRIBUTOR_BUCKET_DB_MEMORY_USAGE_ALLOCATED_BYTES("vds.distributor.bucket_db.memory_usage.allocated_bytes", Unit.BYTE, "The number of allocated bytes"),
+ VDS_DISTRIBUTOR_BUCKET_DB_MEMORY_USAGE_DEAD_BYTES("vds.distributor.bucket_db.memory_usage.dead_bytes", Unit.BYTE, "The number of dead bytes (<= used_bytes)"),
+ VDS_DISTRIBUTOR_BUCKET_DB_MEMORY_USAGE_ONHOLD_BYTES("vds.distributor.bucket_db.memory_usage.onhold_bytes", Unit.BYTE, "The number of bytes on hold"),
+ VDS_DISTRIBUTOR_BUCKET_DB_MEMORY_USAGE_USED_BYTES("vds.distributor.bucket_db.memory_usage.used_bytes", Unit.BYTE, "The number of used bytes (<= allocated_bytes)"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_BUSY("vds.distributor.getbucketlists.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.getbucketlists.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_INCONSISTENT_BUCKET("vds.distributor.getbucketlists.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_NOTCONNECTED("vds.distributor.getbucketlists.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_NOTFOUND("vds.distributor.getbucketlists.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_NOTREADY("vds.distributor.getbucketlists.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.getbucketlists.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_STORAGEFAILURE("vds.distributor.getbucketlists.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_TEST_AND_SET_FAILED("vds.distributor.getbucketlists.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_TIMEOUT("vds.distributor.getbucketlists.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_TOTAL("vds.distributor.getbucketlists.failures.total", Unit.OPERATION, "Sum of all failures"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_FAILURES_WRONGDISTRIBUTOR("vds.distributor.getbucketlists.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_LATENCY("vds.distributor.getbucketlists.latency", Unit.MILLISECOND, "The average latency of getbucketlists operations"),
+ VDS_DISTRIBUTOR_GETBUCKETLISTS_OK("vds.distributor.getbucketlists.ok", Unit.OPERATION, "The number of successful getbucketlists operations performed"),
+ VDS_DISTRIBUTOR_RECOVERYMODESCHEDULINGTIME("vds.distributor.recoverymodeschedulingtime", Unit.MILLISECOND, "Time spent scheduling operations in recovery mode after receiving new cluster state"),
+ VDS_DISTRIBUTOR_SET_CLUSTER_STATE_PROCESSING_TIME("vds.distributor.set_cluster_state_processing_time", Unit.MILLISECOND, "Elapsed time where the distributor thread is blocked on processing its bucket database upon receiving a new cluster state"),
+ VDS_DISTRIBUTOR_STATE_TRANSITION_TIME("vds.distributor.state_transition_time", Unit.MILLISECOND, "Time it takes to complete a cluster state transition. If a state transition is preempted before completing, its elapsed time is counted as part of the total time spent for the final, completed state transition"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_BUSY("vds.distributor.stats.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.stats.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_INCONSISTENT_BUCKET("vds.distributor.stats.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_NOTCONNECTED("vds.distributor.stats.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_NOTFOUND("vds.distributor.stats.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_NOTREADY("vds.distributor.stats.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.stats.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_STORAGEFAILURE("vds.distributor.stats.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_TEST_AND_SET_FAILED("vds.distributor.stats.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_TIMEOUT("vds.distributor.stats.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_TOTAL("vds.distributor.stats.failures.total", Unit.OPERATION, "The total number of failures"),
+ VDS_DISTRIBUTOR_STATS_FAILURES_WRONGDISTRIBUTOR("vds.distributor.stats.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
+ VDS_DISTRIBUTOR_STATS_LATENCY("vds.distributor.stats.latency", Unit.MILLISECOND, "The average latency of stats operations"),
+ VDS_DISTRIBUTOR_STATS_OK("vds.distributor.stats.ok", Unit.OPERATION, "The number of successful stats operations performed"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_BUSY("vds.distributor.update_gets.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.update_gets.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_INCONSISTENT_BUCKET("vds.distributor.update_gets.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_NOTCONNECTED("vds.distributor.update_gets.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_NOTFOUND("vds.distributor.update_gets.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_NOTREADY("vds.distributor.update_gets.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.update_gets.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_STORAGEFAILURE("vds.distributor.update_gets.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_TEST_AND_SET_FAILED("vds.distributor.update_gets.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_TIMEOUT("vds.distributor.update_gets.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_TOTAL("vds.distributor.update_gets.failures.total", Unit.OPERATION, "The total number of failures"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_FAILURES_WRONGDISTRIBUTOR("vds.distributor.update_gets.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_LATENCY("vds.distributor.update_gets.latency", Unit.MILLISECOND, "The average latency of update_gets operations"),
+ VDS_DISTRIBUTOR_UPDATE_GETS_OK("vds.distributor.update_gets.ok", Unit.OPERATION, "The number of successful update_gets operations performed"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_BUSY("vds.distributor.update_metadata_gets.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.update_metadata_gets.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_INCONSISTENT_BUCKET("vds.distributor.update_metadata_gets.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_NOTCONNECTED("vds.distributor.update_metadata_gets.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_NOTFOUND("vds.distributor.update_metadata_gets.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_NOTREADY("vds.distributor.update_metadata_gets.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.update_metadata_gets.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_STORAGEFAILURE("vds.distributor.update_metadata_gets.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_TEST_AND_SET_FAILED("vds.distributor.update_metadata_gets.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_TIMEOUT("vds.distributor.update_metadata_gets.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_TOTAL("vds.distributor.update_metadata_gets.failures.total", Unit.OPERATION, "The total number of failures"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_FAILURES_WRONGDISTRIBUTOR("vds.distributor.update_metadata_gets.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_LATENCY("vds.distributor.update_metadata_gets.latency", Unit.MILLISECOND, "The average latency of update_metadata_gets operations"),
+ VDS_DISTRIBUTOR_UPDATE_METADATA_GETS_OK("vds.distributor.update_metadata_gets.ok", Unit.OPERATION, "The number of successful update_metadata_gets operations performed"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_BUSY("vds.distributor.update_puts.failures.busy", Unit.OPERATION, "The number of messages from storage that failed because the storage node was busy"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_CONCURRENT_MUTATIONS("vds.distributor.update_puts.failures.concurrent_mutations", Unit.OPERATION, "The number of operations that were transiently failed due to a mutating operation already being in progress for its document ID"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_INCONSISTENT_BUCKET("vds.distributor.update_puts.failures.inconsistent_bucket", Unit.OPERATION, "The number of operations failed due to buckets being in an inconsistent state or not found"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_NOTCONNECTED("vds.distributor.update_puts.failures.notconnected", Unit.OPERATION, "The number of operations discarded because there were no available storage nodes to send to"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_NOTFOUND("vds.distributor.update_puts.failures.notfound", Unit.OPERATION, "The number of operations that failed because the document did not exist"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_NOTREADY("vds.distributor.update_puts.failures.notready", Unit.OPERATION, "The number of operations discarded because distributor was not ready"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_SAFE_TIME_NOT_REACHED("vds.distributor.update_puts.failures.safe_time_not_reached", Unit.OPERATION, "The number of operations that were transiently failed due to them arriving before the safe time point for bucket ownership handovers has passed"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_STORAGEFAILURE("vds.distributor.update_puts.failures.storagefailure", Unit.OPERATION, "The number of operations that failed in storage"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_TEST_AND_SET_FAILED("vds.distributor.update_puts.failures.test_and_set_failed", Unit.OPERATION, "The number of mutating operations that failed because they specified a test-and-set condition that did not match the existing document"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_TIMEOUT("vds.distributor.update_puts.failures.timeout", Unit.OPERATION, "The number of operations that failed because the operation timed out towards storage"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_TOTAL("vds.distributor.update_puts.failures.total", Unit.OPERATION, "The total number of failures"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_FAILURES_WRONGDISTRIBUTOR("vds.distributor.update_puts.failures.wrongdistributor", Unit.OPERATION, "The number of operations discarded because they were sent to the wrong distributor"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_LATENCY("vds.distributor.update_puts.latency", Unit.MILLISECOND, "The average latency of update_puts operations"),
+ VDS_DISTRIBUTOR_UPDATE_PUTS_OK("vds.distributor.update_puts.ok", Unit.OPERATION, "The number of successful update_puts operations performed"),
+
+ VDS_IDEALSTATE_NODES_PER_MERGE("vds.idealstate.nodes_per_merge", Unit.NODE, "The number of nodes involved in a single merge operation."),
+ VDS_IDEALSTATE_SET_BUCKET_STATE_BLOCKED("vds.idealstate.set_bucket_state.blocked", Unit.OPERATION, "The number of operations blocked by blocking operation starter"),
+ VDS_IDEALSTATE_SET_BUCKET_STATE_DONE_FAILED("vds.idealstate.set_bucket_state.done_failed", Unit.OPERATION, "The number of operations that failed"),
+ VDS_IDEALSTATE_SET_BUCKET_STATE_DONE_OK("vds.idealstate.set_bucket_state.done_ok", Unit.OPERATION, "The number of operations successfully performed"),
+ VDS_IDEALSTATE_SET_BUCKET_STATE_PENDING("vds.idealstate.set_bucket_state.pending", Unit.OPERATION, "The number of operations pending"),
+ VDS_IDEALSTATE_SET_BUCKET_STATE_THROTTLED("vds.idealstate.set_bucket_state.throttled", Unit.OPERATION, "The number of operations throttled by throttling operation starter"),
+
VDS_BOUNCER_CLOCK_SKEW_ABORTS("vds.bouncer.clock_skew_aborts", Unit.OPERATION, "Number of client operations that were aborted due to clock skew between sender and receiver exceeding acceptable range");
diff --git a/container-core/src/main/java/com/yahoo/metrics/StorageMetrics.java b/container-core/src/main/java/com/yahoo/metrics/StorageMetrics.java
index d67b67d04b7..05ae5180d3b 100644
--- a/container-core/src/main/java/com/yahoo/metrics/StorageMetrics.java
+++ b/container-core/src/main/java/com/yahoo/metrics/StorageMetrics.java
@@ -11,6 +11,9 @@ public enum StorageMetrics implements VespaMetrics {
VDS_DATASTORED_ALLDISKS_BUCKETS("vds.datastored.alldisks.buckets", Unit.BUCKET, "Number of buckets managed"),
VDS_DATASTORED_ALLDISKS_DOCS("vds.datastored.alldisks.docs", Unit.DOCUMENT, "Number of documents stored"),
VDS_DATASTORED_ALLDISKS_BYTES("vds.datastored.alldisks.bytes", Unit.BYTE, "Number of bytes stored"),
+ VDS_DATASTORED_ALLDISKS_ACTIVEBUCKETS("vds.datastored.alldisks.activebuckets", Unit.BUCKET, "Number of active buckets on the node"),
+ VDS_DATASTORED_ALLDISKS_READYBUCKETS("vds.datastored.alldisks.readybuckets", Unit.BUCKET, "Number of ready buckets on the node"),
+
VDS_VISITOR_ALLTHREADS_AVERAGEVISITORLIFETIME("vds.visitor.allthreads.averagevisitorlifetime", Unit.MILLISECOND, "Average lifetime of a visitor"),
VDS_VISITOR_ALLTHREADS_AVERAGEQUEUEWAIT("vds.visitor.allthreads.averagequeuewait", Unit.MILLISECOND, "Average time an operation spends in input queue."),
VDS_VISITOR_ALLTHREADS_QUEUESIZE("vds.visitor.allthreads.queuesize", Unit.OPERATION, "Size of input message queue."),
@@ -19,6 +22,9 @@ public enum StorageMetrics implements VespaMetrics {
VDS_VISITOR_ALLTHREADS_FAILED("vds.visitor.allthreads.failed", Unit.OPERATION, "Number of visitors failed"),
VDS_VISITOR_ALLTHREADS_AVERAGEMESSAGESENDTIME("vds.visitor.allthreads.averagemessagesendtime", Unit.MILLISECOND, "Average time it takes for messages to be sent to their target (and be replied to)"),
VDS_VISITOR_ALLTHREADS_AVERAGEPROCESSINGTIME("vds.visitor.allthreads.averageprocessingtime", Unit.MILLISECOND, "Average time used to process visitor requests"),
+ VDS_VISITOR_ALLTHREADS_ABORTED("vds.visitor.allthreads.aborted", Unit.INSTANCE, "Number of visitors aborted."),
+ VDS_VISITOR_ALLTHREADS_AVERAGEVISITORCREATIONTIME("vds.visitor.allthreads.averagevisitorcreationtime", Unit.MILLISECOND, "Average time spent creating a visitor instance"),
+ VDS_VISITOR_ALLTHREADS_DESTINATION_FAILURE_REPLIES("vds.visitor.allthreads.destination_failure_replies", Unit.INSTANCE, "Number of failure replies received from the visitor destination"),
VDS_FILESTOR_QUEUESIZE("vds.filestor.queuesize", Unit.OPERATION, "Size of input message queue."),
VDS_FILESTOR_AVERAGEQUEUEWAIT("vds.filestor.averagequeuewait", Unit.MILLISECOND, "Average time an operation spends in input queue."),
@@ -30,11 +36,17 @@ public enum StorageMetrics implements VespaMetrics {
VDS_FILESTOR_ALLTHREADS_MERGEMETADATAREADLATENCY("vds.filestor.allthreads.mergemetadatareadlatency", Unit.MILLISECOND, "Time spent in a merge step to check metadata of current node to see what data it has."),
VDS_FILESTOR_ALLTHREADS_MERGEDATAREADLATENCY("vds.filestor.allthreads.mergedatareadlatency", Unit.MILLISECOND, "Time spent in a merge step to read data other nodes need."),
VDS_FILESTOR_ALLTHREADS_MERGEDATAWRITELATENCY("vds.filestor.allthreads.mergedatawritelatency", Unit.MILLISECOND, "Time spent in a merge step to write data needed to current node."),
+ VDS_FILESTOR_ALLTHREADS_MERGEAVGDATARECEIVEDNEEDED("vds.filestor.allthreads.mergeavgdatareceivedneeded", Unit.BYTE, "Amount of data transferred from previous node in chain that we needed to apply locally."),
+ VDS_FILESTOR_ALLTHREADS_MERGEBUCKETS_COUNT("vds.filestor.allthreads.mergebuckets.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_MERGEBUCKETS_FAILED("vds.filestor.allthreads.mergebuckets.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_MERGEBUCKETS_LATENCY("vds.filestor.allthreads.mergebuckets.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_MERGELATENCYTOTAL("vds.filestor.allthreads.mergelatencytotal", Unit.MILLISECOND, "Latency of total merge operation, from master node receives it, until merge is complete and master node replies."),
VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY("vds.filestor.allthreads.put_latency", Unit.MILLISECOND, "Latency of individual puts that are part of merge operations"), // TODO Vespa 9: Update metric name to include 'merge'
VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY("vds.filestor.allthreads.remove_latency", Unit.MILLISECOND, "Latency of individual removes that are part of merge operations"), // TODO Vespa 9: Update metric name to include 'merge'
VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES("vds.filestor.allstripes.throttled_rpc_direct_dispatches", Unit.INSTANCE, "Number of times an RPC thread could not directly dispatch an async operation directly to Proton because it was disallowed by the throttle policy"),
VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS("vds.filestor.allstripes.throttled_persistence_thread_polls", Unit.INSTANCE, "Number of times a persistence thread could not immediately dispatch a queued async operation because it was disallowed by the throttle policy"),
VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN("vds.filestor.allstripes.timeouts_waiting_for_throttle_token", Unit.INSTANCE, "Number of times a persistence thread timed out waiting for an available throttle policy token"),
+ VDS_FILESTOR_ALLSTRIPES_AVERAGEQUEUEWAIT("vds.filestor.allstripes.averagequeuewait", Unit.MILLISECOND, "Average time an operation spends in input queue."),
VDS_FILESTOR_ALLTHREADS_PUT_COUNT("vds.filestor.allthreads.put.count", Unit.OPERATION, "Number of requests processed."),
VDS_FILESTOR_ALLTHREADS_PUT_FAILED("vds.filestor.allthreads.put.failed", Unit.OPERATION, "Number of failed requests."),
@@ -46,36 +58,67 @@ public enum StorageMetrics implements VespaMetrics {
VDS_FILESTOR_ALLTHREADS_REMOVE_TEST_AND_SET_FAILED("vds.filestor.allthreads.remove.test_and_set_failed", Unit.OPERATION, "Number of operations that were skipped due to a test-and-set condition not met"),
VDS_FILESTOR_ALLTHREADS_REMOVE_LATENCY("vds.filestor.allthreads.remove.latency", Unit.MILLISECOND, "Latency of successful requests."),
VDS_FILESTOR_ALLTHREADS_REMOVE_REQUEST_SIZE("vds.filestor.allthreads.remove.request_size", Unit.BYTE, "Size of requests, in bytes"),
+ VDS_FILESTOR_ALLTHREADS_REMOVE_NOT_FOUND("vds.filestor.allthreads.remove.not_found", Unit.REQUEST, "Number of requests that could not be completed due to source document not found."),
+
VDS_FILESTOR_ALLTHREADS_GET_COUNT("vds.filestor.allthreads.get.count", Unit.OPERATION, "Number of requests processed."),
VDS_FILESTOR_ALLTHREADS_GET_FAILED("vds.filestor.allthreads.get.failed", Unit.OPERATION, "Number of failed requests."),
VDS_FILESTOR_ALLTHREADS_GET_LATENCY("vds.filestor.allthreads.get.latency", Unit.MILLISECOND, "Latency of successful requests."),
VDS_FILESTOR_ALLTHREADS_GET_REQUEST_SIZE("vds.filestor.allthreads.get.request_size", Unit.BYTE, "Size of requests, in bytes"),
- VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT("vds.filestor.allthreads.update.count", Unit.OPERATION, "Number of requests processed."),
- VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED("vds.filestor.allthreads.update.failed", Unit.OPERATION, "Number of failed requests."),
- VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED("vds.filestor.allthreads.update.test_and_set_failed", Unit.OPERATION, "Number of operations that were skipped due to a test-and-set condition not met"),
+ VDS_FILESTOR_ALLTHREADS_GET_NOT_FOUND("vds.filestor.allthreads.get.not_found", Unit.REQUEST, "Number of requests that could not be completed due to source document not found."),
+ VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT("vds.filestor.allthreads.update.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED("vds.filestor.allthreads.update.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED("vds.filestor.allthreads.update.test_and_set_failed", Unit.REQUEST, "Number of requests that were skipped due to a test-and-set condition not met"),
VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY("vds.filestor.allthreads.update.latency", Unit.MILLISECOND, "Latency of successful requests."),
VDS_FILESTOR_ALLTHREADS_UPDATE_REQUEST_SIZE("vds.filestor.allthreads.update.request_size", Unit.BYTE, "Size of requests, in bytes"),
- VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT("vds.filestor.allthreads.createiterator.count", Unit.OPERATION, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY_READ("vds.filestor.allthreads.update.latency_read", Unit.MILLISECOND, "Latency of the source read in the request."),
+ VDS_FILESTOR_ALLTHREADS_UPDATE_NOT_FOUND("vds.filestor.allthreads.update.not_found", Unit.REQUEST, "Number of requests that could not be completed due to source document not found."),
+ VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT("vds.filestor.allthreads.createiterator.count", Unit.REQUEST, "Number of requests processed."),
VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_LATENCY("vds.filestor.allthreads.createiterator.latency", Unit.MILLISECOND, "Latency of successful requests."),
- VDS_FILESTOR_ALLTHREADS_VISIT_COUNT("vds.filestor.allthreads.visit.count", Unit.OPERATION, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_FAILED("vds.filestor.allthreads.createiterator.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_VISIT_COUNT("vds.filestor.allthreads.visit.count", Unit.REQUEST, "Number of requests processed."),
VDS_FILESTOR_ALLTHREADS_VISIT_LATENCY("vds.filestor.allthreads.visit.latency", Unit.MILLISECOND, "Latency of successful requests."),
- VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT("vds.filestor.allthreads.remove_location.count", Unit.OPERATION, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_VISIT_DOCS("vds.filestor.allthreads.visit.docs", Unit.DOCUMENT, "Number of entries read per iterate call"),
+ VDS_FILESTOR_ALLTHREADS_VISIT_FAILED("vds.filestor.allthreads.visit.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT("vds.filestor.allthreads.remove_location.count", Unit.REQUEST, "Number of requests processed."),
VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_LATENCY("vds.filestor.allthreads.remove_location.latency", Unit.MILLISECOND, "Latency of successful requests."),
- VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT("vds.filestor.allthreads.splitbuckets.count", Unit.OPERATION, "Number of requests processed."),
- VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT("vds.filestor.allthreads.joinbuckets.count", Unit.OPERATION, "Number of requests processed."),
- VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT("vds.filestor.allthreads.deletebuckets.count", Unit.OPERATION, "Number of requests processed."),
- VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED("vds.filestor.allthreads.deletebuckets.failed", Unit.OPERATION, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_FAILED("vds.filestor.allthreads.remove_location.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT("vds.filestor.allthreads.splitbuckets.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_FAILED("vds.filestor.allthreads.splitbuckets.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_LATENCY("vds.filestor.allthreads.splitbuckets.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT("vds.filestor.allthreads.joinbuckets.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_FAILED("vds.filestor.allthreads.joinbuckets.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_LATENCY("vds.filestor.allthreads.joinbuckets.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT("vds.filestor.allthreads.deletebuckets.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED("vds.filestor.allthreads.deletebuckets.failed", Unit.REQUEST, "Number of failed requests."),
VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_LATENCY("vds.filestor.allthreads.deletebuckets.latency", Unit.MILLISECOND, "Latency of successful requests."),
- VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT("vds.filestor.allthreads.setbucketstates.count", Unit.OPERATION, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT("vds.filestor.allthreads.setbucketstates.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_FAILED("vds.filestor.allthreads.setbucketstates.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_LATENCY("vds.filestor.allthreads.setbucketstates.latency", Unit.MILLISECOND, "Latency of successful requests."),
VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME("vds.mergethrottler.averagequeuewaitingtime", Unit.MILLISECOND, "Time merges spent in the throttler queue"),
VDS_MERGETHROTTLER_QUEUESIZE("vds.mergethrottler.queuesize", Unit.INSTANCE, "Length of merge queue"),
VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE("vds.mergethrottler.active_window_size", Unit.INSTANCE, "Number of merges active within the pending window size"),
VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE("vds.mergethrottler.bounced_due_to_back_pressure", Unit.INSTANCE, "Number of merges bounced due to resource exhaustion back-pressure"),
VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK("vds.mergethrottler.locallyexecutedmerges.ok", Unit.INSTANCE, "The number of successful merges for 'locallyexecutedmerges'"),
- VDS_MERGETHROTTLER_MERGECHAINS_OK("vds.mergethrottler.mergechains.ok", Unit.INSTANCE, "The number of successful merges for 'mergechains'"),
- VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY("vds.mergethrottler.mergechains.failures.busy", Unit.INSTANCE, "The number of merges that failed because the storage node was busy"),
- VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL("vds.mergethrottler.mergechains.failures.total", Unit.INSTANCE, "Sum of all failures"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_ABORTED("vds.mergethrottler.locallyexecutedmerges.failures.aborted", Unit.OPERATION, "The number of merges that failed because the storage node was (most likely) shutting down"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_BUCKETNOTFOUND("vds.mergethrottler.locallyexecutedmerges.failures.bucketnotfound", Unit.OPERATION, "The number of operations that failed because the bucket did not exist"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_BUSY("vds.mergethrottler.locallyexecutedmerges.failures.busy", Unit.OPERATION, "The number of merges that failed because the storage node was busy"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_EXISTS("vds.mergethrottler.locallyexecutedmerges.failures.exists", Unit.OPERATION, "The number of merges that were rejected due to a merge operation for their bucket already being processed"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_NOTREADY("vds.mergethrottler.locallyexecutedmerges.failures.notready", Unit.OPERATION, "The number of merges discarded because distributor was not ready"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_OTHER("vds.mergethrottler.locallyexecutedmerges.failures.other", Unit.OPERATION, "The number of other failures"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_REJECTED("vds.mergethrottler.locallyexecutedmerges.failures.rejected", Unit.OPERATION, "The number of merges that were rejected"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_TIMEOUT("vds.mergethrottler.locallyexecutedmerges.failures.timeout", Unit.OPERATION, "The number of merges that failed because they timed out towards storage"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_TOTAL("vds.mergethrottler.locallyexecutedmerges.failures.total", Unit.OPERATION, "Sum of all failures"),
+ VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_FAILURES_WRONGDISTRIBUTION("vds.mergethrottler.locallyexecutedmerges.failures.wrongdistribution", Unit.OPERATION, "The number of merges that were discarded (flushed) because they were initiated at an older cluster state than the current"),
+ VDS_MERGETHROTTLER_MERGECHAINS_OK("vds.mergethrottler.mergechains.ok", Unit.OPERATION, "The number of successful merges for 'mergechains'"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY("vds.mergethrottler.mergechains.failures.busy", Unit.OPERATION, "The number of merges that failed because the storage node was busy"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL("vds.mergethrottler.mergechains.failures.total", Unit.OPERATION, "Sum of all failures"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_EXISTS("vds.mergethrottler.mergechains.failures.exists", Unit.OPERATION, "The number of merges that were rejected due to a merge operation for their bucket already being processed"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_NOTREADY("vds.mergethrottler.mergechains.failures.notready", Unit.OPERATION, "The number of merges discarded because distributor was not ready"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_OTHER("vds.mergethrottler.mergechains.failures.other", Unit.OPERATION, "The number of other failures"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_REJECTED("vds.mergethrottler.mergechains.failures.rejected", Unit.OPERATION, "The number of merges that were rejected"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TIMEOUT("vds.mergethrottler.mergechains.failures.timeout", Unit.OPERATION, "The number of merges that failed because they timed out towards storage"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_WRONGDISTRIBUTION("vds.mergethrottler.mergechains.failures.wrongdistribution", Unit.OPERATION, "The number of merges that were discarded (flushed) because they were initiated at an older cluster state than the current"),
// C++ TLS metrics - these come from both the distributor and storage
@@ -88,6 +131,98 @@ public enum StorageMetrics implements VespaMetrics {
VDS_SERVER_NETWORK_TLS_CONNECTIONS_BROKEN("vds.server.network.tls-connections-broken", Unit.CONNECTION, "Number of TLS connections broken due to failures during frame encoding or decoding"),
VDS_SERVER_NETWORK_FAILED_TLS_CONFIG_RELOADS("vds.server.network.failed-tls-config-reloads", Unit.FAILURE, "Number of times background reloading of TLS config has failed"),
+ VDS_BOUNCER_UNAVAILABLE_NODE_ABORTS("vds.bouncer.unavailable_node_aborts", Unit.OPERATION, "Number of operations that were aborted due to the node (or target bucket space) being unavailable"),
+ VDS_CHANGEDBUCKETOWNERSHIPHANDLER_AVG_ABORT_PROCESSING_TIME("vds.changedbucketownershiphandler.avg_abort_processing_time", Unit.MILLISECOND, "Average time spent aborting operations for changed buckets"),
+ VDS_CHANGEDBUCKETOWNERSHIPHANDLER_EXTERNAL_LOAD_OPS_ABORTED("vds.changedbucketownershiphandler.external_load_ops_aborted", Unit.OPERATION, "Number of outdated external load operations aborted"),
+ VDS_CHANGEDBUCKETOWNERSHIPHANDLER_IDEAL_STATE_OPS_ABORTED("vds.changedbucketownershiphandler.ideal_state_ops_aborted", Unit.OPERATION, "Number of outdated ideal state operations aborted"),
+ VDS_COMMUNICATION_BUCKET_SPACE_MAPPING_FAILURES("vds.communication.bucket_space_mapping_failures", Unit.OPERATION, "Number of messages that could not be resolved to a known bucket space"),
+ VDS_COMMUNICATION_CONVERTFAILURES("vds.communication.convertfailures", Unit.OPERATION, "Number of messages that failed to get converted to storage API messages"),
+ VDS_COMMUNICATION_EXCEPTIONMESSAGEPROCESSTIME("vds.communication.exceptionmessageprocesstime", Unit.MILLISECOND, "Time transport thread uses to process a single message that fails with an exception thrown into communication manager"),
+ VDS_COMMUNICATION_MESSAGEPROCESSTIME("vds.communication.messageprocesstime", Unit.MILLISECOND, "Time transport thread uses to process a single message"),
+ VDS_COMMUNICATION_MESSAGEQUEUE("vds.communication.messagequeue", Unit.ITEM, "Size of input message queue."),
+ VDS_COMMUNICATION_SENDCOMMANDLATENCY("vds.communication.sendcommandlatency", Unit.MILLISECOND, "Average ms used to send commands to MBUS"),
+ VDS_COMMUNICATION_SENDREPLYLATENCY("vds.communication.sendreplylatency", Unit.MILLISECOND, "Average ms used to send replies to MBUS"),
+ VDS_COMMUNICATION_TOOLITTLEMEMORY("vds.communication.toolittlememory", Unit.OPERATION, "Number of messages failed due to too little memory available"),
+
+ VDS_DATASTORED_BUCKET_SPACE_ACTIVE_BUCKETS("vds.datastored.bucket_space.active_buckets", Unit.BUCKET, "Number of active buckets in the bucket space"),
+ VDS_DATASTORED_BUCKET_SPACE_BUCKET_DB_MEMORY_USAGE_ALLOCATED_BYTES("vds.datastored.bucket_space.bucket_db.memory_usage.allocated_bytes", Unit.BYTE, "The number of allocated bytes"),
+ VDS_DATASTORED_BUCKET_SPACE_BUCKET_DB_MEMORY_USAGE_DEAD_BYTES("vds.datastored.bucket_space.bucket_db.memory_usage.dead_bytes", Unit.BYTE, "The number of dead bytes (<= used_bytes)"),
+ VDS_DATASTORED_BUCKET_SPACE_BUCKET_DB_MEMORY_USAGE_ONHOLD_BYTES("vds.datastored.bucket_space.bucket_db.memory_usage.onhold_bytes", Unit.BYTE, "The number of bytes on hold"),
+ VDS_DATASTORED_BUCKET_SPACE_BUCKET_DB_MEMORY_USAGE_USED_BYTES("vds.datastored.bucket_space.bucket_db.memory_usage.used_bytes", Unit.BYTE, "The number of used bytes (<= allocated_bytes)"),
+ VDS_DATASTORED_BUCKET_SPACE_BUCKETS_TOTAL("vds.datastored.bucket_space.buckets_total", Unit.BUCKET, "Total number of buckets present in the bucket space (ready + not ready)"),
+ VDS_DATASTORED_BUCKET_SPACE_BYTES("vds.datastored.bucket_space.bytes", Unit.BYTE, "Bytes stored across all documents in the bucket space"),
+ VDS_DATASTORED_BUCKET_SPACE_DOCS("vds.datastored.bucket_space.docs", Unit.DOCUMENT, "Documents stored in the bucket space"),
+ VDS_DATASTORED_BUCKET_SPACE_READY_BUCKETS("vds.datastored.bucket_space.ready_buckets", Unit.BUCKET, "Number of ready buckets in the bucket space"),
+ VDS_DATASTORED_FULLBUCKETINFOLATENCY("vds.datastored.fullbucketinfolatency", Unit.MILLISECOND, "Amount of time spent to process a full bucket info request"),
+ VDS_DATASTORED_FULLBUCKETINFOREQSIZE("vds.datastored.fullbucketinforeqsize", Unit.NODE, "Amount of distributors answered at once in full bucket info requests."),
+ VDS_DATASTORED_SIMPLEBUCKETINFOREQSIZE("vds.datastored.simplebucketinforeqsize", Unit.BUCKET, "Amount of buckets returned in simple bucket info requests"),
+
+ VDS_FILESTOR_ALLTHREADS_APPLYBUCKETDIFF_COUNT("vds.filestor.allthreads.applybucketdiff.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_APPLYBUCKETDIFF_FAILED("vds.filestor.allthreads.applybucketdiff.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_APPLYBUCKETDIFF_LATENCY("vds.filestor.allthreads.applybucketdiff.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_APPLYBUCKETDIFFREPLY("vds.filestor.allthreads.applybucketdiffreply", Unit.REQUEST, "Number of applybucketdiff replies that have been processed."),
+ VDS_FILESTOR_ALLTHREADS_BUCKETFIXED("vds.filestor.allthreads.bucketfixed", Unit.BUCKET, "Number of times bucket has been fixed because of corruption"),
+ VDS_FILESTOR_ALLTHREADS_BUCKETVERIFIED_COUNT("vds.filestor.allthreads.bucketverified.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_BUCKETVERIFIED_FAILED("vds.filestor.allthreads.bucketverified.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_BUCKETVERIFIED_LATENCY("vds.filestor.allthreads.bucketverified.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_BYTESMERGED("vds.filestor.allthreads.bytesmerged", Unit.BYTE, "Total number of bytes merged into this node."),
+ VDS_FILESTOR_ALLTHREADS_CREATEBUCKETS_COUNT("vds.filestor.allthreads.createbuckets.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_CREATEBUCKETS_FAILED("vds.filestor.allthreads.createbuckets.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_CREATEBUCKETS_LATENCY("vds.filestor.allthreads.createbuckets.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_FAILEDOPERATIONS("vds.filestor.allthreads.failedoperations", Unit.OPERATION, "Number of operations throwing exceptions."),
+ VDS_FILESTOR_ALLTHREADS_GETBUCKETDIFF_COUNT("vds.filestor.allthreads.getbucketdiff.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_GETBUCKETDIFF_FAILED("vds.filestor.allthreads.getbucketdiff.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_GETBUCKETDIFF_LATENCY("vds.filestor.allthreads.getbucketdiff.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_GETBUCKETDIFFREPLY("vds.filestor.allthreads.getbucketdiffreply", Unit.REQUEST, "Number of getbucketdiff replies that have been processed."),
+ VDS_FILESTOR_ALLTHREADS_INTERNALJOIN_COUNT("vds.filestor.allthreads.internaljoin.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_INTERNALJOIN_FAILED("vds.filestor.allthreads.internaljoin.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_INTERNALJOIN_LATENCY("vds.filestor.allthreads.internaljoin.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_MOVEDBUCKETS_COUNT("vds.filestor.allthreads.movedbuckets.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_MOVEDBUCKETS_FAILED("vds.filestor.allthreads.movedbuckets.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_MOVEDBUCKETS_LATENCY("vds.filestor.allthreads.movedbuckets.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_OPERATIONS("vds.filestor.allthreads.operations", Unit.OPERATION, "Number of operations processed."),
+
+ VDS_FILESTOR_ALLTHREADS_READBUCKETINFO_COUNT("vds.filestor.allthreads.readbucketinfo.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_READBUCKETINFO_FAILED("vds.filestor.allthreads.readbucketinfo.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_READBUCKETINFO_LATENCY("vds.filestor.allthreads.readbucketinfo.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_READBUCKETLIST_COUNT("vds.filestor.allthreads.readbucketlist.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_READBUCKETLIST_FAILED("vds.filestor.allthreads.readbucketlist.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_READBUCKETLIST_LATENCY("vds.filestor.allthreads.readbucketlist.latency", Unit.MILLISECOND, "Latency of successful requests."),
+
+ VDS_FILESTOR_ALLTHREADS_RECHECKBUCKETINFO_COUNT("vds.filestor.allthreads.recheckbucketinfo.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_RECHECKBUCKETINFO_FAILED("vds.filestor.allthreads.recheckbucketinfo.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_RECHECKBUCKETINFO_LATENCY("vds.filestor.allthreads.recheckbucketinfo.latency", Unit.MILLISECOND, "Latency of successful requests."),
+
+ VDS_FILESTOR_ALLTHREADS_REVERT_COUNT("vds.filestor.allthreads.revert.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_REVERT_FAILED("vds.filestor.allthreads.revert.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_REVERT_LATENCY("vds.filestor.allthreads.revert.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_ALLTHREADS_REVERT_NOT_FOUND("vds.filestor.allthreads.revert.not_found", Unit.REQUEST, "Number of requests that could not be completed due to source document not found."),
+ VDS_FILESTOR_ALLTHREADS_STAT_BUCKET_COUNT("vds.filestor.allthreads.stat_bucket.count", Unit.REQUEST, "Number of requests processed."),
+ VDS_FILESTOR_ALLTHREADS_STAT_BUCKET_FAILED("vds.filestor.allthreads.stat_bucket.failed", Unit.REQUEST, "Number of failed requests."),
+ VDS_FILESTOR_ALLTHREADS_STAT_BUCKET_LATENCY("vds.filestor.allthreads.stat_bucket.latency", Unit.MILLISECOND, "Latency of successful requests."),
+ VDS_FILESTOR_BUCKET_DB_INIT_LATENCY("vds.filestor.bucket_db_init_latency", Unit.MILLISECOND, "Time taken (in ms) to initialize bucket databases with information from the persistence provider"),
+ VDS_FILESTOR_DIRECTORYEVENTS("vds.filestor.directoryevents", Unit.OPERATION, "Number of directory events received."),
+ VDS_FILESTOR_DISKEVENTS("vds.filestor.diskevents", Unit.OPERATION, "Number of disk events received."),
+ VDS_FILESTOR_PARTITIONEVENTS("vds.filestor.partitionevents", Unit.OPERATION, "Number of partition events received."),
+ VDS_FILESTOR_PENDINGMERGE("vds.filestor.pendingmerge", Unit.BUCKET, "Number of buckets currently being merged."),
+ VDS_FILESTOR_WAITINGFORLOCKRATE("vds.filestor.waitingforlockrate", Unit.OPERATION, "Amount of times a filestor thread has needed to wait for lock to take next message in queue."),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_ABORTED("vds.mergethrottler.mergechains.failures.aborted", Unit.OPERATION, "The number of merges that failed because the storage node was (most likely) shutting down"),
+ VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUCKETNOTFOUND("vds.mergethrottler.mergechains.failures.bucketnotfound", Unit.OPERATION, "The number of operations that failed because the bucket did not exist"),
+ VDS_SERVER_MEMORYUSAGE("vds.server.memoryusage", Unit.BYTE, "Amount of memory used by the storage subsystem"),
+ VDS_SERVER_MEMORYUSAGE_VISITING("vds.server.memoryusage_visiting", Unit.BYTE, "Message use from visiting"),
+ VDS_SERVER_MESSAGE_MEMORY_USE_HIGHPRI("vds.server.message_memory_use.highpri", Unit.BYTE, "Message use from high priority storage messages"),
+ VDS_SERVER_MESSAGE_MEMORY_USE_LOWPRI("vds.server.message_memory_use.lowpri", Unit.BYTE, "Message use from low priority storage messages"),
+ VDS_SERVER_MESSAGE_MEMORY_USE_NORMALPRI("vds.server.message_memory_use.normalpri", Unit.BYTE, "Message use from normal priority storage messages"),
+ VDS_SERVER_MESSAGE_MEMORY_USE_TOTAL("vds.server.message_memory_use.total", Unit.BYTE, "Message use from storage messages"),
+ VDS_SERVER_MESSAGE_MEMORY_USE_VERYHIGHPRI("vds.server.message_memory_use.veryhighpri", Unit.BYTE, "Message use from very high priority storage messages"),
+ VDS_STATE_MANAGER_INVOKE_STATE_LISTENERS_LATENCY("vds.state_manager.invoke_state_listeners_latency", Unit.MILLISECOND, "Time spent (in ms) propagating state changes to internal state listeners"),
+ VDS_VISITOR_CV_QUEUEEVICTEDWAITTIME("vds.visitor.cv_queueevictedwaittime", Unit.MILLISECOND, "Milliseconds waiting in create visitor queue, for visitors that was evicted from queue due to higher priority visitors coming"),
+ VDS_VISITOR_CV_QUEUEFULL("vds.visitor.cv_queuefull", Unit.OPERATION, "Number of create visitor messages failed as queue is full"),
+ VDS_VISITOR_CV_QUEUESIZE("vds.visitor.cv_queuesize", Unit.ITEM, "Size of create visitor queue"),
+ VDS_VISITOR_CV_QUEUETIMEOUTWAITTIME("vds.visitor.cv_queuetimeoutwaittime", Unit.MILLISECOND, "Milliseconds waiting in create visitor queue, for visitors that timed out while in the visitor queue"),
+ VDS_VISITOR_CV_QUEUEWAITTIME("vds.visitor.cv_queuewaittime", Unit.MILLISECOND, "Milliseconds waiting in create visitor queue, for visitors that was added to visitor queue but scheduled later"),
+ VDS_VISITOR_CV_SKIPQUEUE("vds.visitor.cv_skipqueue", Unit.OPERATION, "Number of times we could skip queue as we had free visitor spots"),
+
// C++ capability metrics
VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED("vds.server.network.rpc-capability-checks-failed", Unit.FAILURE, "Number of RPC operations that failed to due one or more missing capabilities"),
VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED("vds.server.network.status-capability-checks-failed", Unit.FAILURE, "Number of status page operations that failed to due one or more missing capabilities"),
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
index 9097012ecfc..d56daa34f03 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
@@ -171,7 +171,7 @@ public class HttpServerTest {
binder -> binder.bind(MetricConsumer.class).toInstance(metricConsumer.mockitoMock()));
driver.client()
.newGet("/status.html").addHeader("Host", "localhost").addHeader("Host", "vespa.ai").execute()
- .expectStatusCode(is(BAD_REQUEST)).expectContent(containsString("Bad Host: multiple headers"));
+ .expectStatusCode(is(BAD_REQUEST)).expectContent(containsString("reason: Duplicate Host Header"));
assertTrue(driver.close());
var aggregator = ResponseMetricAggregator.getBean(driver.server());
var metric = waitForStatistics(aggregator);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
index 842d99abe71..bd42587576d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
@@ -225,14 +225,15 @@ public class ApplicationPackageValidator {
oldEndpoint.allowedUrns().stream().map(AllowedUrn::toString).collect(joining(", ", "[", "]")) +
", but does not include all these in the new deployment spec. " +
"Deploying with the new settings will allow access to " +
- (newEndpoint.allowedUrns().isEmpty() ? "no one" : newEndpoint.allowedUrns().stream().map(AllowedUrn::toString).collect(joining(", ", "[", "]"))));
+ (newEndpoint.allowedUrns().isEmpty() ? "no one" : newEndpoint.allowedUrns().stream().map(AllowedUrn::toString).collect(joining(", ", "[", "]")) +
+ ". " + ValidationOverrides.toAllowMessage(validationId)));
});
newEndpoints.forEach((cluster, newEndpoint) -> {
ZoneEndpoint oldEndpoint = oldEndpoints.getOrDefault(cluster, ZoneEndpoint.defaultEndpoint);
if (oldEndpoint.isPublicEndpoint() && ! newEndpoint.isPublicEndpoint())
throw new IllegalArgumentException(prefix + "has a public endpoint for cluster '" + cluster.value() +
"' in '" + zone.region().get().value() + "', but the new deployment spec " +
- "disables this");
+ "disables this. " + ValidationOverrides.toAllowMessage(validationId));
});
});
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index a049b614e25..8f0536480f5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -1128,7 +1128,10 @@ public class ControllerTest {
</prod>
</deployment>"""));
- assertEquals("zone-endpoint-change: application 'tenant.application' has a public endpoint for cluster 'foo' in 'us-east-3', but the new deployment spec disables this",
+ assertEquals("zone-endpoint-change: application 'tenant.application' has a public endpoint for " +
+ "cluster 'foo' in 'us-east-3', but the new deployment spec disables this. " +
+ "To allow this add <allow until='yyyy-mm-dd'>zone-endpoint-change</allow> to validation-overrides.xml, " +
+ "see https://docs.vespa.ai/en/reference/validation-overrides.html",
assertThrows(IllegalArgumentException.class,
() -> app.submit(ApplicationPackageBuilder.fromDeploymentXml("""
<deployment>
@@ -1168,9 +1171,12 @@ public class ControllerTest {
ValidationId.zoneEndpointChange));
// Changing URNs is guarded.
- assertEquals("zone-endpoint-change: application 'tenant.application' allows access to cluster 'foo' in 'us-east-3' to " +
- "['yarn' through 'aws-private-link'], but does not include all these in the new deployment spec. " +
- "Deploying with the new settings will allow access to ['yarn' through 'gcp-service-connect']",
+ assertEquals("zone-endpoint-change: application 'tenant.application' allows access to cluster " +
+ "'foo' in 'us-east-3' to ['yarn' through 'aws-private-link'], " +
+ "but does not include all these in the new deployment spec. " +
+ "Deploying with the new settings will allow access to ['yarn' through 'gcp-service-connect']. " +
+ "To allow this add <allow until='yyyy-mm-dd'>zone-endpoint-change</allow> to validation-overrides.xml, " +
+ "see https://docs.vespa.ai/en/reference/validation-overrides.html",
assertThrows(IllegalArgumentException.class,
() -> app.submit(ApplicationPackageBuilder.fromDeploymentXml("""
<deployment>
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 79778d6028d..fe493dc883f 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -467,7 +467,6 @@ VERSION=%{version} CI=true make -C client/go install-all
%check
%if ! 0%{?installdir:1}
-%if 0%{?_run_unit_tests:1}
%if 0%{?_java_home:1}
export JAVA_HOME=%{?_java_home}
%else
@@ -481,7 +480,6 @@ export PYTHONPATH="$PYTHONPATH:/usr/local/lib/$(basename $(readlink -f $(which p
#%{?_use_mvn_wrapper:./mvnw}%{!?_use_mvn_wrapper:mvn} --batch-mode -nsu -T 1C -Dmaven.javadoc.skip=true test
make test ARGS="--output-on-failure %{_smp_mflags}"
%endif
-%endif
%install
rm -rf %{buildroot}
@@ -596,7 +594,6 @@ fi
%exclude %{_prefix}/conf/configserver-app/components/config-model-fat.jar
%exclude %{_prefix}/conf/configserver-app/config-models.xml
%dir %{_prefix}/conf/logd
-%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/conf/telegraf
%dir %{_prefix}/conf/vespa
%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/var/zookeeper/conf
%dir %{_prefix}/etc
@@ -639,7 +636,6 @@ fi
%exclude %{_prefix}/libexec/vespa/standalone-container.sh
%exclude %{_prefix}/libexec/vespa/vespa-curl-wrapper
%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/logs
-%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/logs/telegraf
%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/logs/vespa
%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/logs/vespa/access
%dir %attr(-,%{_vespa_user},%{_vespa_group}) %{_prefix}/logs/vespa/configserver
diff --git a/metrics-proxy/CMakeLists.txt b/metrics-proxy/CMakeLists.txt
index 4cc59300058..21889d83da5 100644
--- a/metrics-proxy/CMakeLists.txt
+++ b/metrics-proxy/CMakeLists.txt
@@ -1,9 +1,4 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
install_jar(metrics-proxy-jar-with-dependencies.jar)
-install(DIRECTORY DESTINATION conf/telegraf)
-install(DIRECTORY DESTINATION logs/telegraf)
-vespa_install_script(src/main/sh/start-telegraf.sh libexec/vespa)
-vespa_install_script(src/main/sh/stop-telegraf.sh libexec/vespa)
-
install_config_definitions()
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/telegraf/Telegraf.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/telegraf/Telegraf.java
deleted file mode 100644
index aa35832a3be..00000000000
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/telegraf/Telegraf.java
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.metricsproxy.telegraf;
-
-import com.yahoo.component.annotation.Inject;
-import com.yahoo.component.AbstractComponent;
-
-import java.io.File;
-import java.util.logging.Level;
-import com.yahoo.system.execution.ProcessExecutor;
-import com.yahoo.system.execution.ProcessResult;
-import org.apache.velocity.VelocityContext;
-import org.apache.velocity.app.VelocityEngine;
-
-import java.io.FileWriter;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.io.Writer;
-import java.util.logging.Logger;
-
-import static com.yahoo.vespa.defaults.Defaults.getDefaults;
-import static com.yahoo.yolean.Exceptions.uncheck;
-
-/**
- * @author olaa
- */
-public class Telegraf extends AbstractComponent {
-
- // These paths must coincide with the paths in the start/stop-telegraf shell scripts.
- private static final String TELEGRAF_CONFIG_PATH = getDefaults().underVespaHome("conf/telegraf/telegraf.conf");
- private static final String TELEGRAF_LOG_FILE_PATH = getDefaults().underVespaHome("logs/telegraf/telegraf.log");
-
- private static final String START_TELEGRAF_SCRIPT = getDefaults().underVespaHome("libexec/vespa/start-telegraf.sh");
- private static final String STOP_TELEGRAF_SCRIPT = getDefaults().underVespaHome("libexec/vespa/stop-telegraf.sh");
-
- private static final String TELEGRAF_CONFIG_TEMPLATE_PATH = "templates/telegraf.conf.vm";
-
- private final TelegrafRegistry telegrafRegistry;
-
- private static final Logger logger = Logger.getLogger(Telegraf.class.getName());
-
- @Inject
- public Telegraf(TelegrafRegistry telegrafRegistry, TelegrafConfig telegrafConfig) {
- this.telegrafRegistry = telegrafRegistry;
- telegrafRegistry.addInstance(this);
- writeConfig(telegrafConfig, getConfigWriter(), TELEGRAF_LOG_FILE_PATH);
- restartTelegraf();
- }
-
- protected static void writeConfig(TelegrafConfig telegrafConfig, Writer writer, String logFilePath) {
- VelocityContext context = new VelocityContext();
- context.put("logFilePath", logFilePath);
- context.put("intervalSeconds", telegrafConfig.intervalSeconds());
- context.put("cloudwatchPlugins", telegrafConfig.cloudWatch());
- context.put("protocol", telegrafConfig.isHostedVespa() ? "https" : "http");
- // TODO: Add node cert if hosted
-
- VelocityEngine velocityEngine = new VelocityEngine();
- velocityEngine.init();
- velocityEngine.evaluate(context, writer, "TelegrafConfigWriter", getTemplateReader());
- uncheck(writer::close);
- }
-
- private void restartTelegraf() {
- executeCommand(STOP_TELEGRAF_SCRIPT);
- executeCommand(START_TELEGRAF_SCRIPT);
- }
-
- private void stopTelegraf() {
- executeCommand(STOP_TELEGRAF_SCRIPT);
- }
-
- private void executeCommand(String command) {
- logger.info(String.format("Running command: %s", command));
- ProcessExecutor processExecutor = new ProcessExecutor
- .Builder(10)
- .successExitCodes(0)
- .build();
- ProcessResult processResult = uncheck(() -> processExecutor.execute(command))
- .orElseThrow(() -> new RuntimeException("Timed out running command: " + command));
-
- logger.log(Level.FINE, () -> String.format("Exit code: %d\nstdOut: %s\nstdErr: %s",
- processResult.exitCode,
- processResult.stdOut,
- processResult.stdErr));
-
- if (!processResult.stdErr.isBlank())
- logger.warning(String.format("stdErr not empty: %s", processResult.stdErr));
- }
-
- @SuppressWarnings("ConstantConditions")
- private static Reader getTemplateReader() {
- return new InputStreamReader(Telegraf.class.getClassLoader()
- .getResourceAsStream(TELEGRAF_CONFIG_TEMPLATE_PATH)
- );
-
- }
-
- private static Writer getConfigWriter() {
- File configFile = new File(TELEGRAF_CONFIG_PATH);
- configFile.getParentFile().mkdirs();
- return uncheck(() -> new FileWriter(configFile));
- }
-
- @Override
- public void deconstruct() {
- telegrafRegistry.removeInstance(this);
- if (telegrafRegistry.isEmpty()) {
- stopTelegraf();
- }
- }
-}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/telegraf/TelegrafRegistry.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/telegraf/TelegrafRegistry.java
deleted file mode 100644
index 2129ae38f89..00000000000
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/telegraf/TelegrafRegistry.java
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.metricsproxy.telegraf;
-
-import java.util.logging.Level;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.logging.Logger;
-
-/**
- * @author olaa
- */
-public class TelegrafRegistry {
-
- private static final List<Telegraf> telegrafInstances = Collections.synchronizedList(new ArrayList<>());
-
- private static final Logger logger = Logger.getLogger(TelegrafRegistry.class.getName());
-
- public void addInstance(Telegraf telegraf) {
- logger.log(Level.FINE, () -> "Adding Telegraf instance to registry: " + telegraf.hashCode());
- telegrafInstances.add(telegraf);
- }
-
- public void removeInstance(Telegraf telegraf) {
- logger.log(Level.FINE, () -> "Removing Telegraf instance from registry: " + telegraf.hashCode());
- telegrafInstances.remove(telegraf);
- }
-
- public boolean isEmpty() {
- return telegrafInstances.isEmpty();
- }
-}
diff --git a/metrics-proxy/src/main/resources/configdefinitions/telegraf.def b/metrics-proxy/src/main/resources/configdefinitions/telegraf.def
deleted file mode 100644
index d92997879aa..00000000000
--- a/metrics-proxy/src/main/resources/configdefinitions/telegraf.def
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package=ai.vespa.metricsproxy.telegraf
-
-isHostedVespa bool default=false
-
-# Metrics pull/push interval
-intervalSeconds int default=60
-
-
-# The Vespa metrics consumer to get metrics for
-cloudWatch[].consumer string
-
-cloudWatch[].region string default="us-east-1"
-cloudWatch[].namespace string
-
-# Only valid and required for hosted Vespa
-cloudWatch[].accessKeyName string default=""
-cloudWatch[].secretKeyName string default=""
-
-# Only valid and optional for self-hosted Vespa
-cloudWatch[].file string default=""
-cloudWatch[].profile string default="default"
diff --git a/metrics-proxy/src/main/resources/templates/telegraf.conf.vm b/metrics-proxy/src/main/resources/templates/telegraf.conf.vm
deleted file mode 100644
index 5a5f2d5f712..00000000000
--- a/metrics-proxy/src/main/resources/templates/telegraf.conf.vm
+++ /dev/null
@@ -1,44 +0,0 @@
-# Configuration for telegraf agent
-[agent]
- interval = "${intervalSeconds}s"
- round_interval = true
- metric_batch_size = 1000
- metric_buffer_limit = 10000
- collection_jitter = "0s"
- flush_interval = "${intervalSeconds}s"
- flush_jitter = "0s"
- precision = ""
- logtarget = "file"
- logfile = "$logFilePath"
- logfile_rotation_interval = "1d"
- logfile_rotation_max_size = "20MB"
- logfile_rotation_max_archives = 5
-
-#foreach( $cloudwatch in $cloudwatchPlugins )
-# Configuration for AWS CloudWatch output.
-[[outputs.cloudwatch]]
- region = "$cloudwatch.region()"
- namespace = "$cloudwatch.namespace()"
-#if( $cloudwatch.accessKeyName() != "" )
- access_key = "$cloudwatch.accessKeyName()"
- secret_key = "$cloudwatch.secretKeyName()"
-#elseif( $cloudwatch.profile() != "" )
- profile = "$cloudwatch.profile()"
-#end
- tagexclude = ["vespa_consumer"]
- [outputs.cloudwatch.tagpass]
- vespa_consumer = ["$cloudwatch.consumer()"]
-
-# Configuration for Vespa input plugin
-[[inputs.vespa]]
- url = "${protocol}://localhost:19092/metrics/v2/values?consumer=$cloudwatch.consumer()"
- [inputs.vespa.tags]
- vespa_consumer = "$cloudwatch.consumer()"
-#* TODO: Add node cert if hosted
-#if( $isHosted )
- tls_cert = "${VESPA_CERTIFICATE_PATH}"
- tls_key = "${VESPA_KEY_PATH}"
- insecure_skip_verify = true
-#end
-*###
-#end \ No newline at end of file
diff --git a/metrics-proxy/src/main/sh/start-telegraf.sh b/metrics-proxy/src/main/sh/start-telegraf.sh
deleted file mode 100644
index 60677e9f065..00000000000
--- a/metrics-proxy/src/main/sh/start-telegraf.sh
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/bin/sh
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-# BEGIN environment bootstrap section
-# Do not edit between here and END as this section should stay identical in all scripts
-
-findpath () {
- myname=${0}
- mypath=${myname%/*}
- myname=${myname##*/}
- empty_if_start_slash=${mypath%%/*}
- if [ "${empty_if_start_slash}" ]; then
- mypath=$(pwd)/${mypath}
- fi
- if [ "$mypath" ] && [ -d "$mypath" ]; then
- return
- fi
- mypath=$(pwd)
- if [ -f "${mypath}/${myname}" ]; then
- return
- fi
- echo "FATAL: Could not figure out the path where $myname lives from $0"
- exit 1
-}
-
-COMMON_ENV=libexec/vespa/common-env.sh
-
-source_common_env () {
- if [ "$VESPA_HOME" ] && [ -d "$VESPA_HOME" ]; then
- export VESPA_HOME
- common_env=$VESPA_HOME/$COMMON_ENV
- if [ -f "$common_env" ]; then
- . $common_env
- return
- fi
- fi
- return 1
-}
-
-findroot () {
- source_common_env && return
- if [ "$VESPA_HOME" ]; then
- echo "FATAL: bad VESPA_HOME value '$VESPA_HOME'"
- exit 1
- fi
- if [ "$ROOT" ] && [ -d "$ROOT" ]; then
- VESPA_HOME="$ROOT"
- source_common_env && return
- fi
- findpath
- while [ "$mypath" ]; do
- VESPA_HOME=${mypath}
- source_common_env && return
- mypath=${mypath%/*}
- done
- echo "FATAL: missing VESPA_HOME environment variable"
- echo "Could not locate $COMMON_ENV anywhere"
- exit 1
-}
-
-findhost () {
- if [ "${VESPA_HOSTNAME}" = "" ]; then
- VESPA_HOSTNAME=$(vespa-detect-hostname || hostname -f || hostname || echo "localhost") || exit 1
- fi
- validate="${VESPA_HOME}/bin/vespa-validate-hostname"
- if [ -f "$validate" ]; then
- "$validate" "${VESPA_HOSTNAME}" || exit 1
- fi
- export VESPA_HOSTNAME
-}
-
-findroot
-findhost
-
-ROOT=${VESPA_HOME%/}
-export ROOT
-
-# END environment bootstrap section
-
-fixddir () {
- if ! [ -d $1 ]; then
- echo "Creating data directory $1"
- mkdir -p $1 || exit 1
- fi
- if [ "${VESPA_USER}" ] && [ "$(id -u)" -eq 0 ]; then
- chown ${VESPA_USER} $1
- fi
- chmod 755 $1
-}
-
-# Note: these directories must coincide with the paths defined in the Telegraf Java component
-conf_dir=${VESPA_HOME}/conf/telegraf
-log_dir=${VESPA_HOME}/logs/telegraf
-fixddir ${conf_dir}
-fixddir ${log_dir}
-
-configfile=${conf_dir}/telegraf.conf
-pidfile="${VESPA_HOME}/var/run/telegraf.pid"
-
-TELEGRAF_CMD=/opt/vespa-deps/bin/telegraf
-
-vespa-run-as-vespa-user vespa-runserver -s telegraf -r 30 -p $pidfile -- \
-${TELEGRAF_CMD} --config ${configfile}
-
diff --git a/metrics-proxy/src/main/sh/stop-telegraf.sh b/metrics-proxy/src/main/sh/stop-telegraf.sh
deleted file mode 100644
index 595e4068ee5..00000000000
--- a/metrics-proxy/src/main/sh/stop-telegraf.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/sh
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-# BEGIN environment bootstrap section
-# Do not edit between here and END as this section should stay identical in all scripts
-
-findpath () {
- myname=${0}
- mypath=${myname%/*}
- myname=${myname##*/}
- empty_if_start_slash=${mypath%%/*}
- if [ "${empty_if_start_slash}" ]; then
- mypath=$(pwd)/${mypath}
- fi
- if [ "$mypath" ] && [ -d "$mypath" ]; then
- return
- fi
- mypath=$(pwd)
- if [ -f "${mypath}/${myname}" ]; then
- return
- fi
- echo "FATAL: Could not figure out the path where $myname lives from $0"
- exit 1
-}
-
-COMMON_ENV=libexec/vespa/common-env.sh
-
-source_common_env () {
- if [ "$VESPA_HOME" ] && [ -d "$VESPA_HOME" ]; then
- export VESPA_HOME
- common_env=$VESPA_HOME/$COMMON_ENV
- if [ -f "$common_env" ]; then
- . $common_env
- return
- fi
- fi
- return 1
-}
-
-findroot () {
- source_common_env && return
- if [ "$VESPA_HOME" ]; then
- echo "FATAL: bad VESPA_HOME value '$VESPA_HOME'"
- exit 1
- fi
- if [ "$ROOT" ] && [ -d "$ROOT" ]; then
- VESPA_HOME="$ROOT"
- source_common_env && return
- fi
- findpath
- while [ "$mypath" ]; do
- VESPA_HOME=${mypath}
- source_common_env && return
- mypath=${mypath%/*}
- done
- echo "FATAL: missing VESPA_HOME environment variable"
- echo "Could not locate $COMMON_ENV anywhere"
- exit 1
-}
-
-findhost () {
- if [ "${VESPA_HOSTNAME}" = "" ]; then
- VESPA_HOSTNAME=$(vespa-detect-hostname || hostname -f || hostname || echo "localhost") || exit 1
- fi
- validate="${VESPA_HOME}/bin/vespa-validate-hostname"
- if [ -f "$validate" ]; then
- "$validate" "${VESPA_HOSTNAME}" || exit 1
- fi
- export VESPA_HOSTNAME
-}
-
-findroot
-findhost
-
-ROOT=${VESPA_HOME%/}
-export ROOT
-
-# END environment bootstrap section
-
-pidfile="${VESPA_HOME}/var/run/telegraf.pid"
-vespa-run-as-vespa-user vespa-runserver -s telegraf -p $pidfile -S
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/telegraf/TelegrafTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/telegraf/TelegrafTest.java
deleted file mode 100644
index 5f13561a332..00000000000
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/telegraf/TelegrafTest.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.metricsproxy.telegraf;
-
-import ai.vespa.metricsproxy.TestUtil;
-import org.junit.Test;
-
-import java.io.StringWriter;
-
-import static org.junit.Assert.*;
-
-/**
- * @author olaa
- */
-public class TelegrafTest {
-
- @Test
- public void test_writing_correct_telegraf_plugin_config() {
- TelegrafConfig telegrafConfig = new TelegrafConfig.Builder()
- .cloudWatch(
- new TelegrafConfig.CloudWatch.Builder()
- .accessKeyName("accessKey1")
- .namespace("namespace1")
- .secretKeyName("secretKey1")
- .region("us-east-1")
- .consumer("consumer1")
- )
- .cloudWatch(
- new TelegrafConfig.CloudWatch.Builder()
- .namespace("namespace2")
- .profile("awsprofile")
- .region("us-east-2")
- .consumer("consumer2")
- )
- .intervalSeconds(300)
- .isHostedVespa(true)
- .build();
- StringWriter stringWriter = new StringWriter();
- String logFilePath = "/path/to/logs/telegraf/telegraf.log";
- Telegraf.writeConfig(telegrafConfig, stringWriter, logFilePath);
- String expectedConfig = TestUtil.getFileContents( "telegraf-config-with-two-cloudwatch-plugins.txt");
- assertEquals(expectedConfig, stringWriter.toString());
- }
-
-}
diff --git a/metrics-proxy/src/test/resources/telegraf-config-with-two-cloudwatch-plugins.txt b/metrics-proxy/src/test/resources/telegraf-config-with-two-cloudwatch-plugins.txt
deleted file mode 100644
index 3194b290b78..00000000000
--- a/metrics-proxy/src/test/resources/telegraf-config-with-two-cloudwatch-plugins.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-# Configuration for telegraf agent
-[agent]
- interval = "300s"
- round_interval = true
- metric_batch_size = 1000
- metric_buffer_limit = 10000
- collection_jitter = "0s"
- flush_interval = "300s"
- flush_jitter = "0s"
- precision = ""
- logtarget = "file"
- logfile = "/path/to/logs/telegraf/telegraf.log"
- logfile_rotation_interval = "1d"
- logfile_rotation_max_size = "20MB"
- logfile_rotation_max_archives = 5
-
-# Configuration for AWS CloudWatch output.
-[[outputs.cloudwatch]]
- region = "us-east-1"
- namespace = "namespace1"
- access_key = "accessKey1"
- secret_key = "secretKey1"
- tagexclude = ["vespa_consumer"]
- [outputs.cloudwatch.tagpass]
- vespa_consumer = ["consumer1"]
-
-# Configuration for Vespa input plugin
-[[inputs.vespa]]
- url = "https://localhost:19092/metrics/v2/values?consumer=consumer1"
- [inputs.vespa.tags]
- vespa_consumer = "consumer1"
-# Configuration for AWS CloudWatch output.
-[[outputs.cloudwatch]]
- region = "us-east-2"
- namespace = "namespace2"
- profile = "awsprofile"
- tagexclude = ["vespa_consumer"]
- [outputs.cloudwatch.tagpass]
- vespa_consumer = ["consumer2"]
-
-# Configuration for Vespa input plugin
-[[inputs.vespa]]
- url = "https://localhost:19092/metrics/v2/values?consumer=consumer2"
- [inputs.vespa.tags]
- vespa_consumer = "consumer2"
-
diff --git a/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java
index 9c79961eddf..eef75a32c0a 100644
--- a/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java
+++ b/model-integration/src/main/java/ai/vespa/modelintegration/evaluator/TensorConverter.java
@@ -180,12 +180,25 @@ class TensorConverter {
}
static private TensorType.Value toVespaValueType(TensorInfo.OnnxTensorType onnxType) {
+ // NOTE:
+ // should match best_cell_type in onnx_wrapper.cpp
switch (onnxType) {
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: return TensorType.Value.INT8;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16: return TensorType.Value.BFLOAT16;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return TensorType.Value.FLOAT;
- case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE: return TensorType.Value.DOUBLE;
- }
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL:
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8:
+ return TensorType.Value.INT8;
+
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
+ return TensorType.Value.BFLOAT16;
+
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16:
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16:
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
+ return TensorType.Value.FLOAT;
+
+ case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE:
+ return TensorType.Value.DOUBLE;
+ }
return TensorType.Value.DOUBLE;
}
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/onnx/TypeConverter.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/onnx/TypeConverter.java
index 35ec1d8c54a..2c008dbb922 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/onnx/TypeConverter.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/onnx/TypeConverter.java
@@ -56,21 +56,27 @@ class TypeConverter {
tensor.getDimsList());
}
- private static TensorType.Value toValueType(Onnx.TensorProto.DataType dataType) {
- switch (dataType) {
- case FLOAT: return TensorType.Value.FLOAT;
- case DOUBLE: return TensorType.Value.DOUBLE;
- // Imperfect conversion, for now:
- case BOOL: return TensorType.Value.FLOAT;
- case INT8: return TensorType.Value.FLOAT;
- case INT16: return TensorType.Value.FLOAT;
- case INT32: return TensorType.Value.FLOAT;
- case INT64: return TensorType.Value.FLOAT;
- case UINT8: return TensorType.Value.FLOAT;
- case UINT16: return TensorType.Value.FLOAT;
- case UINT32: return TensorType.Value.FLOAT;
- case UINT64: return TensorType.Value.FLOAT;
- default: throw new IllegalArgumentException("A ONNX tensor with data type " + dataType +
+ private static TensorType.Value toValueType(Onnx.TensorProto.DataType onnxType) {
+ // NOTE:
+ // should match best_cell_type in onnx_wrapper.cpp
+ switch (onnxType) {
+ case BOOL: // Imperfect conversion fallthrough
+ case INT8:
+ return TensorType.Value.INT8;
+ case BFLOAT16:
+ return TensorType.Value.BFLOAT16;
+ case UINT8: // Imperfect conversion fallthrough
+ case INT16: // Imperfect conversion fallthrough
+ case UINT16: // Imperfect conversion fallthrough
+ case FLOAT:
+ return TensorType.Value.FLOAT;
+ case INT32: // Imperfect conversion fallthrough
+ case INT64: // Imperfect conversion fallthrough
+ case UINT32: // Imperfect conversion fallthrough
+ case UINT64: // Imperfect conversion fallthrough
+ case DOUBLE:
+ return TensorType.Value.DOUBLE;
+ default: throw new IllegalArgumentException("A ONNX tensor with data type " + onnxType +
" cannot be converted to a Vespa tensor type");
}
}
diff --git a/model-integration/src/main/protobuf/onnx.proto b/model-integration/src/main/protobuf/onnx.proto
index dc6542867e0..27f1fdef4b3 100644
--- a/model-integration/src/main/protobuf/onnx.proto
+++ b/model-integration/src/main/protobuf/onnx.proto
@@ -298,6 +298,10 @@ message TensorProto {
UINT64 = 13;
COMPLEX64 = 14; // complex with float32 real and imaginary components
COMPLEX128 = 15; // complex with float64 real and imaginary components
+ // Non-IEEE floating-point format based on IEEE754 single-precision
+ // floating-point number truncated to 16 bits.
+ // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
+ BFLOAT16 = 16;
// Future extensions go here.
}
@@ -461,4 +465,4 @@ message OperatorSetIdProto {
// The version of the operator set being identified.
// This field MUST be present in this version of the IR.
optional int64 version = 2;
-} \ No newline at end of file
+}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
index 9cfed5d046c..8daac029c7b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
@@ -73,4 +73,9 @@ public class Applications {
return db.lock(application, timeout);
}
+ /** Create a lock which provides exclusive rights to perform a maintenance deployment */
+ public Mutex lockMaintenance(ApplicationId application) {
+ return db.lockMaintenance(application);
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
index b3ff0c42547..50eee9e33b3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
@@ -46,15 +46,18 @@ public abstract class ApplicationMaintainer extends NodeRepositoryMaintainer {
return 1.0;
}
+ protected final Deployer deployer() { return deployer; }
+
/** Returns the number of deployments that are pending execution */
public int pendingDeployments() {
return pendingDeployments.size();
}
/** Returns whether given application should be deployed at this moment in time */
- protected boolean canDeployNow(ApplicationId application) {
- return true;
- }
+ protected abstract boolean canDeployNow(ApplicationId application);
+
+ /** Returns the applications that should be maintained by this now. */
+ protected abstract Map<ApplicationId, String> applicationsNeedingMaintenance();
/**
* Redeploy this application.
@@ -64,19 +67,14 @@ public abstract class ApplicationMaintainer extends NodeRepositoryMaintainer {
*/
protected void deploy(ApplicationId application, String reason) {
if (pendingDeployments.addIfAbsent(application)) { // Avoid queuing multiple deployments for same application
- deploymentExecutor.execute(() -> deployWithLock(application, reason));
+ deploymentExecutor.execute(() -> deployNow(application, reason));
}
}
- protected Deployer deployer() { return deployer; }
-
- /** Returns the applications that should be maintained by this now. */
- protected abstract Map<ApplicationId, String> applicationsNeedingMaintenance();
-
/**
* Redeploy this application. A lock will be taken for the duration of the deployment activation
*/
- protected final void deployWithLock(ApplicationId application, String reason) {
+ protected final void deployNow(ApplicationId application, String reason) {
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) return; // this will be done at another config server
if ( ! canDeployNow(application)) return; // redeployment is no longer needed
@@ -97,7 +95,7 @@ public abstract class ApplicationMaintainer extends NodeRepositoryMaintainer {
@Override
public void shutdown() {
super.shutdown();
- this.deploymentExecutor.shutdownNow();
+ deploymentExecutor.shutdownNow();
}
@Override
@@ -105,7 +103,9 @@ public abstract class ApplicationMaintainer extends NodeRepositoryMaintainer {
super.awaitShutdown();
try {
// Give deployments in progress some time to complete
- this.deploymentExecutor.awaitTermination(1, TimeUnit.MINUTES);
+ if (!deploymentExecutor.awaitTermination(1, TimeUnit.MINUTES)) {
+ log.log(Level.WARNING, "Failed to shut down deployment executor within deadline");
+ }
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
index 69c03dbf6dc..44944af15d7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
@@ -6,7 +6,6 @@ import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Deployer;
-import com.yahoo.config.provision.Environment;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
@@ -19,6 +18,7 @@ import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.NodeMetricSnapshot;
import com.yahoo.vespa.hosted.provision.node.History;
+
import java.time.Duration;
import java.time.Instant;
import java.util.Map;
@@ -68,6 +68,7 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
* @return true if an autoscaling decision was made or nothing should be done, false if there was an error
*/
private boolean autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId) {
+ boolean redeploy = false;
try (var lock = nodeRepository().applications().lock(applicationId)) {
Optional<Application> application = nodeRepository().applications().get(applicationId);
if (application.isEmpty()) return true;
@@ -92,13 +93,9 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
// Attempt to perform the autoscaling immediately, and log it regardless
if (autoscaling != null && autoscaling.resources().isPresent() && !current.equals(autoscaling.resources().get())) {
- try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
- if (deployment.isValid())
- deployment.activate();
- logAutoscaling(current, autoscaling.resources().get(), applicationId, clusterNodes.not().retired());
- }
+ redeploy = true;
+ logAutoscaling(current, autoscaling.resources().get(), applicationId, clusterNodes.not().retired());
}
- return true;
}
catch (ApplicationLockException e) {
return false;
@@ -106,6 +103,13 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Illegal arguments for " + applicationId + " cluster " + clusterId, e);
}
+ if (redeploy) {
+ try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
+ if (deployment.isValid())
+ deployment.activate();
+ }
+ }
+ return true;
}
private Applications applications() {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java
index d048f43973a..ffa125230a8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java
@@ -16,7 +16,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.stream.Collectors;
/**
* This maintainer detects changes to nodes that must be expedited, and redeploys affected applications.
@@ -40,25 +39,21 @@ public class ExpeditedChangeApplicationMaintainer extends ApplicationMaintainer
@Override
protected Map<ApplicationId, String> applicationsNeedingMaintenance() {
- var applications = new HashMap<ApplicationId, String>();
-
- nodeRepository().nodes()
- .list()
- .nodeType(NodeType.tenant, NodeType.proxy)
- .matching(node -> node.allocation().isPresent())
- .groupingBy(node -> node.allocation().get().owner())
- .forEach((applicationId, nodes) -> {
- hasNodesWithChanges(applicationId, nodes)
- .ifPresent(reason -> applications.put(applicationId, reason));
- });
-
+ NodeList allNodes = nodeRepository().nodes().list();
+ Map<ApplicationId, String> applications = new HashMap<>();
+ allNodes.nodeType(NodeType.tenant, NodeType.proxy)
+ .matching(node -> node.allocation().isPresent())
+ .groupingBy(node -> node.allocation().get().owner())
+ .forEach((applicationId, nodes) -> {
+ hasNodesWithChanges(applicationId, nodes)
+ .ifPresent(reason -> applications.put(applicationId, reason));
+ });
// A ready proxy node should trigger a redeployment as it will activate the node.
- if (!nodeRepository().nodes().list(Node.State.ready, Node.State.reserved).nodeType(NodeType.proxy).isEmpty()) {
+ if (!allNodes.state(Node.State.ready, Node.State.reserved).nodeType(NodeType.proxy).isEmpty()) {
applications.merge(ApplicationId.from("hosted-vespa", "routing", "default"),
"nodes being ready",
(oldValue, newValue) -> oldValue + ", " + newValue);
}
-
return applications;
}
@@ -68,7 +63,7 @@ public class ExpeditedChangeApplicationMaintainer extends ApplicationMaintainer
*/
@Override
protected void deploy(ApplicationId application, String reason) {
- deployWithLock(application, reason);
+ deployNow(application, reason);
}
/** Returns the reason for doing an expedited deploy. */
@@ -78,11 +73,11 @@ public class ExpeditedChangeApplicationMaintainer extends ApplicationMaintainer
List<String> reasons = nodes.stream()
.flatMap(node -> node.history()
- .events()
- .stream()
- .filter(event -> expediteChangeBy(event.agent()))
- .filter(event -> lastDeployTime.get().isBefore(event.at()))
- .map(event -> event.type() + (event.agent() == Agent.system ? "" : " by " + event.agent())))
+ .events()
+ .stream()
+ .filter(event -> expediteChangeBy(event.agent()))
+ .filter(event -> lastDeployTime.get().isBefore(event.at()))
+ .map(event -> event.type() + (event.agent() == Agent.system ? "" : " by " + event.agent())))
.sorted()
.distinct()
.toList();
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
index bcc571355e3..189238a1c11 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
@@ -1,8 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
+import com.yahoo.concurrent.UncheckedTimeoutException;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.Deployment;
import com.yahoo.config.provision.TransientException;
@@ -16,7 +16,6 @@ import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.yolean.Exceptions;
import java.io.Closeable;
-import java.time.Duration;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
@@ -63,18 +62,12 @@ class MaintenanceDeployment implements Closeable {
return deployment.isPresent();
}
- /**
- * Returns the application lock held by this, or empty if it is not held.
- *
- * @throws IllegalStateException id this is called when closed
- */
- public Optional<Mutex> applicationLock() {
- if (closed) throw new IllegalStateException(this + " is closed");
- return lock;
- }
-
+ /** Prepare this deployment. Returns whether prepare was successful */
public boolean prepare() {
- return doStep(() -> { deployment.get().prepare(); return 0L; }).isPresent();
+ return doStep(() -> {
+ deployment.get().prepare();
+ return 0L;
+ }).isPresent();
}
/**
@@ -104,13 +97,10 @@ class MaintenanceDeployment implements Closeable {
}
private Optional<Mutex> tryLock(ApplicationId application, NodeRepository nodeRepository) {
- Duration timeout = Duration.ofSeconds(3);
try {
- // Use a short lock to avoid interfering with change deployments
- return Optional.of(nodeRepository.applications().lock(application, timeout));
- }
- catch (ApplicationLockException e) {
- log.log(Level.INFO, () -> "Could not lock " + application + " for maintenance deployment within " + timeout);
+ return Optional.of(nodeRepository.applications().lockMaintenance(application));
+ } catch (UncheckedTimeoutException e) {
+ log.log(Level.INFO, () -> "Could not lock " + application + " for maintenance deployment within timeout");
return Optional.empty();
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index 84a45de39d7..afea08711fa 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -182,27 +182,26 @@ public class NodeFailer extends NodeRepositoryMaintainer {
* Called when a node should be moved to the failed state: Do that if it seems safe,
* which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
* Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
- *
- * @return whether node was successfully failed
*/
- private boolean failActive(FailingNode failing) {
+ private void failActive(FailingNode failing) {
Optional<Deployment> deployment =
deployer.deployFromLocalActive(failing.node().allocation().get().owner(), Duration.ofMinutes(5));
- if (deployment.isEmpty()) return false;
+ if (deployment.isEmpty()) return;
// If the active node that we are trying to fail is of type host, we need to successfully fail all
// the children nodes running on it before we fail the host. Failing a child node in a dynamically
// provisioned zone may require provisioning new hosts that require the host application lock to be held,
// so we must release ours before failing the children.
List<FailingNode> activeChildrenToFail = new ArrayList<>();
+ boolean redeploy = false;
try (NodeMutex lock = nodeRepository().nodes().lockAndGetRequired(failing.node())) {
// Now that we have gotten the node object under the proper lock, sanity-check it still makes sense to fail
if (!Objects.equals(failing.node().allocation().map(Allocation::owner), lock.node().allocation().map(Allocation::owner)))
- return false;
+ return;
if (lock.node().state() == Node.State.failed)
- return true;
+ return;
if (!Objects.equals(failing.node().state(), lock.node().state()))
- return false;
+ return;
failing = new FailingNode(lock.node(), failing.reason);
String reasonForChildFailure = "Failing due to parent host " + failing.node().hostname() + " failure: " + failing.reason();
@@ -216,36 +215,46 @@ public class NodeFailer extends NodeRepositoryMaintainer {
if (activeChildrenToFail.isEmpty()) {
log.log(Level.INFO, "Failing out " + failing.node + ": " + failing.reason);
- wantToFail(failing.node(), true, lock);
- try {
- deployment.get().activate();
- return true;
- } catch (TransientException | UncheckedTimeoutException e) {
- log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() +
- " with a transient error, will be retried by application maintainer: " +
- Exceptions.toMessageString(e));
- return true;
- } catch (RuntimeException e) {
- // Reset want to fail: We'll retry failing unless it heals in the meantime
- nodeRepository().nodes().node(failing.node().hostname())
- .ifPresent(n -> wantToFail(n, false, lock));
- log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() +
- " for " + failing.reason() + ": " + Exceptions.toMessageString(e));
- return false;
- }
+ markWantToFail(failing.node(), true, lock);
+ redeploy = true;
}
}
+ // Redeploy to replace failing node
+ if (redeploy) {
+ redeploy(deployment.get(), failing);
+ return;
+ }
+
// In a dynamically provisioned zone the failing of the first child may require a new host to be provisioned,
// so failActive() may take a long time to complete, but the remaining children should be fast.
activeChildrenToFail.forEach(this::failActive);
- return false;
}
- private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
- if (!node.status().wantToFail())
+ private void redeploy(Deployment deployment, FailingNode failing) {
+ try {
+ deployment.activate();
+ } catch (TransientException | UncheckedTimeoutException e) {
+ log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() +
+ " with a transient error, will be retried by application maintainer: " +
+ Exceptions.toMessageString(e));
+ } catch (RuntimeException e) {
+ // Reset want to fail: We'll retry failing unless it heals in the meantime
+ Optional<NodeMutex> optionalNodeMutex = nodeRepository().nodes().lockAndGet(failing.node());
+ if (optionalNodeMutex.isEmpty()) return;
+ try (var nodeMutex = optionalNodeMutex.get()) {
+ markWantToFail(nodeMutex.node(), false, nodeMutex);
+ log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() +
+ " for " + failing.reason() + ": " + Exceptions.toMessageString(e));
+ }
+ }
+ }
+
+ private void markWantToFail(Node node, boolean wantToFail, Mutex lock) {
+ if (node.status().wantToFail() != wantToFail) {
nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
+ }
}
/** Returns true if node failing should be throttled */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
index 036c46479d1..10a828c887a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
@@ -47,13 +47,12 @@ public class PeriodicApplicationMaintainer extends ApplicationMaintainer {
.orElse(false);
}
- // Returns the applications that need to be redeployed by this config server at this point in time.
@Override
protected Map<ApplicationId, String> applicationsNeedingMaintenance() {
if (deployer().bootstrapping()) return Map.of();
// Collect all deployment times before sorting as deployments may happen while we build the set, breaking
- // the comparable contract. Stale times are fine as the time is rechecked in ApplicationMaintainer#deployWithLock
+ // the comparable contract. Stale times are fine as the time is rechecked in ApplicationMaintainer#deployNow
Map<ApplicationId, Instant> deploymentTimes = nodesNeedingMaintenance().stream()
.map(node -> node.allocation().get().owner())
.distinct()
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
index 87af8c05b14..1ae9b00d794 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
@@ -14,9 +14,10 @@ import com.yahoo.vespa.orchestrator.OrchestrationException;
import com.yahoo.yolean.Exceptions;
import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.OptionalLong;
/**
* Maintenance job which deactivates retired nodes, if given permission by orchestrator, or
@@ -47,40 +48,55 @@ public class RetiredExpirer extends NodeRepositoryMaintainer {
protected double maintain() {
int attempts = 0;
int successes = 0;
+ List<ApplicationId> applicationsWithRetiredNodes = nodeRepository().nodes().list(Node.State.active)
+ .retired()
+ .stream()
+ .map(node -> node.allocation().get().owner())
+ .distinct()
+ .toList();
+ for (var application : applicationsWithRetiredNodes) {
+ attempts++;
+ if (removeRetiredNodes(application)) {
+ successes++;
+ }
+ }
+ return attempts == 0 ? 1.0 : ((double)successes / attempts);
+ }
- NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
- Map<ApplicationId, NodeList> retiredNodesByApplication = activeNodes.retired().groupingBy(node -> node.allocation().get().owner());
- for (Map.Entry<ApplicationId, NodeList> entry : retiredNodesByApplication.entrySet()) {
- ApplicationId application = entry.getKey();
- NodeList retiredNodes = entry.getValue();
- Map<Removal, NodeList> nodesByRemovalReason = retiredNodes.groupingBy(node -> removalOf(node, activeNodes));
- if (nodesByRemovalReason.isEmpty()) continue;
-
- for (var kv : nodesByRemovalReason.entrySet()) {
- Removal removal = kv.getKey();
- if (removal.equals(Removal.none())) continue;
-
- NodeList nodes = kv.getValue();
- attempts++;
- try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
- if (!deployment.isValid()) {
- log.info("Skipping invalid deployment for " + application);
- continue;
- }
-
- nodeRepository().nodes().setRemovable(application, nodes.asList(), removal.isReusable());
- Optional<Long> session = deployment.activate();
- String nodeList = String.join(", ", nodes.mapToList(Node::hostname));
- if (session.isEmpty()) {
- log.info("Failed to redeploy " + application);
- continue;
- }
- log.info("Redeployed " + application + " at session " + session.get() + " to deactivate retired nodes: " + nodeList);
- successes++;
+ /** Mark retired nodes as removable and redeploy application */
+ private boolean removeRetiredNodes(ApplicationId application) {
+ try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
+ if (!deployment.isValid()) {
+ log.info("Skipping invalid deployment for " + application);
+ return false;
+ }
+ boolean redeploy = false;
+ List<String> nodesToDeactivate = new ArrayList<>();
+ try (var lock = nodeRepository().applications().lock(application)) {
+ NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
+ Map<Removal, NodeList> nodesByRemovalReason = activeNodes.owner(application)
+ .retired()
+ .groupingBy(node -> removalOf(node, activeNodes));
+ for (var kv : nodesByRemovalReason.entrySet()) {
+ Removal reason = kv.getKey();
+ if (reason.equals(Removal.none())) continue;
+ redeploy = true;
+ nodesToDeactivate.addAll(kv.getValue().hostnames());
+ nodeRepository().nodes().setRemovable(kv.getValue(), reason.isReusable());
}
}
+ if (!redeploy) {
+ return true;
+ }
+ Optional<Long> session = deployment.activate();
+ String nodeList = String.join(", ", nodesToDeactivate);
+ if (session.isEmpty()) {
+ log.info("Failed to redeploy " + application);
+ return false;
+ }
+ log.info("Redeployed " + application + " at session " + session.get() + " to deactivate retired nodes: " + nodeList);
+ return true;
}
- return attempts == 0 ? 1.0 : ((double)successes / attempts);
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
index 5ce88346178..dcdcbf09175 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
@@ -8,6 +8,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.maintenance.MaintenanceDeployment.Move;
import com.yahoo.vespa.hosted.provision.node.Agent;
@@ -49,9 +50,7 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer {
NodeRepository nodeRepository,
Metric metric,
Duration interval) {
- this(deployer, nodeRepository, metric, interval,
- 10_000 // Should take less than a few minutes
- );
+ this(deployer, nodeRepository, metric, interval, 10_000 /* Should take less than a few minutes */);
}
public SpareCapacityMaintainer(Deployer deployer,
@@ -160,22 +159,32 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer {
.filter(node -> node.state() == Node.State.active)
.min(this::retireOvercomittedComparator);
if (nodeToRetire.isEmpty()) return;
+ retire(nodeToRetire.get());
+ }
- ApplicationId application = nodeToRetire.get().allocation().get().owner();
- try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
- if ( ! deployment.isValid()) return;
-
- Optional<Node> nodeWithWantToRetire = nodeRepository().nodes().node(nodeToRetire.get().hostname())
- .map(node -> node.withWantToRetire(true, Agent.SpareCapacityMaintainer, nodeRepository().clock().instant()));
- if (nodeWithWantToRetire.isEmpty()) return;
-
- nodeRepository().nodes().write(nodeWithWantToRetire.get(), deployment.applicationLock().get());
- log.log(Level.INFO, String.format("Redeploying %s to move %s from overcommitted host",
- application, nodeToRetire.get().hostname()));
+ /** Mark node for retirement and redeploy its application */
+ private void retire(Node node) {
+ ApplicationId owner = node.allocation().get().owner();
+ try (MaintenanceDeployment deployment = new MaintenanceDeployment(owner, deployer, metric, nodeRepository())) {
+ if (!deployment.isValid()) return;
+ if (!markWantToRetire(node.hostname())) return;
+ log.log(Level.INFO, String.format("Redeploying %s to move %s from over-committed host",
+ owner, node.hostname()));
deployment.activate();
}
}
+ private boolean markWantToRetire(String hostname) {
+ Optional<NodeMutex> optionalNodeMutex = nodeRepository().nodes().lockAndGet(hostname);
+ if (optionalNodeMutex.isEmpty()) return false;
+ try (var nodeMutex = optionalNodeMutex.get()) {
+ Node retiredNode = nodeMutex.node().withWantToRetire(true, Agent.SpareCapacityMaintainer,
+ nodeRepository().clock().instant());
+ nodeRepository().nodes().write(retiredNode, nodeMutex);
+ return true;
+ }
+ }
+
private static class CapacitySolver {
private final HostCapacity hostCapacity;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index bb3d288e555..10f0c8aa554 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -203,17 +203,12 @@ public class Nodes {
/**
* Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
*
- * @param application the application the nodes belong to
* @param nodes the nodes to make removable. These nodes MUST be in the active state
* @param reusable move the node directly to {@link Node.State#dirty} after removal
*/
- public void setRemovable(ApplicationId application, List<Node> nodes, boolean reusable) {
- try (Mutex lock = applications.lock(application)) {
- List<Node> removableNodes = nodes.stream()
- .map(node -> node.with(node.allocation().get().removable(true, reusable)))
- .toList();
- write(removableNodes, lock);
- }
+ public void setRemovable(NodeList nodes, boolean reusable) {
+ performOn(nodes, (node, mutex) -> write(node.with(node.allocation().get().removable(true, reusable)),
+ mutex));
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
index c1ab8489f40..cec413cf4e3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
@@ -391,6 +391,11 @@ public class CuratorDb {
return db.lock(lockPath.append("archiveUris"), defaultLockTimeout);
}
+ public Lock lockMaintenance(ApplicationId application) {
+ return db.lock(lockPath.append("maintenanceDeployment").append(application.serializedForm()),
+ Duration.ofSeconds(3));
+ }
+
// Load balancers -----------------------------------------------------------
public List<LoadBalancerId> readLoadBalancerIds() {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java
index 880a69b61e5..e075995c89e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java
@@ -402,8 +402,8 @@ public class HostCapacityMaintainerTest {
// Config server becomes removable (done by RetiredExpirer in a real system) and redeployment moves it
// to parked
int removedIndex = nodeToRemove.get().allocation().get().membership().index();
- tester.nodeRepository().nodes().setRemovable(configSrvApp, List.of(nodeToRemove.get()), true);
- tester.nodeRepository().nodes().setRemovable(hostApp, List.of(hostToRemove.get()), true);
+ tester.nodeRepository().nodes().setRemovable(NodeList.of(nodeToRemove.get()), true);
+ tester.nodeRepository().nodes().setRemovable(NodeList.of(hostToRemove.get()), true);
tester.prepareAndActivateInfraApplication(configSrvApp, hostType.childNodeType());
tester.prepareAndActivateInfraApplication(hostApp, hostType);
tester.nodeRepository().nodes().markNodeAvailableForNewAllocation(nodeToRemove.get().hostname(), Agent.operator, "Readied by host-admin");
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index b5735cfae84..98c17eb4d5e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -515,9 +515,9 @@ public class OsVersionsTest {
private void replaceNodes(ApplicationId application) {
// Deploy to retire nodes
deployApplication(application);
- List<Node> retired = tester.nodeRepository().nodes().list().owner(application).retired().asList();
+ NodeList retired = tester.nodeRepository().nodes().list().owner(application).retired();
assertFalse("At least one node is retired", retired.isEmpty());
- tester.nodeRepository().nodes().setRemovable(application, retired, false);
+ tester.nodeRepository().nodes().setRemovable(retired, false);
// Redeploy to deactivate removable nodes and allocate new ones
deployApplication(application);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java
index 9a38cbbba44..9cd5adef5f4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java
@@ -10,8 +10,8 @@ import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.Provisioner;
import com.yahoo.config.provision.Zone;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.NodeRepositoryTester;
import com.yahoo.vespa.hosted.provision.maintenance.InfrastructureVersions;
@@ -122,7 +122,7 @@ public class InfraDeployerImplTest {
addNode(5, Node.State.dirty, Optional.empty());
addNode(6, Node.State.ready, Optional.empty());
Node node7 = addNode(7, Node.State.active, Optional.of(target));
- nodeRepository.nodes().setRemovable(application.getApplicationId(), List.of(node7), false);
+ nodeRepository.nodes().setRemovable(NodeList.of(node7), false);
infraDeployer.getDeployment(application.getApplicationId()).orElseThrow().activate();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index c1c4630f431..fb773f19b8a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -636,7 +636,7 @@ public class VirtualNodeProvisioningTest {
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
// Deactivate any retired nodes - usually done by the RetiredExpirer
- tester.nodeRepository().nodes().setRemovable(app1, tester.getNodes(app1).retired().asList(), false);
+ tester.nodeRepository().nodes().setRemovable(tester.getNodes(app1).retired(), false);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
if (expectedReuse) {
diff --git a/parent/pom.xml b/parent/pom.xml
index fb6b0abf395..48e28895891 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -1142,7 +1142,7 @@
<felix.log.version>1.0.1</felix.log.version>
<findbugs.version>3.0.2</findbugs.version> <!-- Should be kept in sync with guava -->
<hdrhistogram.version>2.1.12</hdrhistogram.version>
- <jetty.version>11.0.13</jetty.version>
+ <jetty.version>11.0.14</jetty.version>
<jetty-servlet-api.version>5.0.2</jetty-servlet-api.version>
<jjwt.version>0.11.2</jjwt.version>
<jna.version>5.11.0</jna.version>
diff --git a/screwdriver.yaml b/screwdriver.yaml
index a43abbafa54..16f4c9ac9f4 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -279,7 +279,6 @@ jobs:
--define="_topdir $WORKDIR/vespa-rpmbuild" \
--define "debug_package %{nil}" \
--define "_debugsource_template %{nil}" \
- --define "_run_unit_tests 1" \
--define '_cmake_extra_opts "-DDEFAULT_VESPA_CPU_ARCH_FLAGS=-msse3 -mcx16 -mtune=intel"' \
*.src.rpm
rm -f *.src.rpm
diff --git a/searchlib/src/tests/attribute/attribute_test.cpp b/searchlib/src/tests/attribute/attribute_test.cpp
index 18896ca24c1..16b5ef78951 100644
--- a/searchlib/src/tests/attribute/attribute_test.cpp
+++ b/searchlib/src/tests/attribute/attribute_test.cpp
@@ -898,6 +898,7 @@ AttributeTest::testSingle()
AttributePtr ptr = createAttribute("sv-int32", Config(BasicType::INT32, CollectionType::SINGLE));
ptr->updateStat(true);
EXPECT_EQ(12288u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(0u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<IntegerAttribute, AttributeVector::largeint_t, int32_t>(ptr, values);
}
@@ -911,7 +912,8 @@ AttributeTest::testSingle()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("sv-post-int32", cfg);
ptr->updateStat(true);
- EXPECT_EQ(231640u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(887756u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(656444u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<IntegerAttribute, AttributeVector::largeint_t, int32_t>(ptr, values);
}
@@ -923,6 +925,7 @@ AttributeTest::testSingle()
AttributePtr ptr = createAttribute("sv-float", Config(BasicType::FLOAT, CollectionType::SINGLE));
ptr->updateStat(true);
EXPECT_EQ(12288u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(0u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<FloatingPointAttribute, double, float>(ptr, values);
}
@@ -931,7 +934,8 @@ AttributeTest::testSingle()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("sv-post-float", cfg);
ptr->updateStat(true);
- EXPECT_EQ(231640u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(887756u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(656444u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<FloatingPointAttribute, double, float>(ptr, values);
}
@@ -943,7 +947,8 @@ AttributeTest::testSingle()
{
AttributePtr ptr = createAttribute("sv-string", Config(BasicType::STRING, CollectionType::SINGLE));
ptr->updateStat(true);
- EXPECT_EQ(74648u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(403552u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(328576u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<StringAttribute, string, string>(ptr, values);
}
@@ -952,7 +957,8 @@ AttributeTest::testSingle()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("sv-fs-string", cfg);
ptr->updateStat(true);
- EXPECT_EQ(245024u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(902256u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(657088u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testSingle<StringAttribute, string, string>(ptr, values);
}
@@ -1083,7 +1089,8 @@ AttributeTest::testArray()
{
AttributePtr ptr = createAttribute("a-int32", Config(BasicType::INT32, CollectionType::ARRAY));
ptr->updateStat(true);
- EXPECT_EQ(12320u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(1474480u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(1462192u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<IntegerAttribute, AttributeVector::largeint_t>(ptr, values);
}
@@ -1092,7 +1099,8 @@ AttributeTest::testArray()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("flags", cfg);
ptr->updateStat(true);
- EXPECT_EQ(12320u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(1474480u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(1462192u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<IntegerAttribute, AttributeVector::largeint_t>(ptr, values);
}
@@ -1101,7 +1109,8 @@ AttributeTest::testArray()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("a-fs-int32", cfg);
ptr->updateStat(true);
- EXPECT_EQ(253608u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(2371884u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(2118656u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<IntegerAttribute, AttributeVector::largeint_t>(ptr, values);
}
@@ -1119,7 +1128,8 @@ AttributeTest::testArray()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("a-fs-float", cfg);
ptr->updateStat(true);
- EXPECT_EQ(253608u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(2371884u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(2118656u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<FloatingPointAttribute, double>(ptr, values);
}
@@ -1130,7 +1140,8 @@ AttributeTest::testArray()
{
AttributePtr ptr = createAttribute("a-string", Config(BasicType::STRING, CollectionType::ARRAY));
ptr->updateStat(true);
- EXPECT_EQ(74680u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(1865744u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(1790768u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<StringAttribute, string>(ptr, values);
}
@@ -1139,7 +1150,8 @@ AttributeTest::testArray()
cfg.setFastSearch(true);
AttributePtr ptr = createAttribute("afs-string", cfg);
ptr->updateStat(true);
- EXPECT_EQ(266992u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(2386384u, ptr->getStatus().getAllocated());
+ EXPECT_EQ(2119300u, ptr->getStatus().getUsed());
addDocs(ptr, numDocs);
testArray<StringAttribute, string>(ptr, values);
}
diff --git a/searchlib/src/tests/attribute/raw_attribute/raw_attribute_test.cpp b/searchlib/src/tests/attribute/raw_attribute/raw_attribute_test.cpp
index 82e4fd065cf..9f728cc0482 100644
--- a/searchlib/src/tests/attribute/raw_attribute/raw_attribute_test.cpp
+++ b/searchlib/src/tests/attribute/raw_attribute/raw_attribute_test.cpp
@@ -4,6 +4,7 @@
#include <vespa/searchlib/attribute/attributefactory.h>
#include <vespa/searchcommon/attribute/config.h>
#include <vespa/vespalib/gtest/gtest.h>
+#include <filesystem>
#include <memory>
using search::AttributeFactory;
@@ -19,6 +20,8 @@ std::vector<char> empty;
vespalib::string hello("hello");
vespalib::ConstArrayRef<char> raw_hello(hello.c_str(), hello.size());
+std::filesystem::path attr_path("raw.dat");
+
std::vector<char> as_vector(vespalib::stringref value) {
return {value.data(), value.data() + value.size()};
}
@@ -27,6 +30,10 @@ std::vector<char> as_vector(vespalib::ConstArrayRef<char> value) {
return {value.data(), value.data() + value.size()};
}
+void remove_saved_attr() {
+ std::filesystem::remove(attr_path);
+}
+
class RawAttributeTest : public ::testing::Test
{
protected:
@@ -36,6 +43,7 @@ protected:
RawAttributeTest();
~RawAttributeTest() override;
std::vector<char> get_raw(uint32_t docid);
+ void reset_attr(bool add_reserved);
};
@@ -44,10 +52,7 @@ RawAttributeTest::RawAttributeTest()
_attr(),
_raw(nullptr)
{
- Config cfg(BasicType::RAW, CollectionType::SINGLE);
- _attr = AttributeFactory::createAttribute("raw", cfg);
- _raw = &dynamic_cast<SingleRawAttribute&>(*_attr);
- _attr->addReservedDoc();
+ reset_attr(true);
}
RawAttributeTest::~RawAttributeTest() = default;
@@ -58,6 +63,17 @@ RawAttributeTest::get_raw(uint32_t docid)
return as_vector(_raw->get_raw(docid));
}
+void
+RawAttributeTest::reset_attr(bool add_reserved)
+{
+ Config cfg(BasicType::RAW, CollectionType::SINGLE);
+ _attr = AttributeFactory::createAttribute("raw", cfg);
+ _raw = &dynamic_cast<SingleRawAttribute&>(*_attr);
+ if (add_reserved) {
+ _attr->addReservedDoc();
+ }
+}
+
TEST_F(RawAttributeTest, can_set_and_clear_value)
{
EXPECT_TRUE(_attr->addDocs(10));
@@ -89,4 +105,24 @@ TEST_F(RawAttributeTest, implements_serialize_for_sort) {
EXPECT_EQ(-1, _attr->serializeForDescendingSort(1, buf, sizeof(buf)));
}
+TEST_F(RawAttributeTest, save_and_load)
+{
+ auto mini_test = as_vector("mini test");
+ remove_saved_attr();
+ _attr->addDocs(10);
+ _attr->commit();
+ _raw->set_raw(1, raw_hello);
+ _raw->set_raw(2, mini_test);
+ _attr->setCreateSerialNum(20);
+ _attr->save();
+ reset_attr(false);
+ _attr->load();
+ EXPECT_EQ(11, _attr->getCommittedDocIdLimit());
+ EXPECT_EQ(11, _attr->getStatus().getNumDocs());
+ EXPECT_EQ(20, _attr->getCreateSerialNum());
+ EXPECT_EQ(as_vector("hello"), as_vector(_raw->get_raw(1)));
+ EXPECT_EQ(mini_test, as_vector(_raw->get_raw(2)));
+ remove_saved_attr();
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
index 4f7a7934d4c..1b1c1181666 100644
--- a/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/memory_index/memory_index_test.cpp
@@ -462,22 +462,26 @@ TEST(MemoryIndexTest, require_that_num_docs_and_doc_id_limit_is_returned)
TEST(MemoryIndexTest, require_that_we_understand_the_memory_footprint)
{
- constexpr size_t BASE_SIZE = 188172u;
+ constexpr size_t BASE_ALLOCATED = 1172040u;
+ constexpr size_t BASE_USED = 984116u;
{
MySetup setup;
Index index(setup);
EXPECT_EQ(0u, index.index.getStaticMemoryFootprint());
EXPECT_EQ(index.index.getStaticMemoryFootprint(), index.index.getMemoryUsage().allocatedBytes());
+ EXPECT_EQ(0u, index.index.getMemoryUsage().usedBytes());
}
{
Index index(MySetup().field("f1"));
- EXPECT_EQ(BASE_SIZE, index.index.getStaticMemoryFootprint());
+ EXPECT_EQ(BASE_ALLOCATED, index.index.getStaticMemoryFootprint());
EXPECT_EQ(index.index.getStaticMemoryFootprint(), index.index.getMemoryUsage().allocatedBytes());
+ EXPECT_EQ(BASE_USED, index.index.getMemoryUsage().usedBytes());
}
{
Index index(MySetup().field("f1").field("f2"));
- EXPECT_EQ(2 * BASE_SIZE, index.index.getStaticMemoryFootprint());
+ EXPECT_EQ(2 * BASE_ALLOCATED, index.index.getStaticMemoryFootprint());
EXPECT_EQ(index.index.getStaticMemoryFootprint(), index.index.getMemoryUsage().allocatedBytes());
+ EXPECT_EQ(2 * BASE_USED, index.index.getMemoryUsage().usedBytes());
}
}
@@ -511,12 +515,12 @@ TEST(MemoryIndexTest, require_that_we_can_fake_bit_vector)
Searchable &searchable = index.index;
Blueprint::UP res = searchable.createBlueprint(requestContext, fields, makeTerm(foo));
- EXPECT_TRUE(res.get() != NULL);
+ EXPECT_TRUE(res);
res->fetchPostings(search::queryeval::ExecuteInfo::TRUE);
SearchIterator::UP search = res->createSearch(*match_data, true);
- EXPECT_TRUE(search.get() != NULL);
- EXPECT_TRUE(dynamic_cast<BooleanMatchIteratorWrapper *>(search.get()) != NULL);
+ EXPECT_TRUE(search);
+ EXPECT_TRUE(dynamic_cast<BooleanMatchIteratorWrapper *>(search.get()) != nullptr);
search->initFullRange();
EXPECT_EQ("1,3", toString(*search));
}
diff --git a/searchlib/src/tests/predicate/document_features_store_test.cpp b/searchlib/src/tests/predicate/document_features_store_test.cpp
index fbd4629cc4a..c37fe2739ca 100644
--- a/searchlib/src/tests/predicate/document_features_store_test.cpp
+++ b/searchlib/src/tests/predicate/document_features_store_test.cpp
@@ -165,17 +165,17 @@ TEST("require that both features and ranges are removed by 'remove'") {
TEST("require that both features and ranges counts towards memory usage") {
DocumentFeaturesStore features_store(10);
- EXPECT_EQUAL(364u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(328152u, features_store.getMemoryUsage().usedBytes());
PredicateTreeAnnotations annotations;
annotations.features.push_back(PredicateHash::hash64("foo=100-199"));
features_store.insert(annotations, doc_id);
- EXPECT_EQUAL(372u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(328160u, features_store.getMemoryUsage().usedBytes());
annotations.features.clear();
annotations.range_features.push_back({"foo", 100, 199});
features_store.insert(annotations, doc_id + 1);
- EXPECT_EQUAL(468u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(328256u, features_store.getMemoryUsage().usedBytes());
}
TEST("require that DocumentFeaturesStore can be serialized") {
@@ -205,17 +205,17 @@ TEST("require that serialization cleans up wordstore") {
PredicateTreeAnnotations annotations;
annotations.range_features.push_back({"foo", 100, 199});
features_store.insert(annotations, doc_id);
- EXPECT_EQUAL(460u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(328248u, features_store.getMemoryUsage().usedBytes());
annotations.range_features.push_back({"bar", 100, 199});
features_store.insert(annotations, doc_id + 1);
- EXPECT_EQUAL(848u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(328636u, features_store.getMemoryUsage().usedBytes());
features_store.remove(doc_id + 1);
- EXPECT_EQUAL(800u, features_store.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(328588u, features_store.getMemoryUsage().usedBytes());
vespalib::DataBuffer buffer;
features_store.serialize(buffer);
DocumentFeaturesStore features_store2(buffer);
- EXPECT_EQUAL(460u, features_store2.getMemoryUsage().usedBytes());
+ EXPECT_EQUAL(328248u, features_store2.getMemoryUsage().usedBytes());
}
diff --git a/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt b/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt
index ebd4dc6998c..c966c4f81b6 100644
--- a/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/attribute/CMakeLists.txt
@@ -106,6 +106,8 @@ vespa_add_library(searchlib_attribute OBJECT
postingstore.cpp
predicate_attribute.cpp
raw_buffer_store.cpp
+ raw_buffer_store_reader.cpp
+ raw_buffer_store_writer.cpp
raw_buffer_type_mapper.cpp
raw_multi_value_read_view.cpp
readerbase.cpp
@@ -128,6 +130,8 @@ vespa_add_library(searchlib_attribute OBJECT
single_numeric_enum_search_context.cpp
single_numeric_search_context.cpp
single_raw_attribute.cpp
+ single_raw_attribute_loader.cpp
+ single_raw_attribute_saver.cpp
single_small_numeric_search_context.cpp
single_string_enum_search_context.cpp
single_string_enum_hint_search_context.cpp
diff --git a/searchlib/src/vespa/searchlib/attribute/attributevector.cpp b/searchlib/src/vespa/searchlib/attribute/attributevector.cpp
index 7f8f3f92f9e..e6b584d29b2 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributevector.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attributevector.cpp
@@ -590,7 +590,6 @@ AttributeVector::getEstimatedSaveByteSize() const
uint64_t idxFileSize = 0;
uint64_t udatFileSize = 0;
size_t fixedWidth = getFixedWidth();
- vespalib::MemoryUsage values_mem_usage = getEnumStoreValuesMemoryUsage();
if (hasMultiValue()) {
idxFileSize = headerSize + sizeof(uint32_t) * (docIdLimit + 1);
@@ -603,6 +602,7 @@ AttributeVector::getEstimatedSaveByteSize() const
if (fixedWidth != 0) {
udatFileSize = headerSize + fixedWidth * uniqueValueCount;
} else {
+ vespalib::MemoryUsage values_mem_usage = getEnumStoreValuesMemoryUsage();
size_t unique_values_bytes = values_mem_usage.usedBytes() -
(values_mem_usage.deadBytes() + values_mem_usage.allocatedBytesOnHold());
size_t ref_count_mem_usage = sizeof(uint32_t) * uniqueValueCount;
diff --git a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
index 6d6022d59dd..c5188b89129 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
@@ -74,7 +74,7 @@ template <typename B>
vespalib::MemoryUsage
EnumAttribute<B>::getEnumStoreValuesMemoryUsage() const
{
- return _enumStore.get_values_memory_usage();
+ return _enumStore.get_dynamic_values_memory_usage();
}
template <typename B>
diff --git a/searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp b/searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp
index 74a37ca8394..a2090185158 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumattributesaver.cpp
@@ -8,16 +8,13 @@
namespace search {
-EnumAttributeSaver::
-EnumAttributeSaver(const IEnumStore &enumStore)
+EnumAttributeSaver::EnumAttributeSaver(IEnumStore &enumStore)
: _enumStore(enumStore),
_enumerator(enumStore.make_enumerator())
{
}
-EnumAttributeSaver::~EnumAttributeSaver()
-{
-}
+EnumAttributeSaver::~EnumAttributeSaver() = default;
void
EnumAttributeSaver::writeUdat(IAttributeSaveTarget &saveTarget)
diff --git a/searchlib/src/vespa/searchlib/attribute/enumattributesaver.h b/searchlib/src/vespa/searchlib/attribute/enumattributesaver.h
index d6dceb4772a..47af8c0452e 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumattributesaver.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumattributesaver.h
@@ -24,7 +24,7 @@ private:
std::unique_ptr<Enumerator> _enumerator;
public:
- EnumAttributeSaver(const IEnumStore &enumStore);
+ EnumAttributeSaver(IEnumStore &enumStore);
~EnumAttributeSaver();
void writeUdat(IAttributeSaveTarget &saveTarget);
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h b/searchlib/src/vespa/searchlib/attribute/enumstore.h
index 0a0b2040b2a..59524f3788a 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h
@@ -91,7 +91,12 @@ public:
uint32_t get_num_uniques() const override { return _dict->get_num_uniques(); }
bool is_folded() const { return _is_folded;}
- vespalib::MemoryUsage get_values_memory_usage() const override { return _store.get_allocator().get_data_store().getMemoryUsage(); }
+ vespalib::MemoryUsage get_values_memory_usage() const override {
+ return _store.get_allocator().get_data_store().getMemoryUsage();
+ }
+ vespalib::MemoryUsage get_dynamic_values_memory_usage() const {
+ return _store.get_allocator().get_data_store().getDynamicMemoryUsage();
+ }
vespalib::MemoryUsage get_dictionary_memory_usage() const override { return _dict->get_memory_usage(); }
vespalib::AddressSpace get_values_address_space_usage() const override;
@@ -207,7 +212,7 @@ public:
void inc_compaction_count() override {
_store.get_allocator().get_data_store().inc_compaction_count();
}
- std::unique_ptr<Enumerator> make_enumerator() const override;
+ std::unique_ptr<Enumerator> make_enumerator() override;
std::unique_ptr<EntryComparator> allocate_comparator() const override;
// Methods below are only relevant for strings, and are templated to only be instantiated on demand.
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
index b863e56fb4a..bc767a296eb 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
@@ -259,7 +259,7 @@ EnumStoreT<EntryT>::consider_compact_dictionary(const CompactionStrategy& compac
template <typename EntryT>
std::unique_ptr<IEnumStore::Enumerator>
-EnumStoreT<EntryT>::make_enumerator() const
+EnumStoreT<EntryT>::make_enumerator()
{
return std::make_unique<Enumerator>(*_dict, _store.get_data_store(), false);
}
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
index 57886511221..2157db3e5ed 100644
--- a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
+++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
@@ -72,7 +72,7 @@ public:
enumstore::EnumeratedLoader make_enumerated_loader();
enumstore::EnumeratedPostingsLoader make_enumerated_postings_loader();
- virtual std::unique_ptr<Enumerator> make_enumerator() const = 0;
+ virtual std::unique_ptr<Enumerator> make_enumerator() = 0;
virtual std::unique_ptr<vespalib::datastore::EntryComparator> allocate_comparator() const = 0;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattributesaver.cpp b/searchlib/src/vespa/searchlib/attribute/multienumattributesaver.cpp
index 083f0409821..87326f3628f 100644
--- a/searchlib/src/vespa/searchlib/attribute/multienumattributesaver.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/multienumattributesaver.cpp
@@ -80,7 +80,7 @@ MultiValueEnumAttributeSaver<MultiValueT>::
MultiValueEnumAttributeSaver(GenerationHandler::Guard &&guard,
const attribute::AttributeHeader &header,
const MultiValueMapping &mvMapping,
- const IEnumStore &enumStore)
+ IEnumStore &enumStore)
: Parent(std::move(guard), header, mvMapping),
_mvMapping(mvMapping),
_enumSaver(enumStore),
diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattributesaver.h b/searchlib/src/vespa/searchlib/attribute/multienumattributesaver.h
index 44c45567733..7c127ac0781 100644
--- a/searchlib/src/vespa/searchlib/attribute/multienumattributesaver.h
+++ b/searchlib/src/vespa/searchlib/attribute/multienumattributesaver.h
@@ -35,7 +35,7 @@ public:
MultiValueEnumAttributeSaver(GenerationHandler::Guard &&guard,
const attribute::AttributeHeader &header,
const MultiValueMapping &mvMapping,
- const IEnumStore &enumStore);
+ IEnumStore &enumStore);
~MultiValueEnumAttributeSaver() override;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/raw_buffer_store.h b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store.h
index 60132c70852..2731157b13c 100644
--- a/searchlib/src/vespa/searchlib/attribute/raw_buffer_store.h
+++ b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store.h
@@ -30,6 +30,7 @@ public:
std::unique_ptr<vespalib::datastore::ICompactionContext> start_compact(const vespalib::datastore::CompactionStrategy& compaction_strategy);
void reclaim_memory(generation_t oldest_used_gen) { _array_store.reclaim_memory(oldest_used_gen); }
void assign_generation(generation_t current_gen) { _array_store.assign_generation(current_gen); }
+ void set_initializing(bool initializing) { _array_store.setInitializing(initializing); }
};
}
diff --git a/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_reader.cpp b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_reader.cpp
new file mode 100644
index 00000000000..8fbf6fa6ea8
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_reader.cpp
@@ -0,0 +1,34 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "raw_buffer_store_reader.h"
+#include "raw_buffer_store.h"
+#include "blob_sequence_reader.h"
+
+using vespalib::datastore::EntryRef;
+
+namespace search::attribute {
+
+RawBufferStoreReader::RawBufferStoreReader(RawBufferStore& store, BlobSequenceReader& reader)
+ : _store(store),
+ _reader(reader),
+ _buffer(1024)
+{
+}
+
+RawBufferStoreReader::~RawBufferStoreReader() = default;
+
+EntryRef
+RawBufferStoreReader::read()
+{
+ uint32_t size = _reader.getNextSize();
+ if (size == 0) {
+ return EntryRef();
+ }
+ if (size > _buffer.size()) {
+ _buffer.resize(size + 1024);
+ }
+ _reader.readBlob(_buffer.data(), size);
+ return _store.set({_buffer.data(), size});
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_reader.h b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_reader.h
new file mode 100644
index 00000000000..e58713ed0b2
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_reader.h
@@ -0,0 +1,29 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/datastore/entryref.h>
+#include <vespa/vespalib/stllike/allocator.h>
+#include <vector>
+
+namespace search::attribute {
+
+class BlobSequenceReader;
+class RawBufferStore;
+
+/**
+ * Class for reading raw values into a raw buffer store from a
+ * BlobSequenceReader.
+ */
+class RawBufferStoreReader
+{
+ RawBufferStore& _store;
+ BlobSequenceReader& _reader;
+ std::vector<char, vespalib::allocator_large<char>> _buffer;
+public:
+ RawBufferStoreReader(RawBufferStore& store, BlobSequenceReader& reader);
+ ~RawBufferStoreReader();
+ vespalib::datastore::EntryRef read();
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_writer.cpp b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_writer.cpp
new file mode 100644
index 00000000000..78aa3ddd2eb
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_writer.cpp
@@ -0,0 +1,33 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "raw_buffer_store_writer.h"
+#include "raw_buffer_store.h"
+#include <vespa/searchlib/util/bufferwriter.h>
+
+using vespalib::datastore::EntryRef;
+
+namespace search::attribute {
+
+RawBufferStoreWriter::RawBufferStoreWriter(const RawBufferStore& store, BufferWriter& writer)
+ : _store(store),
+ _writer(writer)
+{
+}
+
+RawBufferStoreWriter::~RawBufferStoreWriter() = default;
+
+void
+RawBufferStoreWriter::write(EntryRef ref)
+{
+ if (ref.valid()) {
+ auto raw = _store.get(ref);
+ uint32_t size = raw.size();
+ _writer.write(&size, sizeof(size));
+ _writer.write(raw.data(), raw.size());
+ } else {
+ uint32_t size = 0;
+ _writer.write(&size, sizeof(size));
+ }
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_writer.h b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_writer.h
new file mode 100644
index 00000000000..cfcd6fa9093
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/raw_buffer_store_writer.h
@@ -0,0 +1,26 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/datastore/entryref.h>
+
+namespace search { class BufferWriter; }
+
+namespace search::attribute {
+
+class RawBufferStore;
+
+/**
+ * Class for writing raw values from a raw buffer store to a BufferWriter.
+ */
+class RawBufferStoreWriter
+{
+ const RawBufferStore& _store;
+ BufferWriter& _writer;
+public:
+ RawBufferStoreWriter(const RawBufferStore& store, BufferWriter& writer);
+ ~RawBufferStoreWriter();
+ void write(vespalib::datastore::EntryRef ref);
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp b/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp
index 0c3180875a3..a8f13df0a85 100644
--- a/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.cpp
@@ -16,7 +16,7 @@ namespace search::attribute {
ReferenceAttributeSaver::ReferenceAttributeSaver(GenerationHandler::Guard &&guard,
const AttributeHeader &header,
EntryRefVector&& indices,
- const Store &store)
+ Store &store)
: AttributeSaver(std::move(guard), header),
_indices(std::move(indices)),
_store(store),
@@ -25,9 +25,7 @@ ReferenceAttributeSaver::ReferenceAttributeSaver(GenerationHandler::Guard &&guar
}
-ReferenceAttributeSaver::~ReferenceAttributeSaver()
-{
-}
+ReferenceAttributeSaver::~ReferenceAttributeSaver() = default;
namespace {
diff --git a/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h b/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h
index c413d01c386..fa3fafc3254 100644
--- a/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h
+++ b/searchlib/src/vespa/searchlib/attribute/reference_attribute_saver.h
@@ -41,9 +41,9 @@ public:
ReferenceAttributeSaver(vespalib::GenerationHandler::Guard &&guard,
const AttributeHeader &header,
EntryRefVector&& indices,
- const Store &store);
+ Store &store);
- virtual ~ReferenceAttributeSaver();
+ ~ReferenceAttributeSaver() override;
};
}
diff --git a/searchlib/src/vespa/searchlib/attribute/single_raw_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute.cpp
index 9746929c666..67bbd3c945d 100644
--- a/searchlib/src/vespa/searchlib/attribute/single_raw_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute.cpp
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "single_raw_attribute.h"
+#include "single_raw_attribute_loader.h"
+#include "single_raw_attribute_saver.h"
#include <vespa/searchcommon/attribute/config.h>
#include <vespa/vespalib/datastore/array_store.hpp>
@@ -171,4 +173,29 @@ SingleRawAttribute::onSerializeForDescendingSort(DocId doc, void * serTo, long a
return buf.size();
}
+std::unique_ptr<AttributeSaver>
+SingleRawAttribute::onInitSave(vespalib::stringref fileName)
+{
+ vespalib::GenerationHandler::Guard guard(getGenerationHandler().takeGuard());
+ return std::make_unique<SingleRawAttributeSaver>
+ (std::move(guard),
+ this->createAttributeHeader(fileName),
+ make_entry_ref_vector_snapshot(_ref_vector, getCommittedDocIdLimit()),
+ _raw_store);
+}
+
+bool
+SingleRawAttribute::onLoad(vespalib::Executor* executor)
+{
+ SingleRawAttributeLoader loader(*this, _ref_vector, _raw_store);
+ return loader.on_load(executor);
+}
+
+bool
+SingleRawAttribute::isUndefined(DocId docid) const
+{
+ auto raw = get_raw(docid);
+ return raw.empty();
+}
+
}
diff --git a/searchlib/src/vespa/searchlib/attribute/single_raw_attribute.h b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute.h
index d7ea321a3d4..d0baaaf91cd 100644
--- a/searchlib/src/vespa/searchlib/attribute/single_raw_attribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute.h
@@ -22,6 +22,8 @@ class SingleRawAttribute : public NotImplementedAttribute
vespalib::MemoryUsage update_stat();
EntryRef acquire_entry_ref(DocId docid) const noexcept { return _ref_vector.acquire_elem_ref(docid).load_acquire(); }
+ bool onLoad(vespalib::Executor *executor) override;
+ std::unique_ptr<AttributeSaver> onInitSave(vespalib::stringref fileName) override;
public:
SingleRawAttribute(const vespalib::string& name, const Config& config);
~SingleRawAttribute() override;
@@ -32,6 +34,13 @@ public:
bool addDoc(DocId &docId) override;
vespalib::ConstArrayRef<char> get_raw(DocId docid) const override;
void set_raw(DocId docid, vespalib::ConstArrayRef<char> raw);
+ void update(DocId docid, vespalib::ConstArrayRef<char> raw) { set_raw(docid, raw); }
+ void append(DocId docid, vespalib::ConstArrayRef<char> raw, int32_t weight) {
+ (void) docid;
+ (void) raw;
+ (void) weight;
+ }
+ bool isUndefined(DocId docid) const override;
uint32_t clearDoc(DocId docId) override;
long onSerializeForAscendingSort(DocId, void *, long, const common::BlobConverter *) const override;
long onSerializeForDescendingSort(DocId, void *, long, const common::BlobConverter *) const override;
diff --git a/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_loader.cpp b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_loader.cpp
new file mode 100644
index 00000000000..9ccb6c9ef26
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_loader.cpp
@@ -0,0 +1,51 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "single_raw_attribute_loader.h"
+#include "attributevector.h"
+#include "blob_sequence_reader.h"
+#include "raw_buffer_store.h"
+#include "raw_buffer_store_reader.h"
+
+using vespalib::datastore::EntryRef;
+
+namespace search::attribute {
+
+SingleRawAttributeLoader::SingleRawAttributeLoader(AttributeVector& attr, RefVector& ref_vector, RawBufferStore& raw_store)
+ : _attr(attr),
+ _ref_vector(ref_vector),
+ _raw_store(raw_store)
+{
+}
+
+SingleRawAttributeLoader::~SingleRawAttributeLoader() = default;
+
+void
+SingleRawAttributeLoader::load_raw_store(BlobSequenceReader& reader, uint32_t docid_limit)
+{
+ RawBufferStoreReader raw_reader(_raw_store, reader);
+ _raw_store.set_initializing(true);
+ for (uint32_t lid = 0; lid < docid_limit; ++lid) {
+ _ref_vector.push_back(AtomicEntryRef(raw_reader.read()));
+ }
+ _raw_store.set_initializing(false);
+}
+
+bool
+SingleRawAttributeLoader::on_load(vespalib::Executor*)
+{
+ BlobSequenceReader reader(_attr);
+ if (!reader.hasData()) {
+ return false;
+ }
+ _attr.setCreateSerialNum(reader.getCreateSerialNum());
+ uint32_t docid_limit(reader.getDocIdLimit());
+ _ref_vector.reset();
+ _ref_vector.unsafe_reserve(docid_limit);
+ load_raw_store(reader, docid_limit);
+ _attr.commit();
+ _attr.getStatus().setNumDocs(docid_limit);
+ _attr.setCommittedDocIdLimit(docid_limit);
+ return true;
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_loader.h b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_loader.h
new file mode 100644
index 00000000000..1ed2fd05b2d
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_loader.h
@@ -0,0 +1,36 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/datastore/atomic_entry_ref.h>
+#include <vespa/vespalib/util/rcuvector.h>
+
+namespace search { class AttributeVector; }
+
+namespace vespalib { class Executor; }
+
+namespace search::attribute {
+
+class BlobSequenceReader;
+class RawBufferStore;
+
+/**
+ * Class for loading a single raw attribute.
+ */
+class SingleRawAttributeLoader
+{
+ using AtomicEntryRef = vespalib::datastore::AtomicEntryRef;
+ using RefVector = vespalib::RcuVectorBase<AtomicEntryRef>;
+
+ AttributeVector& _attr;
+ RefVector& _ref_vector;
+ RawBufferStore& _raw_store;
+
+ void load_raw_store(BlobSequenceReader& reader, uint32_t docid_limit);
+public:
+ SingleRawAttributeLoader(AttributeVector& attr, RefVector& ref_vector, RawBufferStore& raw_store);
+ ~SingleRawAttributeLoader();
+ bool on_load(vespalib::Executor*);
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_saver.cpp b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_saver.cpp
new file mode 100644
index 00000000000..260010a85a0
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_saver.cpp
@@ -0,0 +1,42 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "single_raw_attribute_saver.h"
+#include "raw_buffer_store.h"
+#include "raw_buffer_store_writer.h"
+#include <vespa/searchlib/attribute/iattributesavetarget.h>
+#include <vespa/searchlib/util/bufferwriter.h>
+
+namespace search::attribute {
+
+SingleRawAttributeSaver::SingleRawAttributeSaver(vespalib::GenerationHandler::Guard &&guard,
+ const attribute::AttributeHeader &header,
+ EntryRefVector&& ref_vector,
+ const RawBufferStore& raw_store)
+ : AttributeSaver(std::move(guard), header),
+ _ref_vector(std::move(ref_vector)),
+ _raw_store(raw_store)
+{
+}
+
+SingleRawAttributeSaver::~SingleRawAttributeSaver() = default;
+
+void
+SingleRawAttributeSaver::save_raw_store(BufferWriter& writer) const
+{
+ RawBufferStoreWriter raw_writer(_raw_store, writer);
+ for (auto ref : _ref_vector) {
+ raw_writer.write(ref);
+ }
+ writer.flush();
+}
+
+bool
+SingleRawAttributeSaver::onSave(IAttributeSaveTarget &saveTarget)
+{
+ std::unique_ptr<search::BufferWriter> writer(saveTarget.datWriter().allocBufferWriter());
+ assert(!saveTarget.getEnumerated());
+ save_raw_store(*writer);
+ return true;
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_saver.h b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_saver.h
new file mode 100644
index 00000000000..ebcdc504231
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/attribute/single_raw_attribute_saver.h
@@ -0,0 +1,32 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "attributesaver.h"
+#include "save_utils.h"
+
+namespace search { class BufferWriter; }
+
+namespace search::attribute {
+
+class RawBufferStore;
+
+/**
+ * Class for saving a single raw attribute.
+ */
+class SingleRawAttributeSaver : public AttributeSaver
+{
+ EntryRefVector _ref_vector;
+ const RawBufferStore& _raw_store;
+
+ void save_raw_store(BufferWriter& writer) const;
+ bool onSave(IAttributeSaveTarget &saveTarget) override;
+public:
+ SingleRawAttributeSaver(vespalib::GenerationHandler::Guard &&guard,
+ const attribute::AttributeHeader &header,
+ EntryRefVector&& ref_vector,
+ const RawBufferStore& raw_store);
+ ~SingleRawAttributeSaver();
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp b/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp
index 1857e942136..1f3f3e104d1 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.cpp
@@ -4,7 +4,6 @@
#include "iattributesavetarget.h"
#include <vespa/searchlib/util/bufferwriter.h>
-
using search::attribute::EntryRefVector;
using vespalib::GenerationHandler;
@@ -14,25 +13,20 @@ SingleValueEnumAttributeSaver::
SingleValueEnumAttributeSaver(GenerationHandler::Guard &&guard,
const attribute::AttributeHeader &header,
EntryRefVector &&indices,
- const IEnumStore &enumStore)
+ IEnumStore &enumStore)
: AttributeSaver(std::move(guard), header),
_indices(std::move(indices)),
_enumSaver(enumStore)
{
}
-
-SingleValueEnumAttributeSaver::~SingleValueEnumAttributeSaver()
-{
-}
-
+SingleValueEnumAttributeSaver::~SingleValueEnumAttributeSaver() = default;
bool
SingleValueEnumAttributeSaver::onSave(IAttributeSaveTarget &saveTarget)
{
_enumSaver.writeUdat(saveTarget);
- std::unique_ptr<search::BufferWriter> datWriter(saveTarget.datWriter().
- allocBufferWriter());
+ std::unique_ptr<search::BufferWriter> datWriter(saveTarget.datWriter().allocBufferWriter());
assert(saveTarget.getEnumerated());
auto &enumerator = _enumSaver.get_enumerator();
enumerator.enumerateValues();
@@ -49,5 +43,4 @@ SingleValueEnumAttributeSaver::onSave(IAttributeSaveTarget &saveTarget)
return true;
}
-
} // namespace search
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.h b/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.h
index af83d36cbbb..7f1c62c7720 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.h
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattributesaver.h
@@ -22,7 +22,7 @@ public:
SingleValueEnumAttributeSaver(vespalib::GenerationHandler::Guard &&guard,
const attribute::AttributeHeader &header,
attribute::EntryRefVector &&indices,
- const IEnumStore &enumStore);
+ IEnumStore &enumStore);
~SingleValueEnumAttributeSaver() override;
};
diff --git a/searchlib/src/vespa/searchlib/bitcompression/compression.h b/searchlib/src/vespa/searchlib/bitcompression/compression.h
index 7c5ba3e94ca..a77d82d9e8f 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/compression.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/compression.h
@@ -1248,9 +1248,7 @@ public:
void setReadContext(search::ComprFileReadContext *readContext) {
_readContext = readContext;
}
- search::ComprFileReadContext *getReadContext() const {
- return _readContext;
- }
+
void readComprBuffer() {
_readContext->readComprBuffer();
}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/feature_store.h b/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
index 5f5e782a382..53588fa2894 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/feature_store.h
@@ -154,12 +154,8 @@ public:
void setupForReadFeatures(vespalib::datastore::EntryRef ref, DecodeContextCooked &decoder) const {
const uint8_t * bits = getBits(ref);
decoder.setByteCompr(bits);
- uint32_t bufferId = RefType(ref).bufferId();
- const vespalib::datastore::BufferState &state = _store.getBufferState(bufferId);
- decoder.setEnd(
- ((_store.getEntryArray<uint8_t>(RefType(0, bufferId), buffer_array_size) + state.size() -
- bits) + 7) / 8,
- false);
+ constexpr uint32_t maxOffset = RefType::offsetSize() * buffer_array_size;
+ decoder.setEnd(maxOffset, false);
}
/**
diff --git a/searchsummary/src/tests/docsummary/attributedfw/attributedfw_test.cpp b/searchsummary/src/tests/docsummary/attributedfw/attributedfw_test.cpp
index bba3a5ab506..88b3423a199 100644
--- a/searchsummary/src/tests/docsummary/attributedfw/attributedfw_test.cpp
+++ b/searchsummary/src/tests/docsummary/attributedfw/attributedfw_test.cpp
@@ -23,6 +23,10 @@ using search::docsummary::test::SlimeValue;
using ElementVector = std::vector<uint32_t>;
+std::vector<char> as_vector(vespalib::stringref value) {
+ return {value.data(), value.data() + value.size()};
+}
+
class AttributeDFWTest : public ::testing::Test {
protected:
MockAttributeManager _attrs;
@@ -48,7 +52,8 @@ public:
_attrs.build_string_attribute("wset_str", { {"a", "b", "c"}, {} }, CollectionType::WSET);
_attrs.build_int_attribute("wset_int", BasicType::INT32, { {10, 20, 30}, {} }, CollectionType::WSET);
_attrs.build_float_attribute("wset_float", { {10.5, 20.5, 30.5}, {} }, CollectionType::WSET);
-
+ _attrs.build_string_attribute("single_str", { {"world"}, {}}, CollectionType::SINGLE);
+ _attrs.build_raw_attribute("single_raw", { {as_vector("hello")}, {} });
_state._attrCtx = _attrs.mgr().createContext();
}
~AttributeDFWTest() {}
@@ -59,17 +64,24 @@ public:
}
_writer = AttributeDFWFactory::create(_attrs.mgr(), field_name, filter_elements, _matching_elems_fields);
_writer->setIndex(0);
- EXPECT_TRUE(_writer->setFieldWriterStateIndex(0));
- _state._fieldWriterStates.resize(1);
+ auto attr = _state._attrCtx->getAttribute(field_name);
+ if (attr->hasMultiValue()) {
+ EXPECT_TRUE(_writer->setFieldWriterStateIndex(0));
+ _state._fieldWriterStates.resize(1);
+ } else {
+ EXPECT_FALSE(_writer->setFieldWriterStateIndex(0));
+ }
_field_name = field_name;
_state._attributes.resize(1);
- _state._attributes[0] = _state._attrCtx->getAttribute(field_name);
+ _state._attributes[0] = attr;
}
void expect_field(const vespalib::string& exp_slime_as_json, uint32_t docid) {
vespalib::Slime act;
vespalib::slime::SlimeInserter inserter(act);
- _writer->insertField(docid, nullptr, _state, inserter);
+ if (!_writer->isDefaultValue(docid, _state)) {
+ _writer->insertField(docid, nullptr, _state, inserter);
+ }
SlimeValue exp(exp_slime_as_json);
EXPECT_EQ(exp.slime, act);
@@ -150,4 +162,18 @@ TEST_F(AttributeDFWTest, filteres_matched_elements_in_wset_attribute)
expect_filtered({3}, "null");
}
+TEST_F(AttributeDFWTest, single_string)
+{
+ setup("single_str", false);
+ expect_field(R"("world")", 1);
+ expect_field("null", 2);
+}
+
+TEST_F(AttributeDFWTest, single_value_raw)
+{
+ setup("single_raw", false);
+ expect_field("x68656C6C6F", 1);
+ expect_field("null", 2);
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/searchsummary/src/vespa/searchsummary/docsummary/attributedfw.cpp b/searchsummary/src/vespa/searchsummary/docsummary/attributedfw.cpp
index e606c6f08bb..6f0f6737c0f 100644
--- a/searchsummary/src/vespa/searchsummary/docsummary/attributedfw.cpp
+++ b/searchsummary/src/vespa/searchsummary/docsummary/attributedfw.cpp
@@ -101,6 +101,11 @@ SingleAttrDFW::insertField(uint32_t docid, GetDocsumsState& state, Inserter &tar
target.insertString(vespalib::Memory(s.data(), s.size()));
break;
}
+ case BasicType::RAW: {
+ auto s = v.get_raw(docid);
+ target.insertData(vespalib::Memory(s.data(), s.size()));
+ break;
+ }
case BasicType::REFERENCE:
case BasicType::PREDICATE:
break; // Should never use attribute docsum field writer
diff --git a/searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.cpp b/searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.cpp
index 4bfaf105501..77dc38d3057 100644
--- a/searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.cpp
+++ b/searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.cpp
@@ -5,12 +5,14 @@
#include <vespa/searchlib/attribute/floatbase.h>
#include <vespa/searchlib/attribute/integerbase.h>
#include <vespa/searchlib/attribute/stringbase.h>
+#include <vespa/searchlib/attribute/single_raw_attribute.h>
#include <vespa/searchcommon/attribute/config.h>
#include <cassert>
using search::attribute::BasicType;
using search::attribute::CollectionType;
using search::attribute::Config;
+using search::attribute::SingleRawAttribute;
namespace search::docsummary::test {
@@ -29,8 +31,14 @@ MockAttributeManager::build_attribute(const vespalib::string& name, BasicType ty
for (const auto& docValues : values) {
uint32_t docId = 0;
attr->addDoc(docId);
- for (const auto& value : docValues) {
- attr->append(docId, value, 1);
+ attr->clearDoc(docId);
+ if (attr->hasMultiValue()) {
+ for (const auto& value : docValues) {
+ attr->append(docId, value, 1);
+ }
+ } else if (!docValues.empty()) {
+ assert(docValues.size() == 1);
+ attr->update(docId, docValues[0]);
}
attr->commit();
}
@@ -68,4 +76,11 @@ MockAttributeManager::build_int_attribute(const vespalib::string& name, BasicTyp
build_attribute<IntegerAttribute, int64_t>(name, type, col_type, values);
}
+void
+MockAttributeManager::build_raw_attribute(const vespalib::string& name,
+ const std::vector<std::vector<std::vector<char>>>& values)
+{
+ build_attribute<SingleRawAttribute, std::vector<char>>(name, BasicType::Type::RAW, CollectionType::SINGLE, values);
+}
+
}
diff --git a/searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.h b/searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.h
index 0c4f3a5cc9b..6cdcfcbb6db 100644
--- a/searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.h
+++ b/searchsummary/src/vespa/searchsummary/test/mock_attribute_manager.h
@@ -31,7 +31,8 @@ public:
void build_int_attribute(const vespalib::string& name, search::attribute::BasicType type,
const std::vector<std::vector<int64_t>>& values,
search::attribute::CollectionType col_type = search::attribute::CollectionType::ARRAY);
-
+ void build_raw_attribute(const vespalib::string& name,
+ const std::vector<std::vector<std::vector<char>>>& values);
};
}
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index 02f75c19907..18475a8e4d9 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -145,20 +145,20 @@ org.codehaus.plexus:plexus-sec-dispatcher:2.0
org.codehaus.plexus:plexus-utils:3.3.1
org.eclipse.collections:eclipse-collections:11.0.0
org.eclipse.collections:eclipse-collections-api:11.0.0
-org.eclipse.jetty:jetty-alpn-client:11.0.13
-org.eclipse.jetty:jetty-alpn-java-server:11.0.13
-org.eclipse.jetty:jetty-alpn-server:11.0.13
-org.eclipse.jetty:jetty-client:11.0.13
-org.eclipse.jetty:jetty-http:11.0.13
-org.eclipse.jetty:jetty-io:11.0.13
-org.eclipse.jetty:jetty-jmx:11.0.13
-org.eclipse.jetty:jetty-security:11.0.13
-org.eclipse.jetty:jetty-server:11.0.13
-org.eclipse.jetty:jetty-servlet:11.0.13
-org.eclipse.jetty:jetty-util:11.0.13
-org.eclipse.jetty.http2:http2-common:11.0.13
-org.eclipse.jetty.http2:http2-hpack:11.0.13
-org.eclipse.jetty.http2:http2-server:11.0.13
+org.eclipse.jetty:jetty-alpn-client:11.0.14
+org.eclipse.jetty:jetty-alpn-java-server:11.0.14
+org.eclipse.jetty:jetty-alpn-server:11.0.14
+org.eclipse.jetty:jetty-client:11.0.14
+org.eclipse.jetty:jetty-http:11.0.14
+org.eclipse.jetty:jetty-io:11.0.14
+org.eclipse.jetty:jetty-jmx:11.0.14
+org.eclipse.jetty:jetty-security:11.0.14
+org.eclipse.jetty:jetty-server:11.0.14
+org.eclipse.jetty:jetty-servlet:11.0.14
+org.eclipse.jetty:jetty-util:11.0.14
+org.eclipse.jetty.http2:http2-common:11.0.14
+org.eclipse.jetty.http2:http2-hpack:11.0.14
+org.eclipse.jetty.http2:http2-server:11.0.14
org.eclipse.jetty.toolchain:jetty-jakarta-servlet-api:5.0.2
org.eclipse.sisu:org.eclipse.sisu.inject:0.3.5
org.eclipse.sisu:org.eclipse.sisu.plexus:0.3.5
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
index 373928a3e22..fe1e2b46830 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
@@ -171,6 +171,8 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private static final String SLICES = "slices";
private static final String SLICE_ID = "sliceId";
private static final String DRY_RUN = "dryRun";
+ private static final String FROM_TIMESTAMP = "fromTimestamp";
+ private static final String TO_TIMESTAMP = "toTimestamp";
private final Clock clock;
private final Duration handlerTimeout;
@@ -1227,6 +1229,12 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
getProperty(request, CONTINUATION).map(ProgressToken::fromSerializedString).ifPresent(parameters::setResumeToken);
parameters.setPriority(DocumentProtocol.Priority.NORMAL_4);
+ getProperty(request, FROM_TIMESTAMP, unsignedLongParser).ifPresent(parameters::setFromTimestamp);
+ getProperty(request, TO_TIMESTAMP, unsignedLongParser).ifPresent(parameters::setToTimestamp);
+ if (Long.compareUnsigned(parameters.getFromTimestamp(), parameters.getToTimestamp()) > 0) {
+ throw new IllegalArgumentException("toTimestamp must be greater than, or equal to, fromTimestamp");
+ }
+
StorageCluster storageCluster = resolveCluster(cluster, clusters);
parameters.setRoute(storageCluster.name());
parameters.setBucketSpace(resolveBucket(storageCluster,
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
index b6ad7ba5570..851a0949266 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
@@ -223,6 +223,8 @@ public class DocumentV1ApiTest {
assertEquals("(all the things)", parameters.getDocumentSelection());
assertEquals(6000, parameters.getSessionTimeoutMs());
assertEquals(9, parameters.getTraceLevel());
+ assertEquals(1_000_000, parameters.getFromTimestamp());
+ assertEquals(2_000_000, parameters.getToTimestamp());
// Put some documents in the response
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0));
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1));
@@ -234,7 +236,7 @@ public class DocumentV1ApiTest {
parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.TIMEOUT, "timeout is OK");
});
response = driver.sendRequest("http://localhost/document/v1?cluster=content&bucketSpace=default&wantedDocumentCount=1025&concurrency=123" +
- "&selection=all%20the%20things&fieldSet=[id]&timeout=6&tracelevel=9");
+ "&selection=all%20the%20things&fieldSet=[id]&timeout=6&tracelevel=9&fromTimestamp=1000000&toTimestamp=2000000");
assertSameJson("""
{
"pathId": "/document/v1",
@@ -284,6 +286,8 @@ public class DocumentV1ApiTest {
assertEquals(6000, parameters.getTimeoutMs());
assertEquals(4, parameters.getSlices());
assertEquals(1, parameters.getSliceId());
+ assertEquals(0, parameters.getFromTimestamp()); // not set; 0 is default
+ assertEquals(0, parameters.getToTimestamp()); // not set; 0 is default
// Put some documents in the response
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0));
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1));
@@ -500,6 +504,15 @@ public class DocumentV1ApiTest {
"}", response.readAll());
assertEquals(200, response.getStatus());
+ // GET with from timestamp > to timestamp is an error
+ access.expect(parameters -> { fail("unreachable"); });
+ response = driver.sendRequest("http://localhost/document/v1/?cluster=content&fromTimestamp=100&toTimestamp=99");
+ assertSameJson("{" +
+ " \"pathId\": \"/document/v1/\"," +
+ " \"message\": \"toTimestamp must be greater than, or equal to, fromTimestamp\"" +
+ "}", response.readAll());
+ assertEquals(400, response.getStatus());
+
// GET with full document ID is a document get operation which returns 404 when no document is found
access.session.expect((id, parameters) -> {
assertEquals(doc1.getId(), id);
diff --git a/vespalib/src/tests/btree/btree_test.cpp b/vespalib/src/tests/btree/btree_test.cpp
index 2a465f2c60a..ef64549e16a 100644
--- a/vespalib/src/tests/btree/btree_test.cpp
+++ b/vespalib/src/tests/btree/btree_test.cpp
@@ -1064,6 +1064,7 @@ adjustAllocatedBytes(size_t nodeCount, size_t nodeSize)
TEST_F(BTreeTest, require_that_memory_usage_is_calculated)
{
+ constexpr size_t BASE = 163912;
typedef BTreeNodeAllocator<int32_t, int8_t,
btree::NoAggregated,
MyTraits::INTERNAL_SLOTS, MyTraits::LEAF_SLOTS> NodeAllocator;
@@ -1082,6 +1083,8 @@ TEST_F(BTreeTest, require_that_memory_usage_is_calculated)
const uint32_t initialLeafNodes = 128u;
mu.incAllocatedBytes(adjustAllocatedBytes(initialInternalNodes, sizeof(INode)));
mu.incAllocatedBytes(adjustAllocatedBytes(initialLeafNodes, sizeof(LNode)));
+ mu.incAllocatedBytes(BASE);
+ mu.incUsedBytes(BASE);
mu.incUsedBytes(sizeof(INode));
mu.incDeadBytes(sizeof(INode));
EXPECT_TRUE(assertMemoryUsage(mu, tm.getMemoryUsage()));
@@ -1112,6 +1115,8 @@ TEST_F(BTreeTest, require_that_memory_usage_is_calculated)
mu = vespalib::MemoryUsage();
mu.incAllocatedBytes(adjustAllocatedBytes(initialInternalNodes, sizeof(INode)));
mu.incAllocatedBytes(adjustAllocatedBytes(initialLeafNodes, sizeof(LNode)));
+ mu.incAllocatedBytes(BASE);
+ mu.incUsedBytes(BASE);
mu.incUsedBytes(sizeof(INode) * 2);
mu.incDeadBytes(sizeof(INode) * 2);
mu.incUsedBytes(sizeof(LNode));
diff --git a/vespalib/src/tests/datastore/array_store/array_store_test.cpp b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
index 063ac445933..97e1ddb985d 100644
--- a/vespalib/src/tests/datastore/array_store/array_store_test.cpp
+++ b/vespalib/src/tests/datastore/array_store/array_store_test.cpp
@@ -97,13 +97,13 @@ struct ArrayStoreTest : public TestT
uint32_t getBufferId(EntryRef ref) const {
return EntryRefType(ref).bufferId();
}
- void assertBufferState(EntryRef ref, const MemStats& expStats) const {
+ void assertBufferState(EntryRef ref, const MemStats& expStats) {
EXPECT_EQ(expStats._used, store.bufferState(ref).size());
EXPECT_EQ(expStats._hold, store.bufferState(ref).stats().hold_elems());
EXPECT_EQ(expStats._dead, store.bufferState(ref).stats().dead_elems());
}
- void assert_buffer_stats(EntryRef ref, const TestBufferStats& exp_stats) const {
- auto& state = store.bufferState(ref);
+ void assert_buffer_stats(EntryRef ref, const TestBufferStats& exp_stats) {
+ const auto& state = store.bufferState(ref);
EXPECT_EQ(exp_stats._used, state.size());
EXPECT_EQ(exp_stats._hold, state.stats().hold_elems());
EXPECT_EQ(exp_stats._dead, state.stats().dead_elems());
@@ -218,8 +218,8 @@ TEST_P(NumberStoreTest, control_static_sizes) {
#endif
EXPECT_EQ(112u, sizeof(NumberStoreTest::ArrayStoreType::SmallBufferType));
MemoryUsage usage = store.getMemoryUsage();
- EXPECT_EQ(960u, usage.allocatedBytes());
- EXPECT_EQ(32u, usage.usedBytes());
+ EXPECT_EQ(1312160u, usage.allocatedBytes());
+ EXPECT_EQ(1311232u, usage.usedBytes());
}
TEST_P(NumberStoreTest, add_and_get_small_arrays_of_trivial_type)
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index bffd32816a4..794be39ae9b 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -474,6 +474,7 @@ TEST(DataStoreTest, require_that_memory_stats_are_calculated)
TEST(DataStoreTest, require_that_memory_usage_is_calculated)
{
+ constexpr size_t BASE = 676;
MyStore s;
MyRef r = s.addEntry(10);
s.addEntry(20);
@@ -482,8 +483,8 @@ TEST(DataStoreTest, require_that_memory_usage_is_calculated)
s.holdBuffer(r.bufferId());
s.assign_generation(100);
vespalib::MemoryUsage m = s.getMemoryUsage();
- EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQ(5 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int) + BASE, m.allocatedBytes());
+ EXPECT_EQ(5 * sizeof(int) + BASE, m.usedBytes());
EXPECT_EQ(0 * sizeof(int), m.deadBytes());
EXPECT_EQ(5 * sizeof(int), m.allocatedBytesOnHold());
s.reclaim_memory(101);
@@ -491,27 +492,28 @@ TEST(DataStoreTest, require_that_memory_usage_is_calculated)
TEST(DataStoreTest, require_that_we_can_disable_elemement_hold_list)
{
+ constexpr size_t BASE = 676;
MyStore s;
MyRef r1 = s.addEntry(10);
MyRef r2 = s.addEntry(20);
MyRef r3 = s.addEntry(30);
(void) r3;
vespalib::MemoryUsage m = s.getMemoryUsage();
- EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int) + BASE, m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int) + BASE, m.usedBytes());
EXPECT_EQ(1 * sizeof(int), m.deadBytes());
EXPECT_EQ(0 * sizeof(int), m.allocatedBytesOnHold());
s.holdElem(r1, 1);
m = s.getMemoryUsage();
- EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int) + BASE, m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int) + BASE, m.usedBytes());
EXPECT_EQ(1 * sizeof(int), m.deadBytes());
EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
s.disableElemHoldList();
s.holdElem(r2, 1);
m = s.getMemoryUsage();
- EXPECT_EQ(MyRef::offsetSize() * sizeof(int), m.allocatedBytes());
- EXPECT_EQ(4 * sizeof(int), m.usedBytes());
+ EXPECT_EQ(MyRef::offsetSize() * sizeof(int) + BASE, m.allocatedBytes());
+ EXPECT_EQ(4 * sizeof(int) + BASE, m.usedBytes());
EXPECT_EQ(2 * sizeof(int), m.deadBytes());
EXPECT_EQ(1 * sizeof(int), m.allocatedBytesOnHold());
s.assign_generation(100);
@@ -536,30 +538,31 @@ void assertGrowStats(GrowthStats expSizes,
TEST(DataStoreTest, require_that_buffer_growth_works)
{
+ constexpr size_t BASE = 41032u;
// Always switch to new buffer, min size 4
assertGrowStats({ 4, 4, 4, 4, 8, 16, 16, 32, 64, 64 },
- { 4 }, 20, 4, 0);
+ { 4 }, 20 + BASE, 4, 0);
// Resize if buffer size is less than 4, min size 0
assertGrowStats({ 4, 4, 8, 32, 32, 64, 64, 128, 128, 128 },
- { 0, 1, 2, 4 }, 4, 0, 4);
+ { 0, 1, 2, 4 }, 4 + BASE, 0, 4);
// Always switch to new buffer, min size 16
assertGrowStats({ 16, 16, 16, 32, 32, 64, 128, 128, 128 },
- { 16 }, 68, 16, 0);
+ { 16 }, 68 + BASE, 16, 0);
// Resize if buffer size is less than 16, min size 0
assertGrowStats({ 16, 32, 32, 128, 128, 128, 128, 128, 128 },
- { 0, 1, 2, 4, 8, 16 }, 4, 0, 16);
+ { 0, 1, 2, 4, 8, 16 }, 4 + BASE, 0, 16);
// Resize if buffer size is less than 16, min size 4
assertGrowStats({ 16, 32, 32, 128, 128, 128, 128, 128, 128 },
- { 4, 8, 16 }, 20, 4, 16);
+ { 4, 8, 16 }, 20 + BASE, 4, 16);
// Always switch to new buffer, min size 0
assertGrowStats({ 1, 1, 1, 1, 1, 2, 2, 4, 8, 8, 16, 32 },
- { 0, 1 }, 4, 0, 0);
+ { 0, 1 }, 4 + BASE, 0, 0);
// Buffers with sizes larger than the huge page size of the mmap allocator.
ASSERT_EQ(524288u, HUGE_PAGE_ARRAY_SIZE);
assertGrowStats({ 262144, 524288, 524288, 524288 * 3, 524288 * 3, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5, 524288 * 5 },
{ 0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144 },
- 4, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5);
+ 4 + BASE, 0, HUGE_PAGE_ARRAY_SIZE / 2, HUGE_PAGE_ARRAY_SIZE * 5);
}
using RefType15 = EntryRefT<15>; // offsetSize=32768
diff --git a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
index 76dfdb79c01..cf62d238d53 100644
--- a/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store/unique_store_test.cpp
@@ -471,10 +471,13 @@ TEST_F(DoubleTest, nan_is_handled)
TEST_F(DoubleTest, control_memory_usage) {
EXPECT_EQ(464, sizeof(store));
- EXPECT_EQ(32u, store.get_values_memory_usage().allocatedBytes());
- EXPECT_EQ(98208u, store.get_dictionary_memory_usage().allocatedBytes());
- EXPECT_EQ(98240u, store.getMemoryUsage().allocatedBytes());
EXPECT_EQ(144u, sizeof(BufferState));
+ EXPECT_EQ(163908u, store.get_values_memory_usage().allocatedBytes());
+ EXPECT_EQ(163892u, store.get_values_memory_usage().usedBytes());
+ EXPECT_EQ(262120u, store.get_dictionary_memory_usage().allocatedBytes());
+ EXPECT_EQ(164176u, store.get_dictionary_memory_usage().usedBytes());
+ EXPECT_EQ(426028u, store.getMemoryUsage().allocatedBytes());
+ EXPECT_EQ(328068u, store.getMemoryUsage().usedBytes());
}
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
index 64e0c1599bd..7d4451556c8 100644
--- a/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
+++ b/vespalib/src/tests/datastore/unique_store_string_allocator/unique_store_string_allocator_test.cpp
@@ -56,11 +56,11 @@ struct TestBase : public ::testing::Test {
uint32_t get_buffer_id(EntryRef ref) const {
return EntryRefType(ref).bufferId();
}
- const BufferState &buffer_state(EntryRef ref) const {
+ BufferState &buffer_state(EntryRef ref) {
return allocator.get_data_store().getBufferState(get_buffer_id(ref));
}
- void assert_buffer_state(EntryRef ref, const TestBufferStats expStats) const {
- const auto & stats = buffer_state(ref).stats();
+ void assert_buffer_state(EntryRef ref, const TestBufferStats expStats) {
+ auto & stats = buffer_state(ref).stats();
EXPECT_EQ(expStats._used, buffer_state(ref).size());
EXPECT_EQ(expStats._hold, stats.hold_elems());
EXPECT_EQ(expStats._dead, stats.dead_elems());
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
index 3fa5f1188cd..784e95e3817 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodeallocator.h
@@ -163,7 +163,7 @@ public:
vespalib::string toString(BTreeNode::Ref ref) const;
vespalib::string toString(const BTreeNode * node) const;
- bool getCompacting(EntryRef ref) const { return _nodeStore.getCompacting(ref); }
+ bool getCompacting(EntryRef ref) { return _nodeStore.getCompacting(ref); }
std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst(const CompactionStrategy& compaction_strategy) { return _nodeStore.start_compact_worst(compaction_strategy); }
diff --git a/vespalib/src/vespa/vespalib/btree/btreenodestore.h b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
index 8fef0185674..73e68c3579f 100644
--- a/vespalib/src/vespa/vespalib/btree/btreenodestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreenodestore.h
@@ -182,7 +182,7 @@ public:
}
// Inherit doc from DataStoreT
- bool getCompacting(EntryRef ref) const {
+ bool getCompacting(EntryRef ref) {
return _store.getCompacting(ref);
}
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.h b/vespalib/src/vespa/vespalib/btree/btreestore.h
index c228c084e6d..9d98a9ca514 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.h
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.h
@@ -95,202 +95,90 @@ protected:
public:
BTreeStore();
-
BTreeStore(bool init);
-
~BTreeStore();
const NodeAllocatorType &getAllocator() const { return _allocator; }
- void
- disableFreeLists() {
+ void disableFreeLists() {
_store.disableFreeLists();
_allocator.disableFreeLists();
}
- void
- disableElemHoldList()
- {
+ void disableElemHoldList() {
_store.disableElemHoldList();
_allocator.disableElemHoldList();
}
- BTreeTypeRefPair
- allocNewBTree() {
+ BTreeTypeRefPair allocNewBTree() {
return _store.allocator<BTreeType>(BUFFERTYPE_BTREE).alloc();
}
- BTreeTypeRefPair
- allocBTree() {
+ BTreeTypeRefPair allocBTree() {
return _store.freeListAllocator<BTreeType, TreeReclaimer>(BUFFERTYPE_BTREE).alloc();
}
- BTreeTypeRefPair
- allocNewBTreeCopy(const BTreeType &rhs) {
+ BTreeTypeRefPair allocNewBTreeCopy(const BTreeType &rhs) {
return _store.allocator<BTreeType>(BUFFERTYPE_BTREE).alloc(rhs);
}
- BTreeTypeRefPair
- allocBTreeCopy(const BTreeType &rhs) {
+ BTreeTypeRefPair allocBTreeCopy(const BTreeType &rhs) {
return _store.freeListAllocator<BTreeType, datastore::DefaultReclaimer<BTreeType> >(BUFFERTYPE_BTREE).alloc(rhs);
}
- KeyDataTypeRefPair
- allocNewKeyData(uint32_t clusterSize);
-
- KeyDataTypeRefPair
- allocKeyData(uint32_t clusterSize);
-
- KeyDataTypeRefPair
- allocNewKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize);
-
- KeyDataTypeRefPair
- allocKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize);
-
- const KeyDataType *
- lower_bound(const KeyDataType *b, const KeyDataType *e,
- const KeyType &key, CompareT comp);
-
- void
- makeTree(EntryRef &ref,
- const KeyDataType *array, uint32_t clusterSize);
-
- void
- makeArray(EntryRef &ref, EntryRef leafRef, LeafNodeType *leafNode);
-
- bool
- insert(EntryRef &ref,
- const KeyType &key, const DataType &data,
- CompareT comp = CompareT());
-
- bool
- remove(EntryRef &ref,
- const KeyType &key,
- CompareT comp = CompareT());
-
- uint32_t
- getNewClusterSize(const KeyDataType *o,
- const KeyDataType *oe,
- AddIter a,
- AddIter ae,
- RemoveIter r,
- RemoveIter re,
- CompareT comp);
-
- void
- applyCluster(const KeyDataType *o,
- const KeyDataType *oe,
- KeyDataType *d,
- const KeyDataType *de,
- AddIter a,
- AddIter ae,
- RemoveIter r,
- RemoveIter re,
- CompareT comp);
-
-
- void
- applyModifyTree(BTreeType *tree,
- AddIter a,
- AddIter ae,
- RemoveIter r,
- RemoveIter re,
- CompareT comp);
-
- void
- applyBuildTree(BTreeType *tree,
- AddIter a,
- AddIter ae,
- RemoveIter r,
- RemoveIter re,
- CompareT comp);
-
- void
- applyNewArray(EntryRef &ref,
- AddIter aOrg,
- AddIter ae);
-
- void
- applyNewTree(EntryRef &ref,
- AddIter a,
- AddIter ae,
- CompareT comp);
-
- void
- applyNew(EntryRef &ref,
- AddIter a,
- AddIter ae,
- CompareT comp);
-
-
- bool
- applyCluster(EntryRef &ref,
- uint32_t clusterSize,
- AddIter a,
- AddIter ae,
- RemoveIter r,
- RemoveIter re,
- CompareT comp);
-
- void
- applyTree(BTreeType *tree,
- AddIter a,
- AddIter ae,
- RemoveIter r,
- RemoveIter re,
- CompareT comp);
-
- void
- normalizeTree(EntryRef &ref,
- BTreeType *tree,
- bool wasArray);
+ KeyDataTypeRefPair allocNewKeyData(uint32_t clusterSize);
+ KeyDataTypeRefPair allocKeyData(uint32_t clusterSize);
+ KeyDataTypeRefPair allocNewKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize);
+ KeyDataTypeRefPair allocKeyDataCopy(const KeyDataType *rhs, uint32_t clusterSize);
+
+ const KeyDataType * lower_bound(const KeyDataType *b, const KeyDataType *e, const KeyType &key, CompareT comp);
+
+ void makeTree(EntryRef &ref, const KeyDataType *array, uint32_t clusterSize);
+ void makeArray(EntryRef &ref, EntryRef leafRef, LeafNodeType *leafNode);
+ bool insert(EntryRef &ref, const KeyType &key, const DataType &data, CompareT comp = CompareT());
+
+ bool remove(EntryRef &ref, const KeyType &key,CompareT comp = CompareT());
+
+ uint32_t getNewClusterSize(const KeyDataType *o, const KeyDataType *oe, AddIter a, AddIter ae,
+ RemoveIter r, RemoveIter re, CompareT comp);
+
+ void applyCluster(const KeyDataType *o, const KeyDataType *oe, KeyDataType *d, const KeyDataType *de,
+ AddIter a, AddIter ae, RemoveIter r, RemoveIter re, CompareT comp);
+
+ void applyModifyTree(BTreeType *tree, AddIter a, AddIter ae, RemoveIter r, RemoveIter re, CompareT comp);
+ void applyBuildTree(BTreeType *tree, AddIter a, AddIter ae, RemoveIter r, RemoveIter re, CompareT comp);
+ void applyNewArray(EntryRef &ref, AddIter aOrg, AddIter ae);
+ void applyNewTree(EntryRef &ref, AddIter a, AddIter ae, CompareT comp);
+ void applyNew(EntryRef &ref, AddIter a, AddIter ae, CompareT comp);
+
+ bool applyCluster(EntryRef &ref, uint32_t clusterSize, AddIter a, AddIter ae,
+ RemoveIter r, RemoveIter re, CompareT comp);
+
+ void applyTree(BTreeType *tree, AddIter a, AddIter ae, RemoveIter r, RemoveIter re, CompareT comp);
+
+ void normalizeTree(EntryRef &ref, BTreeType *tree, bool wasArray);
/**
* Apply multiple changes at once.
*
* additions and removals should be sorted on key without duplicates.
* Overlap between additions and removals indicates updates.
*/
- void
- apply(EntryRef &ref,
- AddIter a,
- AddIter ae,
- RemoveIter r,
- RemoveIter re,
- CompareT comp = CompareT());
-
- void
- clear(const EntryRef ref);
-
- size_t
- size(const EntryRef ref) const;
+ void apply(EntryRef &ref, AddIter a, AddIter ae, RemoveIter r, RemoveIter re, CompareT comp = CompareT());
- size_t
- frozenSize(const EntryRef ref) const;
+ void clear(const EntryRef ref);
+ size_t size(const EntryRef ref) const;
+ size_t frozenSize(const EntryRef ref) const;
+ Iterator begin(const EntryRef ref) const;
+ ConstIterator beginFrozen(const EntryRef ref) const;
- Iterator
- begin(const EntryRef ref) const;
+ void beginFrozen(const EntryRef ref, std::vector<ConstIterator> &where) const;
- ConstIterator
- beginFrozen(const EntryRef ref) const;
-
- void
- beginFrozen(const EntryRef ref, std::vector<ConstIterator> &where) const;
-
- uint32_t
- getTypeId(RefType ref) const
- {
- return _store.getBufferState(ref.bufferId()).getTypeId();
+ uint32_t getTypeId(RefType ref) const {
+ return _store.getBufferMeta(ref.bufferId()).getTypeId();
}
- static bool
- isSmallArray(uint32_t typeId)
- {
- return typeId < clusterLimit;
- }
-
- bool
- isSmallArray(const EntryRef ref) const;
-
+ static bool isSmallArray(uint32_t typeId) { return typeId < clusterLimit; }
+ bool isSmallArray(const EntryRef ref) const;
static bool isBTree(uint32_t typeId) { return typeId == BUFFERTYPE_BTREE; }
bool isBTree(RefType ref) const { return isBTree(getTypeId(ref)); }
@@ -299,9 +187,7 @@ public:
* Cluster size == 0 means we have a tree for the given reference.
* The reference must be valid.
**/
- static uint32_t
- getClusterSize(uint32_t typeId)
- {
+ static uint32_t getClusterSize(uint32_t typeId) {
return (typeId < clusterLimit) ? typeId + 1 : 0;
}
@@ -310,11 +196,7 @@ public:
* Cluster size == 0 means we have a tree for the given reference.
* The reference must be valid.
**/
- uint32_t
- getClusterSize(RefType ref) const
- {
- return getClusterSize(getTypeId(ref));
- }
+ uint32_t getClusterSize(RefType ref) const { return getClusterSize(getTypeId(ref)); }
const BTreeType * getTreeEntry(RefType ref) const {
return _store.getEntry<BTreeType>(ref);
@@ -329,24 +211,18 @@ public:
}
// Inherit doc from DataStoreBase
- void
- reclaim_memory(generation_t oldest_used_gen)
- {
+ void reclaim_memory(generation_t oldest_used_gen) {
_allocator.reclaim_memory(oldest_used_gen);
_store.reclaim_memory(oldest_used_gen);
}
// Inherit doc from DataStoreBase
- void
- assign_generation(generation_t current_gen)
- {
+ void assign_generation(generation_t current_gen) {
_allocator.assign_generation(current_gen);
_store.assign_generation(current_gen);
}
- void
- reclaim_all_memory()
- {
+ void reclaim_all_memory() {
_allocator.reclaim_all_memory();
_store.reclaim_all_memory();
}
@@ -360,30 +236,23 @@ public:
return usage;
}
- void
- clearBuilder()
- {
+ void clearBuilder() {
_builder.clear();
}
- AggregatedType
- getAggregated(const EntryRef ref) const;
+ AggregatedType getAggregated(const EntryRef ref) const;
template <typename FunctionType>
- void
- foreach_unfrozen_key(EntryRef ref, FunctionType func) const;
+ void foreach_unfrozen_key(EntryRef ref, FunctionType func) const;
template <typename FunctionType>
- void
- foreach_frozen_key(EntryRef ref, FunctionType func) const;
+ void foreach_frozen_key(EntryRef ref, FunctionType func) const;
template <typename FunctionType>
- void
- foreach_unfrozen(EntryRef ref, FunctionType func) const;
+ void foreach_unfrozen(EntryRef ref, FunctionType func) const;
template <typename FunctionType>
- void
- foreach_frozen(EntryRef ref, FunctionType func) const;
+ void foreach_frozen(EntryRef ref, FunctionType func) const;
std::unique_ptr<vespalib::datastore::CompactingBuffers> start_compact_worst_btree_nodes(const CompactionStrategy& compaction_strategy);
void move_btree_nodes(const std::vector<EntryRef>& refs);
@@ -394,12 +263,10 @@ public:
private:
static constexpr size_t MIN_BUFFER_ARRAYS = 128u;
template <typename FunctionType, bool Frozen>
- void
- foreach_key(EntryRef ref, FunctionType func) const;
+ void foreach_key(EntryRef ref, FunctionType func) const;
template <typename FunctionType, bool Frozen>
- void
- foreach(EntryRef ref, FunctionType func) const;
+ void foreach(EntryRef ref, FunctionType func) const;
};
template <typename KeyT, typename DataT, typename AggrT, typename CompareT,
diff --git a/vespalib/src/vespa/vespalib/btree/btreestore.hpp b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
index 6b2c4d924cd..a19d0b34aa6 100644
--- a/vespalib/src/vespa/vespalib/btree/btreestore.hpp
+++ b/vespalib/src/vespa/vespalib/btree/btreestore.hpp
@@ -850,7 +850,7 @@ isSmallArray(const EntryRef ref) const
if (!ref.valid())
return true;
RefType iRef(ref);
- uint32_t typeId(_store.getBufferState(iRef.bufferId()).getTypeId());
+ uint32_t typeId(_store.getBufferMeta(iRef.bufferId()).getTypeId());
return typeId < clusterLimit;
}
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.h b/vespalib/src/vespa/vespalib/datastore/array_store.h
index d0ac6a216db..dd786e5f2e2 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.h
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.h
@@ -42,14 +42,14 @@ public:
using SmallBufferType = typename TypeMapperT::SmallBufferType;
using TypeMapper = TypeMapperT;
private:
- uint32_t _largeArrayTypeId;
- uint32_t _maxSmallArrayTypeId;
- size_t _maxSmallArraySize;
- DataStoreType _store;
- TypeMapper _mapper;
+ uint32_t _largeArrayTypeId;
+ uint32_t _maxSmallArrayTypeId;
+ size_t _maxSmallArraySize;
+ DataStoreType _store;
+ TypeMapper _mapper;
std::vector<SmallBufferType> _smallArrayTypes;
- LargeBufferType _largeArrayType;
- CompactionSpec _compaction_spec;
+ LargeBufferType _largeArrayType;
+ CompactionSpec _compaction_spec;
using generation_t = vespalib::GenerationHandler::generation_t;
void initArrayTypes(const ArrayStoreConfig &cfg, std::shared_ptr<alloc::MemoryAllocator> memory_allocator);
@@ -139,7 +139,7 @@ public:
static DataStoreBase& get_data_store_base(ArrayStore &self) { return self._store; }
// Should only be used for unit testing
- const BufferState &bufferState(EntryRef ref) const;
+ const BufferState &bufferState(EntryRef ref);
bool has_free_lists_enabled() const { return _store.has_free_lists_enabled(); }
bool has_held_buffers() const noexcept { return _store.has_held_buffers(); }
diff --git a/vespalib/src/vespa/vespalib/datastore/array_store.hpp b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
index a0fdf3f6563..301cff1e414 100644
--- a/vespalib/src/vespa/vespalib/datastore/array_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/array_store.hpp
@@ -59,9 +59,9 @@ ArrayStore<EntryT, RefT, TypeMapperT>::ArrayStore(const ArrayStoreConfig &cfg, s
template <typename EntryT, typename RefT, typename TypeMapperT>
vespalib::MemoryUsage
ArrayStore<EntryT, RefT, TypeMapperT>::getMemoryUsage() const {
- vespalib::MemoryUsage usage = _store.getMemoryUsage();
- //TODO Must be accounted
- // usage.incAllocatedBytes(_smallArrayTypes.capacity() * sizeof(SmallBufferType));
+ vespalib::MemoryUsage usage = _store.getMemoryUsage();
+ usage.incAllocatedBytes(_smallArrayTypes.capacity() * sizeof(SmallBufferType));
+ usage.incUsedBytes(_smallArrayTypes.size() * sizeof(SmallBufferType));
return usage;
}
@@ -197,7 +197,7 @@ ArrayStore<EntryT, RefT, TypeMapperT>::update_stat(const CompactionStrategy& com
template <typename EntryT, typename RefT, typename TypeMapperT>
const BufferState &
-ArrayStore<EntryT, RefT, TypeMapperT>::bufferState(EntryRef ref) const
+ArrayStore<EntryT, RefT, TypeMapperT>::bufferState(EntryRef ref)
{
RefT internalRef(ref);
return _store.getBufferState(internalRef.bufferId());
diff --git a/vespalib/src/vespa/vespalib/datastore/bufferstate.h b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
index 3f023b41c51..aa7f6dfdfa4 100644
--- a/vespalib/src/vespa/vespalib/datastore/bufferstate.h
+++ b/vespalib/src/vespa/vespalib/datastore/bufferstate.h
@@ -40,14 +40,14 @@ public:
private:
InternalBufferStats _stats;
- BufferFreeList _free_list;
+ BufferFreeList _free_list;
std::atomic<BufferTypeBase*> _typeHandler;
- Alloc _buffer;
- uint32_t _arraySize;
- uint16_t _typeId;
+ Alloc _buffer;
+ uint32_t _arraySize;
+ uint16_t _typeId;
std::atomic<State> _state;
- bool _disableElemHoldList : 1;
- bool _compacting : 1;
+ bool _disableElemHoldList : 1;
+ bool _compacting : 1;
public:
/**
@@ -130,7 +130,27 @@ public:
BufferTypeBase *getTypeHandler() { return _typeHandler.load(std::memory_order_relaxed); }
void resume_primary_buffer(uint32_t buffer_id);
+};
+class BufferAndMeta {
+public:
+ BufferAndMeta() : BufferAndMeta(nullptr, 0, 0) { }
+ BufferAndMeta(void* buffer, uint32_t typeId, uint32_t arraySize)
+ : _buffer(buffer),
+ _typeId(typeId),
+ _arraySize(arraySize)
+ { }
+ std::atomic<void*>& get_atomic_buffer() noexcept { return _buffer; }
+ void* get_buffer_relaxed() noexcept { return _buffer.load(std::memory_order_relaxed); }
+ const void* get_buffer_acquire() const noexcept { return _buffer.load(std::memory_order_acquire); }
+ uint32_t getTypeId() const { return _typeId; }
+ uint32_t getArraySize() const { return _arraySize; }
+ void setTypeId(uint32_t typeId) { _typeId = typeId; }
+ void setArraySize(uint32_t arraySize) { _arraySize = arraySize; }
+private:
+ std::atomic<void*> _buffer;
+ uint32_t _typeId;
+ uint32_t _arraySize;
};
}
diff --git a/vespalib/src/vespa/vespalib/datastore/datastore.h b/vespalib/src/vespa/vespalib/datastore/datastore.h
index f0ca9c90700..01b81d0fa58 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastore.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastore.h
@@ -49,7 +49,7 @@ public:
void reclaim_all_entry_refs() override;
- bool getCompacting(EntryRef ref) const {
+ bool getCompacting(EntryRef ref) {
return getBufferState(RefType(ref).bufferId()).getCompacting();
}
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index 6bf5f4c16c4..99bdb19576f 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -106,15 +106,8 @@ DataStoreBase::~DataStoreBase()
void
DataStoreBase::switch_primary_buffer(uint32_t typeId, size_t elemsNeeded)
{
- size_t buffer_id = primary_buffer_id(typeId);
- for (size_t i = 0; i < getNumBuffers(); ++i) {
- // start using next buffer
- buffer_id = nextBufferId(buffer_id);
- if (getBufferState(buffer_id).isFree()) {
- break;
- }
- }
- if (!getBufferState(buffer_id).isFree()) {
+ size_t buffer_id = getFirstFreeBufferId();
+ if ((buffer_id < _states.size()) && !getBufferState(buffer_id).isFree()) {
LOG_ABORT(vespalib::make_string("switch_primary_buffer(%u, %zu): did not find a free buffer",
typeId, elemsNeeded).c_str());
}
@@ -164,6 +157,23 @@ DataStoreBase::consider_grow_active_buffer(uint32_t type_id, size_t elems_needed
return true;
}
+uint32_t
+DataStoreBase::getFirstFreeBufferId() {
+ for (uint32_t buffer_id = 0; buffer_id < _states.size(); buffer_id++) {
+ if (getBufferState(buffer_id).isFree()) {
+ return buffer_id;
+ }
+ }
+ // Need next(new) buffer
+ return _states.size();
+}
+
+BufferState &
+DataStoreBase::getBufferState(uint32_t buffer_id) noexcept {
+ assert(buffer_id < _states.size());
+ return _states[buffer_id];
+}
+
void
DataStoreBase::switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded)
{
@@ -191,15 +201,8 @@ DataStoreBase::init_primary_buffers()
{
uint32_t numTypes = _primary_buffer_ids.size();
for (uint32_t typeId = 0; typeId < numTypes; ++typeId) {
- size_t buffer_id = 0;
- for (size_t i = 0; i < getNumBuffers(); ++i) {
- if (getBufferState(buffer_id).isFree()) {
- break;
- }
- // start using next buffer
- buffer_id = nextBufferId(buffer_id);
- }
- assert(getBufferState(buffer_id).isFree());
+ size_t buffer_id = getFirstFreeBufferId();
+ assert((buffer_id == _states.size()) || getBufferState(buffer_id).isFree());
onActive(buffer_id, typeId, 0u);
_primary_buffer_ids[typeId] = buffer_id;
}
@@ -258,18 +261,9 @@ DataStoreBase::dropBuffers()
}
vespalib::MemoryUsage
-DataStoreBase::getMemoryUsage() const
+DataStoreBase::getDynamicMemoryUsage() const
{
auto stats = getMemStats();
-
- size_t extra = 0;
- extra += _buffers.capacity() * sizeof(BufferAndTypeId);
- extra += _primary_buffer_ids.capacity() * sizeof(uint32_t);
- extra += _states.capacity() * sizeof(BufferState);
- extra += _typeHandlers.capacity() * sizeof(BufferTypeBase *);
- extra += _free_lists.capacity() * sizeof(FreeList);
- (void) extra; //TODO Must be accounted as static cost
-
vespalib::MemoryUsage usage;
usage.setAllocatedBytes(stats._allocBytes);
usage.setUsedBytes(stats._usedBytes);
@@ -278,6 +272,27 @@ DataStoreBase::getMemoryUsage() const
return usage;
}
+vespalib::MemoryUsage
+DataStoreBase::getMemoryUsage() const {
+ auto usage = getDynamicMemoryUsage();
+ size_t extra_allocated = 0;
+ extra_allocated += _buffers.capacity() * sizeof(BufferAndMeta);
+ extra_allocated += _primary_buffer_ids.capacity() * sizeof(uint32_t);
+ extra_allocated += _states.capacity() * sizeof(BufferState);
+ extra_allocated += _typeHandlers.capacity() * sizeof(BufferTypeBase *);
+ extra_allocated += _free_lists.capacity() * sizeof(FreeList);
+
+ size_t extra_used = 0;
+ extra_used += _buffers.size() * sizeof(BufferAndMeta);
+ extra_used += _primary_buffer_ids.size() * sizeof(uint32_t);
+ extra_used += _states.size() * sizeof(BufferState);
+ extra_used += _typeHandlers.size() * sizeof(BufferTypeBase *);
+ extra_used += _free_lists.size() * sizeof(FreeList);
+ usage.incAllocatedBytes(extra_allocated);
+ usage.incUsedBytes(extra_used);
+ return usage;
+}
+
void
DataStoreBase::holdBuffer(uint32_t bufferId)
{
@@ -386,9 +401,11 @@ DataStoreBase::onActive(uint32_t bufferId, uint32_t typeId, size_t elemsNeeded)
{
assert(typeId < _typeHandlers.size());
assert(bufferId < _numBuffers);
- _buffers[bufferId].setTypeId(typeId);
BufferState &state = getBufferState(bufferId);
- state.onActive(bufferId, typeId, _typeHandlers[typeId], elemsNeeded, _buffers[bufferId].get_atomic_buffer());
+ BufferAndMeta & bufferMeta = _buffers[bufferId];
+ state.onActive(bufferId, typeId, _typeHandlers[typeId], elemsNeeded, bufferMeta.get_atomic_buffer());
+ bufferMeta.setTypeId(typeId);
+ bufferMeta.setArraySize(state.getArraySize());
enableFreeList(bufferId);
}
@@ -449,7 +466,7 @@ DataStoreBase::start_compact_worst_buffers(CompactionSpec compaction_spec, const
compaction_strategy.get_active_buffers_ratio(),
compaction_strategy.getMaxDeadAddressSpaceRatio() / 2,
CompactionStrategy::DEAD_ADDRESS_SPACE_SLACK);
- uint32_t free_buffers = 0;
+ uint32_t free_buffers = _buffers.size() - _states.size();
for (uint32_t bufferId = 0; bufferId < _numBuffers; ++bufferId) {
const auto &state = getBufferState(bufferId);
if (state.isActive()) {
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 16426218d47..8749f2a27e6 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -65,6 +65,7 @@ public:
void switch_primary_buffer(uint32_t typeId, size_t elemsNeeded);
vespalib::MemoryUsage getMemoryUsage() const;
+ vespalib::MemoryUsage getDynamicMemoryUsage() const;
vespalib::AddressSpace getAddressSpaceUsage() const;
@@ -72,8 +73,8 @@ public:
* Get the primary buffer id for the given type id.
*/
uint32_t primary_buffer_id(uint32_t typeId) const { return _primary_buffer_ids[typeId]; }
- const BufferState &getBufferState(uint32_t bufferId) const { return _states[bufferId]; }
- BufferState &getBufferState(uint32_t bufferId) { return _states[bufferId]; }
+ BufferState &getBufferState(uint32_t buffer_id) noexcept;
+ const BufferAndMeta & getBufferMeta(uint32_t buffer_id) const { return _buffers[buffer_id]; }
uint32_t getNumBuffers() const { return _numBuffers; }
/**
@@ -210,15 +211,6 @@ private:
class BufferHold;
- /**
- * Get the next buffer id after the given buffer id.
- */
- uint32_t nextBufferId(uint32_t bufferId) {
- uint32_t ret = bufferId + 1;
- if (ret == _numBuffers)
- ret = 0;
- return ret;
- }
bool consider_grow_active_buffer(uint32_t type_id, size_t elems_needed);
void switch_or_grow_primary_buffer(uint32_t typeId, size_t elemsNeeded);
void markCompacting(uint32_t bufferId);
@@ -242,24 +234,11 @@ private:
void inc_hold_buffer_count();
void fallbackResize(uint32_t bufferId, size_t elementsNeeded);
+ uint32_t getFirstFreeBufferId();
virtual void reclaim_all_entry_refs() = 0;
- class BufferAndTypeId {
- public:
- BufferAndTypeId() : BufferAndTypeId(nullptr, 0) { }
- BufferAndTypeId(void* buffer, uint32_t typeId) : _buffer(buffer), _typeId(typeId) { }
- std::atomic<void*>& get_atomic_buffer() noexcept { return _buffer; }
- void* get_buffer_relaxed() noexcept { return _buffer.load(std::memory_order_relaxed); }
- const void* get_buffer_acquire() const noexcept { return _buffer.load(std::memory_order_acquire); }
- uint32_t getTypeId() const { return _typeId; }
- void setTypeId(uint32_t typeId) { _typeId = typeId; }
- private:
- std::atomic<void*> _buffer;
- uint32_t _typeId;
- };
-
- std::vector<BufferAndTypeId> _buffers; // For fast mapping with known types
+ std::vector<BufferAndMeta> _buffers; // For fast mapping with known types
// Provides a mapping from typeId -> primary buffer for that type.
// The primary buffer is used for allocations of new element(s) if no available slots are found in free lists.
diff --git a/vespalib/src/vespa/vespalib/datastore/entryref.h b/vespalib/src/vespa/vespalib/datastore/entryref.h
index a0016f4fdcb..752d660a097 100644
--- a/vespalib/src/vespa/vespalib/datastore/entryref.h
+++ b/vespalib/src/vespa/vespalib/datastore/entryref.h
@@ -38,8 +38,8 @@ public:
EntryRefT(const EntryRef & ref_) noexcept : EntryRef(ref_.ref()) {}
size_t offset() const noexcept { return _ref & (offsetSize() - 1); }
uint32_t bufferId() const noexcept { return _ref >> OffsetBits; }
- static size_t offsetSize() noexcept { return 1ul << OffsetBits; }
- static uint32_t numBuffers() noexcept { return 1 << BufferBits; }
+ static constexpr size_t offsetSize() noexcept { return 1ul << OffsetBits; }
+ static constexpr uint32_t numBuffers() noexcept { return 1 << BufferBits; }
};
vespalib::asciistream& operator<<(vespalib::asciistream& os, const EntryRef& ref);
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.h b/vespalib/src/vespa/vespalib/datastore/unique_store.h
index 1313d57fbab..f2d62020ab4 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.h
@@ -67,7 +67,8 @@ public:
Allocator& get_allocator() { return _allocator; }
const Allocator& get_allocator() const { return _allocator; }
IUniqueStoreDictionary& get_dictionary() { return *_dict; }
- inline const DataStoreType& get_data_store() const noexcept { return _allocator.get_data_store(); }
+ const DataStoreType& get_data_store() const noexcept { return _allocator.get_data_store(); }
+ DataStoreType& get_data_store() noexcept { return _allocator.get_data_store(); }
// Pass on hold list management to underlying store
void assign_generation(generation_t current_gen);
@@ -78,7 +79,7 @@ public:
uint32_t getNumUniques() const;
Builder getBuilder(uint32_t uniqueValuesHint);
- Enumerator getEnumerator(bool sort_unique_values) const;
+ Enumerator getEnumerator(bool sort_unique_values);
// Should only be used for unit testing
const BufferState &bufferState(EntryRef ref) const;
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
index d9d5f9fee7f..aef6ea07290 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store.hpp
@@ -221,7 +221,7 @@ UniqueStore<EntryT, RefT, Compare, Allocator>::getBuilder(uint32_t uniqueValuesH
template <typename EntryT, typename RefT, typename Compare, typename Allocator>
typename UniqueStore<EntryT, RefT, Compare, Allocator>::Enumerator
-UniqueStore<EntryT, RefT, Compare, Allocator>::getEnumerator(bool sort_unique_values) const
+UniqueStore<EntryT, RefT, Compare, Allocator>::getEnumerator(bool sort_unique_values)
{
return Enumerator(*_dict, _store, sort_unique_values);
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h
index c4baff2206b..a6e7c2c974e 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.h
@@ -30,9 +30,9 @@ private:
EnumValues _enumValues;
uint32_t _next_enum_val;
- void allocate_enum_values();
+ void allocate_enum_values(DataStoreBase &store);
public:
- UniqueStoreEnumerator(const IUniqueStoreDictionary &dict, const DataStoreBase &store, bool sort_unique_values);
+ UniqueStoreEnumerator(const IUniqueStoreDictionary &dict, DataStoreBase &store, bool sort_unique_values);
~UniqueStoreEnumerator();
void enumerateValue(EntryRef ref);
void enumerateValues();
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
index 52437fc765c..6d08a027bf1 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_enumerator.hpp
@@ -9,7 +9,7 @@
namespace vespalib::datastore {
template <typename RefT>
-UniqueStoreEnumerator<RefT>::UniqueStoreEnumerator(const IUniqueStoreDictionary &dict, const DataStoreBase &store, bool sort_unique_values)
+UniqueStoreEnumerator<RefT>::UniqueStoreEnumerator(const IUniqueStoreDictionary &dict, DataStoreBase &store, bool sort_unique_values)
: _dict_snapshot(dict.get_read_snapshot()),
_store(store),
_enumValues(),
@@ -19,7 +19,7 @@ UniqueStoreEnumerator<RefT>::UniqueStoreEnumerator(const IUniqueStoreDictionary
if (sort_unique_values) {
_dict_snapshot->sort();
}
- allocate_enum_values();
+ allocate_enum_values(store);
}
template <typename RefT>
@@ -40,11 +40,11 @@ UniqueStoreEnumerator<RefT>::enumerateValue(EntryRef ref)
template <typename RefT>
void
-UniqueStoreEnumerator<RefT>::allocate_enum_values()
+UniqueStoreEnumerator<RefT>::allocate_enum_values(DataStoreBase & store)
{
_enumValues.resize(RefType::numBuffers());
for (uint32_t bufferId = 0; bufferId < RefType::numBuffers(); ++bufferId) {
- const BufferState &state = _store.getBufferState(bufferId);
+ const BufferState &state = store.getBufferState(bufferId);
if (state.isActive()) {
_enumValues[bufferId].resize(state.get_used_arrays());
}
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
index 265478fbaf5..a85b73f423d 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_allocator.h
@@ -114,7 +114,7 @@ public:
EntryRef move_on_compact(EntryRef ref) override;
const UniqueStoreEntryBase& get_wrapped(EntryRef ref) const {
RefType iRef(ref);
- auto &state = _store.getBufferState(iRef.bufferId());
+ auto &state = _store.getBufferMeta(iRef.bufferId());
auto type_id = state.getTypeId();
if (type_id != 0) {
return *reinterpret_cast<const UniqueStoreEntryBase *>(_store.template getEntryArray<char>(iRef, state.getArraySize()));
@@ -124,7 +124,7 @@ public:
}
const char *get(EntryRef ref) const {
RefType iRef(ref);
- auto &state = _store.getBufferState(iRef.bufferId());
+ auto &state = _store.getBufferMeta(iRef.bufferId());
auto type_id = state.getTypeId();
if (type_id != 0) {
return reinterpret_cast<const UniqueStoreSmallStringEntry *>(_store.template getEntryArray<char>(iRef, state.getArraySize()))->value();
diff --git a/vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h b/vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h
index 73a812ccd0b..e507132a085 100644
--- a/vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h
+++ b/vespalib/src/vespa/vespalib/datastore/unique_store_string_comparator.h
@@ -25,10 +25,10 @@ protected:
const char *get(EntryRef ref) const {
if (ref.valid()) {
RefType iRef(ref);
- auto &state = _store.getBufferState(iRef.bufferId());
- auto type_id = state.getTypeId();
+ const auto &meta = _store.getBufferMeta(iRef.bufferId());
+ auto type_id = meta.getTypeId();
if (type_id != 0) {
- return reinterpret_cast<const UniqueStoreSmallStringEntry *>(_store.template getEntryArray<char>(iRef, state.getArraySize()))->value();
+ return reinterpret_cast<const UniqueStoreSmallStringEntry *>(_store.template getEntryArray<char>(iRef, meta.getArraySize()))->value();
} else {
return _store.template getEntry<WrappedExternalEntryType>(iRef)->value().c_str();
}
diff --git a/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java b/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java
index 272055dfd57..9ada45204e8 100644
--- a/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java
+++ b/zookeeper-server/zookeeper-server-common/src/main/java/com/yahoo/vespa/zookeeper/ZooKeeperRunner.java
@@ -68,12 +68,7 @@ public class ZooKeeperRunner implements Runnable {
Instant end = now.plus(START_TIMEOUT);
for (int attempt = 1; now.isBefore(end) && !executorService.isShutdown(); attempt++) {
try {
- log.log(Level.INFO, "Starting ZooKeeper server with " + path.toFile().getAbsolutePath() +
- ". Trying to establish ZooKeeper quorum (members: " +
- zookeeperServerHostnames(zookeeperServerConfig) + ", attempt " + attempt + ")");
- log.log(Level.INFO, "Current content of zookeeper config file at '" + path + "':\n" +
- Exceptions.uncheck(() -> Files.readString(path)));
- startServer(path); // Will block in a real implementation of VespaZooKeeperServer
+ startServer(path, attempt); // Will block in a real implementation of VespaZooKeeperServer
return;
} catch (RuntimeException e) {
String messagePart = "Starting " + serverDescription() + " failed on attempt " + attempt;
@@ -89,13 +84,21 @@ public class ZooKeeperRunner implements Runnable {
now = Instant.now();
}
}
+ // Failed, log config
+ log.log(Level.INFO, "Current content of zookeeper config file at '" + path + "':\n" +
+ Exceptions.uncheck(() -> Files.readString(path)));
}
private String serverDescription() {
return (server.reconfigurable() ? "" : "non-") + "reconfigurable ZooKeeper server";
}
- private void startServer(Path path) {
+ private void startServer(Path path, int attempt) {
+ if (attempt > 1)
+ log.log(Level.INFO, "Starting ZooKeeper server with " + path.toFile().getAbsolutePath() +
+ ". Trying to establish ZooKeeper quorum (members: " +
+ zookeeperServerHostnames(zookeeperServerConfig) + ", attempt " + attempt + ")");
+
// Note: Hack to make this work in ZooKeeper 3.6, where metrics provider class is
// loaded by using Thread.currentThread().getContextClassLoader() which does not work
// well in the container
diff --git a/zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java b/zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
index 3bc6becd257..38646be5c0b 100644
--- a/zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
+++ b/zookeeper-server/zookeeper-server/src/main/java/com/yahoo/vespa/zookeeper/VespaZooKeeperAdminImpl.java
@@ -54,7 +54,7 @@ public class VespaZooKeeperAdminImpl implements VespaZooKeeperAdmin {
private ZooKeeperAdmin createAdmin(String connectionSpec) {
return uncheck(() -> new ZooKeeperAdmin(connectionSpec, (int) sessionTimeout().toMillis(),
- (event) -> log.log(Level.INFO, event.toString()), new ZkClientConfigBuilder().toConfig()));
+ (event) -> log.log(Level.FINE, event.toString()), new ZkClientConfigBuilder().toConfig()));
}
/** Creates a node in zookeeper, with hostname as part of node name, this ensures that server is up and working before returning */