-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/DeploymentMetricsResponse.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java | 13
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java | 3
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java | 6
-rw-r--r--  configserver/src/test/resources/metrics/clustercontroller_metrics.json | 42
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/ContainerTest.java | 4
-rw-r--r--  container-core/src/test/java/com/yahoo/container/di/DirConfigSource.java | 4
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java | 8
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ApplicationReindexing.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java | 25
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java | 50
-rw-r--r--  document/src/main/java/com/yahoo/document/json/TokenBuffer.java | 35
-rw-r--r--  document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java | 1
-rw-r--r--  document/src/main/java/com/yahoo/document/json/readers/StructReader.java | 3
-rw-r--r--  document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java | 23
-rw-r--r--  metrics/src/tests/metrictest.cpp | 6
-rw-r--r--  metrics/src/vespa/metrics/countmetric.h | 24
-rw-r--r--  metrics/src/vespa/metrics/countmetric.hpp | 18
-rw-r--r--  metrics/src/vespa/metrics/countmetricvalues.h | 4
-rw-r--r--  metrics/src/vespa/metrics/metric.h | 7
-rw-r--r--  metrics/src/vespa/metrics/metricmanager.cpp | 24
-rw-r--r--  metrics/src/vespa/metrics/metricmanager.h | 4
-rw-r--r--  metrics/src/vespa/metrics/metricset.cpp | 5
-rw-r--r--  metrics/src/vespa/metrics/metricset.h | 25
-rw-r--r--  metrics/src/vespa/metrics/metricsnapshot.cpp | 8
-rw-r--r--  metrics/src/vespa/metrics/metricvalueset.h | 20
-rw-r--r--  metrics/src/vespa/metrics/metricvalueset.hpp | 22
-rw-r--r--  metrics/src/vespa/metrics/summetric.h | 6
-rw-r--r--  metrics/src/vespa/metrics/valuemetric.h | 17
-rw-r--r--  metrics/src/vespa/metrics/valuemetric.hpp | 26
-rw-r--r--  metrics/src/vespa/metrics/valuemetricvalues.h | 24
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java | 9
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java | 1
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java | 10
-rw-r--r--  searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp | 41
-rw-r--r--  searchcore/src/tests/proton/index/fusionrunner_test.cpp | 14
-rw-r--r--  searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp | 5
-rw-r--r--  searchlib/src/tests/diskindex/fusion/fusion_test.cpp | 25
-rw-r--r--  searchlib/src/tests/memoryindex/field_index/field_index_test.cpp | 23
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h | 19
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp | 18
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/fieldwriter.h | 43
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp | 128
-rw-r--r--  searchlib/src/vespa/searchlib/diskindex/indexbuilder.h | 61
-rw-r--r--  searchlib/src/vespa/searchlib/test/diskindex/testdiskindex.cpp | 20
-rw-r--r--  storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp | 15
-rw-r--r--  vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java | 2
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java | 6
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java | 2
56 files changed, 395 insertions, 538 deletions
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/DeploymentMetricsResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/DeploymentMetricsResponse.java
index b2ebcee3a58..cf3ae88272e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/DeploymentMetricsResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/response/DeploymentMetricsResponse.java
@@ -40,11 +40,6 @@ public class DeploymentMetricsResponse extends SlimeJsonResponse {
metrics.setDouble("diskUtil", disk.util());
metrics.setDouble("diskFeedBlockLimit", disk.feedBlockLimit());
});
-
- aggregator.reindexingProgress().ifPresent(reindexingProgress -> {
- Cursor progressObject = cluster.setObject("reindexingProgress");
- reindexingProgress.forEach(progressObject::setDouble);
- });
}
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
index 19cb31b6575..c1bc1027690 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
@@ -32,7 +32,6 @@ import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
/**
@@ -152,10 +151,6 @@ public class ClusterDeploymentMetricsRetriever {
values.field("cluster-controller.resource_usage.memory_limit.last").asDouble())
.addDiskUsage(values.field("cluster-controller.resource_usage.max_disk_utilization.max").asDouble(),
values.field("cluster-controller.resource_usage.disk_limit.last").asDouble()));
- optionalDouble(values.field("reindexing.progress.last")).ifPresent(progress -> {
- if (progress < 0 || progress >= 1) return;
- aggregator.get().addReindexingProgress(metric.field("dimensions").field("documenttype").asString(), progress);
- });
break;
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
index ba1dd20bb2c..6e41bea2c64 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
@@ -1,8 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.metrics;
-import java.util.HashMap;
-import java.util.Map;
import java.util.Optional;
/**
@@ -17,7 +15,6 @@ public class DeploymentMetricsAggregator {
private Double documentCount;
private ResourceUsage memoryUsage;
private ResourceUsage diskUsage;
- private Map<String, Double> reindexingProgress;
public synchronized DeploymentMetricsAggregator addFeedLatency(double sum, double count) {
this.feed = combineLatency(this.feed, sum, count);
@@ -49,12 +46,6 @@ public class DeploymentMetricsAggregator {
return this;
}
- public synchronized DeploymentMetricsAggregator addReindexingProgress(String documentType, double progress) {
- if (reindexingProgress == null) this.reindexingProgress = new HashMap<>();
- this.reindexingProgress.put(documentType, progress);
- return this;
- }
-
public Optional<Double> aggregateFeedLatency() {
return Optional.ofNullable(feed).map(m -> m.sum / m.count).filter(num -> !num.isNaN());
}
@@ -89,10 +80,6 @@ public class DeploymentMetricsAggregator {
return Optional.ofNullable(diskUsage);
}
- public Optional<Map<String, Double>> reindexingProgress() {
- return Optional.ofNullable(reindexingProgress);
- }
-
private static LatencyMetrics combineLatency(LatencyMetrics metricsOrNull, double sum, double count) {
return Optional.ofNullable(metricsOrNull).orElseGet(LatencyMetrics::new).combine(sum, count);
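
A minimal, self-contained sketch of the lazy-default accumulate idiom that combineLatency uses above: reuse the existing accumulator if one exists, otherwise create a fresh one, then fold the new sample into it. The class and method names below are illustrative, not part of the Vespa code.

```java
import java.util.Optional;

// Illustrative accumulator; mirrors the Optional.ofNullable(...).orElseGet(...) idiom above.
class LatencySample {

    private double sum;
    private double count;

    LatencySample combine(double sum, double count) {
        this.sum += sum;
        this.count += count;
        return this;
    }

    double average() { return count == 0 ? Double.NaN : sum / count; }

    // Reuse the existing accumulator if present, otherwise lazily create one.
    static LatencySample combineLatency(LatencySample existingOrNull, double sum, double count) {
        return Optional.ofNullable(existingOrNull).orElseGet(LatencySample::new).combine(sum, count);
    }
}
```
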
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
index 37688e2676c..2353c480a34 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
@@ -20,6 +20,7 @@ import com.yahoo.path.Path;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.text.Utf8;
import com.yahoo.transaction.Transaction;
+import com.yahoo.vespa.config.server.NotFoundException;
import com.yahoo.vespa.config.server.UserConfigDefinitionRepo;
import com.yahoo.vespa.config.server.deploy.ZooKeeperClient;
import com.yahoo.vespa.config.server.deploy.ZooKeeperDeployer;
@@ -171,7 +172,7 @@ public class SessionZooKeeperClient {
public ApplicationId readApplicationId() {
return curator.getData(applicationIdPath()).map(d -> ApplicationId.fromSerializedForm(Utf8.toString(d)))
- .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
+ .orElseThrow(() -> new NotFoundException("Could not find application id for session " + sessionId));
}
private Path tagsPath() {
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
index df52ab624c6..8701bbfdcb2 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
@@ -15,7 +15,6 @@ import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiConsumer;
-import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
@@ -70,9 +69,7 @@ public class ClusterDeploymentMetricsRetrieverTest {
new DeploymentMetricsAggregator()
.addDocumentCount(6000.0)
.addMemoryUsage(0.89074, 0.8)
- .addDiskUsage(0.83517, 0.75)
- .addReindexingProgress("test_artifacts", 0.71)
- .addReindexingProgress("announcements", 0),
+ .addDiskUsage(0.83517, 0.75),
aggregatorMap.get(expectedContentCluster)
);
@@ -115,7 +112,6 @@ public class ClusterDeploymentMetricsRetrieverTest {
compareOptionals(expected.diskUsage(), actual.diskUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit()));
compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.util(), b.util()));
compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit()));
- assertEquals(expected.reindexingProgress(), actual.reindexingProgress());
}
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
diff --git a/configserver/src/test/resources/metrics/clustercontroller_metrics.json b/configserver/src/test/resources/metrics/clustercontroller_metrics.json
index c60000b34de..279635ff76f 100644
--- a/configserver/src/test/resources/metrics/clustercontroller_metrics.json
+++ b/configserver/src/test/resources/metrics/clustercontroller_metrics.json
@@ -20,48 +20,6 @@
},
{
"values": {
- "reindexing.progress.last": 0.71
- },
- "dimensions": {
- "clustertype": "content",
- "clusterid": "content_cluster_id",
- "documenttype": "test_artifacts"
- }
- },
- {
- "values": {
- "reindexing.progress.last": 1
- },
- "dimensions": {
- "clustertype": "content",
- "clusterid": "content_cluster_id",
- "documenttype": "builds"
- }
- },
- {
- "values": {
- "reindexing.progress.last": 0
- },
- "dimensions": {
- "clustertype": "content",
- "clusterid": "content_cluster_id",
- "documenttype": "announcements",
- "state": "running"
- }
- },
- {
- "values": {
- "reindexing.progress.last": -1
- },
- "dimensions": {
- "clustertype": "content",
- "clusterid": "content_cluster_id",
- "documenttype": "announcements",
- "state": "successful"
- }
- },
- {
- "values": {
"some.other.metrics": 1
},
"dimensions": {
diff --git a/container-core/src/test/java/com/yahoo/container/di/ContainerTest.java b/container-core/src/test/java/com/yahoo/container/di/ContainerTest.java
index d869562cc19..741f381c44c 100644
--- a/container-core/src/test/java/com/yahoo/container/di/ContainerTest.java
+++ b/container-core/src/test/java/com/yahoo/container/di/ContainerTest.java
@@ -292,10 +292,12 @@ public class ContainerTest extends ContainerTestBase {
// expect to time out
}
+ dirConfigSource.clearCheckedConfigs();
writeBootstrapConfigs("myId2", ComponentTakingConfig.class);
container.reloadConfig(3);
- assertNotNull(newGraph.get(10, TimeUnit.SECONDS));
+ dirConfigSource.awaitConfigChecked(10_000);
+ assertNotNull(newGraph.get(1, TimeUnit.SECONDS));
container.shutdownConfigRetriever();
container.shutdown(newGraph.get());
diff --git a/container-core/src/test/java/com/yahoo/container/di/DirConfigSource.java b/container-core/src/test/java/com/yahoo/container/di/DirConfigSource.java
index 3d1d052b35b..a4abfd0971e 100644
--- a/container-core/src/test/java/com/yahoo/container/di/DirConfigSource.java
+++ b/container-core/src/test/java/com/yahoo/container/di/DirConfigSource.java
@@ -13,6 +13,8 @@ import java.nio.file.Files;
import java.util.HashSet;
import java.util.Set;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
/**
* @author Tony Vaagenes
* @author gjoranv
@@ -49,7 +51,7 @@ class DirConfigSource {
synchronized void awaitConfigChecked(long millis) throws InterruptedException {
long remaining, doom = System.currentTimeMillis() + millis;
while ( ! doubleChecked && (remaining = doom - System.currentTimeMillis()) > 0) wait(remaining);
- Assertions.assertTrue(doubleChecked, "no config was checked more than once during " + millis + " millis");
+ assertTrue(doubleChecked, "some config should be checked more than once during " + millis + " millis; checked ones: " + checked);
}
ConfigSource configSource() {
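
The awaitConfigChecked helper touched above uses the classic deadline-bounded monitor wait. A minimal sketch of that pattern, with an illustrative class name and flag that are not part of the Vespa test code:

```java
// Sketch only: wait on the object monitor until a flag is set or the deadline ("doom") passes.
class FlagLatch {

    private boolean flag = false;

    synchronized void set() {
        flag = true;
        notifyAll();
    }

    /** Returns true if the flag was set within the given number of milliseconds. */
    synchronized boolean await(long millis) throws InterruptedException {
        long remaining, doom = System.currentTimeMillis() + millis;
        while ( ! flag && (remaining = doom - System.currentTimeMillis()) > 0)
            wait(remaining);
        return flag;
    }
}
```
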
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
index d702b6b09f2..3759a327c3d 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
@@ -23,13 +23,11 @@ public class ClusterMetrics {
private final String clusterId;
private final String clusterType;
private final Map<String, Double> metrics;
- private final Map<String, Double> reindexingProgress;
- public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics, Map<String, Double> reindexingProgress) {
+ public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics) {
this.clusterId = clusterId;
this.clusterType = clusterType;
this.metrics = Map.copyOf(metrics);
- this.reindexingProgress = Map.copyOf(reindexingProgress);
}
public String getClusterId() {
@@ -75,8 +73,4 @@ public class ClusterMetrics {
public Optional<Double> diskFeedBlockLimit() {
return Optional.ofNullable(metrics.get(DISK_FEED_BLOCK_LIMIT));
}
-
- public Map<String, Double> reindexingProgress() {
- return reindexingProgress;
- }
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ApplicationReindexing.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ApplicationReindexing.java
index d1ecae63f79..366ba1625c8 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ApplicationReindexing.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ApplicationReindexing.java
@@ -54,6 +54,8 @@ public class ApplicationReindexing {
public static class Cluster {
+ public static final ApplicationReindexing.Cluster EMPTY = new ApplicationReindexing.Cluster(Map.of(), Map.of());
+
private final Map<String, Long> pending;
private final Map<String, Status> ready;
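
The new Cluster.EMPTY constant above supports null-free lookups in NotificationsDb (clusters().getOrDefault(id, Cluster.EMPTY)). A small sketch of that empty-sentinel pattern, using hypothetical names rather than the Vespa classes:

```java
import java.util.Map;

// Hypothetical value type; demonstrates the shared immutable EMPTY sentinel + getOrDefault lookup.
final class ClusterState {

    static final ClusterState EMPTY = new ClusterState(Map.of());

    private final Map<String, Double> progressByDocumentType;

    ClusterState(Map<String, Double> progressByDocumentType) {
        this.progressByDocumentType = Map.copyOf(progressByDocumentType);
    }

    Map<String, Double> progressByDocumentType() { return progressByDocumentType; }

    // Callers never see null and never need a containsKey() check.
    static ClusterState forCluster(Map<String, ClusterState> byCluster, String clusterId) {
        return byCluster.getOrDefault(clusterId, EMPTY);
    }
}
```
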
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
index f196ca610c8..c48f6a34441 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
@@ -7,6 +7,7 @@ import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.ApplicationReindexing;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.yolean.Exceptions;
@@ -68,7 +69,8 @@ public class DeploymentMetricsMaintainer extends ControllerMaintainer {
lockedInstance -> lockedInstance.with(existingDeployment.zone(), newMetrics)
.recordActivityAt(now, existingDeployment.zone())));
- controller().notificationsDb().setDeploymentMetricsNotifications(deploymentId, clusterMetrics);
+ ApplicationReindexing applicationReindexing = controller().serviceRegistry().configServer().getReindexing(deploymentId);
+ controller().notificationsDb().setDeploymentMetricsNotifications(deploymentId, clusterMetrics, applicationReindexing);
});
} catch (Exception e) {
failures.incrementAndGet();
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
index a2960754570..cc84e012960 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
@@ -9,6 +9,7 @@ import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.ApplicationReindexing;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import java.time.Clock;
@@ -20,6 +21,7 @@ import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ApplicationReindexing.Cluster;
import static com.yahoo.vespa.hosted.controller.notification.Notification.Level;
import static com.yahoo.vespa.hosted.controller.notification.Notification.Type;
@@ -116,17 +118,18 @@ public class NotificationsDb {
* - that are (Level.error) or are nearly (Level.warning) feed blocked,
* - that are (Level.info) currently reindexing at least 1 document type.
*/
- public void setDeploymentMetricsNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics) {
+ public void setDeploymentMetricsNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics, ApplicationReindexing applicationReindexing) {
Instant now = clock.instant();
List<Notification> changed = List.of();
List<Notification> newNotifications = clusterMetrics.stream()
.flatMap(metric -> {
NotificationSource source = NotificationSource.from(deploymentId, ClusterSpec.Id.from(metric.getClusterId()));
+ Cluster cluster = applicationReindexing.clusters().getOrDefault(metric.getClusterId(), Cluster.EMPTY);
return Stream.of(createFeedBlockNotification(source, now, metric),
- createReindexNotification(source, now, metric));
+ createReindexNotification(source, now, cluster));
})
.flatMap(Optional::stream)
- .collect(Collectors.toUnmodifiableList());
+ .toList();
NotificationSource deploymentSource = NotificationSource.from(deploymentId);
try (Mutex lock = curatorDb.lockNotifications(deploymentSource.tenant())) {
@@ -139,7 +142,7 @@ public class NotificationsDb {
!deploymentSource.contains(notification.source())),
// ... and add the new notifications for this deployment
newNotifications.stream())
- .collect(Collectors.toUnmodifiableList());
+ .toList();
if (!initial.equals(updated)) {
curatorDb.writeNotifications(deploymentSource.tenant(), updated);
}
@@ -170,16 +173,18 @@ public class NotificationsDb {
List<String> messages = Stream.concat(memoryStatus.stream(), diskStatus.stream())
.filter(status -> status.getFirst() == level) // Do not mix message from different levels
.map(Pair::getSecond)
- .collect(Collectors.toUnmodifiableList());
+ .toList();
return Optional.of(new Notification(at, Type.feedBlock, level, source, messages));
}
- private static Optional<Notification> createReindexNotification(NotificationSource source, Instant at, ClusterMetrics metric) {
- if (metric.reindexingProgress().isEmpty()) return Optional.empty();
- List<String> messages = metric.reindexingProgress().entrySet().stream()
- .map(entry -> Text.format("document type '%s' (%.1f%% done)", entry.getKey(), 100 * entry.getValue()))
+ private static Optional<Notification> createReindexNotification(NotificationSource source, Instant at, Cluster cluster) {
+ List<String> messages = cluster.ready().entrySet().stream()
+ .filter(entry -> entry.getValue().progress().isPresent())
+ .map(entry -> Text.format("document type '%s'%s (%.1f%% done)",
+ entry.getKey(), entry.getValue().cause().map(s -> " " + s).orElse(""), 100 * entry.getValue().progress().get()))
.sorted()
- .collect(Collectors.toUnmodifiableList());
+ .toList();
+ if (messages.isEmpty()) return Optional.empty();
return Optional.of(new Notification(at, Type.reindex, Level.info, source, messages));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
index 3bd4be3bb23..9a2870f53f9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
@@ -113,8 +113,8 @@ public class DeploymentMetricsMaintainerTest {
@Test
void cluster_metric_aggregation_test() {
List<ClusterMetrics> clusterMetrics = List.of(
- new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0), Map.of()),
- new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0), Map.of()));
+ new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0)),
+ new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0)));
DeploymentMetrics deploymentMetrics = DeploymentMetricsMaintainer.updateDeploymentMetrics(DeploymentMetrics.none, clusterMetrics);
@@ -126,7 +126,7 @@ public class DeploymentMetricsMaintainerTest {
}
private void setMetrics(ApplicationId application, Map<String, Double> metrics) {
- var clusterMetrics = new ClusterMetrics("default", "container", metrics, Map.of());
+ var clusterMetrics = new ClusterMetrics("default", "container", metrics);
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, ZoneId.from("dev", "us-east-1")), clusterMetrics);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
index 75e2bbb06e7..fad85ef9b48 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
@@ -83,7 +83,7 @@ public class TrafficShareUpdaterTest {
}
private void setQpsMetric(double qps, ApplicationId application, ZoneId zone, DeploymentTester tester) {
- var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps), Map.of());
+ var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps));
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, zone), clusterMetrics);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
index cd1debc71fd..834777abb62 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
@@ -14,6 +14,7 @@ import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.ApplicationReindexing;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMailer;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
@@ -38,6 +39,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
+import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ApplicationReindexing.Cluster;
import static com.yahoo.vespa.hosted.controller.notification.Notification.Level;
import static com.yahoo.vespa.hosted.controller.notification.Notification.Type;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -49,6 +51,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
*/
public class NotificationsDbTest {
+ private static final ApplicationReindexing emptyReindexing = new ApplicationReindexing(false, Map.of());
private static final TenantName tenant = TenantName.from("tenant1");
private static final Email email = new Email("user1@example.com", true);
private static final CloudTenant cloudTenant = new CloudTenant(tenant,
@@ -154,23 +157,21 @@ public class NotificationsDbTest {
@Test
void deployment_metrics_notify_test() {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("prod", "us-south-3"));
- NotificationSource sourceCluster1 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster1"));
- List<Notification> expected = new ArrayList<>(notifications);
// No metrics, no new notification
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of());
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(), emptyReindexing);
assertEquals(0, mailer.inbox(email.getEmailAddress()).size());
// Metrics that contain none of the feed block metrics does not create new notification
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null, Map.of())));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null)), emptyReindexing);
assertEquals(0, mailer.inbox(email.getEmailAddress()).size());
// One resource is at warning
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.88, 0.9, 0.3, 0.5, Map.of())));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.88, 0.9, 0.3, 0.5)), emptyReindexing);
assertEquals(1, mailer.inbox(email.getEmailAddress()).size());
// One resource over the limit
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5, Map.of())));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5)), emptyReindexing);
assertEquals(2, mailer.inbox(email.getEmailAddress()).size());
}
@@ -181,29 +182,29 @@ public class NotificationsDbTest {
List<Notification> expected = new ArrayList<>(notifications);
// No metrics, no new notification
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of());
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(), emptyReindexing);
assertEquals(expected, curatorDb.readNotifications(tenant));
// Metrics that contain none of the feed block metrics does not create new notification
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null, Map.of())));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null)), emptyReindexing);
assertEquals(expected, curatorDb.readNotifications(tenant));
// Metrics that only contain util or limit (should not be possible) should not cause any issues
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5, Map.of())));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5)), emptyReindexing);
assertEquals(expected, curatorDb.readNotifications(tenant));
// One resource is at warning
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.88, 0.9, 0.3, 0.5, Map.of())));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.88, 0.9, 0.3, 0.5)), emptyReindexing);
expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 88.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
// Both resources over the limit
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5, Map.of())));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5)), emptyReindexing);
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1, "disk (usage: 95.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
// One resource at warning, one at error: Only show error message
- notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5, Map.of())));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5)), emptyReindexing);
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1,
"memory (usage: 70.0%, feed block limit: 50.0%)", "disk (usage: 95.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
@@ -218,19 +219,27 @@ public class NotificationsDbTest {
List<Notification> expected = new ArrayList<>(notifications);
// Cluster1 and cluster2 are having feed block issues, cluster 3 is reindexing
+ ApplicationReindexing applicationReindexing1 = new ApplicationReindexing(true, Map.of(
+ "cluster3", new Cluster(Map.of(), Map.of(
+ "announcements", reindexingStatus("reindexing due to a schema change", 0.75),
+ "build", reindexingStatus(null, 0.50)))));
notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(
- clusterMetrics("cluster1", 0.88, 0.9, 0.3, 0.5, Map.of()), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75, Map.of()), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9, Map.of("announcements", 0.75, "build", 0.5))));
+ clusterMetrics("cluster1", 0.88, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9)), applicationReindexing1);
expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 88.0%, feed block limit: 90.0%)"));
expected.add(notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)"));
- expected.add(notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' (75.0% done)", "document type 'build' (50.0% done)"));
+ expected.add(notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' reindexing due to a schema change (75.0% done)", "document type 'build' (50.0% done)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
// Cluster1 improves, while cluster3 starts having feed block issues and finishes reindexing 'build' documents
+ ApplicationReindexing applicationReindexing2 = new ApplicationReindexing(true, Map.of(
+ "cluster3", new Cluster(Map.of(), Map.of(
+ "announcements", reindexingStatus("reindexing due to a schema change", 0.90),
+ "build", reindexingStatus(null, null)))));
notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(
- clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5, Map.of()), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75, Map.of()), clusterMetrics("cluster3", 0.78, 0.8, 0.2, 0.9, Map.of("announcements", 0.9))));
+ clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.78, 0.8, 0.2, 0.9)), applicationReindexing2);
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)"));
expected.set(7, notification(12345, Type.feedBlock, Level.warning, sourceCluster3, "disk (usage: 78.0%, feed block limit: 80.0%)"));
- expected.set(8, notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' (90.0% done)"));
+ expected.set(8, notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' reindexing due to a schema change (90.0% done)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
}
@@ -250,13 +259,16 @@ public class NotificationsDbTest {
}
private static ClusterMetrics clusterMetrics(String clusterId,
- Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit,
- Map<String, Double> reindexingProgress) {
+ Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit) {
Map<String, Double> metrics = new HashMap<>();
if (diskUtil != null) metrics.put(ClusterMetrics.DISK_UTIL, diskUtil);
if (diskLimit != null) metrics.put(ClusterMetrics.DISK_FEED_BLOCK_LIMIT, diskLimit);
if (memoryUtil != null) metrics.put(ClusterMetrics.MEMORY_UTIL, memoryUtil);
if (memoryLimit != null) metrics.put(ClusterMetrics.MEMORY_FEED_BLOCK_LIMIT, memoryLimit);
- return new ClusterMetrics(clusterId, "content", metrics, reindexingProgress);
+ return new ClusterMetrics(clusterId, "content", metrics);
+ }
+
+ private static ApplicationReindexing.Status reindexingStatus(String causeOrNull, Double progressOrNull) {
+ return new ApplicationReindexing.Status(null, null, null, null, null, progressOrNull, null, causeOrNull);
}
}
diff --git a/document/src/main/java/com/yahoo/document/json/TokenBuffer.java b/document/src/main/java/com/yahoo/document/json/TokenBuffer.java
index 58ceeec4f4c..6b2bdbd53d8 100644
--- a/document/src/main/java/com/yahoo/document/json/TokenBuffer.java
+++ b/document/src/main/java/com/yahoo/document/json/TokenBuffer.java
@@ -17,18 +17,6 @@ import com.google.common.base.Preconditions;
*/
public class TokenBuffer {
- public static final class Token {
- public final JsonToken token;
- public final String name;
- public final String text;
-
- Token(JsonToken token, String name, String text) {
- this.token = token;
- this.name = name;
- this.text = text;
- }
- }
-
private final Deque<Token> buffer;
private int nesting = 0;
@@ -181,4 +169,27 @@ public class TokenBuffer {
} while ( nesting() > initialNesting + relativeNesting);
}
+ public Deque<Token> rest() {
+ return buffer;
+ }
+
+ public static final class Token {
+
+ public final JsonToken token;
+ public final String name;
+ public final String text;
+
+ Token(JsonToken token, String name, String text) {
+ this.token = token;
+ this.name = name;
+ this.text = text;
+ }
+
+ @Override
+ public String toString() {
+ return "Token(" + token + ", " + name + ", " + text + ")";
+ }
+
+ }
+
}
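
The relocated Token class above is a plain immutable value holder; on Java 16+ the same shape can be expressed as a record. A sketch only, using jackson-core's JsonToken, for comparison rather than as a proposed change:

```java
import com.fasterxml.jackson.core.JsonToken;

// Sketch: a record provides the final fields, constructor, accessors and a toString for free.
record Token(JsonToken token, String name, String text) {}
```
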
diff --git a/document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java b/document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java
index 6cbc1c1e0b1..27c18ea2f69 100644
--- a/document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java
+++ b/document/src/main/java/com/yahoo/document/json/readers/CompositeReader.java
@@ -7,7 +7,6 @@ import com.yahoo.document.PositionDataType;
import com.yahoo.document.datatypes.CollectionFieldValue;
import com.yahoo.document.datatypes.FieldValue;
import com.yahoo.document.datatypes.MapFieldValue;
-import com.yahoo.document.datatypes.StringFieldValue;
import com.yahoo.document.datatypes.StructuredFieldValue;
import com.yahoo.document.datatypes.TensorFieldValue;
import com.yahoo.document.datatypes.WeightedSet;
diff --git a/document/src/main/java/com/yahoo/document/json/readers/StructReader.java b/document/src/main/java/com/yahoo/document/json/readers/StructReader.java
index b944d273a72..d489c03eb9d 100644
--- a/document/src/main/java/com/yahoo/document/json/readers/StructReader.java
+++ b/document/src/main/java/com/yahoo/document/json/readers/StructReader.java
@@ -30,8 +30,7 @@ public class StructReader {
throw new IllegalArgumentException("No field '" + buffer.currentName() + "' in the structure of type '" +
parent.getDataType().getDataTypeName() +
"', which has the fields: " + parent.getDataType().getFields());
-
- buffer.skipToRelativeNesting(0);
+ buffer.skipToRelativeNesting(1);
fullyApplied = false;
continue;
}
diff --git a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
index 97422217857..41d607b0d8e 100644
--- a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
+++ b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
@@ -108,6 +108,7 @@ public class JsonReaderTestCase {
x.addField(new Field("field2", DataType.STRING));
x.addField(new Field("int1", DataType.INT));
x.addField(new Field("flag", DataType.BOOL));
+ x.addField(new Field("tensor1", DataType.getTensor(TensorType.fromSpec("tensor(x{})"))));
types.registerDocumentType(x);
}
{
@@ -990,7 +991,7 @@ public class JsonReaderTestCase {
}
@Test
- public void nonExistingFieldCausesException() throws IOException{
+ public void nonExistingFieldCausesException() throws IOException {
JsonReader r = createReader(inputJson(
"{ 'put': 'id:unittest:smoke::whee',",
" 'fields': {",
@@ -1009,7 +1010,7 @@ public class JsonReaderTestCase {
}
@Test
- public void nonExistingFieldsCanBeIgnoredInPut() throws IOException{
+ public void nonExistingFieldsCanBeIgnoredInPut() throws IOException {
JsonReader r = createReader(inputJson(
"{ ",
" 'put': 'id:unittest:smoke::doc1',",
@@ -1023,7 +1024,11 @@ public class JsonReaderTestCase {
" }",
" },",
" 'field2': 'value2',",
- " 'nonexisting3': 'ignored value'",
+ " 'nonexisting3': {",
+ " 'cells': [{'address': {'x': 'x1'}, 'value': 1.0}]",
+ " },",
+ " 'tensor1': {'cells': {'x1': 1.0}},",
+ " 'nonexisting4': 'ignored value'",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
@@ -1036,6 +1041,8 @@ public class JsonReaderTestCase {
assertNull(put.getDocument().getField("nonexisting2"));
assertEquals("value2", put.getDocument().getFieldValue("field2").toString());
assertNull(put.getDocument().getField("nonexisting3"));
+ assertEquals(Tensor.from("tensor(x{}):{{x:x1}:1.0}"), put.getDocument().getFieldValue("tensor1").getWrappedValue());
+ assertNull(put.getDocument().getField("nonexisting4"));
}
@Test
@@ -1055,7 +1062,13 @@ public class JsonReaderTestCase {
" }",
" },",
" 'field2': { 'assign': 'value2' },",
- " 'nonexisting3': { 'assign': 'ignored value' }",
+ " 'nonexisting3': {",
+ " 'assign' : {",
+ " 'cells': [{'address': {'x': 'x1'}, 'value': 1.0}]",
+ " }",
+ " },",
+ " 'tensor1': {'assign': { 'cells': {'x1': 1.0} } },",
+ " 'nonexisting4': { 'assign': 'ignored value' }",
" }",
"}"));
DocumentParseInfo parseInfo = r.parseDocument().get();
@@ -1068,6 +1081,8 @@ public class JsonReaderTestCase {
assertNull(update.getFieldUpdate("nonexisting2"));
assertEquals("value2", update.getFieldUpdate("field2").getValueUpdates().get(0).getValue().getWrappedValue().toString());
assertNull(update.getFieldUpdate("nonexisting3"));
+ assertEquals(Tensor.from("tensor(x{}):{{x:x1}:1.0}"), update.getFieldUpdate("tensor1").getValueUpdates().get(0).getValue().getWrappedValue());
+ assertNull(update.getFieldUpdate("nonexisting4"));
}
@Test
diff --git a/metrics/src/tests/metrictest.cpp b/metrics/src/tests/metrictest.cpp
index dc86e9ec281..f6f65780aa4 100644
--- a/metrics/src/tests/metrictest.cpp
+++ b/metrics/src/tests/metrictest.cpp
@@ -54,19 +54,19 @@ TEST(MetricTest, mangled_name_lists_dimensions_in_lexicographic_order)
{
LongValueMetric m("test",
{{"xyz", "bar"}, {"abc", "foo"}, {"def", "baz"}},
- "");
+ "", nullptr);
EXPECT_EQ(vespalib::string("test{abc:foo,def:baz,xyz:bar}"), m.getMangledName());
}
TEST(MetricTest, mangling_does_not_change_original_metric_name)
{
- LongValueMetric m("test", {{"foo", "bar"}}, "");
+ LongValueMetric m("test", {{"foo", "bar"}}, "", nullptr);
EXPECT_EQ(vespalib::string("test"), m.getName());
}
TEST(MetricTest, legacy_tags_do_not_create_mangled_name)
{
- LongValueMetric m("test", {{"foo"},{"bar"}}, "");
+ LongValueMetric m("test", {{"foo"},{"bar"}}, "", nullptr);
EXPECT_EQ(vespalib::string("test"), m.getName());
EXPECT_EQ(vespalib::string("test"), m.getMangledName());
}
diff --git a/metrics/src/vespa/metrics/countmetric.h b/metrics/src/vespa/metrics/countmetric.h
index b6b89f4ab15..5225cd58f12 100644
--- a/metrics/src/vespa/metrics/countmetric.h
+++ b/metrics/src/vespa/metrics/countmetric.h
@@ -29,15 +29,13 @@ struct AbstractCountMetric : public Metric {
protected:
AbstractCountMetric(const String& name, Tags dimensions,
- const String& description, MetricSet* owner = 0)
+ const String& description, MetricSet* owner)
: Metric(name, std::move(dimensions), description, owner)
- {
- }
+ { }
AbstractCountMetric(const AbstractCountMetric& other, MetricSet* owner)
: Metric(other, owner)
- {
- }
+ { }
void logWarning(const char* msg, const char * op) const;
};
@@ -49,10 +47,11 @@ class CountMetric : public AbstractCountMetric
MetricValueSet<Values> _values;
public:
- CountMetric(const String& name, Tags dimensions,
- const String& description, MetricSet* owner = 0);
-
- CountMetric(const CountMetric<T, SumOnAdd>& other, CopyType, MetricSet* owner);
+ CountMetric(const String& name, Tags dimensions, const String& description)
+ : CountMetric(name, std::move(dimensions), description, nullptr)
+ {}
+ CountMetric(const String& name, Tags dimensions, const String& description, MetricSet* owner);
+ CountMetric(const CountMetric<T, SumOnAdd>& other, MetricSet* owner);
~CountMetric() override;
@@ -71,11 +70,8 @@ public:
CountMetric t(a); t -= b; return t;
}
-
-
- CountMetric * clone(std::vector<Metric::UP> &, CopyType type, MetricSet* owner,
- bool /*includeUnused*/) const override {
- return new CountMetric<T, SumOnAdd>(*this, type, owner);
+ CountMetric * clone(std::vector<Metric::UP> &, CopyType, MetricSet* owner, bool) const override {
+ return new CountMetric<T, SumOnAdd>(*this, owner);
}
T getValue() const { return _values.getValues()._value; }
diff --git a/metrics/src/vespa/metrics/countmetric.hpp b/metrics/src/vespa/metrics/countmetric.hpp
index e0e7c235f47..900c5eeb8ca 100644
--- a/metrics/src/vespa/metrics/countmetric.hpp
+++ b/metrics/src/vespa/metrics/countmetric.hpp
@@ -15,10 +15,9 @@ CountMetric<T, SumOnAdd>::CountMetric(const String& name, Tags dimensions,
{}
template <typename T, bool SumOnAdd>
-CountMetric<T, SumOnAdd>::CountMetric(const CountMetric<T, SumOnAdd>& other,
- CopyType copyType, MetricSet* owner)
+CountMetric<T, SumOnAdd>::CountMetric(const CountMetric<T, SumOnAdd>& other, MetricSet* owner)
: AbstractCountMetric(other, owner),
- _values(other._values, copyType == CLONE ? other._values.size() : 1)
+ _values(other._values)
{
}
@@ -114,11 +113,9 @@ CountMetric<T, SumOnAdd>::dec(T value)
template <typename T, bool SumOnAdd>
void
-CountMetric<T, SumOnAdd>::addToSnapshot(
- Metric& other, std::vector<Metric::UP> &) const
+CountMetric<T, SumOnAdd>::addToSnapshot(Metric& other, std::vector<Metric::UP> &) const
{
- CountMetric<T, SumOnAdd>& o(
- reinterpret_cast<CountMetric<T, SumOnAdd>&>(other));
+ CountMetric<T, SumOnAdd>& o(reinterpret_cast<CountMetric<T, SumOnAdd>&>(other));
o.inc(_values.getValues()._value);
}
@@ -126,8 +123,7 @@ template <typename T, bool SumOnAdd>
void
CountMetric<T, SumOnAdd>::addToPart(Metric& other) const
{
- CountMetric<T, SumOnAdd>& o(
- reinterpret_cast<CountMetric<T, SumOnAdd>&>(other));
+ CountMetric<T, SumOnAdd>& o(reinterpret_cast<CountMetric<T, SumOnAdd>&>(other));
if (SumOnAdd) {
o.inc(_values.getValues()._value);
} else {
@@ -158,9 +154,7 @@ void
CountMetric<T, SumOnAdd>::addMemoryUsage(MemoryConsumption& mc) const
{
++mc._countMetricCount;
- mc._countMetricValues += _values.getMemoryUsageAllocatedInternally();
- mc._countMetricMeta += sizeof(CountMetric<T, SumOnAdd>)
- - sizeof(Metric);
+ mc._countMetricMeta += sizeof(CountMetric<T, SumOnAdd>) - sizeof(Metric);
Metric::addMemoryUsage(mc);
}
diff --git a/metrics/src/vespa/metrics/countmetricvalues.h b/metrics/src/vespa/metrics/countmetricvalues.h
index 981c4c07631..902b5294c28 100644
--- a/metrics/src/vespa/metrics/countmetricvalues.h
+++ b/metrics/src/vespa/metrics/countmetricvalues.h
@@ -23,7 +23,9 @@ struct CountMetricValues : public MetricValueClass {
T _value;
struct AtomicImpl {
- std::atomic<T> _value {0};
+ AtomicImpl() noexcept : _value(0) {}
+ AtomicImpl(const AtomicImpl & rhs) noexcept : _value(rhs._value.load(std::memory_order_relaxed)) {}
+ std::atomic<T> _value;
};
void relaxedStoreInto(AtomicImpl& target) const noexcept {
diff --git a/metrics/src/vespa/metrics/metric.h b/metrics/src/vespa/metrics/metric.h
index 89b772bf750..33a3d9dee23 100644
--- a/metrics/src/vespa/metrics/metric.h
+++ b/metrics/src/vespa/metrics/metric.h
@@ -148,9 +148,7 @@ public:
* unused metrics, but while generating sum metric sum in active
* metrics we want to. This has no affect if type is CLONE.
*/
- virtual Metric* clone(std::vector<Metric::UP> &ownerList,
- CopyType type, MetricSet* owner,
- bool includeUnused = false) const = 0;
+ virtual Metric* clone(std::vector<Metric::UP> &ownerList, CopyType type, MetricSet* owner, bool includeUnused) const = 0;
/**
* Utility function for assigning values from one metric of identical type
@@ -163,8 +161,7 @@ public:
/** Reset all metric values. */
virtual void reset() = 0;
- void print(std::ostream& out, bool verbose,
- const std::string& indent) const override {
+ void print(std::ostream& out, bool verbose, const std::string& indent) const override {
print(out, verbose, indent, 0);
}
virtual void print(std::ostream&, bool verbose, const std::string& indent,
diff --git a/metrics/src/vespa/metrics/metricmanager.cpp b/metrics/src/vespa/metrics/metricmanager.cpp
index 2fd09094f4c..ae75968e605 100644
--- a/metrics/src/vespa/metrics/metricmanager.cpp
+++ b/metrics/src/vespa/metrics/metricmanager.cpp
@@ -77,7 +77,7 @@ MetricManager::MetricManager(std::unique_ptr<Timer> timer)
_lastProcessedTime(0),
_snapshotUnsetMetrics(false),
_consumerConfigChanged(false),
- _metricManagerMetrics("metricmanager", {}, "Metrics for the metric manager upkeep tasks"),
+ _metricManagerMetrics("metricmanager", {}, "Metrics for the metric manager upkeep tasks", nullptr),
_periodicHookLatency("periodichooklatency", {}, "Time in ms used to update a single periodic hook", &_metricManagerMetrics),
_snapshotHookLatency("snapshothooklatency", {}, "Time in ms used to update a single snapshot hook", &_metricManagerMetrics),
_resetLatency("resetlatency", {}, "Time in ms used to reset all metrics.", &_metricManagerMetrics),
@@ -383,11 +383,9 @@ MetricManager::handleMetricsAltered(const MetricLockGuard & guard)
configMap[consumer.name] = std::make_shared<ConsumerSpec>(std::move(consumerMetricBuilder._matchedMetrics));
}
LOG(debug, "Recreating snapshots to include altered metrics");
- _totalMetrics->recreateSnapshot(_activeMetrics.getMetrics(),
- _snapshotUnsetMetrics);
+ _totalMetrics->recreateSnapshot(_activeMetrics.getMetrics(), _snapshotUnsetMetrics);
for (uint32_t i=0; i<_snapshots.size(); ++i) {
- _snapshots[i]->recreateSnapshot(_activeMetrics.getMetrics(),
- _snapshotUnsetMetrics);
+ _snapshots[i]->recreateSnapshot(_activeMetrics.getMetrics(), _snapshotUnsetMetrics);
}
LOG(debug, "Setting new consumer config. Clearing dirty flag");
_consumerConfig.swap(configMap);
@@ -395,8 +393,7 @@ MetricManager::handleMetricsAltered(const MetricLockGuard & guard)
}
namespace {
- bool setSnapshotName(std::ostream& out, const char* name,
- uint32_t length, uint32_t period)
+ bool setSnapshotName(std::ostream& out, const char* name, uint32_t length, uint32_t period)
{
if (length % period != 0) return false;
out << (length / period) << ' ' << name;
@@ -412,9 +409,8 @@ MetricManager::createSnapshotPeriods(const Config& config)
try{
for (uint32_t i=0; i<config.snapshot.periods.size(); ++i) {
uint32_t length = config.snapshot.periods[i];
- if (length < 1) throw vespalib::IllegalStateException(
- "Snapshot periods must be positive numbers",
- VESPA_STRLOC);
+ if (length < 1)
+ throw vespalib::IllegalStateException("Snapshot periods must be positive numbers", VESPA_STRLOC);
std::ostringstream name;
if (setSnapshotName(name, "week", length, 60 * 60 * 24 * 7)) {
} else if (setSnapshotName(name, "day", length, 60 * 60 * 24)) {
@@ -483,15 +479,13 @@ MetricManager::configure(const MetricLockGuard & , std::unique_ptr<Config> confi
VESPA_STRLOC);
}
}
- _snapshots.push_back(MetricSnapshotSet::SP(new MetricSnapshotSet(
+ _snapshots.push_back(std::make_shared<MetricSnapshotSet>(
snapshotPeriods[i].second, snapshotPeriods[i].first, count,
- _activeMetrics.getMetrics(), _snapshotUnsetMetrics)));
+ _activeMetrics.getMetrics(), _snapshotUnsetMetrics));
count = nextCount;
}
// Add all time snapshot.
- _totalMetrics = MetricSnapshot::SP(new MetricSnapshot(
- "All time snapshot", 0, _activeMetrics.getMetrics(),
- _snapshotUnsetMetrics));
+ _totalMetrics = std::make_shared<MetricSnapshot>("All time snapshot", 0, _activeMetrics.getMetrics(), _snapshotUnsetMetrics);
_totalMetrics->reset(currentTime);
}
if (_config.get() == 0
diff --git a/metrics/src/vespa/metrics/metricmanager.h b/metrics/src/vespa/metrics/metricmanager.h
index 323e90253b9..5f35c349f7f 100644
--- a/metrics/src/vespa/metrics/metricmanager.h
+++ b/metrics/src/vespa/metrics/metricmanager.h
@@ -104,7 +104,6 @@ private:
std::list<UpdateHook*> _snapshotUpdateHooks;
mutable std::mutex _waiter;
mutable std::condition_variable _cond;
- using PeriodTimePair = std::pair<uint32_t, time_t>;
std::vector<MetricSnapshotSet::SP> _snapshots;
MetricSnapshot::SP _totalMetrics;
std::unique_ptr<Timer> _timer;
@@ -195,8 +194,7 @@ public:
* of consumers. readConfig() will start a config subscription. It should
* not be called multiple times.
*/
- void init(const config::ConfigUri & uri, FastOS_ThreadPool&,
- bool startThread = true);
+ void init(const config::ConfigUri & uri, FastOS_ThreadPool&, bool startThread = true);
/**
* Visit a given snapshot for a given consumer. (Empty consumer name means
diff --git a/metrics/src/vespa/metrics/metricset.cpp b/metrics/src/vespa/metrics/metricset.cpp
index 602dd9e500c..f583d2f4716 100644
--- a/metrics/src/vespa/metrics/metricset.cpp
+++ b/metrics/src/vespa/metrics/metricset.cpp
@@ -43,8 +43,7 @@ MetricSet::MetricSet(const MetricSet& other,
MetricSet::~MetricSet() = default;
MetricSet*
-MetricSet::clone(std::vector<Metric::UP> &ownerList, CopyType type,
- MetricSet* owner, bool includeUnused) const
+MetricSet::clone(std::vector<Metric::UP> &ownerList, CopyType type, MetricSet* owner, bool includeUnused) const
{
return new MetricSet(*this, ownerList, type, owner, includeUnused);
}
@@ -233,7 +232,7 @@ MetricSet::addTo(Metric& other, std::vector<Metric::UP> *ownerList) const
if (target == map2.end() || source->first < target->first) {
// Source missing in snapshot to add to. Lets create and add.
if (!mustAdd && source->second->used()) {
- Metric::UP copy(source->second->clone(*ownerList, INACTIVE, &o));
+ Metric::UP copy(source->second->clone(*ownerList, INACTIVE, &o, false));
newMetrics[source->first] = copy.get();
ownerList->push_back(std::move(copy));
}
diff --git a/metrics/src/vespa/metrics/metricset.h b/metrics/src/vespa/metrics/metricset.h
index 9789fe789f4..ca4df1ceb58 100644
--- a/metrics/src/vespa/metrics/metricset.h
+++ b/metrics/src/vespa/metrics/metricset.h
@@ -22,11 +22,15 @@ class MetricSet : public Metric
// it was reset
public:
- MetricSet(const String& name, Tags dimensions,
- const String& description, MetricSet* owner = 0);
-
- MetricSet(const MetricSet&, std::vector<Metric::UP> &ownerList,
- CopyType, MetricSet* owner = 0, bool includeUnused = false);
+ MetricSet(const String& name, Tags dimensions, const String& description) :
+ MetricSet(name, std::move(dimensions), description, nullptr)
+ {}
+ MetricSet(const String& name, Tags dimensions, const String& description, MetricSet* owner);
+ MetricSet(const MetricSet&, std::vector<Metric::UP> &ownerList, CopyType, MetricSet* owner, bool includeUnused);
+ // Do not generate default copy constructor or assignment operator
+ // These would screw up metric registering
+ MetricSet(const MetricSet&) = delete;
+ MetricSet& operator=(const MetricSet&) = delete;
~MetricSet();
// If no path, this metric is not registered within another
@@ -44,8 +48,7 @@ public:
void registerMetric(Metric& m);
void unregisterMetric(Metric& m);
- MetricSet* clone(std::vector<Metric::UP> &ownerList, CopyType type,
- MetricSet* owner, bool includeUnused = false) const override;
+ MetricSet* clone(std::vector<Metric::UP> &ownerList, CopyType type, MetricSet* owner, bool includeUnused) const override;
void reset() override;
@@ -59,8 +62,7 @@ public:
const Metric* getMetric(stringref name) const;
Metric* getMetric(stringref name) {
- return const_cast<Metric*>(
- const_cast<const MetricSet*>(this)->getMetric(name));
+ return const_cast<Metric*>(const_cast<const MetricSet*>(this)->getMetric(name));
}
void addToSnapshot(Metric& m, std::vector<Metric::UP> &o) const override { addTo(m, &o); }
@@ -75,11 +77,6 @@ public:
void addToPart(Metric& m) const override { addTo(m, 0); }
private:
- // Do not generate default copy constructor or assignment operator
- // These would screw up metric registering
- MetricSet(const MetricSet&);
- MetricSet& operator=(const MetricSet&);
-
void tagRegistrationAltered();
const Metric* getMetricInternal(stringref name) const;
diff --git a/metrics/src/vespa/metrics/metricsnapshot.cpp b/metrics/src/vespa/metrics/metricsnapshot.cpp
index 34ba32e9d95..1580f340f0e 100644
--- a/metrics/src/vespa/metrics/metricsnapshot.cpp
+++ b/metrics/src/vespa/metrics/metricsnapshot.cpp
@@ -13,14 +13,12 @@ MetricSnapshot::MetricSnapshot(const Metric::String& name)
_period(0),
_fromTime(0),
_toTime(0),
- _snapshot(new MetricSet("top", {}, "")),
+ _snapshot(new MetricSet("top", {}, "", nullptr)),
_metrics()
{
}
-MetricSnapshot::MetricSnapshot(
- const Metric::String& name, uint32_t period, const MetricSet& source,
- bool copyUnset)
+MetricSnapshot::MetricSnapshot(const Metric::String& name, uint32_t period, const MetricSet& source, bool copyUnset)
: _name(name),
_period(period),
_fromTime(0),
@@ -34,7 +32,7 @@ MetricSnapshot::MetricSnapshot(
_metrics.shrink_to_fit();
}
-MetricSnapshot::~MetricSnapshot() { }
+MetricSnapshot::~MetricSnapshot() = default;
void
MetricSnapshot::reset(time_t currentTime)
diff --git a/metrics/src/vespa/metrics/metricvalueset.h b/metrics/src/vespa/metrics/metricvalueset.h
index bb2a7409ce7..b38cab4bfac 100644
--- a/metrics/src/vespa/metrics/metricvalueset.h
+++ b/metrics/src/vespa/metrics/metricvalueset.h
@@ -49,20 +49,16 @@ struct MetricValueClass {
template<typename ValueClass>
class MetricValueSet {
using AtomicValues = typename ValueClass::AtomicImpl;
- std::vector<AtomicValues> _values;
- std::atomic<uint32_t> _activeValueIndex;
- std::atomic<uint32_t> _flags;
+ std::array<AtomicValues, 3> _values;
+ std::atomic<uint32_t> _activeValueIndex;
+ std::atomic<uint32_t> _flags;
enum Flag { RESET = 1 };
bool isReset() const { return hasFlag(RESET); }
-
- void validateCorrectValueSuperClass(const MetricValueClass&) {}
-
public:
- MetricValueSet(uint32_t copyCount = 3);
- MetricValueSet(const MetricValueSet& other, uint32_t copyCount = 3);
-
- MetricValueSet& operator=(const MetricValueSet& other);
+ MetricValueSet() noexcept;
+ MetricValueSet(const MetricValueSet&) noexcept;
+ MetricValueSet& operator=(const MetricValueSet& other) noexcept;
/** Get the current values. */
ValueClass getValues() const;
@@ -82,10 +78,6 @@ public:
std::string toString();
- uint32_t getMemoryUsageAllocatedInternally() const {
- return _values.capacity() * sizeof(ValueClass);
- }
-
uint32_t size() const { return _values.size(); }
bool hasFlag(uint32_t flags) const {
diff --git a/metrics/src/vespa/metrics/metricvalueset.hpp b/metrics/src/vespa/metrics/metricvalueset.hpp
index fa42708f35e..ada833b20e2 100644
--- a/metrics/src/vespa/metrics/metricvalueset.hpp
+++ b/metrics/src/vespa/metrics/metricvalueset.hpp
@@ -7,23 +7,21 @@
namespace metrics {
template<typename ValueClass>
-MetricValueSet<ValueClass>::MetricValueSet(uint32_t copyCount)
- : _values(copyCount),
+MetricValueSet<ValueClass>::MetricValueSet() noexcept
+ : _values(),
_activeValueIndex(0),
_flags(0)
{ }
template<typename ValueClass>
-MetricValueSet<ValueClass>::MetricValueSet(const MetricValueSet& other, uint32_t copyCount)
- : _values(copyCount),
- _activeValueIndex(0),
- _flags(other._flags.load(std::memory_order_relaxed))
-{
- setValues(other.getValues());
-}
+MetricValueSet<ValueClass>::MetricValueSet(const MetricValueSet& rhs) noexcept
+ : _values(rhs._values),
+ _activeValueIndex(rhs._activeValueIndex.load(std::memory_order_relaxed)),
+ _flags(rhs._flags.load(std::memory_order_relaxed))
+{ }
template<typename ValueClass>
-MetricValueSet<ValueClass> & MetricValueSet<ValueClass>::operator=(const MetricValueSet& other)
+MetricValueSet<ValueClass> & MetricValueSet<ValueClass>::operator=(const MetricValueSet& other) noexcept
{
setValues(other.getValues());
return *this;
@@ -47,11 +45,9 @@ MetricValueSet<ValueClass>::getValues() const {
template<typename ValueClass>
bool
MetricValueSet<ValueClass>::setValues(const ValueClass& values) {
- validateCorrectValueSuperClass(values);
// Only setter-thread can write _activeValueIndex, so relaxed
// load suffices.
- uint32_t nextIndex = (_activeValueIndex.load(std::memory_order_relaxed)
- + 1) % _values.size();
+ uint32_t nextIndex = (_activeValueIndex.load(std::memory_order_relaxed) + 1) % _values.size();
// Reset flag is loaded/stored with relaxed semantics since it does not
// carry data dependencies. _activeValueIndex has a dependency on
// _values, however, so we must ensure that stores are published
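
The comment this hunk ends on describes the single-writer publication scheme these files rely on: the writer fills an inactive slot in the fixed three-element array and then publishes its index with a release store, which readers pair with an acquire load. A stripped-down sketch of that idea; it keeps the slot payload as plain fields for brevity, whereas the real code stores atomics in the slots (the AtomicImpl structs later in this diff) so the payload copy itself is race-free:

#include <array>
#include <atomic>
#include <cstdint>

struct Sample {
    uint32_t count = 0;
    int64_t  total = 0;
};

class PublishedValue {
    std::array<Sample, 3> _slots{};
    std::atomic<uint32_t> _active{0};
public:
    // Writer thread only: fill a slot that is not currently active, then
    // publish it. The release store makes the slot contents visible to any
    // reader whose acquire load observes the new index.
    void set(const Sample& s) {
        uint32_t next = (_active.load(std::memory_order_relaxed) + 1) % _slots.size();
        _slots[next] = s;
        _active.store(next, std::memory_order_release);
    }
    // Readers: re-check the index after copying so a concurrent republish is
    // detected and the copy retried.
    Sample get() const {
        while (true) {
            uint32_t idx = _active.load(std::memory_order_acquire);
            Sample copy = _slots[idx];
            if (idx == _active.load(std::memory_order_acquire)) {
                return copy;
            }
        }
    }
};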
diff --git a/metrics/src/vespa/metrics/summetric.h b/metrics/src/vespa/metrics/summetric.h
index b94c4b791ec..0906ade5cc9 100644
--- a/metrics/src/vespa/metrics/summetric.h
+++ b/metrics/src/vespa/metrics/summetric.h
@@ -43,11 +43,11 @@ private:
std::vector<const AddendMetric*> _metricsToSum;
public:
- SumMetric(const String& name, Tags tags, const String& description, MetricSet* owner = 0);
- SumMetric(const SumMetric<AddendMetric>& other, std::vector<Metric::UP> & ownerList, MetricSet* owner = 0);
+ SumMetric(const String& name, Tags tags, const String& description, MetricSet* owner);
+ SumMetric(const SumMetric<AddendMetric>& other, std::vector<Metric::UP> & ownerList, MetricSet* owner);
~SumMetric();
- Metric* clone( std::vector<Metric::UP> &, CopyType, MetricSet* owner, bool includeUnused = false) const override;
+ Metric* clone( std::vector<Metric::UP> &, CopyType, MetricSet* owner, bool includeUnused) const override;
/**
* If you want to support sums of collections of metrics that may
diff --git a/metrics/src/vespa/metrics/valuemetric.h b/metrics/src/vespa/metrics/valuemetric.h
index 1be796ab1aa..0211fcf6974 100644
--- a/metrics/src/vespa/metrics/valuemetric.h
+++ b/metrics/src/vespa/metrics/valuemetric.h
@@ -76,13 +76,13 @@ class ValueMetric : public AbstractValueMetric {
bool checkFinite(AvgVal, std::false_type) { return true; }
public:
- ValueMetric(const ValueMetric<AvgVal, TotVal, SumOnAdd> &,
- CopyType, MetricSet *owner);
+ ValueMetric(const ValueMetric<AvgVal, TotVal, SumOnAdd> &, MetricSet *owner);
+ ValueMetric(const String &name, Tags dimensions, const String &description)
+ : ValueMetric(name, std::move(dimensions), description, nullptr)
+ {}
+ ValueMetric(const String &name, Tags dimensions, const String &description, MetricSet *owner);
- ValueMetric(const String &name, Tags dimensions,
- const String &description, MetricSet *owner = 0);
-
- ~ValueMetric();
+ ~ValueMetric() override;
MetricValueClass::UP getValues() const override {
return std::make_unique<Values>(_values.getValues());
@@ -90,9 +90,8 @@ public:
void unsetOnZeroValue() { _values.setFlag(UNSET_ON_ZERO_VALUE); }
- ValueMetric *clone(std::vector<Metric::UP> &, CopyType type, MetricSet *owner,
- bool /*includeUnused*/) const override {
- return new ValueMetric<AvgVal,TotVal,SumOnAdd>(*this, type, owner);
+ ValueMetric *clone(std::vector<Metric::UP> &, CopyType , MetricSet *owner, bool) const override {
+ return new ValueMetric<AvgVal,TotVal,SumOnAdd>(*this, owner);
}
ValueMetric & operator+=(const ValueMetric &);
diff --git a/metrics/src/vespa/metrics/valuemetric.hpp b/metrics/src/vespa/metrics/valuemetric.hpp
index 5e0ef95e9e5..2609a39f509 100644
--- a/metrics/src/vespa/metrics/valuemetric.hpp
+++ b/metrics/src/vespa/metrics/valuemetric.hpp
@@ -18,11 +18,9 @@ ValueMetric<AvgVal, TotVal, SumOnAdd>::ValueMetric(
{}
template<typename AvgVal, typename TotVal, bool SumOnAdd>
-ValueMetric<AvgVal, TotVal, SumOnAdd>::ValueMetric(
- const ValueMetric<AvgVal, TotVal, SumOnAdd>& other,
- CopyType copyType, MetricSet* owner)
+ValueMetric<AvgVal, TotVal, SumOnAdd>::ValueMetric(const ValueMetric<AvgVal, TotVal, SumOnAdd>& other, MetricSet* owner)
: AbstractValueMetric(other, owner),
- _values(other._values, copyType == CLONE ? other._values.size() : 1)
+ _values(other._values)
{}
template<typename AvgVal, typename TotVal, bool SumOnAdd>
@@ -66,11 +64,9 @@ void ValueMetric<AvgVal, TotVal, SumOnAdd>::dec(AvgVal decVal)
template<typename AvgVal, typename TotVal, bool SumOnAdd>
void
-ValueMetric<AvgVal, TotVal, SumOnAdd>::addToSnapshot(
- Metric& other, std::vector<Metric::UP> &) const
+ValueMetric<AvgVal, TotVal, SumOnAdd>::addToSnapshot(Metric& other, std::vector<Metric::UP> &) const
{
- ValueMetric<AvgVal, TotVal, SumOnAdd>& o(
- reinterpret_cast<ValueMetric<AvgVal, TotVal, SumOnAdd>&>(other));
+ auto & o = reinterpret_cast<ValueMetric<AvgVal, TotVal, SumOnAdd>&>(other);
if (_values.getValues()._count == 0) return; // Don't add if not set
o.add(_values.getValues(), false);
}
@@ -79,9 +75,7 @@ template<typename AvgVal, typename TotVal, bool SumOnAdd>
void
ValueMetric<AvgVal, TotVal, SumOnAdd>::addToPart(Metric& other) const
{
- ValueMetric<AvgVal, TotVal, SumOnAdd>& o(
- reinterpret_cast<ValueMetric<AvgVal, TotVal, SumOnAdd>&>(
- other));
+ auto & o = reinterpret_cast<ValueMetric<AvgVal, TotVal, SumOnAdd>&>(other);
o.add(_values.getValues(), SumOnAdd);
}
@@ -229,10 +223,8 @@ ValueMetric<AvgVal, TotVal, SumOnAdd>::getDoubleValue(stringref id) const
return getAverage();
if (id == "count") return static_cast<double>(values._count);
if (id == "total") return static_cast<double>(values._total);
- if (id == "min") return static_cast<double>(
- values._count > 0 ? values._min : 0);
- if (id == "max") return static_cast<double>(
- values._count > 0 ? values._max : 0);
+ if (id == "min") return static_cast<double>(values._count > 0 ? values._min : 0);
+ if (id == "max") return static_cast<double>(values._count > 0 ? values._max : 0);
throw vespalib::IllegalArgumentException(
"No value " + vespalib::string(id) + " in average metric.", VESPA_STRLOC);
}
@@ -242,9 +234,7 @@ void
ValueMetric<AvgVal, TotVal, SumOnAdd>::addMemoryUsage(MemoryConsumption& mc) const
{
++mc._valueMetricCount;
- mc._valueMetricValues += _values.getMemoryUsageAllocatedInternally();
- mc._valueMetricMeta += sizeof(ValueMetric<AvgVal, TotVal, SumOnAdd>)
- - sizeof(Metric);
+ mc._valueMetricMeta += sizeof(ValueMetric<AvgVal, TotVal, SumOnAdd>) - sizeof(Metric);
Metric::addMemoryUsage(mc);
}
diff --git a/metrics/src/vespa/metrics/valuemetricvalues.h b/metrics/src/vespa/metrics/valuemetricvalues.h
index 905d334214c..43a6e68c754 100644
--- a/metrics/src/vespa/metrics/valuemetricvalues.h
+++ b/metrics/src/vespa/metrics/valuemetricvalues.h
@@ -25,11 +25,25 @@ struct ValueMetricValues : MetricValueClass {
TotVal _total;
struct AtomicImpl {
- std::atomic<uint32_t> _count {0};
- std::atomic<AvgVal> _min {std::numeric_limits<AvgVal>::max()};
- std::atomic<AvgVal> _max {std::numeric_limits<AvgVal>::min()};
- std::atomic<AvgVal> _last {0};
- std::atomic<TotVal> _total {0};
+ AtomicImpl() noexcept
+ : _count(0),
+ _min(std::numeric_limits<AvgVal>::max()),
+ _max(std::numeric_limits<AvgVal>::min()),
+ _last(0),
+ _total(0)
+ {}
+ AtomicImpl(const AtomicImpl & rhs) noexcept
+ : _count(rhs._count.load(std::memory_order_relaxed)),
+ _min(rhs._min.load(std::memory_order_relaxed)),
+ _max(rhs._max.load(std::memory_order_relaxed)),
+ _last(rhs._last.load(std::memory_order_relaxed)),
+ _total(rhs._total.load(std::memory_order_relaxed))
+ {}
+ std::atomic<uint32_t> _count;
+ std::atomic<AvgVal> _min;
+ std::atomic<AvgVal> _max;
+ std::atomic<AvgVal> _last;
+ std::atomic<TotVal> _total;
};
ValueMetricValues();
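
Because std::atomic is not copyable, switching MetricValueSet to std::array<AtomicImpl, 3> only works once AtomicImpl gains the explicit copy constructor added above, which loads each source member with relaxed ordering. The same pattern in isolation, on a hypothetical Counters struct:

#include <atomic>
#include <cstdint>

// std::atomic<T> has no copy constructor, so an aggregate holding atomics is
// not copyable by default. Copying must be spelled out member by member,
// loading each source atomic explicitly (relaxed suffices when no ordering
// with other data is required).
struct Counters {
    std::atomic<uint32_t> hits;
    std::atomic<uint64_t> bytes;

    Counters() noexcept : hits(0), bytes(0) {}
    Counters(const Counters& rhs) noexcept
        : hits(rhs.hits.load(std::memory_order_relaxed)),
          bytes(rhs.bytes.load(std::memory_order_relaxed)) {}
};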
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index 2590db9434b..4bfcea4acd2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -235,10 +235,11 @@ public class Nodes {
if ( ! zone.environment().isProduction() || zone.system().isCd())
return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());
- var stateless = NodeList.copyOf(nodes).stateless();
- var stateful = NodeList.copyOf(nodes).stateful();
- var statefulToInactive = stateful.not().reusable();
- var statefulToDirty = stateful.reusable();
+ NodeList nodeList = NodeList.copyOf(nodes);
+ NodeList stateless = nodeList.stateless();
+ NodeList stateful = nodeList.stateful();
+ NodeList statefulToInactive = stateful.not().reusable();
+ NodeList statefulToDirty = stateful.reusable();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(deallocate(statefulToDirty.asList(), Agent.application, "Deactivated by application (recycled)", transaction.nested()));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
index 120b7f00b38..32fe9ba9f7b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/OsUpgrader.java
@@ -34,12 +34,12 @@ public abstract class OsUpgrader {
abstract void disableUpgrade(NodeType type);
/** Returns the number of upgrade slots available for given target */
- final int upgradeSlots(OsVersionTarget target, NodeList activeNodes) {
- if (!activeNodes.stream().allMatch(node -> node.type() == target.nodeType())) {
+ final int upgradeSlots(OsVersionTarget target, NodeList candidates) {
+ if (!candidates.stream().allMatch(node -> node.type() == target.nodeType())) {
throw new IllegalArgumentException("All node types must match the type of OS version target " + target.nodeType());
}
int max = target.nodeType() == NodeType.host ? maxActiveUpgrades.value() : 1;
- int upgrading = activeNodes.changingOsVersionTo(target.version()).size();
+ int upgrading = candidates.changingOsVersionTo(target.version()).size();
return Math.max(0, max - upgrading);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
index f6779d08fd7..212bf5ffb12 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RebuildingOsUpgrader.java
@@ -67,7 +67,7 @@ public class RebuildingOsUpgrader extends OsUpgrader {
// Rebuild hosts not containing stateful clusters with retiring nodes, up to rebuild limit
NodeList activeHosts = hostsOfTargetType.state(Node.State.active);
- int rebuildLimit = upgradeSlots(target, activeHosts);
+ int rebuildLimit = upgradeSlots(target, activeHosts.rebuilding(softRebuild));
List<Node> hostsToRebuild = new ArrayList<>(rebuildLimit);
NodeList candidates = activeHosts.not().rebuilding(softRebuild)
.osVersionIsBefore(target.version())
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
index 4d98885b72c..de4915d60aa 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
@@ -56,7 +56,7 @@ public class RetiringOsUpgrader extends OsUpgrader {
.osVersionIsBefore(target.version())
.matching(node -> canUpgradeAt(instant, node))
.byIncreasingOsVersion()
- .first(upgradeSlots(target, activeNodes));
+ .first(upgradeSlots(target, activeNodes.deprovisioning()));
}
/** Upgrade given host by retiring and deprovisioning it */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
index 38df1725662..cebc185360a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
@@ -93,7 +93,6 @@ public class CuratorDb {
private void initZK() {
db.create(root);
- db.deleteRecursively(nodesPath); // TODO(mpolden): Remove before we start reading from this path
db.create(nodesPath);
// TODO(mpolden): Remove state paths after migration to nodesPath
for (Node.State state : Node.State.values())
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index 4635d3ff525..9f9e6b85545 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -26,7 +26,6 @@ import java.util.List;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;
-import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -181,9 +180,16 @@ public class OsVersionsTest {
versions.setTarget(NodeType.host, version1, false);
versions.resumeUpgradeOf(NodeType.host, true);
- // One host is deprovisioning
+ // First batch of hosts starts deprovisioning
assertEquals(maxActiveUpgrades, hostNodes.get().deprovisioning().size());
+ // Deprovisioning is rescheduled if some other agent resets wantToRetire/wantToDeprovision
+ Node host0 = hostNodes.get().deprovisioning().first().get();
+ tester.patchNode(host0, (h) -> h.withWantToRetire(false, false, Agent.system,
+ tester.nodeRepository().clock().instant()));
+ versions.resumeUpgradeOf(NodeType.host, true);
+ assertTrue(hostNodes.get().deprovisioning().node(host0.hostname()).isPresent());
+
// Nothing happens on next resume as first batch has not completed upgrade
versions.resumeUpgradeOf(NodeType.host, true);
NodeList nodesDeprovisioning = hostNodes.get().deprovisioning();
diff --git a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp b/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
index 47800db91e9..6838f61967e 100644
--- a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
+++ b/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
@@ -114,8 +114,7 @@ const string word2 = "bar";
const DocumentIdT doc_id1 = 1;
const DocumentIdT doc_id2 = 2;
-Document::UP buildDocument(DocBuilder & doc_builder, int id,
- const string &word) {
+Document::UP buildDocument(DocBuilder & doc_builder, int id, const string &word) {
ostringstream ost;
ost << "id:ns:searchdocument::" << id;
auto doc = doc_builder.make_document(ost.str());
@@ -124,8 +123,7 @@ Document::UP buildDocument(DocBuilder & doc_builder, int id,
}
// Performs a search using a Searchable.
-void Test::testSearch(Searchable &source,
- const string &term, uint32_t doc_id)
+void Test::testSearch(Searchable &source, const string &term, uint32_t doc_id)
{
FakeRequestContext requestContext;
uint32_t fieldId = 0;
@@ -137,15 +135,13 @@ void Test::testSearch(Searchable &source,
Blueprint::UP result = source.createBlueprint(requestContext,
FieldSpecList().add(FieldSpec(field_name, 0, handle)), node);
result->fetchPostings(search::queryeval::ExecuteInfo::TRUE);
- SearchIterator::UP search_iterator =
- result->createSearch(*match_data, true);
+ SearchIterator::UP search_iterator = result->createSearch(*match_data, true);
search_iterator->initFullRange();
ASSERT_TRUE(search_iterator.get());
ASSERT_TRUE(search_iterator->seek(doc_id));
EXPECT_EQUAL(doc_id, search_iterator->getDocId());
search_iterator->unpack(doc_id);
- FieldPositionsIterator it =
- match_data->resolveTermField(handle)->getIterator();
+ FieldPositionsIterator it = match_data->resolveTermField(handle)->getIterator();
ASSERT_TRUE(it.valid());
EXPECT_EQUAL(1u, it.size());
EXPECT_EQUAL(1u, it.getPosition()); // All hits are at pos 1 in this index
@@ -178,13 +174,12 @@ void Test::requireThatMemoryIndexCanBeDumpedAndSearched() {
testSearch(memory_index, word2, doc_id2);
const string index_dir = "test_index";
- IndexBuilder index_builder(schema);
- index_builder.setPrefix(index_dir);
const uint32_t docIdLimit = memory_index.getDocIdLimit();
const uint64_t num_words = memory_index.getNumWords();
+ IndexBuilder index_builder(schema, index_dir, docIdLimit);
search::TuneFileIndexing tuneFileIndexing;
DummyFileHeaderContext fileHeaderContext;
- index_builder.open(docIdLimit, num_words, MockFieldLengthInspector(), tuneFileIndexing, fileHeaderContext);
+ index_builder.open(num_words, MockFieldLengthInspector(), tuneFileIndexing, fileHeaderContext);
memory_index.dump(index_builder);
index_builder.close();
@@ -198,12 +193,7 @@ void Test::requireThatMemoryIndexCanBeDumpedAndSearched() {
ASSERT_TRUE(fret1);
SelectorArray selector(fusionDocIdLimit, 0);
{
- Fusion fusion(schema,
- index_dir2,
- fusionInputs,
- selector,
- tuneFileIndexing,
- fileHeaderContext);
+ Fusion fusion(schema, index_dir2, fusionInputs, selector, tuneFileIndexing, fileHeaderContext);
bool fret2 = fusion.merge(sharedExecutor, std::make_shared<FlushToken>());
ASSERT_TRUE(fret2);
}
@@ -217,12 +207,7 @@ void Test::requireThatMemoryIndexCanBeDumpedAndSearched() {
ASSERT_TRUE(fret3);
SelectorArray selector2(fusionDocIdLimit, 1);
{
- Fusion fusion(schema,
- index_dir3,
- fusionInputs,
- selector2,
- tuneFileIndexing,
- fileHeaderContext);
+ Fusion fusion(schema, index_dir3, fusionInputs, selector2, tuneFileIndexing, fileHeaderContext);
bool fret4 = fusion.merge(sharedExecutor, std::make_shared<FlushToken>());
ASSERT_TRUE(fret4);
}
@@ -236,14 +221,8 @@ void Test::requireThatMemoryIndexCanBeDumpedAndSearched() {
ASSERT_TRUE(fret5);
SelectorArray selector3(fusionDocIdLimit, 0);
{
- Fusion fusion(schema,
- index_dir4,
- fusionInputs,
- selector3,
- tuneFileIndexing,
- fileHeaderContext);
- bool fret6 = fusion.merge(sharedExecutor,
- std::make_shared<FlushToken>());
+ Fusion fusion(schema, index_dir4, fusionInputs, selector3, tuneFileIndexing, fileHeaderContext);
+ bool fret6 = fusion.merge(sharedExecutor, std::make_shared<FlushToken>());
ASSERT_TRUE(fret6);
}
diff --git a/searchcore/src/tests/proton/index/fusionrunner_test.cpp b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
index 5b9f35d74f2..7ee47a2ef91 100644
--- a/searchcore/src/tests/proton/index/fusionrunner_test.cpp
+++ b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
@@ -199,21 +199,15 @@ void Test::createIndex(const string &dir, uint32_t id, bool fusion) {
addDocument(doc_builder, memory_index, *_selector, id, id + 2, "baz");
addDocument(doc_builder, memory_index, *_selector, id, id + 3, "qux");
- const uint32_t docIdLimit =
- std::min(memory_index.getDocIdLimit(), _selector->getDocIdLimit());
- IndexBuilder index_builder(schema);
- index_builder.setPrefix(index_dir);
+ const uint32_t docIdLimit = std::min(memory_index.getDocIdLimit(), _selector->getDocIdLimit());
+ IndexBuilder index_builder(schema, index_dir, docIdLimit);
TuneFileIndexing tuneFileIndexing;
TuneFileAttributes tuneFileAttributes;
- index_builder.open(docIdLimit, memory_index.getNumWords(),
- MockFieldLengthInspector(),
- tuneFileIndexing,
- _fileHeaderContext);
+ index_builder.open(memory_index.getNumWords(), MockFieldLengthInspector(), tuneFileIndexing, _fileHeaderContext);
memory_index.dump(index_builder);
index_builder.close();
- _selector->extractSaveInfo(index_dir + "/selector")->
- save(tuneFileAttributes, _fileHeaderContext);
+ _selector->extractSaveInfo(index_dir + "/selector")->save(tuneFileAttributes, _fileHeaderContext);
}
set<uint32_t> readFusionIds(const string &dir) {
diff --git a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp
index b6153308fe7..219283dce04 100644
--- a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp
+++ b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp
@@ -36,10 +36,9 @@ MemoryIndexWrapper::flushToDisk(const vespalib::string &flushDir, uint32_t docId
{
const uint64_t numWords = _index.getNumWords();
_index.freeze(); // TODO(geirst): is this needed anymore?
- IndexBuilder indexBuilder(_index.getSchema());
- indexBuilder.setPrefix(flushDir);
+ IndexBuilder indexBuilder(_index.getSchema(), flushDir, docIdLimit);
SerialNumFileHeaderContext fileHeaderContext(_fileHeaderContext, serialNum);
- indexBuilder.open(docIdLimit, numWords, *this, _tuneFileIndexing, fileHeaderContext);
+ indexBuilder.open(numWords, *this, _tuneFileIndexing, fileHeaderContext);
_index.dump(indexBuilder);
indexBuilder.close();
}
diff --git a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
index 272d74cde2c..ca8eaa176a4 100644
--- a/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
+++ b/searchlib/src/tests/diskindex/fusion/fusion_test.cpp
@@ -25,7 +25,6 @@
#include <vespa/searchlib/util/filekit.h>
#include <vespa/vespalib/btree/btreenode.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
-#include <vespa/vespalib/btree/btreeroot.hpp>
#include <vespa/vespalib/util/gate.h>
#include <vespa/vespalib/util/destructor_callbacks.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
@@ -363,11 +362,11 @@ FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool dire
inv.invertDocument(12, *doc, {});
myPushDocument(inv);
- IndexBuilder ib(schema);
- vespalib::string dump2dir = prefix + "dump2";
- ib.setPrefix(dump2dir);
- uint32_t numDocs = 12 + 1;
- uint32_t numWords = fic.getNumUniqueWords();
+
+ const vespalib::string dump2dir = prefix + "dump2";
+ constexpr uint32_t numDocs = 12 + 1;
+ IndexBuilder ib(schema, dump2dir, numDocs);
+ const uint32_t numWords = fic.getNumUniqueWords();
MockFieldLengthInspector mock_field_length_inspector;
TuneFileIndexing tuneFileIndexing;
TuneFileSearch tuneFileSearch;
@@ -380,7 +379,7 @@ FusionTest::requireThatFusionIsWorking(const vespalib::string &prefix, bool dire
if (readmmap) {
tuneFileSearch._read.setWantMemoryMap();
}
- ib.open(numDocs, numWords, mock_field_length_inspector, tuneFileIndexing, fileHeaderContext);
+ ib.open(numWords, mock_field_length_inspector, tuneFileIndexing, fileHeaderContext);
fic.dump(ib);
ib.close();
@@ -475,8 +474,8 @@ void
FusionTest::make_simple_index(const vespalib::string &dump_dir, const IFieldLengthInspector &field_length_inspector)
{
FieldIndexCollection fic(_schema, field_length_inspector);
- uint32_t numDocs = 20;
- uint32_t numWords = 1000;
+ constexpr uint32_t numDocs = 20;
+ constexpr uint32_t numWords = 1000;
DocBuilder b(make_add_fields());
auto invertThreads = SequencedTaskExecutor::create(invert_executor, 2);
auto pushThreads = SequencedTaskExecutor::create(push_executor, 2);
@@ -487,11 +486,10 @@ FusionTest::make_simple_index(const vespalib::string &dump_dir, const IFieldLeng
inv.invertDocument(10, *doc10, {});
myPushDocument(inv);
- IndexBuilder ib(_schema);
+ IndexBuilder ib(_schema, dump_dir, numDocs);
TuneFileIndexing tuneFileIndexing;
DummyFileHeaderContext fileHeaderContext;
- ib.setPrefix(dump_dir);
- ib.open(numDocs, numWords, field_length_inspector, tuneFileIndexing, fileHeaderContext);
+ ib.open(numWords, field_length_inspector, tuneFileIndexing, fileHeaderContext);
fic.dump(ib);
ib.close();
}
@@ -503,8 +501,7 @@ FusionTest::try_merge_simple_indexes(const vespalib::string &dump_dir, const std
TuneFileIndexing tuneFileIndexing;
DummyFileHeaderContext fileHeaderContext;
SelectorArray selector(20, 0);
- Fusion fusion(_schema, dump_dir, sources, selector,
- tuneFileIndexing, fileHeaderContext);
+ Fusion fusion(_schema, dump_dir, sources, selector, tuneFileIndexing, fileHeaderContext);
fusion.set_force_small_merge_chunk(_force_small_merge_chunk);
return fusion.merge(executor, flush_token);
}
diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
index 42d55c8f769..5f8b6b2df48 100644
--- a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
+++ b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp
@@ -86,7 +86,7 @@ public:
_firstDoc(true)
{}
- virtual void startWord(vespalib::stringref word) override {
+ void startWord(vespalib::stringref word) override {
assert(_insideField);
assert(!_insideWord);
if (!_firstWord)
@@ -96,14 +96,14 @@ public:
_insideWord = true;
}
- virtual void endWord() override {
+ void endWord() override {
assert(_insideWord);
_ss << "]";
_firstWord = false;
_insideWord = false;
}
- virtual void startField(uint32_t fieldId) override {
+ void startField(uint32_t fieldId) override {
assert(!_insideField);
if (!_firstField) _ss << ",";
_ss << "f=" << fieldId << "[";
@@ -111,7 +111,7 @@ public:
_insideField = true;
}
- virtual void endField() override {
+ void endField() override {
assert(_insideField);
assert(!_insideWord);
_ss << "]";
@@ -119,7 +119,7 @@ public:
_insideField = false;
}
- virtual void add_document(const DocIdAndFeatures &features) override {
+ void add_document(const DocIdAndFeatures &features) override {
assert(_insideWord);
if (!_firstDoc) {
_ss << ",";
@@ -875,11 +875,10 @@ TEST_F(FieldIndexCollectionTest, require_that_dumping_words_with_no_docs_to_inde
b.toStr());
}
{
- search::diskindex::IndexBuilder b(schema);
- b.setPrefix("dump");
+ search::diskindex::IndexBuilder b(schema, "dump", 5);
TuneFileIndexing tuneFileIndexing;
DummyFileHeaderContext fileHeaderContext;
- b.open(5, 2, MockFieldLengthInspector(), tuneFileIndexing, fileHeaderContext);
+ b.open(2, MockFieldLengthInspector(), tuneFileIndexing, fileHeaderContext);
fic.dump(b);
b.close();
}
@@ -1224,14 +1223,10 @@ TEST_F(UriInverterTest, require_that_uri_indexing_is_working)
EXPECT_TRUE(itr->isAtEnd());
}
{
- search::diskindex::IndexBuilder dib(_schema);
- dib.setPrefix("urldump");
+ search::diskindex::IndexBuilder dib(_schema, "urldump", 11);
TuneFileIndexing tuneFileIndexing;
DummyFileHeaderContext fileHeaderContext;
- dib.open(11, _fic.getNumUniqueWords(),
- MockFieldLengthInspector(),
- tuneFileIndexing,
- fileHeaderContext);
+ dib.open(_fic.getNumUniqueWords(), MockFieldLengthInspector(), tuneFileIndexing, fileHeaderContext);
_fic.dump(dib);
dib.close();
}
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h b/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h
index 162ca512cec..f6316bd7db7 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h
@@ -17,12 +17,8 @@ class BitVectorFileWrite : public BitVectorIdxFileWrite
{
private:
using Parent = BitVectorIdxFileWrite;
-
std::unique_ptr<Fast_BufferedFile> _datFile;
-public:
-
-private:
- uint32_t _datHeaderLen;
+ uint32_t _datHeaderLen;
public:
BitVectorFileWrite(const BitVectorFileWrite &) = delete;
@@ -30,7 +26,7 @@ public:
BitVectorFileWrite& operator=(const BitVectorFileWrite &) = delete;
BitVectorFileWrite& operator=(const BitVectorFileWrite &&) = delete;
BitVectorFileWrite(BitVectorKeyScope scope);
- ~BitVectorFileWrite();
+ ~BitVectorFileWrite() override;
void open(const vespalib::string &name, uint32_t docIdLimit,
const TuneFileSeqWrite &tuneFileWrite,
@@ -53,16 +49,17 @@ class BitVectorCandidate
{
private:
std::vector<uint32_t, vespalib::allocator_large<uint32_t>> _array;
- uint64_t _numDocs;
- uint32_t _bitVectorLimit;
- BitVector::UP _bv;
+ BitVector::UP _bv;
+ uint64_t _numDocs;
+ const uint32_t _bitVectorLimit;
+
public:
BitVectorCandidate(uint32_t docIdLimit, uint32_t bitVectorLimit)
: _array(),
+ _bv(BitVector::create(docIdLimit)),
_numDocs(0u),
- _bitVectorLimit(bitVectorLimit),
- _bv(BitVector::create(docIdLimit))
+ _bitVectorLimit(bitVectorLimit)
{
_array.reserve(_bitVectorLimit);
}
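
The reshuffled BitVectorCandidate members keep the mem-initializer list in declaration order, which is the order C++ actually constructs members in, regardless of how the list is written. A small illustration on a hypothetical Buffer class of why that order matters when one member's initializer reads another:

#include <cstddef>
#include <vector>

// Non-static members are constructed in declaration order, not in the order
// they appear in the mem-initializer list. _capacity is declared before _data,
// so _data's initializer may safely read it; swapping the two declarations
// (while keeping this initializer list) would read _capacity before it exists.
class Buffer {
    const std::size_t _capacity;
    std::vector<int>  _data;
public:
    explicit Buffer(std::size_t capacity)
        : _capacity(capacity),
          _data(_capacity)
    {}
};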
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp
index d39270069eb..3442a610f4d 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp
@@ -17,17 +17,17 @@ using vespalib::getLastErrorString;
using common::FileHeaderContext;
FieldWriter::FieldWriter(uint32_t docIdLimit, uint64_t numWordIds)
- : _wordNum(noWordNum()),
- _prevDocId(0),
- _dictFile(),
+ : _dictFile(),
_posoccfile(),
_bvc(docIdLimit),
_bmapfile(BitVectorKeyScope::PERFIELD_WORDS),
- _docIdLimit(docIdLimit),
- _numWordIds(numWordIds),
_prefix(),
+ _word(),
+ _numWordIds(numWordIds),
_compactWordNum(0),
- _word()
+ _wordNum(noWordNum()),
+ _prevDocId(0),
+ _docIdLimit(docIdLimit)
{
}
@@ -162,12 +162,6 @@ FieldWriter::close()
}
void
-FieldWriter::setFeatureParams(const PostingListParams &params)
-{
- _posoccfile->setFeatureParams(params);
-}
-
-void
FieldWriter::getFeatureParams(PostingListParams &params)
{
_posoccfile->getFeatureParams(params);
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h
index bf62965719d..d541fd59be8 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h
@@ -18,36 +18,11 @@ namespace search::diskindex {
* and by the memory index dump code to write a field to disk.
*/
class FieldWriter {
-private:
- uint64_t _wordNum;
- uint32_t _prevDocId;
-
- static uint64_t noWordNum() { return 0u; }
public:
-
- using DictionaryFileSeqWrite = index::DictionaryFileSeqWrite;
-
- using PostingListFileSeqWrite = index::PostingListFileSeqWrite;
using DocIdAndFeatures = index::DocIdAndFeatures;
using Schema = index::Schema;
- using PostingListCounts = index::PostingListCounts;
using PostingListParams = index::PostingListParams;
- std::unique_ptr<DictionaryFileSeqWrite> _dictFile;
- std::unique_ptr<PostingListFileSeqWrite> _posoccfile;
-
-private:
- BitVectorCandidate _bvc;
- BitVectorFileWrite _bmapfile;
- uint32_t _docIdLimit;
- uint64_t _numWordIds;
- vespalib::string _prefix;
- uint64_t _compactWordNum;
- vespalib::string _word;
-
- void flush();
-
-public:
FieldWriter(const FieldWriter &rhs) = delete;
FieldWriter(const FieldWriter &&rhs) = delete;
FieldWriter &operator=(const FieldWriter &rhs) = delete;
@@ -78,9 +53,25 @@ public:
bool close();
- void setFeatureParams(const PostingListParams &params);
void getFeatureParams(PostingListParams &params);
static void remove(const vespalib::string &prefix);
+private:
+ using DictionaryFileSeqWrite = index::DictionaryFileSeqWrite;
+ using PostingListFileSeqWrite = index::PostingListFileSeqWrite;
+ using PostingListCounts = index::PostingListCounts;
+ std::unique_ptr<DictionaryFileSeqWrite> _dictFile;
+ std::unique_ptr<PostingListFileSeqWrite> _posoccfile;
+ BitVectorCandidate _bvc;
+ BitVectorFileWrite _bmapfile;
+ vespalib::string _prefix;
+ vespalib::string _word;
+ const uint64_t _numWordIds;
+ uint64_t _compactWordNum;
+ uint64_t _wordNum;
+ uint32_t _prevDocId;
+ const uint32_t _docIdLimit;
+ void flush();
+ static uint64_t noWordNum() { return 0u; }
};
}
diff --git a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
index f0964de23f5..6815db0ae0c 100644
--- a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
@@ -8,7 +8,6 @@
#include <vespa/searchlib/index/schemautil.h>
#include <vespa/searchlib/common/documentsummary.h>
#include <vespa/vespalib/io/fileutil.h>
-#include <vespa/vespalib/util/array.hpp>
#include <vespa/vespalib/util/error.h>
#include <filesystem>
@@ -55,17 +54,13 @@ public:
class IndexBuilder::FieldHandle {
private:
- bool _valid;
- const Schema *_schema; // Ptr to allow being std::vector member
- uint32_t _fieldId;
- IndexBuilder *_builder; // Ptr to allow being std::vector member
- FileHandle _file;
-
+ const Schema &_schema;
+ IndexBuilder &_builder;
+ FileHandle _file;
+ const uint32_t _fieldId;
+ const bool _valid;
public:
- FieldHandle(const Schema &schema,
- uint32_t fieldId,
- IndexBuilder *builder);
-
+ FieldHandle(const Schema &schema, uint32_t fieldId, IndexBuilder & builder, bool valid);
~FieldHandle();
void new_word(vespalib::stringref word);
@@ -80,7 +75,6 @@ public:
const FileHeaderContext &fileHeaderContext);
void close();
- void setValid() { _valid = true; }
bool getValid() const { return _valid; }
uint32_t getIndexId() const { return _fieldId; }
};
@@ -124,8 +118,7 @@ FileHandle::close()
bool closeRes = _fieldWriter->close();
_fieldWriter.reset();
if (!closeRes) {
- LOG(error,
- "Could not close field writer");
+ LOG(error, "Could not close field writer");
ret = false;
}
}
@@ -133,14 +126,12 @@ FileHandle::close()
(void) ret;
}
-IndexBuilder::FieldHandle::FieldHandle(const Schema &schema,
- uint32_t fieldId,
- IndexBuilder *builder)
- : _valid(false),
- _schema(&schema),
- _fieldId(fieldId),
+IndexBuilder::FieldHandle::FieldHandle(const Schema &schema, uint32_t fieldId, IndexBuilder &builder, bool valid)
+ : _schema(schema),
_builder(builder),
- _file()
+ _file(),
+ _fieldId(fieldId),
+ _valid(valid)
{
}
@@ -162,7 +153,7 @@ IndexBuilder::FieldHandle::add_document(const index::DocIdAndFeatures &features)
const Schema::IndexField &
IndexBuilder::FieldHandle::getSchemaField()
{
- return _schema->getIndexField(_fieldId);
+ return _schema.getIndexField(_fieldId);
}
const vespalib::string &
@@ -174,7 +165,7 @@ IndexBuilder::FieldHandle::getName()
vespalib::string
IndexBuilder::FieldHandle::getDir()
{
- return _builder->appendToPrefix(getName());
+ return _builder.appendToPrefix(getName());
}
void
@@ -183,11 +174,8 @@ IndexBuilder::FieldHandle::open(uint32_t docIdLimit, uint64_t numWordIds,
const TuneFileSeqWrite &tuneFileWrite,
const FileHeaderContext &fileHeaderContext)
{
- _file.open(getDir(),
- SchemaUtil::IndexIterator(*_schema, getIndexId()),
- docIdLimit, numWordIds,
- field_length_info,
- tuneFileWrite, fileHeaderContext);
+ _file.open(getDir(), SchemaUtil::IndexIterator(_schema, getIndexId()),
+ docIdLimit, numWordIds, field_length_info, tuneFileWrite, fileHeaderContext);
}
void
@@ -196,43 +184,50 @@ IndexBuilder::FieldHandle::close()
_file.close();
}
-IndexBuilder::IndexBuilder(const Schema &schema)
- : index::IndexBuilder(schema),
- _currentField(nullptr),
- _curDocId(noDocId()),
- _lowestOKDocId(1u),
- _curWord(),
- _inWord(false),
- _lowestOKFieldId(0u),
- _fields(),
- _prefix(),
- _docIdLimit(0u),
- _numWordIds(0u),
- _schema(schema)
-{
+std::vector<IndexBuilder::FieldHandle>
+IndexBuilder::extractFields(const Schema &schema, IndexBuilder & builder) {
+ std::vector<IndexBuilder::FieldHandle> fields;
+ fields.reserve(schema.getNumIndexFields());
// TODO: Filter for text indexes
- for (uint32_t i = 0, ie = schema.getNumIndexFields(); i < ie; ++i) {
+ for (uint32_t i = 0; i < schema.getNumIndexFields(); ++i) {
const Schema::IndexField &iField = schema.getIndexField(i);
- FieldHandle fh(schema, i, this);
// Only know how to handle string index for now.
- if (iField.getDataType() == DataType::STRING) {
- fh.setValid();
- }
- _fields.push_back(fh);
+ bool valid = (iField.getDataType() == DataType::STRING);
+ fields.emplace_back(schema, i, builder, valid);
}
+ return fields;
+}
+
+IndexBuilder::IndexBuilder(const Schema &schema, vespalib::stringref prefix, uint32_t docIdLimit)
+ : index::IndexBuilder(schema),
+ _schema(schema),
+ _fields(extractFields(schema, *this)),
+ _prefix(prefix),
+ _curWord(),
+ _docIdLimit(docIdLimit),
+ _curFieldId(-1),
+ _lowestOKFieldId(0u),
+ _curDocId(noDocId()),
+ _inWord(false)
+{
}
IndexBuilder::~IndexBuilder() = default;
+IndexBuilder::FieldHandle &
+IndexBuilder::currentField() {
+ assert(_curFieldId >= 0);
+ assert(_curFieldId < int32_t(_fields.size()));
+ return _fields[_curFieldId];
+}
void
IndexBuilder::startField(uint32_t fieldId)
{
assert(_curDocId == noDocId());
- assert(_currentField == nullptr);
+ assert(_curFieldId == -1);
assert(fieldId < _fields.size());
assert(fieldId >= _lowestOKFieldId);
- _currentField = &_fields[fieldId];
- assert(_currentField != nullptr);
+ _curFieldId = fieldId;
}
void
@@ -240,47 +235,37 @@ IndexBuilder::endField()
{
assert(_curDocId == noDocId());
assert(!_inWord);
- assert(_currentField != nullptr);
- _lowestOKFieldId = _currentField->getIndexId() + 1;
- _currentField = nullptr;
+ _lowestOKFieldId = currentField().getIndexId() + 1;
+ _curFieldId = -1;
}
void
IndexBuilder::startWord(vespalib::stringref word)
{
- assert(_currentField != nullptr);
assert(!_inWord);
// TODO: Check sort order
_curWord = word;
_inWord = true;
- _currentField->new_word(word);
+ currentField().new_word(word);
}
void
IndexBuilder::endWord()
{
assert(_inWord);
- assert(_currentField != nullptr);
+ assert(_curFieldId != -1);
_inWord = false;
- _lowestOKDocId = 1u;
}
void
IndexBuilder::add_document(const index::DocIdAndFeatures &features)
{
assert(_inWord);
- assert(_currentField != nullptr);
- _currentField->add_document(features);
-}
-
-void
-IndexBuilder::setPrefix(vespalib::stringref prefix)
-{
- _prefix = prefix;
+ currentField().add_document(features);
}
vespalib::string
-IndexBuilder::appendToPrefix(vespalib::stringref name)
+IndexBuilder::appendToPrefix(vespalib::stringref name) const
{
if (_prefix.empty()) {
return name;
@@ -289,15 +274,13 @@ IndexBuilder::appendToPrefix(vespalib::stringref name)
}
void
-IndexBuilder::open(uint32_t docIdLimit, uint64_t numWordIds,
+IndexBuilder::open(uint64_t numWordIds,
const IFieldLengthInspector &field_length_inspector,
const TuneFileIndexing &tuneFileIndexing,
const FileHeaderContext &fileHeaderContext)
{
std::vector<uint32_t> indexes;
- _docIdLimit = docIdLimit;
- _numWordIds = numWordIds;
if (!_prefix.empty()) {
std::filesystem::create_directory(std::filesystem::path(_prefix));
}
@@ -307,10 +290,9 @@ IndexBuilder::open(uint32_t docIdLimit, uint64_t numWordIds,
continue;
}
std::filesystem::create_directory(std::filesystem::path(fh.getDir()));
- fh.open(docIdLimit, numWordIds,
+ fh.open(_docIdLimit, numWordIds,
field_length_inspector.get_field_length_info(fh.getName()),
- tuneFileIndexing._write,
- fileHeaderContext);
+ tuneFileIndexing._write, fileHeaderContext);
indexes.push_back(fh.getIndexId());
}
vespalib::string schemaFile = appendToPrefix("schema.txt");
diff --git a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.h b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.h
index c29bbe2e28f..99e39ff4998 100644
--- a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.h
+++ b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.h
@@ -21,37 +21,8 @@ class BitVectorCandidate;
*/
class IndexBuilder : public index::IndexBuilder {
public:
- class FieldHandle;
-
- using Schema = index::Schema;
-private:
- // Text fields
- FieldHandle *_currentField;
- uint32_t _curDocId;
- uint32_t _lowestOKDocId;
- vespalib::string _curWord;
- bool _inWord;
- uint32_t _lowestOKFieldId;
- std::vector<FieldHandle> _fields; // Defined fields.
- vespalib::string _prefix;
- uint32_t _docIdLimit;
- uint64_t _numWordIds;
-
- const Schema &_schema;
-
- static uint32_t noDocId() {
- return std::numeric_limits<uint32_t>::max();
- }
-
- static uint64_t noWordNumHigh() {
- return std::numeric_limits<uint64_t>::max();
- }
-
-public:
- using WordDocElementWordPosFeatures = index::WordDocElementWordPosFeatures;
-
// Schema argument must live until IndexBuilder has been deleted.
- IndexBuilder(const Schema &schema);
+ IndexBuilder(const index::Schema &schema, vespalib::stringref prefix, uint32_t docIdLimit);
~IndexBuilder() override;
void startField(uint32_t fieldId) override;
@@ -59,17 +30,35 @@ public:
void startWord(vespalib::stringref word) override;
void endWord() override;
void add_document(const index::DocIdAndFeatures &features) override;
+ vespalib::string appendToPrefix(vespalib::stringref name) const;
- void setPrefix(vespalib::stringref prefix);
-
- vespalib::string appendToPrefix(vespalib::stringref name);
-
- void open(uint32_t docIdLimit, uint64_t numWordIds,
- const index::IFieldLengthInspector &field_length_inspector,
+ void open(uint64_t numWordIds, const index::IFieldLengthInspector &field_length_inspector,
const TuneFileIndexing &tuneFileIndexing,
const common::FileHeaderContext &fileHandleContext);
void close();
+private:
+ class FieldHandle;
+ const index::Schema &_schema;
+ std::vector<FieldHandle> _fields;
+ const vespalib::string _prefix;
+ vespalib::string _curWord;
+ const uint32_t _docIdLimit;
+ int32_t _curFieldId;
+ uint32_t _lowestOKFieldId;
+ uint32_t _curDocId;
+ bool _inWord;
+
+ static std::vector<IndexBuilder::FieldHandle> extractFields(const index::Schema &schema, IndexBuilder & builder);
+
+ static uint32_t noDocId() {
+ return std::numeric_limits<uint32_t>::max();
+ }
+
+ static uint64_t noWordNumHigh() {
+ return std::numeric_limits<uint64_t>::max();
+ }
+ FieldHandle & currentField();
};
}
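
Putting the call-site updates above together: the output prefix and docIdLimit move into the IndexBuilder constructor, and open() drops its docIdLimit parameter. A rough sketch of the resulting dump sequence, using only calls that appear in this diff; the template parameters stand in for the concrete schema, index, tuning and header-context types wired up in the tests, so treat it as an illustration rather than a drop-in helper:

#include <vespa/searchlib/diskindex/indexbuilder.h>
#include <cstdint>

template <typename SchemaT, typename MemoryIndexT, typename InspectorT,
          typename TuneT, typename HeaderCtxT, typename StringT>
void dump_memory_index(const SchemaT& schema, MemoryIndexT& memory_index,
                       const StringT& index_dir, const InspectorT& field_lengths,
                       const TuneT& tuneFileIndexing, const HeaderCtxT& fileHeaderContext)
{
    const uint32_t docIdLimit = memory_index.getDocIdLimit();
    const uint64_t numWords   = memory_index.getNumWords();
    // Prefix and docIdLimit are constructor arguments now ...
    search::diskindex::IndexBuilder ib(schema, index_dir, docIdLimit);
    // ... so open() only needs the word count plus tuning and header context.
    ib.open(numWords, field_lengths, tuneFileIndexing, fileHeaderContext);
    memory_index.dump(ib);
    ib.close();
}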
diff --git a/searchlib/src/vespa/searchlib/test/diskindex/testdiskindex.cpp b/searchlib/src/vespa/searchlib/test/diskindex/testdiskindex.cpp
index 4d7c67e1f82..b4ebfdc9629 100644
--- a/searchlib/src/vespa/searchlib/test/diskindex/testdiskindex.cpp
+++ b/searchlib/src/vespa/searchlib/test/diskindex/testdiskindex.cpp
@@ -36,16 +36,16 @@ struct Builder
{
search::diskindex::IndexBuilder _ib;
MockFieldLengthInspector _mock_field_length_inspector;
- TuneFileIndexing _tuneFileIndexing;
- DummyFileHeaderContext _fileHeaderContext;
- DocIdAndFeatures _features;
+ TuneFileIndexing _tuneFileIndexing;
+ DummyFileHeaderContext _fileHeaderContext;
+ DocIdAndFeatures _features;
Builder(const std::string &dir,
const Schema &s,
uint32_t docIdLimit,
uint64_t numWordIds,
bool directio)
- : _ib(s),
+ : _ib(s, dir, docIdLimit),
_tuneFileIndexing(),
_fileHeaderContext(),
_features()
@@ -54,14 +54,10 @@ struct Builder
_tuneFileIndexing._read.setWantDirectIO();
_tuneFileIndexing._write.setWantDirectIO();
}
- _ib.setPrefix(dir);
- _ib.open(docIdLimit, numWordIds, _mock_field_length_inspector, _tuneFileIndexing,
- _fileHeaderContext);
+ _ib.open(numWordIds, _mock_field_length_inspector, _tuneFileIndexing, _fileHeaderContext);
}
- void
- addDoc(uint32_t docId)
- {
+ void addDoc(uint32_t docId) {
_features.clear(docId);
_features.elements().emplace_back(0, 1, 1);
_features.elements().back().setNumOccs(1);
@@ -69,9 +65,7 @@ struct Builder
_ib.add_document(_features);
}
- void
- close()
- {
+ void close() {
_ib.close();
}
};
diff --git a/storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp b/storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp
index 41a655564f5..11c428645a7 100644
--- a/storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp
+++ b/storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp
@@ -4,7 +4,6 @@
#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/storage/common/content_bucket_space_repo.h>
#include <vespa/vespalib/util/exceptions.h>
-#include <cassert>
namespace storage {
@@ -30,13 +29,13 @@ ContentBucketDbMetrics::ContentBucketDbMetrics(metrics::MetricSet* owner)
ContentBucketDbMetrics::~ContentBucketDbMetrics() = default;
BucketSpaceMetrics::BucketSpaceMetrics(const vespalib::string& space_name, metrics::MetricSet* owner)
- : metrics::MetricSet("bucket_space", {{"bucketSpace", space_name}}, "", owner),
- buckets_total("buckets_total", {}, "Total number buckets present in the bucket space (ready + not ready)", this),
- docs("docs", {}, "Documents stored in the bucket space", this),
- bytes("bytes", {}, "Bytes stored across all documents in the bucket space", this),
- active_buckets("active_buckets", {}, "Number of active buckets in the bucket space", this),
- ready_buckets("ready_buckets", {}, "Number of ready buckets in the bucket space", this),
- bucket_db_metrics(this)
+ : metrics::MetricSet("bucket_space", {{"bucketSpace", space_name}}, "", owner),
+ buckets_total("buckets_total", {}, "Total number buckets present in the bucket space (ready + not ready)", this),
+ docs("docs", {}, "Documents stored in the bucket space", this),
+ bytes("bytes", {}, "Bytes stored across all documents in the bucket space", this),
+ active_buckets("active_buckets", {}, "Number of active buckets in the bucket space", this),
+ ready_buckets("ready_buckets", {}, "Number of ready buckets in the bucket space", this),
+ bucket_db_metrics(this)
{}
BucketSpaceMetrics::~BucketSpaceMetrics() = default;
diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java
index 42f9713c54e..d16822aa46e 100644
--- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java
+++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/impl/CliArguments.java
@@ -187,7 +187,7 @@ class CliArguments {
Compression compression() throws CliArgumentsException {
try {
- return stringValue(COMPRESSION).map(Compression::valueOf).orElse(none);
+ return stringValue(COMPRESSION).map(Compression::valueOf).orElse(null);
}
catch (IllegalArgumentException e) {
throw new CliArgumentsException("Invalid " + COMPRESSION + " argument: " + e.getMessage(), e);
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java
index b2672b4ebf3..639aebf7c46 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/ApacheCluster.java
@@ -49,7 +49,7 @@ class ApacheCluster implements Cluster {
new BasicHeader("Vespa-Client-Version", Vespa.VERSION));
private final Header gzipEncodingHeader = new BasicHeader(HttpHeaders.CONTENT_ENCODING, "gzip");
private final RequestConfig requestConfig;
- private final boolean gzip;
+ private final Compression compression;
private int someNumber = 0;
private final ExecutorService dispatchExecutor = Executors.newFixedThreadPool(8, t -> new Thread(t, "request-dispatch-thread"));
@@ -60,7 +60,7 @@ class ApacheCluster implements Cluster {
for (URI endpoint : builder.endpoints)
endpoints.add(new Endpoint(createHttpClient(builder), endpoint));
this.requestConfig = createRequestConfig(builder);
- this.gzip = builder.compression == Compression.gzip;
+ this.compression = builder.compression;
}
@Override
@@ -89,7 +89,7 @@ class ApacheCluster implements Cluster {
wrapped.headers().forEach((name, value) -> request.setHeader(name, value.get()));
if (wrapped.body() != null) {
byte[] body = wrapped.body();
- if (gzip) {
+ if (compression == Compression.gzip || compression == null && body.length > 512) {
request.setHeader(gzipEncodingHeader);
body = gzipped(body);
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
index 6886dc3d2b9..b364ba953eb 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/FeedClientBuilderImpl.java
@@ -51,7 +51,7 @@ public class FeedClientBuilderImpl implements FeedClientBuilder {
boolean benchmark = true;
boolean dryrun = false;
boolean speedTest = false;
- Compression compression = none;
+ Compression compression = null;
URI proxy;