summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java15
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java7
-rw-r--r--container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java16
-rw-r--r--container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsVisitor.java40
-rw-r--r--container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VisitorFactory.java2
-rw-r--r--container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java28
-rw-r--r--container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsVisitorTestCase.java21
-rw-r--r--document/src/tests/serialization/vespadocumentserializer_test.cpp18
-rw-r--r--document/src/vespa/document/serialization/vespadocumentserializer.cpp4
-rw-r--r--logd/src/apps/logd/main.cpp4
-rw-r--r--logd/src/logd/service.cpp2
-rw-r--r--logd/src/logd/service.h1
-rw-r--r--logd/src/tests/forward/forward.cpp8
-rw-r--r--metrics/src/tests/countmetrictest.cpp4
-rw-r--r--metrics/src/tests/loadmetrictest.cpp10
-rw-r--r--metrics/src/tests/metric_timer_test.cpp2
-rw-r--r--metrics/src/tests/metricmanagertest.cpp47
-rw-r--r--metrics/src/tests/metricsettest.cpp24
-rw-r--r--metrics/src/tests/metrictest.cpp14
-rw-r--r--metrics/src/tests/snapshottest.cpp34
-rw-r--r--metrics/src/tests/stresstest.cpp16
-rw-r--r--metrics/src/tests/summetrictest.cpp48
-rw-r--r--metrics/src/tests/valuemetrictest.cpp18
-rw-r--r--metrics/src/vespa/metrics/CMakeLists.txt4
-rw-r--r--metrics/src/vespa/metrics/countmetric.h9
-rw-r--r--metrics/src/vespa/metrics/countmetric.hpp11
-rw-r--r--metrics/src/vespa/metrics/jsonwriter.cpp4
-rw-r--r--metrics/src/vespa/metrics/loadmetric.hpp8
-rw-r--r--metrics/src/vespa/metrics/memoryconsumption.cpp18
-rw-r--r--metrics/src/vespa/metrics/memoryconsumption.h1
-rw-r--r--metrics/src/vespa/metrics/metric.cpp88
-rw-r--r--metrics/src/vespa/metrics/metric.h51
-rw-r--r--metrics/src/vespa/metrics/metricmanager.cpp21
-rw-r--r--metrics/src/vespa/metrics/metricmanager.h6
-rw-r--r--metrics/src/vespa/metrics/metricset.cpp23
-rw-r--r--metrics/src/vespa/metrics/metricset.h5
-rw-r--r--metrics/src/vespa/metrics/metricsnapshot.cpp9
-rw-r--r--metrics/src/vespa/metrics/metricsnapshot.h2
-rw-r--r--metrics/src/vespa/metrics/name_repo.cpp71
-rw-r--r--metrics/src/vespa/metrics/name_repo.h32
-rw-r--r--metrics/src/vespa/metrics/summetric.h2
-rw-r--r--metrics/src/vespa/metrics/summetric.hpp2
-rw-r--r--metrics/src/vespa/metrics/valuemetric.h7
-rw-r--r--metrics/src/vespa/metrics/valuemetric.hpp12
-rw-r--r--metrics/src/vespa/metrics/xmlwriter.cpp4
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/FileHelper.java177
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java248
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java58
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java5
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileHelper.java263
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java2
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java11
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/FileHelperTest.java324
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java85
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileHelperTest.java201
-rw-r--r--persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp17
-rw-r--r--searchcore/src/apps/fdispatch/fdispatch.cpp3
-rw-r--r--searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp2
-rw-r--r--searchcore/src/tests/proton/server/documentretriever_test.cpp38
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp132
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/executor_metrics.cpp8
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/legacy_attribute_metrics.cpp14
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/legacy_documentdb_metrics.cpp92
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/legacy_proton_metrics.cpp22
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/legacy_sessionmanager_metrics.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/memory_usage_metrics.cpp10
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/resource_usage_metrics.cpp16
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/sessionmanager_metrics.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/trans_log_server_metrics.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/engine/transport_metrics.cpp16
-rw-r--r--slobrok/src/apps/slobrok/slobrok.cpp4
-rw-r--r--slobrok/src/vespa/slobrok/server/sbenv.cpp6
-rw-r--r--staging_vespalib/src/vespa/vespalib/metrics/handle.h3
-rw-r--r--staging_vespalib/src/vespa/vespalib/metrics/name_collection.h2
-rw-r--r--storage/src/tests/common/metricstest.cpp2
-rw-r--r--storage/src/tests/storageserver/statereportertest.cpp2
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp22
-rw-r--r--storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp18
-rw-r--r--storage/src/vespa/storage/distributor/distributormetricsset.cpp12
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemetricsset.cpp55
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemetricsset.h2
-rw-r--r--storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp30
-rw-r--r--storage/src/vespa/storage/distributor/visitormetricsset.cpp6
-rw-r--r--storage/src/vespa/storage/frameworkimpl/status/statuswebserver.cpp4
-rw-r--r--storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp77
-rw-r--r--storage/src/vespa/storage/persistence/mergehandler.cpp7
-rw-r--r--storage/src/vespa/storage/storageserver/bouncer_metrics.cpp6
-rw-r--r--storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp8
-rw-r--r--storage/src/vespa/storage/storageserver/communicationmanager.cpp3
-rw-r--r--storage/src/vespa/storage/storageserver/communicationmanagermetrics.cpp18
-rw-r--r--storage/src/vespa/storage/storageserver/mergethrottler.cpp32
-rw-r--r--storage/src/vespa/storage/storageserver/storagemetricsset.cpp32
-rw-r--r--storage/src/vespa/storage/visiting/visitormetrics.cpp16
-rw-r--r--storage/src/vespa/storage/visiting/visitorthreadmetrics.h24
-rw-r--r--storageframework/src/vespa/storageframework/defaultimplementation/component/componentregisterimpl.cpp2
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java9
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java9
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java9
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/Tensor.java7
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/TensorType.java17
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java17
106 files changed, 1443 insertions, 1587 deletions
diff --git a/container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java b/container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java
index 805fa52c105..8220f715b92 100644
--- a/container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java
+++ b/container-accesslogging/src/main/java/com/yahoo/container/logging/LogFileHandler.java
@@ -277,7 +277,7 @@ public class LogFileHandler extends StreamHandler {
}
}
- private void triggerCompression(File oldFile) {
+ private void triggerCompression(File oldFile) throws InterruptedException {
try {
String oldFileName = oldFile.getPath();
String gzippedFileName = oldFileName + ".gz";
@@ -285,12 +285,17 @@ public class LogFileHandler extends StreamHandler {
StringBuilder cmd = new StringBuilder("gzip");
cmd.append(" < "). append(oldFileName).append(" > ").append(gzippedFileName);
Process p = r.exec(cmd.toString());
- NativeIO nativeIO = new NativeIO();
- nativeIO.dropFileFromCache(oldFile); // Drop from cache in case somebody else has a reference to it preventing from dying quickly.
- oldFile.delete();
- nativeIO.dropFileFromCache(new File(gzippedFileName));
// Detonator pattern: Think of all the fun we can have if gzip isn't what we
// think it is, if it doesn't return, etc, etc
+
+ int retval = p.waitFor();
+ NativeIO nativeIO = new NativeIO();
+ nativeIO.dropFileFromCache(oldFile); // Drop from cache in case somebody else has a reference to it preventing from dying quickly.
+ if (retval == 0) {
+ oldFile.delete();
+ nativeIO.dropFileFromCache(new File(gzippedFileName));
+ }
+
} catch (IOException e) {
// little we can do...
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
index 116db906755..aa04f6ca20b 100644
--- a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java
@@ -216,11 +216,16 @@ public class ClusterSearcher extends Searcher {
CacheParams cacheParams,
LegacyEmulationConfig emulConfig,
SummaryParameters docSumParams,
- DocumentdbInfoConfig documentdbInfoConfig) {
+ DocumentdbInfoConfig documentdbInfoConfig)
+ {
+ if (searchClusterConfig.searchdef().size() != 1) {
+ throw new IllegalArgumentException("Search clusters in streaming search shall only contain a single searchdefinition : " + searchClusterConfig.searchdef());
+ }
ClusterParams clusterParams = makeClusterParams(searchclusterIndex, emulConfig, 0);
VdsStreamingSearcher searcher = (VdsStreamingSearcher) VespaBackEndSearcher
.getSearcher("com.yahoo.vespa.streamingvisitors.VdsStreamingSearcher");
searcher.setSearchClusterConfigId(searchClusterConfig.rankprofiles().configid());
+ searcher.setDocumentType(searchClusterConfig.searchdef(0));
searcher.setStorageClusterRouteSpec(searchClusterConfig.storagecluster().routespec());
searcher.init(docSumParams, clusterParams, cacheParams, documentdbInfoConfig);
return searcher;
diff --git a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
index 53c170301fc..a6e4ddaafbc 100644
--- a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
+++ b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
@@ -56,14 +56,18 @@ public class VdsStreamingSearcher extends VespaBackEndSearcher {
private Route route;
/** The configId used to access the searchcluster. */
private String searchClusterConfigId = null;
+ private String documentType;
/** The route to the storage cluster. */
private String storageClusterRouteSpec = null;
- String getSearchClusterConfigId() { return searchClusterConfigId; }
- String getStorageClusterRouteSpec() { return storageClusterRouteSpec; }
+ private String getSearchClusterConfigId() { return searchClusterConfigId; }
+ private String getStorageClusterRouteSpec() { return storageClusterRouteSpec; }
public final void setSearchClusterConfigId(String clusterName) {
this.searchClusterConfigId = clusterName;
}
+ public final void setDocumentType(String documentType) {
+ this.documentType = documentType;
+ }
public final void setStorageClusterRouteSpec(String storageClusterRouteSpec) {
this.storageClusterRouteSpec = storageClusterRouteSpec;
@@ -71,8 +75,8 @@ public class VdsStreamingSearcher extends VespaBackEndSearcher {
private static class VdsVisitorFactory implements VisitorFactory {
@Override
- public Visitor createVisitor(Query query, String searchCluster, Route route) {
- return new VdsVisitor(query, searchCluster, route);
+ public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType) {
+ return new VdsVisitor(query, searchCluster, route, documentType);
}
}
@@ -127,8 +131,8 @@ public class VdsStreamingSearcher extends VespaBackEndSearcher {
"only one of these query parameters to be set: streaming.userid, streaming.groupname, " +
"streaming.selection"));
}
- query.trace("Routing to search cluster " + getSearchClusterConfigId(), 4);
- Visitor visitor = visitorFactory.createVisitor(query, getSearchClusterConfigId(), route);
+ query.trace("Routing to search cluster " + getSearchClusterConfigId() + " and document type " + documentType, 4);
+ Visitor visitor = visitorFactory.createVisitor(query, getSearchClusterConfigId(), route, documentType);
try {
visitor.doSearch();
} catch (ParseException e) {
diff --git a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsVisitor.java b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsVisitor.java
index 27da8c0cf9b..cfbea04a110 100644
--- a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsVisitor.java
+++ b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsVisitor.java
@@ -105,16 +105,16 @@ class VdsVisitor extends VisitorDataHandler implements Visitor {
}
}
- public VdsVisitor(Query query, String searchCluster, Route route) {
- this.query = query;
- visitorSessionFactory = new MessageBusVisitorSessionFactory();
- setVisitorParameters(searchCluster, route);
+ public VdsVisitor(Query query, String searchCluster, Route route, String documentType) {
+ this(query, searchCluster, route, documentType, new MessageBusVisitorSessionFactory());
}
- public VdsVisitor(Query query, String searchCluster, Route route, VisitorSessionFactory visitorSessionFactory) {
+ public VdsVisitor(Query query, String searchCluster, Route route,
+ String documentType, VisitorSessionFactory visitorSessionFactory)
+ {
this.query = query;
this.visitorSessionFactory = visitorSessionFactory;
- setVisitorParameters(searchCluster, route);
+ setVisitorParameters(searchCluster, route, documentType);
}
private static int inferSessionTraceLevel(Query query) {
@@ -127,14 +127,28 @@ class VdsVisitor extends VisitorDataHandler implements Visitor {
return Math.max(query.getTraceLevel(), implicitLevel);
}
- private void setVisitorParameters(String searchCluster, Route route) {
- if (query.properties().getString(streamingUserid) != null) {
- params.setDocumentSelection("id.user==" + query.properties().getString(streamingUserid));
- } else if (query.properties().getString(streamingGroupname) != null) {
- params.setDocumentSelection("id.group==\"" + query.properties().getString(streamingGroupname) + "\"");
- } else if (query.properties().getString(streamingSelection) != null) {
- params.setDocumentSelection(query.properties().getString(streamingSelection));
+ private static String createSelectionString(String documentType, String selection) {
+ if ((selection == null) || selection.isEmpty()) return documentType;
+
+ StringBuilder sb = new StringBuilder(documentType);
+ sb.append(" and ( ").append(selection).append(" )");
+ return sb.toString();
+ }
+
+ private String createQuerySelectionString() {
+ String s = query.properties().getString(streamingUserid);
+ if (s != null) {
+ return "id.user==" + s;
}
+ s = query.properties().getString(streamingGroupname);
+ if (s != null) {
+ return "id.group==\"" + s + "\"";
+ }
+ return query.properties().getString(streamingSelection);
+ }
+
+ private void setVisitorParameters(String searchCluster, Route route, String documentType) {
+ params.setDocumentSelection(createSelectionString(documentType, createQuerySelectionString()));
params.setTimeoutMs(query.getTimeout()); // Per bucket visitor timeout
params.setSessionTimeoutMs(query.getTimeout());
params.setVisitorLibrary("searchvisitor");
diff --git a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VisitorFactory.java b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VisitorFactory.java
index 25d4bbc689d..9762d05bf45 100644
--- a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VisitorFactory.java
+++ b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VisitorFactory.java
@@ -10,5 +10,5 @@ import com.yahoo.search.Query;
* @author <a href="mailto:ulf@yahoo-inc.com">Ulf Carlin</a>
*/
interface VisitorFactory {
- public Visitor createVisitor(Query query, String searchCluster, Route route);
+ public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType);
}
diff --git a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java
index c8c653d67ed..0d58b10fde4 100644
--- a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java
+++ b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java
@@ -4,12 +4,14 @@ package com.yahoo.vespa.streamingvisitors;
import com.yahoo.config.subscription.ConfigGetter;
import com.yahoo.document.select.parser.TokenMgrException;
import com.yahoo.messagebus.routing.Route;
+import com.yahoo.prelude.fastsearch.CacheKey;
+import com.yahoo.prelude.fastsearch.CacheParams;
+import com.yahoo.prelude.fastsearch.ClusterParams;
import com.yahoo.prelude.fastsearch.DocumentdbInfoConfig;
import com.yahoo.document.select.parser.ParseException;
import com.yahoo.fs4.QueryPacket;
-import com.yahoo.prelude.Ping;
-import com.yahoo.prelude.Pong;
-import com.yahoo.prelude.fastsearch.*;
+import com.yahoo.prelude.fastsearch.SummaryParameters;
+import com.yahoo.prelude.fastsearch.TimeoutException;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
import com.yahoo.search.result.Hit;
@@ -25,7 +27,11 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
/**
* @author <a href="mailto:ulf@yahoo-inc.com">Ulf Carlin</a>
@@ -35,18 +41,20 @@ public class VdsStreamingSearcherTestCase {
public static final String GROUPDOC_ID_PREFIX = "groupdoc:namespace:group1:userspecific";
private static class MockVisitor implements Visitor {
- private Query query;
- String searchCluster;
- Route route;
+ private final Query query;
+ final String searchCluster;
+ final Route route;
+ final String documentType;
int totalHitCount;
private final List<SearchResult.Hit> hits = new ArrayList<>();
private final Map<String, DocumentSummary.Summary> summaryMap = new HashMap<>();
private final List<Grouping> groupings = new ArrayList<>();
- MockVisitor(Query query, String searchCluster, Route route) {
+ MockVisitor(Query query, String searchCluster, Route route, String documentType) {
this.query = query;
this.searchCluster = searchCluster;
this.route = route;
+ this.documentType = documentType;
}
@Override
@@ -124,8 +132,8 @@ public class VdsStreamingSearcherTestCase {
private static class MockVisitorFactory implements VisitorFactory {
@Override
- public Visitor createVisitor(Query query, String searchCluster, Route route) {
- return new MockVisitor(query, searchCluster, route);
+ public Visitor createVisitor(Query query, String searchCluster, Route route, String documentType) {
+ return new MockVisitor(query, searchCluster, route, documentType);
}
}
diff --git a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsVisitorTestCase.java b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsVisitorTestCase.java
index 6964fd3b5e8..f75b78fd36f 100644
--- a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsVisitorTestCase.java
+++ b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsVisitorTestCase.java
@@ -224,17 +224,17 @@ public class VdsVisitorTestCase {
return query;
}
- private void verifyVisitorParameters(VisitorParameters params, QueryArguments qa, String searchCluster, Route route) {
+ private void verifyVisitorParameters(VisitorParameters params, QueryArguments qa, String searchCluster, String docType, Route route) {
//System.out.println("params="+params);
// Verify parameters based on properties
if (qa.userId != null) {
- assertEquals("id.user=="+qa.userId, params.getDocumentSelection());
+ assertEquals(docType + " and ( id.user=="+qa.userId + " )", params.getDocumentSelection());
} else if (qa.groupName != null) {
- assertEquals("id.group==\""+qa.groupName+"\"", params.getDocumentSelection());
- } else if (qa.selection != null) {
- assertEquals(qa.selection, params.getDocumentSelection());
+ assertEquals(docType + " and ( id.group==\""+qa.groupName+"\" )", params.getDocumentSelection());
+ } else if ((qa.selection == null) || qa.selection.isEmpty()) {
+ assertEquals(docType, params.getDocumentSelection());
} else {
- assertEquals("", params.getDocumentSelection());
+ assertEquals(docType + " and ( " + qa.selection + " )", params.getDocumentSelection());
}
assertEquals(qa.headersOnly, params.getVisitHeadersOnly());
assertEquals(qa.from, params.getFromTimestamp());
@@ -383,6 +383,9 @@ public class VdsVisitorTestCase {
qa.maxBucketsPerVisitor = 0;
qa.loadTypeName = null; // default loadTypeName, non-default priority
verifyVisitorOk(factory, qa, route, searchCluster);
+ qa.selection = "";
+
+ verifyVisitorOk(factory, qa, route, searchCluster);
// Userdoc and lots of non-default parameters
qa.setNonDefaults();
@@ -407,15 +410,15 @@ public class VdsVisitorTestCase {
}
private void verifyVisitorOk(MockVisitorSessionFactory factory, QueryArguments qa, Route route, String searchCluster) throws Exception {
- VdsVisitor visitor = new VdsVisitor(buildQuery(qa), searchCluster, route, factory);
+ VdsVisitor visitor = new VdsVisitor(buildQuery(qa), searchCluster, route, "mytype", factory);
visitor.doSearch();
- verifyVisitorParameters(factory.getParams(), qa, searchCluster, route);
+ verifyVisitorParameters(factory.getParams(), qa, searchCluster, "mytype", route);
supplyResults(visitor);
verifyResults(qa, visitor);
}
private void verifyVisitorFails(MockVisitorSessionFactory factory, QueryArguments qa, Route route, String searchCluster) throws Exception {
- VdsVisitor visitor = new VdsVisitor(buildQuery(qa), searchCluster, route, factory);
+ VdsVisitor visitor = new VdsVisitor(buildQuery(qa), searchCluster, route, "mytype", factory);
try {
visitor.doSearch();
assertTrue("Visitor did not fail", false);
diff --git a/document/src/tests/serialization/vespadocumentserializer_test.cpp b/document/src/tests/serialization/vespadocumentserializer_test.cpp
index c12a4966bcb..624cdeff04e 100644
--- a/document/src/tests/serialization/vespadocumentserializer_test.cpp
+++ b/document/src/tests/serialization/vespadocumentserializer_test.cpp
@@ -179,6 +179,11 @@ void serializeAndDeserialize(const T& value, nbostream &stream,
EXPECT_EQUAL(value, read_value);
}
stream.adjustReadPos(-serialized_size);
+ nbostream stream2;
+ VespaDocumentSerializer serializer2(stream2);
+ serializer2.write(read_value);
+ EXPECT_EQUAL(serialized_size, stream2.size());
+ EXPECT_EQUAL(0, memcmp(stream.peek() + start_size, stream2.peek(), serialized_size));
}
template<typename T>
@@ -514,6 +519,19 @@ void checkStructSerialization(const StructFieldValue &value,
EXPECT_EQUAL(60u, element2_size);
}
+TEST("requireThatEmptyStructCanBeSerialized") {
+ StructDataType structType(getStructDataType());
+ StructFieldValue value(structType);
+ nbostream stream;
+ serializeAndDeserialize(value, stream);
+ uint32_t data_size;
+ uint8_t compression_type;
+ uint8_t field_count;
+ stream >> data_size >> compression_type >> field_count;
+ EXPECT_EQUAL(0u, data_size);
+ EXPECT_EQUAL(0u, field_count);
+}
+
TEST("requireThatUncompressedStructFieldValueCanBeSerialized") {
StructDataType structType(getStructDataType());
StructFieldValue value = getStructFieldValue(structType);
diff --git a/document/src/vespa/document/serialization/vespadocumentserializer.cpp b/document/src/vespa/document/serialization/vespadocumentserializer.cpp
index a309fdd3500..28192cd6a4e 100644
--- a/document/src/vespa/document/serialization/vespadocumentserializer.cpp
+++ b/document/src/vespa/document/serialization/vespadocumentserializer.cpp
@@ -153,8 +153,8 @@ void VespaDocumentSerializer::write(const Document &value,
void VespaDocumentSerializer::visit(const StructFieldValue &value)
{
- if (!structNeedsReserialization(value)) {
- const StructFieldValue::Chunks & chunks = value.getChunks();
+ const StructFieldValue::Chunks & chunks = value.getChunks();
+ if (!structNeedsReserialization(value) && chunks.size() > 0) {
assert(chunks.size() == 1);
writeUnchanged(chunks[0]);
} else {
diff --git a/logd/src/apps/logd/main.cpp b/logd/src/apps/logd/main.cpp
index 62f1e48b233..78e23d7464f 100644
--- a/logd/src/apps/logd/main.cpp
+++ b/logd/src/apps/logd/main.cpp
@@ -83,8 +83,8 @@ int main(int, char**)
LOG(error, "stopping on error: %s", ex.what());
EV_STOPPING("logdemon", "fatal error");
return 1;
- } catch (...) {
- LOG(error, "unknown exception");
+ } catch (std::exception & ex) {
+ LOG(error, "unknown exception: %s", ex.what());
EV_STOPPING("logdemon", "unknown error");
return 1;
}
diff --git a/logd/src/logd/service.cpp b/logd/src/logd/service.cpp
index 88d17a93b44..0227daeb803 100644
--- a/logd/src/logd/service.cpp
+++ b/logd/src/logd/service.cpp
@@ -15,7 +15,7 @@ unsigned long Component::defFwd = (unsigned long)-1;
Component::Component(const std::string & servicename, const std::string & name)
: _isforwarding(defFwd), _lastseen(0.0), _lastpid(0),
_myservice(servicename), _myname(name),
- _logctlname(name.substr(name.find('.')))
+ _logctlname(name.substr(std::min(name.size(), name.find('.'))))
{
assert(ns_log::Logger::NUM_LOGLEVELS < 32);
}
diff --git a/logd/src/logd/service.h b/logd/src/logd/service.h
index 65f580ee54e..e27387c6cae 100644
--- a/logd/src/logd/service.h
+++ b/logd/src/logd/service.h
@@ -36,6 +36,7 @@ public:
void remember(double t, int p) { _lastseen = t; _lastpid = p; }
double lastSeen() const { return _lastseen; }
double lastPid() const { return _lastpid; }
+ const std::string & getLogCtlName() const { return _logctlname; }
};
class Service
diff --git a/logd/src/tests/forward/forward.cpp b/logd/src/tests/forward/forward.cpp
index a2f1a8c8b7a..a8da40555c7 100644
--- a/logd/src/tests/forward/forward.cpp
+++ b/logd/src/tests/forward/forward.cpp
@@ -68,4 +68,12 @@ TEST_FF("require that forwarder does not forward if not set", Forwarder(m), Forw
f2.verifyForward(false);
}
+TEST("test that non-dotted logctlname is correctly parsed") {
+ EXPECT_EQUAL("", Component("a.b.c", "x").getLogCtlName());
+}
+
+TEST("test that dotted logctlname is correctly parsed") {
+ EXPECT_EQUAL(".y", Component("a.b.c", "x.y").getLogCtlName());
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/metrics/src/tests/countmetrictest.cpp b/metrics/src/tests/countmetrictest.cpp
index 8ef551ead53..af94f78dfdf 100644
--- a/metrics/src/tests/countmetrictest.cpp
+++ b/metrics/src/tests/countmetrictest.cpp
@@ -20,7 +20,7 @@ CPPUNIT_TEST_SUITE_REGISTRATION(CountMetricTest);
void CountMetricTest::testLongCountMetric()
{
- LongCountMetric m("test", "tag", "description");
+ LongCountMetric m("test", {{"tag"}}, "description");
m.set(100);
CPPUNIT_ASSERT_EQUAL(uint64_t(100), m.getValue());
m.inc(5);
@@ -32,7 +32,7 @@ void CountMetricTest::testLongCountMetric()
m.reset();
CPPUNIT_ASSERT_EQUAL(uint64_t(0), m.getValue());
- LongCountMetric n("m2", "", "desc");
+ LongCountMetric n("m2", {}, "desc");
n.set(6);
CPPUNIT_ASSERT_EQUAL(uint64_t(6), n.getValue());
diff --git a/metrics/src/tests/loadmetrictest.cpp b/metrics/src/tests/loadmetrictest.cpp
index 2bdb47add3f..5cbec2c4fae 100644
--- a/metrics/src/tests/loadmetrictest.cpp
+++ b/metrics/src/tests/loadmetrictest.cpp
@@ -48,7 +48,7 @@ LoadMetricTest::testNormalUsage()
LoadTypeSetImpl loadTypes;
loadTypes.add(32, "foo").add(1000, "bar");
LoadMetric<LongValueMetric> metric(
- loadTypes, LongValueMetric("put", "", "Put"));
+ loadTypes, LongValueMetric("put", {}, "Put"));
}
namespace {
@@ -56,8 +56,8 @@ namespace {
LongAverageMetric metric;
MyMetricSet(MetricSet* owner = 0)
- : MetricSet("tick", "", "", owner),
- metric("tack", "", "", this)
+ : MetricSet("tick", {}, "", owner),
+ metric("tack", {}, "", this)
{ }
MetricSet* clone(std::vector<Metric::UP> &ownerList, CopyType copyType,
@@ -80,7 +80,7 @@ LoadMetricTest::testClone(Metric::CopyType copyType)
{
LoadTypeSetImpl loadTypes;
loadTypes.add(32, "foo").add(1000, "bar");
- MetricSet top("top", "", "");
+ MetricSet top("top", {}, "");
MyMetricSet myset;
LoadMetric<MyMetricSet> metric(loadTypes, myset, &top);
metric[loadTypes["foo"]].metric.addValue(5);
@@ -110,7 +110,7 @@ LoadMetricTest::testAdding()
{
LoadTypeSetImpl loadTypes;
loadTypes.add(32, "foo").add(1000, "bar");
- MetricSet top("top", "", "");
+ MetricSet top("top", {}, "");
MyMetricSet myset;
LoadMetric<MyMetricSet> metric(loadTypes, myset, &top);
metric[loadTypes["foo"]].metric.addValue(5);
diff --git a/metrics/src/tests/metric_timer_test.cpp b/metrics/src/tests/metric_timer_test.cpp
index 0721f2202ac..0087da713b9 100644
--- a/metrics/src/tests/metric_timer_test.cpp
+++ b/metrics/src/tests/metric_timer_test.cpp
@@ -27,7 +27,7 @@ using namespace std::literals::chrono_literals;
template <typename MetricType>
void MetricTimerTest::do_test_metric_timer_for_metric_type() {
MetricTimer timer;
- MetricType metric("foo", "", "");
+ MetricType metric("foo", {}, "");
std::this_thread::sleep_for(5ms); // Guaranteed to be monotonic time
timer.stop(metric);
// getDoubleValue() is present for both long and double metric types
diff --git a/metrics/src/tests/metricmanagertest.cpp b/metrics/src/tests/metricmanagertest.cpp
index f5d3a77d671..b89dbad738b 100644
--- a/metrics/src/tests/metricmanagertest.cpp
+++ b/metrics/src/tests/metricmanagertest.cpp
@@ -68,10 +68,10 @@ struct SubMetricSet : public MetricSet
SubMetricSet::SubMetricSet(const Metric::String & name, MetricSet* owner)
- : MetricSet(name, "sub", "sub desc", owner),
- val1("val1", "tag4 snaptest", "val1 desc", this),
- val2("val2", "tag5", "val2 desc", this),
- valsum("valsum", "tag4 snaptest", "valsum desc", this)
+ : MetricSet(name, {{"sub"}}, "sub desc", owner),
+ val1("val1", {{"tag4"},{"snaptest"}}, "val1 desc", this),
+ val2("val2", {{"tag5"}}, "val2 desc", this),
+ valsum("valsum", {{"tag4"},{"snaptest"}}, "valsum desc", this)
{
valsum.addMetricToSum(val1);
valsum.addMetricToSum(val2);
@@ -91,16 +91,17 @@ struct MultiSubMetricSet
};
MultiSubMetricSet::MultiSubMetricSet(MetricSet* owner)
- : set("multisub", "multisub", "", owner),
- count("count", "snaptest", "counter", &set),
- a("a", &set),
- b("b", &set),
- sum("sum", "snaptest", "", &set)
+ : set("multisub", {{"multisub"}}, "", owner),
+ count("count", {{"snaptest"}}, "counter", &set),
+ a("a", &set),
+ b("b", &set),
+ sum("sum", {{"snaptest"}}, "", &set)
{
sum.addMetricToSum(a);
sum.addMetricToSum(b);
}
- MultiSubMetricSet::~MultiSubMetricSet() { }
+
+MultiSubMetricSet::~MultiSubMetricSet() { }
struct TestMetricSet {
MetricSet set;
@@ -120,15 +121,15 @@ struct TestMetricSet {
};
TestMetricSet::TestMetricSet()
- : set("temp", "test", "desc of test set"),
- val1("val1", "tag1", "val1 desc", &set),
- val2("val2", "tag1 tag2", "val2 desc", &set),
- val3("val3", "tag2 tag3", "val3 desc", &set),
- val4("val4", "tag3", "val4 desc", &set),
- val5("val5", "tag2", "val5 desc", &set),
- val6("val6", "tag4 snaptest", "val6 desc", &set),
- val7("val7", "", "val7 desc", &set),
- val8("val8", "tag6", "val8 desc", &set),
+ : set("temp", {{"test"}}, "desc of test set"),
+ val1("val1", {{"tag1"}}, "val1 desc", &set),
+ val2("val2", {{"tag1"},{"tag2"}}, "val2 desc", &set),
+ val3("val3", {{"tag2"},{"tag3"}}, "val3 desc", &set),
+ val4("val4", {{"tag3"}}, "val4 desc", &set),
+ val5("val5", {{"tag2"}}, "val5 desc", &set),
+ val6("val6", {{"tag4"},{"snaptest"}}, "val6 desc", &set),
+ val7("val7", {}, "val7 desc", &set),
+ val8("val8", {{"tag6"}}, "val8 desc", &set),
val9("sub", &set),
val10(&set)
{ }
@@ -943,8 +944,8 @@ public:
CPPUNIT_ASSERT_EQUAL_MSG(_jsonText, dimensions.size(),
nthMetricDimensionCount(metricIndex));
for (auto& dim : dimensions) {
- CPPUNIT_ASSERT_EQUAL_MSG(_jsonText, std::string(dim.value),
- nthMetricDimension(metricIndex, dim.key));
+ CPPUNIT_ASSERT_EQUAL_MSG(_jsonText, std::string(dim.value()),
+ nthMetricDimension(metricIndex, dim.key()));
}
}
};
@@ -967,7 +968,7 @@ struct DimensionTestMetricSet : MetricSet
DimensionTestMetricSet::DimensionTestMetricSet(MetricSet* owner)
: MetricSet("temp", {{"foo", "megafoo"}, {"bar", "hyperbar"}}, "", owner),
- val1("val1", "tag1", "val1 desc", this),
+ val1("val1", {{"tag1"}}, "val1 desc", this),
val2("val2", {{"baz", "superbaz"}}, "val2 desc", this)
{ }
DimensionTestMetricSet::~DimensionTestMetricSet() { }
@@ -1039,7 +1040,7 @@ struct DimensionOverridableTestMetricSet : MetricSet
DimensionOverridableTestMetricSet(const std::string& dimValue,
MetricSet* owner = nullptr)
: MetricSet("temp", {{"foo", dimValue}}, "", owner),
- val("val", "", "val desc", this)
+ val("val", {}, "val desc", this)
{ }
};
diff --git a/metrics/src/tests/metricsettest.cpp b/metrics/src/tests/metricsettest.cpp
index 3deddfc2507..e4601a7b573 100644
--- a/metrics/src/tests/metricsettest.cpp
+++ b/metrics/src/tests/metricsettest.cpp
@@ -47,12 +47,12 @@ void
MetricSetTest::testNormalUsage()
{
// Set up some metrics to test..
- MetricSet set("a", "foo", "");
- DoubleValueMetric v1("c", "foo", "", &set);
- LongAverageMetric v2("b", "", "", &set);
- LongCountMetric v3("d", "bar", "", &set);
- MetricSet set2("e", "bar", "", &set);
- LongCountMetric v4("f", "foo", "", &set2);
+ MetricSet set("a", {{"foo"}}, "");
+ DoubleValueMetric v1("c", {{"foo"}}, "", &set);
+ LongAverageMetric v2("b", {}, "", &set);
+ LongCountMetric v3("d", {{"bar"}}, "", &set);
+ MetricSet set2("e", {{"bar"}}, "", &set);
+ LongCountMetric v4("f", {{"foo"}}, "", &set2);
// Give them some values
v1.addValue(4.2);
@@ -61,7 +61,7 @@ MetricSetTest::testNormalUsage()
v4.inc(3);
// Check that we can register through registerMetric function too.
- LongCountMetric v5("g", "", "");
+ LongCountMetric v5("g", {}, "");
set.registerMetric(v5);
v5.inc(3);
v5.dec();
@@ -77,7 +77,7 @@ MetricSetTest::testNormalUsage()
CPPUNIT_ASSERT(nonExistingCopy == 0);
// Check that paths are set
- MetricSet topSet("top", "", "");
+ MetricSet topSet("top", {}, "");
topSet.registerMetric(set);
CPPUNIT_ASSERT_EQUAL(vespalib::string("a"), set.getPath());
CPPUNIT_ASSERT_EQUAL(vespalib::string("a.c"), v1.getPath());
@@ -136,10 +136,10 @@ MetricSetTest::supportMultipleMetricsWithSameNameDifferentDimensions()
void
MetricSetTest::uniqueTargetMetricsAreAddedToMetricSet()
{
- MetricSet set1("a", "foo", "");
- LongCountMetric v1("wow", "foo", "", &set1);
- MetricSet set2("e", "bar", "");
- LongCountMetric v2("doge", "foo", "", &set2);
+ MetricSet set1("a", {{"foo"}}, "");
+ LongCountMetric v1("wow", {{"foo"}}, "", &set1);
+ MetricSet set2("e", {{"bar"}}, "");
+ LongCountMetric v2("doge", {{"foo"}}, "", &set2);
// Have to actually assign a value to metrics or they won't be carried over.
v1.inc();
diff --git a/metrics/src/tests/metrictest.cpp b/metrics/src/tests/metrictest.cpp
index 04f6b6e85b4..eb1f5943a87 100644
--- a/metrics/src/tests/metrictest.cpp
+++ b/metrics/src/tests/metrictest.cpp
@@ -41,7 +41,7 @@ void
MetricTest::testMetricsGetDimensionsAsPartOfMangledNameImpl()
{
MetricImpl m("test", {{"foo", "bar"}}, "description goes here");
- CPPUNIT_ASSERT_EQUAL(std::string("test{foo:bar}"), m.getMangledName());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("test{foo:bar}"), m.getMangledName());
}
template <typename MetricImpl>
@@ -51,7 +51,7 @@ MetricTest::testMangledNameMayContainMultipleDimensionsImpl()
MetricImpl m("test",
{{"flarn", "yarn"}, {"foo", "bar"}},
"description goes here");
- CPPUNIT_ASSERT_EQUAL(std::string("test{flarn:yarn,foo:bar}"),
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("test{flarn:yarn,foo:bar}"),
m.getMangledName());
}
@@ -88,7 +88,7 @@ MetricTest::mangledNameListsDimensionsInLexicographicOrder()
LongValueMetric m("test",
{{"xyz", "bar"}, {"abc", "foo"}, {"def", "baz"}},
"");
- CPPUNIT_ASSERT_EQUAL(std::string("test{abc:foo,def:baz,xyz:bar}"),
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("test{abc:foo,def:baz,xyz:bar}"),
m.getMangledName());
}
@@ -96,15 +96,15 @@ void
MetricTest::manglingDoesNotChangeOriginalMetricName()
{
LongValueMetric m("test", {{"foo", "bar"}}, "");
- CPPUNIT_ASSERT_EQUAL(std::string("test"), m.getName());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("test"), m.getName());
}
void
MetricTest::legacyTagsDoNotCreateMangledName()
{
- LongValueMetric m("test", "foo bar", "");
- CPPUNIT_ASSERT_EQUAL(std::string("test"), m.getName());
- CPPUNIT_ASSERT_EQUAL(std::string("test"), m.getMangledName());
+ LongValueMetric m("test", {{"foo"},{"bar"}}, "");
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("test"), m.getName());
+ CPPUNIT_ASSERT_EQUAL(vespalib::string("test"), m.getMangledName());
}
} // metrics
diff --git a/metrics/src/tests/snapshottest.cpp b/metrics/src/tests/snapshottest.cpp
index 75af75999d1..df9dd0a1e64 100644
--- a/metrics/src/tests/snapshottest.cpp
+++ b/metrics/src/tests/snapshottest.cpp
@@ -45,21 +45,21 @@ struct SubSubMetricSet : public MetricSet {
};
SubSubMetricSet::SubSubMetricSet(vespalib::stringref name, const LoadTypeSet& loadTypes_, MetricSet* owner)
- : MetricSet(name, "", "", owner),
+ : MetricSet(name, {}, "", owner),
loadTypes(loadTypes_),
incVal(1),
- count1("count1", "", "", this),
- count2("count2", "", "", this),
- loadCount(loadTypes, LongCountMetric("loadCount", "", ""), this),
- countSum("countSum", "", "", this),
- value1("value1", "", "", this),
- value2("value2", "", "", this),
- loadValue(loadTypes, DoubleValueMetric("loadValue", "", ""), this),
- valueSum("valueSum", "", "", this),
- average1("average1", "", "", this),
- average2("average2", "", "", this),
- loadAverage(loadTypes, DoubleAverageMetric("loadAverage", "", ""), this),
- averageSum("averageSum", "", "", this)
+ count1("count1", {}, "", this),
+ count2("count2", {}, "", this),
+ loadCount(loadTypes, LongCountMetric("loadCount", {}, ""), this),
+ countSum("countSum", {}, "", this),
+ value1("value1", {}, "", this),
+ value2("value2", {}, "", this),
+ loadValue(loadTypes, DoubleValueMetric("loadValue", {}, ""), this),
+ valueSum("valueSum", {}, "", this),
+ average1("average1", {}, "", this),
+ average2("average2", {}, "", this),
+ loadAverage(loadTypes, DoubleAverageMetric("loadAverage", {}, ""), this),
+ averageSum("averageSum", {}, "", this)
{
countSum.addMetricToSum(count1);
countSum.addMetricToSum(count2);
@@ -120,12 +120,12 @@ struct SubMetricSet : public MetricSet {
};
SubMetricSet::SubMetricSet(vespalib::stringref name, const LoadTypeSet& loadTypes_, MetricSet* owner)
- : MetricSet(name, "", "", owner),
+ : MetricSet(name, {}, "", owner),
loadTypes(loadTypes_),
set1("set1", loadTypes, this),
set2("set2", loadTypes, this),
loadSet(loadTypes, *std::unique_ptr<SubSubMetricSet>(new SubSubMetricSet("loadSet", loadTypes)), this),
- setSum("setSum", "", "", this)
+ setSum("setSum", {}, "", this)
{
setSum.addMetricToSum(set1);
setSum.addMetricToSum(set2);
@@ -167,12 +167,12 @@ struct TestMetricSet : public MetricSet {
TestMetricSet::TestMetricSet(vespalib::stringref name, const LoadTypeSet& loadTypes_, MetricSet* owner)
- : MetricSet(name, "", "", owner),
+ : MetricSet(name, {}, "", owner),
loadTypes(loadTypes_),
set1("set1", loadTypes, this),
set2("set2", loadTypes, this),
loadSet(loadTypes, *std::unique_ptr<SubMetricSet>(new SubMetricSet("loadSet", loadTypes)), this),
- setSum("setSum", "", "", this)
+ setSum("setSum", {}, "", this)
{
setSum.addMetricToSum(set1);
setSum.addMetricToSum(set2);
diff --git a/metrics/src/tests/stresstest.cpp b/metrics/src/tests/stresstest.cpp
index 0c06ca70aec..e20045ad2d4 100644
--- a/metrics/src/tests/stresstest.cpp
+++ b/metrics/src/tests/stresstest.cpp
@@ -38,13 +38,13 @@ struct InnerMetricSet : public MetricSet {
};
InnerMetricSet::InnerMetricSet(const char* name, const LoadTypeSet& lt, MetricSet* owner)
- : MetricSet(name, "", "", owner),
+ : MetricSet(name, {}, "", owner),
_loadTypes(lt),
- _count("count", "", "", this),
- _value1("value1", "", "", this),
- _value2("value2", "", "", this),
- _valueSum("valuesum", "", "", this),
- _load(lt, LongAverageMetric("load", "", ""), this)
+ _count("count", {}, "", this),
+ _value1("value1", {}, "", this),
+ _value2("value2", {}, "", this),
+ _valueSum("valuesum", {}, "", this),
+ _load(lt, LongAverageMetric("load", {}, ""), this)
{
_valueSum.addMetricToSum(_value1);
_valueSum.addMetricToSum(_value2);
@@ -75,10 +75,10 @@ struct OuterMetricSet : public MetricSet {
};
OuterMetricSet::OuterMetricSet(const LoadTypeSet& lt, MetricSet* owner)
- : MetricSet("outer", "", "", owner),
+ : MetricSet("outer", {}, "", owner),
_inner1("inner1", lt, this),
_inner2("inner2", lt, this),
- _innerSum("innersum", "", "", this),
+ _innerSum("innersum", {}, "", this),
_tmp("innertmp", lt, 0),
_load(lt, _tmp, this)
{
diff --git a/metrics/src/tests/summetrictest.cpp b/metrics/src/tests/summetrictest.cpp
index 8cc622d622a..32c5ee2c309 100644
--- a/metrics/src/tests/summetrictest.cpp
+++ b/metrics/src/tests/summetrictest.cpp
@@ -25,11 +25,11 @@ CPPUNIT_TEST_SUITE_REGISTRATION(SumMetricTest);
void
SumMetricTest::testLongCountMetric()
{
- MetricSet parent("parent", "", "");
- SumMetric<LongCountMetric> sum("foo", "", "foodesc", &parent);
+ MetricSet parent("parent", {}, "");
+ SumMetric<LongCountMetric> sum("foo", {}, "foodesc", &parent);
- LongCountMetric v1("ff", "", "", &parent);
- LongCountMetric v2("aa", "", "", &parent);
+ LongCountMetric v1("ff", {}, "", &parent);
+ LongCountMetric v2("aa", {}, "", &parent);
sum.addMetricToSum(v1);
sum.addMetricToSum(v2);
@@ -46,11 +46,11 @@ SumMetricTest::testLongCountMetric()
void
SumMetricTest::testAverageMetric() {
- MetricSet parent("parent", "", "");
- SumMetric<LongAverageMetric> sum("foo", "", "foodesc", &parent);
+ MetricSet parent("parent", {}, "");
+ SumMetric<LongAverageMetric> sum("foo", {}, "foodesc", &parent);
- LongAverageMetric v1("ff", "", "", &parent);
- LongAverageMetric v2("aa", "", "", &parent);
+ LongAverageMetric v1("ff", {}, "", &parent);
+ LongAverageMetric v2("aa", {}, "", &parent);
sum.addMetricToSum(v1);
sum.addMetricToSum(v2);
@@ -69,15 +69,15 @@ SumMetricTest::testAverageMetric() {
void
SumMetricTest::testMetricSet() {
- MetricSet parent("parent", "", "");
- SumMetric<MetricSet> sum("foo", "", "bar", &parent);
+ MetricSet parent("parent", {}, "");
+ SumMetric<MetricSet> sum("foo", {}, "bar", &parent);
- MetricSet set1("a", "", "", &parent);
- MetricSet set2("b", "", "", &parent);
- LongValueMetric v1("c", "", "", &set1);
- LongValueMetric v2("d", "", "", &set2);
- LongCountMetric v3("e", "", "", &set1);
- LongCountMetric v4("f", "", "", &set2);
+ MetricSet set1("a", {}, "", &parent);
+ MetricSet set2("b", {}, "", &parent);
+ LongValueMetric v1("c", {}, "", &set1);
+ LongValueMetric v2("d", {}, "", &set2);
+ LongCountMetric v3("e", {}, "", &set1);
+ LongCountMetric v4("f", {}, "", &set2);
sum.addMetricToSum(set1);
sum.addMetricToSum(set2);
@@ -100,12 +100,12 @@ SumMetricTest::testMetricSet() {
void
SumMetricTest::testRemove()
{
- MetricSet parent("parent", "", "");
- SumMetric<LongCountMetric> sum("foo", "", "foodesc", &parent);
+ MetricSet parent("parent", {}, "");
+ SumMetric<LongCountMetric> sum("foo", {}, "foodesc", &parent);
- LongCountMetric v1("ff", "", "", &parent);
- LongCountMetric v2("aa", "", "", &parent);
- LongCountMetric v3("zz", "", "", &parent);
+ LongCountMetric v1("ff", {}, "", &parent);
+ LongCountMetric v2("aa", {}, "", &parent);
+ LongCountMetric v3("zz", {}, "", &parent);
sum.addMetricToSum(v1);
sum.addMetricToSum(v2);
@@ -125,9 +125,9 @@ void
SumMetricTest::testStartValue()
{
MetricSnapshot snapshot("active");
- SumMetric<LongValueMetric> sum("foo", "", "foodesc",
+ SumMetric<LongValueMetric> sum("foo", {}, "foodesc",
&snapshot.getMetrics());
- LongValueMetric start("start", "", "", 0);
+ LongValueMetric start("start", {}, "", 0);
start.set(50);
sum.setStartValue(start);
@@ -138,7 +138,7 @@ SumMetricTest::testStartValue()
copy.recreateSnapshot(snapshot.getMetrics(), true);
snapshot.addToSnapshot(copy, 100);
- LongValueMetric value("value", "", "", &snapshot.getMetrics());
+ LongValueMetric value("value", {}, "", &snapshot.getMetrics());
sum.addMetricToSum(value);
value.set(10);
diff --git a/metrics/src/tests/valuemetrictest.cpp b/metrics/src/tests/valuemetrictest.cpp
index e8c144c96aa..da730b7c8a9 100644
--- a/metrics/src/tests/valuemetrictest.cpp
+++ b/metrics/src/tests/valuemetrictest.cpp
@@ -46,7 +46,7 @@ CPPUNIT_TEST_SUITE_REGISTRATION(ValueMetricTest);
void ValueMetricTest::testDoubleValueMetric()
{
- DoubleValueMetric m("test", "tag", "description");
+ DoubleValueMetric m("test", {{"tag"}}, "description");
m.addValue(100);
ASSERT_AVERAGE(m, 100, 100, 100, 1, 100);
m.addValue(100);
@@ -58,7 +58,7 @@ void ValueMetricTest::testDoubleValueMetric()
m.reset();
ASSERT_AVERAGE(m, 0, 0, 0, 0, 0);
- DoubleValueMetric n("m2", "", "desc");
+ DoubleValueMetric n("m2", {}, "desc");
n.addValue(60);
ASSERT_AVERAGE(n, 60, 60, 60, 1, 60);
@@ -94,7 +94,7 @@ void ValueMetricTest::testDoubleValueMetric()
void
ValueMetricTest::testDoubleValueMetricNotUpdatedOnNaN()
{
- DoubleValueMetric m("test", "tag", "description");
+ DoubleValueMetric m("test", {{"tag"}}, "description");
m.addValue(std::numeric_limits<double>::quiet_NaN());
CPPUNIT_ASSERT_EQUAL(std::string(), m.toString());
@@ -111,7 +111,7 @@ ValueMetricTest::testDoubleValueMetricNotUpdatedOnNaN()
void
ValueMetricTest::testDoubleValueMetricNotUpdatedOnInfinity()
{
- DoubleValueMetric m("test", "tag", "description");
+ DoubleValueMetric m("test", {{"tag"}}, "description");
m.addValue(std::numeric_limits<double>::infinity());
CPPUNIT_ASSERT_EQUAL(std::string(), m.toString());
@@ -127,7 +127,7 @@ ValueMetricTest::testDoubleValueMetricNotUpdatedOnInfinity()
void ValueMetricTest::testLongValueMetric()
{
- LongValueMetric m("test", "tag", "description");
+ LongValueMetric m("test", {{"tag"}}, "description");
m.addValue(100);
ASSERT_AVERAGE(m, 100, 100, 100, 1, 100);
m.addValue(100);
@@ -139,7 +139,7 @@ void ValueMetricTest::testLongValueMetric()
m.reset();
ASSERT_AVERAGE(m, 0, 0, 0, 0, 0);
- LongValueMetric n("m2", "", "desc");
+ LongValueMetric n("m2", {}, "desc");
n.addValue(60);
ASSERT_AVERAGE(n, 60, 60, 60, 1, 60);
@@ -174,7 +174,7 @@ void ValueMetricTest::testLongValueMetric()
void ValueMetricTest::testSmallAverage()
{
- DoubleValueMetric m("test", "tag", "description");
+ DoubleValueMetric m("test", {{"tag"}}, "description");
m.addValue(0.0001);
m.addValue(0.0002);
m.addValue(0.0003);
@@ -186,7 +186,7 @@ void ValueMetricTest::testSmallAverage()
}
void ValueMetricTest::testAddValueBatch() {
- DoubleValueMetric m("test", "tag", "description");
+ DoubleValueMetric m("test", {{"tag"}}, "description");
m.addValueBatch(100, 3, 80, 120);
ASSERT_AVERAGE(m, 100, 80, 120, 3, 100);
m.addValueBatch(123, 0, 12, 1234);
@@ -222,7 +222,7 @@ namespace {
void ValueMetricTest::testJson() {
MetricManager mm;
- DoubleValueMetric m("test", "tag", "description");
+ DoubleValueMetric m("test", {{"tag"}}, "description");
mm.registerMetric(mm.getMetricLock(), m);
vespalib::string expected("'\n"
diff --git a/metrics/src/vespa/metrics/CMakeLists.txt b/metrics/src/vespa/metrics/CMakeLists.txt
index 6eae8cd75e4..147e88cd61c 100644
--- a/metrics/src/vespa/metrics/CMakeLists.txt
+++ b/metrics/src/vespa/metrics/CMakeLists.txt
@@ -5,8 +5,8 @@ vespa_add_library(metrics
countmetricvalues.cpp
jsonwriter.cpp
loadmetric.cpp
- metric.cpp
memoryconsumption.cpp
+ metric.cpp
metricmanager.cpp
metricset.cpp
metricsnapshot.cpp
@@ -14,12 +14,14 @@ vespa_add_library(metrics
metricvalueset.cpp
namehash.cpp
printutils.cpp
+ name_repo.cpp
state_api_adapter.cpp
summetric.cpp
textwriter.cpp
valuemetric.cpp
valuemetricvalues.cpp
xmlwriter.cpp
+
INSTALL lib64
DEPENDS
)
diff --git a/metrics/src/vespa/metrics/countmetric.h b/metrics/src/vespa/metrics/countmetric.h
index 912fc52449b..675b5698049 100644
--- a/metrics/src/vespa/metrics/countmetric.h
+++ b/metrics/src/vespa/metrics/countmetric.h
@@ -28,12 +28,6 @@ struct AbstractCountMetric : public Metric {
virtual bool inUse(const MetricValueClass& v) const = 0;
protected:
- AbstractCountMetric(const String& name, const String& tags,
- const String& description, MetricSet* owner = 0)
- : Metric(name, tags, description, owner)
- {
- }
-
AbstractCountMetric(const String& name, Tags dimensions,
const String& description, MetricSet* owner = 0)
: Metric(name, std::move(dimensions), description, owner)
@@ -60,9 +54,6 @@ class CountMetric : public AbstractCountMetric
bool logIfUnset() const { return _values.hasFlag(LOG_IF_UNSET); }
public:
- CountMetric(const String& name, const String& tags,
- const String& description, MetricSet* owner = 0);
-
CountMetric(const String& name, Tags dimensions,
const String& description, MetricSet* owner = 0);
diff --git a/metrics/src/vespa/metrics/countmetric.hpp b/metrics/src/vespa/metrics/countmetric.hpp
index e150ce0d632..810ba7e371d 100644
--- a/metrics/src/vespa/metrics/countmetric.hpp
+++ b/metrics/src/vespa/metrics/countmetric.hpp
@@ -8,15 +8,6 @@
namespace metrics {
template <typename T, bool SumOnAdd>
-CountMetric<T, SumOnAdd>::CountMetric(const String& name, const String& tags,
- const String& desc, MetricSet* owner)
- : AbstractCountMetric(name, tags, desc, owner),
- _values()
-{
- _values.setFlag(LOG_IF_UNSET);
-}
-
-template <typename T, bool SumOnAdd>
CountMetric<T, SumOnAdd>::CountMetric(const String& name, Tags dimensions,
const String& desc, MetricSet* owner)
: AbstractCountMetric(name, std::move(dimensions), desc, owner),
@@ -160,7 +151,7 @@ CountMetric<T, SumOnAdd>::print(std::ostream& out, bool verbose,
(void) indent;
Values values(_values.getValues());
if (values._value == 0 && !verbose) return;
- out << this->_name << (SumOnAdd ? " count=" : " value=") << values._value;
+ out << this->getName() << (SumOnAdd ? " count=" : " value=") << values._value;
if (SumOnAdd) {
if (secondsPassed != 0) {
double avgDiff = values._value / ((double) secondsPassed);
diff --git a/metrics/src/vespa/metrics/jsonwriter.cpp b/metrics/src/vespa/metrics/jsonwriter.cpp
index 6ea585f0635..d890d725c8a 100644
--- a/metrics/src/vespa/metrics/jsonwriter.cpp
+++ b/metrics/src/vespa/metrics/jsonwriter.cpp
@@ -70,8 +70,8 @@ void
JsonWriter::writeDimensions(const DimensionSet& dimensions)
{
for (const auto& dimension : dimensions) {
- if (!dimension.key.empty() && !dimension.value.empty()) {
- _stream << dimension.key << dimension.value;
+ if (!dimension.key().empty() && !dimension.value().empty()) {
+ _stream << dimension.key() << dimension.value();
}
}
}
diff --git a/metrics/src/vespa/metrics/loadmetric.hpp b/metrics/src/vespa/metrics/loadmetric.hpp
index 0a66f985867..ce93c761a05 100644
--- a/metrics/src/vespa/metrics/loadmetric.hpp
+++ b/metrics/src/vespa/metrics/loadmetric.hpp
@@ -10,9 +10,9 @@ namespace metrics {
template<typename MetricType>
LoadMetric<MetricType>::LoadMetric(const LoadTypeSet& loadTypes, const MetricType& metric, MetricSet* owner)
- : MetricSet(metric.getName(), "", metric.getDescription(), owner),
+ : MetricSet(metric.getName(), {}, metric.getDescription(), owner),
_metrics(),
- _sum("sum", "loadsum sum", "Sum of all load metrics", this)
+ _sum("sum", {{"loadsum"},{"sum"}}, "Sum of all load metrics", this)
{
_metrics.resize(loadTypes.size());
// Currently, we only set tags and description on the metric set
@@ -35,9 +35,9 @@ LoadMetric<MetricType>::LoadMetric(const LoadTypeSet& loadTypes, const MetricTyp
template<typename MetricType>
LoadMetric<MetricType>::LoadMetric(const LoadMetric<MetricType>& other, MetricSet* owner)
- : MetricSet(other.getName(), "", other.getDescription(), owner),
+ : MetricSet(other.getName(), {}, other.getDescription(), owner),
_metrics(),
- _sum("sum", "loadsum sum", "Sum of all load metrics", this)
+ _sum("sum", {{"loadsum"},{"sum"}}, "Sum of all load metrics", this)
{
_metrics.resize(2 * other._metrics.size());
setTags(other.getTags());
diff --git a/metrics/src/vespa/metrics/memoryconsumption.cpp b/metrics/src/vespa/metrics/memoryconsumption.cpp
index 7cfda8f28af..0e69defa558 100644
--- a/metrics/src/vespa/metrics/memoryconsumption.cpp
+++ b/metrics/src/vespa/metrics/memoryconsumption.cpp
@@ -30,6 +30,24 @@ MemoryConsumption::getStringMemoryUsage(const std::string& s, uint32_t& uniqueCo
return s.capacity();
}
+
+uint32_t
+MemoryConsumption::getStringMemoryUsage(const vespalib::string& s, uint32_t& uniqueCount) {
+ ++_totalStringCount;
+ const char* internalString = s.c_str();
+ if (_seenStrings->find(internalString) != _seenStrings->end()) {
+ return 0;
+ }
+ ++uniqueCount;
+ _seenStrings->insert(internalString);
+ const void *p = &s;
+ if ((p <= internalString) && (internalString - sizeof(vespalib::string) < p)) {
+ // no extra space allocated outside object
+ return 0;
+ }
+ return s.capacity();
+}
+
void
MemoryConsumption::addSnapShotUsage(const std::string& name, uint32_t usage) {
_snapShotUsage->push_back(std::pair<std::string, uint32_t>(name, usage));
diff --git a/metrics/src/vespa/metrics/memoryconsumption.h b/metrics/src/vespa/metrics/memoryconsumption.h
index 8ac724b7a43..1cbd8f1deab 100644
--- a/metrics/src/vespa/metrics/memoryconsumption.h
+++ b/metrics/src/vespa/metrics/memoryconsumption.h
@@ -87,6 +87,7 @@ public:
/** Get memory usage of a string that is not included when doing sizeof */
uint32_t getStringMemoryUsage(const std::string& s, uint32_t& uniqueCount);
+ uint32_t getStringMemoryUsage(const vespalib::string& s, uint32_t& uniqueCount);
void addSnapShotUsage(const std::string& name, uint32_t usage);
uint32_t getTotalMemoryUsage() const;
diff --git a/metrics/src/vespa/metrics/metric.cpp b/metrics/src/vespa/metrics/metric.cpp
index e67398e4626..579e3bdfbe3 100644
--- a/metrics/src/vespa/metrics/metric.cpp
+++ b/metrics/src/vespa/metrics/metric.cpp
@@ -4,7 +4,7 @@
#include "countmetric.h"
#include "valuemetric.h"
#include "metricset.h"
-#include "namehash.h"
+#include "memoryconsumption.h"
#include <vespa/vespalib/text/stringtokenizer.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
@@ -37,23 +37,18 @@ MetricVisitor::visitMetric(const Metric&, bool)
}
namespace {
- Metric::Tags legacyTagStringToKeyedTags(const std::string& tagStr) {
- vespalib::StringTokenizer tokenizer(tagStr, " \t\r\f");
- Metric::Tags tags;
- std::transform(tokenizer.getTokens().begin(),
- tokenizer.getTokens().end(),
- std::back_inserter(tags),
- [](const std::string& s) { return Tag(s, ""); });
- return tags;
- }
std::string namePattern = "[a-zA-Z][_a-zA-Z0-9]*";
}
vespalib::Regexp Metric::_namePattern(namePattern);
+Tag::Tag(vespalib::stringref k)
+ : _key(NameRepo::tagKeyId(k)),
+ _value(TagValueId::empty_handle)
+{ }
Tag::Tag(vespalib::stringref k, vespalib::stringref v)
- : key(k),
- value(v)
+ : _key(NameRepo::tagKeyId(k)),
+ _value(NameRepo::tagValueId(v))
{ }
Tag::Tag(const Tag &) = default;
@@ -61,25 +56,12 @@ Tag & Tag::operator = (const Tag &) = default;
Tag::~Tag() {}
Metric::Metric(const String& name,
- const String& tags,
- const String& description,
- MetricSet* owner)
- : _name(name),
- _description(description),
- _tags(legacyTagStringToKeyedTags(tags)),
- _owner(nullptr) // Set later by registry
-{
- verifyConstructionParameters();
- assignMangledNameWithDimensions();
- registerWithOwnerIfRequired(owner);
-}
-
-Metric::Metric(const String& name,
Tags dimensions,
const String& description,
MetricSet* owner)
- : _name(name),
- _description(description),
+ : _name(NameRepo::metricId(name)),
+ _mangledName(_name),
+ _description(NameRepo::descriptionId(description)),
_tags(std::move(dimensions)),
_owner(nullptr)
{
@@ -92,6 +74,7 @@ Metric::Metric(const String& name,
Metric::Metric(const Metric& other, MetricSet* owner)
: Printable(other),
_name(other._name),
+ _mangledName(other._mangledName),
_description(other._description),
_tags(other._tags),
_owner(nullptr)
@@ -108,7 +91,7 @@ Metric::~Metric() { }
bool
Metric::tagsSpecifyAtLeastOneDimension(const Tags& tags) const
{
- auto hasNonEmptyTagValue = [](const Tag& t) { return !t.value.empty(); };
+ auto hasNonEmptyTagValue = [](const Tag& t) { return t.hasValue(); };
return std::any_of(tags.begin(), tags.end(), hasNonEmptyTagValue);
}
@@ -120,32 +103,33 @@ Metric::assignMangledNameWithDimensions()
return;
}
sortTagsInDeterministicOrder();
- _mangledName = createMangledNameWithDimensions();
+ vespalib::string mangled = createMangledNameWithDimensions();
+ _mangledName = NameRepo::metricId(mangled);
}
void
Metric::sortTagsInDeterministicOrder()
{
std::sort(_tags.begin(), _tags.end(), [](const Tag& a, const Tag& b) {
- return a.key < b.key;
+ return a.key() < b.key();
});
}
-std::string
+vespalib::string
Metric::createMangledNameWithDimensions() const
{
vespalib::asciistream s;
- s << _name << '{';
+ s << getName() << '{';
const size_t sz = _tags.size();
for (size_t i = 0; i < sz; ++i) {
const Tag& dimension(_tags[i]);
- if (dimension.value.empty()) {
+ if (dimension.value().empty()) {
continue;
}
if (i != 0) {
s << ',';
}
- s << dimension.key << ':' << dimension.value;
+ s << dimension.key() << ':' << dimension.value();
}
s << '}';
return s.str();
@@ -154,13 +138,13 @@ Metric::createMangledNameWithDimensions() const
void
Metric::verifyConstructionParameters()
{
- if (_name.size() == 0) {
+ if (getName().size() == 0) {
throw vespalib::IllegalArgumentException(
"Metric cannot have empty name", VESPA_STRLOC);
}
- if (!_namePattern.match(_name)) {
+ if (!_namePattern.match(getName())) {
throw vespalib::IllegalArgumentException(
- "Illegal metric name '" + _name + "'. Names must match pattern "
+ "Illegal metric name '" + getName() + "'. Names must match pattern "
+ namePattern, VESPA_STRLOC);
}
}
@@ -185,9 +169,9 @@ vespalib::string
Metric::getPath() const
{
if (_owner == 0 || _owner->_owner == 0) {
- return _name;
+ return getName();
} else {
- return _owner->getPath() + "." + _name;
+ return _owner->getPath() + "." + getName();
}
}
@@ -195,10 +179,10 @@ std::vector<Metric::String>
Metric::getPathVector() const
{
std::vector<String> result;
- result.push_back(_name);
+ result.push_back(getName());
const MetricSet* owner(_owner);
while (owner != 0) {
- result.push_back(owner->_name);
+ result.push_back(owner->getName());
owner = owner->_owner;
}
std::reverse(result.begin(), result.end());
@@ -209,7 +193,7 @@ bool
Metric::hasTag(const String& tag) const
{
return std::find_if(_tags.begin(), _tags.end(), [&](const Tag& t) {
- return t.key == tag;
+ return t.key() == tag;
}) != _tags.end();
}
@@ -217,9 +201,8 @@ void
Metric::addMemoryUsage(MemoryConsumption& mc) const
{
++mc._metricCount;
- mc._metricName += mc.getStringMemoryUsage(_name, mc._metricNameUnique);
- mc._metricDescription += mc.getStringMemoryUsage(
- _description, mc._metricDescriptionUnique);
+ mc._metricName += mc.getStringMemoryUsage(getName(), mc._metricNameUnique);
+ mc._metricDescription += mc.getStringMemoryUsage(getDescription(), mc._metricDescriptionUnique);
mc._metricTagCount += _tags.size();
// XXX figure out what we actually want to report from tags here...
// XXX we don't care about unique strings since they don't matter anymore.
@@ -228,21 +211,10 @@ Metric::addMemoryUsage(MemoryConsumption& mc) const
}
void
-Metric::updateNames(NameHash& hash) const
-{
- Metric& m(const_cast<Metric&>(*this));
- hash.updateName(m._name);
- hash.updateName(m._description);
- // Tags use vespalib::string which isn't refcounted under the hood and
- // use small string optimizations, meaning the implicit ref sharing hack
- // won't work for them anyway.
-}
-
-void
Metric::printDebug(std::ostream& out, const std::string& indent) const
{
(void) indent;
- out << "name=" << _name << ", instance=" << ((const void*) this)
+ out << "name=" << getName() << ", instance=" << ((const void*) this)
<< ", owner=" << ((const void*) _owner);
}
diff --git a/metrics/src/vespa/metrics/metric.h b/metrics/src/vespa/metrics/metric.h
index 91ad88fecf7..d1313c3ca1a 100644
--- a/metrics/src/vespa/metrics/metric.h
+++ b/metrics/src/vespa/metrics/metric.h
@@ -4,6 +4,7 @@
#include <vespa/vespalib/util/printable.h>
#include <vespa/vespalib/stllike/string.h>
#include <vespa/vespalib/util/regexp.h>
+#include "name_repo.h"
namespace metrics {
@@ -14,7 +15,6 @@ class MetricSet;
class MetricSnapshot;
class XmlWriterMetricVisitor;
class MemoryConsumption;
-class NameHash;
/** Implement class to visit metrics. */
struct MetricVisitor {
@@ -83,15 +83,22 @@ struct MetricVisitor {
*/
struct Tag
{
- vespalib::string key;
- vespalib::string value;
+ const vespalib::string& key() const { return NameRepo::tagKey(_key); }
+ const vespalib::string& value() const { return NameRepo::tagValue(_value); }
+ Tag(vespalib::stringref k);
Tag(vespalib::stringref k, vespalib::stringref v);
Tag(const Tag &);
Tag & operator = (const Tag &);
Tag(Tag &&) = default;
Tag & operator = (Tag &&) = default;
~Tag();
+
+ bool hasValue() const { return (_value != TagValueId::empty_handle); }
+
+private:
+ TagKeyId _key;
+ TagValueId _value;
};
class Metric : public vespalib::Printable
@@ -105,9 +112,6 @@ public:
static vespalib::Regexp _namePattern;
- Metric(const String& name, const String& tags,
- const String& description, MetricSet* owner = 0);
-
Metric(const String& name,
Tags dimensions,
const String& description,
@@ -120,17 +124,17 @@ public:
Metric & operator = (Metric && rhs) = default;
~Metric();
- const String& getName() const { return _name; }
+ const vespalib::string& getName() const { return NameRepo::metricName(_name); }
/**
* Get mangled name iff the metric contains any dimensions, otherwise
* the original metric name is returned.
*/
- const String& getMangledName() const {
- return (_mangledName.empty() ? _name : _mangledName);
+ const vespalib::string& getMangledName() const {
+ return NameRepo::metricName(_mangledName);
}
vespalib::string getPath() const;
std::vector<String> getPathVector() const;
- const String& getDescription() const { return _description; }
+ const vespalib::string& getDescription() const { return NameRepo::description(_description); }
const Tags& getTags() const { return _tags; }
/** Return whether there exists a tag with a key equal to 'tag' */
bool hasTag(const String& tag) const;
@@ -216,12 +220,15 @@ public:
/** Used by sum metric to alter name of cloned metric for sum. */
void setName(const String& name) {
- _name = name;
+ MetricNameId newName = NameRepo::metricId(name);
+ _name = newName;
assignMangledNameWithDimensions();
}
/** Used by sum metric to alter description of cloned metric for sum. */
- void setDescription(const String& d) { _description = d; }
+ void setDescription(const vespalib::string& d) {
+ _description = NameRepo::descriptionId(d);
+ }
/** Used by sum metric to alter tag of cloned metric for sum. */
void setTags(Tags tags) {
_tags = std::move(tags);
@@ -242,18 +249,6 @@ public:
virtual void addMemoryUsage(MemoryConsumption&) const;
- /**
- * Update names using the given name hash, to utilize ref counting.
- *
- * NOTE:
- * This is a hack that only works on GCC until they decide to finally break
- * ABI compatibility and remove that particular multicore-hostile feature
- * of their std::string implementation. If we want proper string ref
- * counting, all strings should be replaced with explicit string handles
- * and should only be created via a shared factory.
- */
- virtual void updateNames(NameHash&) const;
-
/** Print debug information of the metric tree. */
virtual void printDebug(std::ostream&, const std::string& indent="") const;
@@ -282,7 +277,7 @@ private:
*/
void sortTagsInDeterministicOrder();
- std::string createMangledNameWithDimensions() const;
+ vespalib::string createMangledNameWithDimensions() const;
void verifyConstructionParameters();
/**
@@ -292,9 +287,9 @@ private:
void registerWithOwnerIfRequired(MetricSet* owner);
protected:
- String _name;
- String _mangledName;
- String _description;
+ MetricNameId _name;
+ MetricNameId _mangledName;
+ DescriptionId _description;
std::vector<Tag> _tags;
MetricSet* _owner;
diff --git a/metrics/src/vespa/metrics/metricmanager.cpp b/metrics/src/vespa/metrics/metricmanager.cpp
index b8255a6532f..5b716e2698f 100644
--- a/metrics/src/vespa/metrics/metricmanager.cpp
+++ b/metrics/src/vespa/metrics/metricmanager.cpp
@@ -75,12 +75,12 @@ MetricManager::MetricManager(std::unique_ptr<Timer> timer)
_forceEventLogging(false),
_snapshotUnsetMetrics(false),
_consumerConfigChanged(false),
- _metricManagerMetrics("metricmanager", "", "Metrics for the metric manager upkeep tasks"),
- _periodicHookLatency("periodichooklatency", "", "Time in ms used to update a single periodic hook", &_metricManagerMetrics),
- _snapshotHookLatency("snapshothooklatency", "", "Time in ms used to update a single snapshot hook", &_metricManagerMetrics),
- _resetLatency("resetlatency", "", "Time in ms used to reset all metrics.", &_metricManagerMetrics),
- _snapshotLatency("snapshotlatency", "", "Time in ms used to take a snapshot", &_metricManagerMetrics),
- _sleepTimes("sleeptime", "", "Time in ms worker thread is sleeping", &_metricManagerMetrics)
+ _metricManagerMetrics("metricmanager", {}, "Metrics for the metric manager upkeep tasks"),
+ _periodicHookLatency("periodichooklatency", {}, "Time in ms used to update a single periodic hook", &_metricManagerMetrics),
+ _snapshotHookLatency("snapshothooklatency", {}, "Time in ms used to update a single snapshot hook", &_metricManagerMetrics),
+ _resetLatency("resetlatency", {}, "Time in ms used to reset all metrics.", &_metricManagerMetrics),
+ _snapshotLatency("snapshotlatency", {}, "Time in ms used to take a snapshot", &_metricManagerMetrics),
+ _sleepTimes("sleeptime", {}, "Time in ms worker thread is sleeping", &_metricManagerMetrics)
{
registerMetric(getMetricLock(), _metricManagerMetrics);
}
@@ -366,23 +366,15 @@ MetricManager::handleMetricsAltered(const MetricLockGuard & guard)
configMap[consumer.name] = ConsumerSpec::SP(new ConsumerSpec(std::move(consumerMetricBuilder._matchedMetrics)));
}
LOG(debug, "Recreating snapshots to include altered metrics");
- _activeMetrics.updateNames(_nameHash);
_totalMetrics->recreateSnapshot(_activeMetrics.getMetrics(),
_snapshotUnsetMetrics);
- _totalMetrics->updateNames(_nameHash);
for (uint32_t i=0; i<_snapshots.size(); ++i) {
_snapshots[i]->recreateSnapshot(_activeMetrics.getMetrics(),
_snapshotUnsetMetrics);
- _snapshots[i]->updateNames(_nameHash);
}
LOG(debug, "Setting new consumer config. Clearing dirty flag");
_consumerConfig.swap(configMap);
_consumerConfigChanged = false;
- LOG(debug, "Unified %u of %u strings configuring %" PRIu64 " consumers.",
- _nameHash.getUnifiedStringCount(),
- _nameHash.getCheckedStringCount(),
- _config->consumer.size());
- _nameHash.resetCounts();
}
namespace {
@@ -966,7 +958,6 @@ MetricManager::getMemoryConsumption(const MetricLockGuard & guard) const
_totalMetrics->addMemoryUsage(*mc);
postTotal = mc->getTotalMemoryUsage();
mc->addSnapShotUsage("total", postTotal - preTotal);
- _nameHash.addMemoryUsage(*mc);
return mc;
}
diff --git a/metrics/src/vespa/metrics/metricmanager.h b/metrics/src/vespa/metrics/metricmanager.h
index 423eb41a787..ec166529b94 100644
--- a/metrics/src/vespa/metrics/metricmanager.h
+++ b/metrics/src/vespa/metrics/metricmanager.h
@@ -48,7 +48,7 @@
#include <vespa/metrics/config-metricsmanager.h>
#include <vespa/metrics/metricset.h>
#include <vespa/metrics/metricsnapshot.h>
-#include <vespa/metrics/namehash.h>
+#include <vespa/metrics/memoryconsumption.h>
#include <vespa/metrics/valuemetric.h>
#include <vespa/metrics/updatehook.h>
#include <vespa/vespalib/stllike/hash_set.h>
@@ -125,10 +125,6 @@ private:
LongAverageMetric _snapshotLatency;
LongAverageMetric _sleepTimes;
- // Name hash trying to ensure string ref counting works as well as can be
- // expected
- NameHash _nameHash;
-
public:
MetricManager(std::unique_ptr<Timer> timer = std::unique_ptr<Timer>(new Timer));
~MetricManager();
diff --git a/metrics/src/vespa/metrics/metricset.cpp b/metrics/src/vespa/metrics/metricset.cpp
index cfaeb8b6f02..9fb731d7583 100644
--- a/metrics/src/vespa/metrics/metricset.cpp
+++ b/metrics/src/vespa/metrics/metricset.cpp
@@ -14,14 +14,6 @@ LOG_SETUP(".metrics.metricsset");
namespace metrics {
-MetricSet::MetricSet(const String& name, const String& tags,
- const String& description, MetricSet* owner)
- : Metric(name, tags, description, owner),
- _metricOrder(),
- _registrationAltered(false)
-{
-}
-
MetricSet::MetricSet(const String& name, Tags dimensions,
const String& description, MetricSet* owner)
: Metric(name, std::move(dimensions), description, owner),
@@ -262,7 +254,7 @@ MetricSet::addTo(Metric& other, std::vector<Metric::UP> *ownerList) const
std::vector<Metric*> newOrder;
newOrder.reserve(o._metricOrder.size() + newMetrics.size());
for (const Metric* metric : _metricOrder) {
- TmpString v(metric->getMangledName());
+ TmpString v = metric->getMangledName();
target = std::lower_bound(map2.begin(), map2.end(), v);
if ((target != map2.end()) && (target->first == v)) {
newOrder.push_back(target->second);
@@ -275,7 +267,7 @@ MetricSet::addTo(Metric& other, std::vector<Metric::UP> *ownerList) const
}
// If target had unique metrics, add them at the end
for (Metric* metric : o._metricOrder) {
- TmpString v(metric->getMangledName());
+ TmpString v = metric->getMangledName();
if ( ! std::binary_search(map1.begin(), map1.end(), v) ) {
LOG(debug, "Metric %s exist in one snapshot but not other."
"Order will be messed up. Adding target unique "
@@ -321,7 +313,7 @@ void
MetricSet::print(std::ostream& out, bool verbose,
const std::string& indent, uint64_t secondsPassed) const
{
- out << _name << ":";
+ out << getName() << ":";
for (const Metric* metric : _metricOrder) {
out << "\n" << indent << " ";
metric->print(out, verbose, indent + " ", secondsPassed);
@@ -350,15 +342,6 @@ MetricSet::addMemoryUsage(MemoryConsumption& mc) const
}
void
-MetricSet::updateNames(NameHash& hash) const
-{
- Metric::updateNames(hash);
- for (const Metric* metric : _metricOrder) {
- metric->updateNames(hash);
- }
-}
-
-void
MetricSet::printDebug(std::ostream& out, const std::string& indent) const
{
out << "set ";
diff --git a/metrics/src/vespa/metrics/metricset.h b/metrics/src/vespa/metrics/metricset.h
index 48027db374a..da3f2c26cde 100644
--- a/metrics/src/vespa/metrics/metricset.h
+++ b/metrics/src/vespa/metrics/metricset.h
@@ -22,9 +22,6 @@ class MetricSet : public Metric
// it was reset
public:
- MetricSet(const String& name, const String& tags,
- const String& description, MetricSet* owner = 0);
-
MetricSet(const String& name, Tags dimensions,
const String& description, MetricSet* owner = 0);
@@ -75,8 +72,6 @@ public:
bool used() const override;
void addMemoryUsage(MemoryConsumption&) const override;
- /** Update names using the given name hash, to utilize ref counting. */
- void updateNames(NameHash&) const override;
void printDebug(std::ostream&, const std::string& indent="") const override;
bool isMetricSet() const override { return true; }
void addToPart(Metric& m) const override { addTo(m, 0); }
diff --git a/metrics/src/vespa/metrics/metricsnapshot.cpp b/metrics/src/vespa/metrics/metricsnapshot.cpp
index 0b6ad8f9acd..86a33f0993f 100644
--- a/metrics/src/vespa/metrics/metricsnapshot.cpp
+++ b/metrics/src/vespa/metrics/metricsnapshot.cpp
@@ -12,7 +12,7 @@ MetricSnapshot::MetricSnapshot(const Metric::String& name)
_period(0),
_fromTime(0),
_toTime(0),
- _snapshot(new MetricSet("top", "", "")),
+ _snapshot(new MetricSet("top", {}, "")),
_metrics()
{
}
@@ -148,13 +148,6 @@ MetricSnapshotSet::addMemoryUsage(MemoryConsumption& mc) const
}
void
-MetricSnapshotSet::updateNames(NameHash& hash) const
-{
- if (_count != 1) _building->updateNames(hash);
- _current->updateNames(hash);
-}
-
-void
MetricSnapshotSet::setFromTime(time_t fromTime)
{
if (_count != 1) _building->setFromTime(fromTime);
diff --git a/metrics/src/vespa/metrics/metricsnapshot.h b/metrics/src/vespa/metrics/metricsnapshot.h
index 4cf8d113743..b00c001505c 100644
--- a/metrics/src/vespa/metrics/metricsnapshot.h
+++ b/metrics/src/vespa/metrics/metricsnapshot.h
@@ -69,7 +69,6 @@ public:
void recreateSnapshot(const MetricSet& metrics, bool copyUnset);
void addMemoryUsage(MemoryConsumption&) const;
- void updateNames(NameHash& hash) const { _snapshot->updateNames(hash); }
};
class MetricSnapshotSet {
@@ -107,7 +106,6 @@ public:
*/
void recreateSnapshot(const MetricSet& metrics, bool copyUnset);
void addMemoryUsage(MemoryConsumption&) const;
- void updateNames(NameHash& hash) const;
void setFromTime(time_t fromTime);
};
diff --git a/metrics/src/vespa/metrics/name_repo.cpp b/metrics/src/vespa/metrics/name_repo.cpp
new file mode 100644
index 00000000000..380120194d7
--- /dev/null
+++ b/metrics/src/vespa/metrics/name_repo.cpp
@@ -0,0 +1,71 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "name_repo.h"
+#include <vespa/vespalib/metrics/name_collection.h>
+
+using vespalib::metrics::NameCollection;
+
+namespace metrics {
+
+namespace {
+NameCollection metricNames;
+NameCollection descriptions;
+NameCollection tagKeys;
+NameCollection tagValues;
+}
+
+MetricNameId
+NameRepo::metricId(const vespalib::string &name)
+{
+ size_t id = metricNames.resolve(name);
+ return MetricNameId(id);
+}
+
+DescriptionId
+NameRepo::descriptionId(const vespalib::string &name)
+{
+ size_t id = descriptions.resolve(name);
+ return DescriptionId(id);
+}
+
+TagKeyId
+NameRepo::tagKeyId(const vespalib::string &name)
+{
+ size_t id = tagKeys.resolve(name);
+ return TagKeyId(id);
+}
+
+TagValueId
+NameRepo::tagValueId(const vespalib::string &value)
+{
+ size_t id = tagValues.resolve(value);
+ return TagValueId(id);
+}
+
+const vespalib::string&
+NameRepo::metricName(MetricNameId id)
+{
+ return metricNames.lookup(id.id());
+}
+
+const vespalib::string&
+NameRepo::description(DescriptionId id)
+{
+ return descriptions.lookup(id.id());
+}
+
+const vespalib::string&
+NameRepo::tagKey(TagKeyId id)
+{
+ return tagKeys.lookup(id.id());
+}
+
+const vespalib::string&
+NameRepo::tagValue(TagValueId id)
+{
+ return tagValues.lookup(id.id());
+}
+
+
+} // namespace metrics
+
diff --git a/metrics/src/vespa/metrics/name_repo.h b/metrics/src/vespa/metrics/name_repo.h
new file mode 100644
index 00000000000..44f8f622bff
--- /dev/null
+++ b/metrics/src/vespa/metrics/name_repo.h
@@ -0,0 +1,32 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/metrics/handle.h>
+
+namespace metrics {
+
+struct MetricNameIdTag {};
+struct DescriptionIdTag {};
+struct TagKeyIdTag {};
+struct TagValueIdTag {};
+
+using MetricNameId = vespalib::metrics::Handle<MetricNameIdTag>;
+using DescriptionId = vespalib::metrics::Handle<DescriptionIdTag>;
+using TagKeyId = vespalib::metrics::Handle<TagKeyIdTag>;
+using TagValueId = vespalib::metrics::Handle<TagValueIdTag>;
+
+struct NameRepo {
+ static MetricNameId metricId(const vespalib::string &name);
+ static DescriptionId descriptionId(const vespalib::string &name);
+ static TagKeyId tagKeyId(const vespalib::string &name);
+ static TagValueId tagValueId(const vespalib::string &value);
+
+ static const vespalib::string& metricName(MetricNameId id);
+ static const vespalib::string& description(DescriptionId id);
+ static const vespalib::string& tagKey(TagKeyId id);
+ static const vespalib::string& tagValue(TagValueId id);
+};
+
+} // namespace metrics
+
diff --git a/metrics/src/vespa/metrics/summetric.h b/metrics/src/vespa/metrics/summetric.h
index 3dc58d32bef..a9827bcca7d 100644
--- a/metrics/src/vespa/metrics/summetric.h
+++ b/metrics/src/vespa/metrics/summetric.h
@@ -42,7 +42,7 @@ private:
std::vector<const AddendMetric*> _metricsToSum;
public:
- SumMetric(const String& name, const String& tags, const String& description, MetricSet* owner = 0);
+ SumMetric(const String& name, Tags tags, const String& description, MetricSet* owner = 0);
SumMetric(const SumMetric<AddendMetric>& other, std::vector<Metric::UP> & ownerList, MetricSet* owner = 0);
~SumMetric();
diff --git a/metrics/src/vespa/metrics/summetric.hpp b/metrics/src/vespa/metrics/summetric.hpp
index 8e558eb94ee..fc3c3a5e019 100644
--- a/metrics/src/vespa/metrics/summetric.hpp
+++ b/metrics/src/vespa/metrics/summetric.hpp
@@ -24,7 +24,7 @@ SumMetric<AddendMetric>::visit(MetricVisitor& visitor,
}
template<typename AddendMetric>
-SumMetric<AddendMetric>::SumMetric(const String& name, const String& tags,
+SumMetric<AddendMetric>::SumMetric(const String& name, Tags tags,
const String& description, MetricSet* owner)
: Metric(name, tags, description, owner),
_startValue(),
diff --git a/metrics/src/vespa/metrics/valuemetric.h b/metrics/src/vespa/metrics/valuemetric.h
index 57baa36c26a..44a4e551cba 100644
--- a/metrics/src/vespa/metrics/valuemetric.h
+++ b/metrics/src/vespa/metrics/valuemetric.h
@@ -28,10 +28,6 @@ struct AbstractValueMetric : public Metric {
virtual bool summedAverage() const = 0;
protected:
- AbstractValueMetric(const String& name, const String& tags,
- const String& description, MetricSet* owner)
- : Metric(name, tags, description, owner) {}
-
AbstractValueMetric(const String& name, Tags dimensions,
const String& description, MetricSet* owner)
: Metric(name, std::move(dimensions), description, owner) {}
@@ -83,9 +79,6 @@ class ValueMetric : public AbstractValueMetric {
bool checkFinite(AvgVal, std::false_type) { return true; }
public:
- ValueMetric(const String &name, const String &tags,
- const String &description, MetricSet *owner = 0);
-
ValueMetric(const ValueMetric<AvgVal, TotVal, SumOnAdd> &,
CopyType, MetricSet *owner);
diff --git a/metrics/src/vespa/metrics/valuemetric.hpp b/metrics/src/vespa/metrics/valuemetric.hpp
index 60ed219edec..8b2ee532614 100644
--- a/metrics/src/vespa/metrics/valuemetric.hpp
+++ b/metrics/src/vespa/metrics/valuemetric.hpp
@@ -11,16 +11,6 @@ namespace metrics {
template<typename AvgVal, typename TotVal, bool SumOnAdd>
ValueMetric<AvgVal, TotVal, SumOnAdd>::ValueMetric(
- const String& name, const String& tags,
- const String& description, MetricSet* owner)
- : AbstractValueMetric(name, tags, description, owner),
- _values()
-{
- _values.setFlag(LOG_IF_UNSET);
-}
-
-template<typename AvgVal, typename TotVal, bool SumOnAdd>
-ValueMetric<AvgVal, TotVal, SumOnAdd>::ValueMetric(
const String& name, const Tags dimensions,
const String& description, MetricSet* owner)
: AbstractValueMetric(name, std::move(dimensions), description, owner),
@@ -213,7 +203,7 @@ ValueMetric<AvgVal, TotVal, SumOnAdd>::print(
(void) secondsPassed;
Values values(_values.getValues());
if (!inUse(values) && !verbose) return;
- out << this->_name << " average=" << (values._count == 0
+ out << this->getName() << " average=" << (values._count == 0
? 0 : static_cast<double>(values._total) / values._count)
<< " last=" << values._last;
if (!summedAverage()) {
diff --git a/metrics/src/vespa/metrics/xmlwriter.cpp b/metrics/src/vespa/metrics/xmlwriter.cpp
index 197ffdd1e3d..90b25621cf8 100644
--- a/metrics/src/vespa/metrics/xmlwriter.cpp
+++ b/metrics/src/vespa/metrics/xmlwriter.cpp
@@ -98,9 +98,9 @@ XmlWriter::printCommonXmlParts(const Metric& metric) const
if (_verbosity >= 3 && tags.size() > 0) {
std::ostringstream ost;
// XXX print tag values as well
- ost << tags[0].key;
+ ost << tags[0].key();
for (uint32_t i=1; i<tags.size(); ++i) {
- ost << "," << tags[i].key;
+ ost << "," << tags[i].key();
}
_xos << XmlAttribute("tags", ost.str());
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/FileHelper.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/FileHelper.java
deleted file mode 100644
index cf010121c2a..00000000000
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/FileHelper.java
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.node.admin.maintenance;
-
-import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.nio.file.Files;
-import java.nio.file.LinkOption;
-import java.nio.file.NoSuchFileException;
-import java.nio.file.Path;
-import java.nio.file.attribute.FileTime;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Optional;
-import java.util.logging.Logger;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-/**
- * @author freva
- */
-public class FileHelper {
- private static final Logger logger = Logger.getLogger(FileHelper.class.getSimpleName());
-
- /**
- * (Recursively) deletes files if they match all the criteria, also deletes empty directories.
- *
- * @param basePath Base path from where to start the search
- * @param maxAge Delete files older (last modified date) than maxAge
- * @param fileNameRegex Delete files where filename matches fileNameRegex
- * @param recursive Delete files in sub-directories (with the same criteria)
- */
- public static void deleteFiles(Path basePath, Duration maxAge, Optional<String> fileNameRegex, boolean recursive) throws IOException {
- Pattern fileNamePattern = fileNameRegex.map(Pattern::compile).orElse(null);
-
- for (Path path : listContentsOfDirectory(basePath)) {
- if (Files.isDirectory(path)) {
- if (recursive) {
- deleteFiles(path, maxAge, fileNameRegex, true);
- if (listContentsOfDirectory(path).isEmpty() && !Files.deleteIfExists(path)) {
- logger.warning("Could not delete directory: " + path.toAbsolutePath());
- }
- }
- } else if (isPatternMatchingFilename(fileNamePattern, path) &&
- isTimeSinceLastModifiedMoreThan(path, maxAge)) {
- if (! Files.deleteIfExists(path)) {
- logger.warning("Could not delete file: " + path.toAbsolutePath());
- }
- }
- }
- }
-
- /**
- * Deletes all files in target directory except the n most recent (by modified date)
- *
- * @param basePath Base path to delete from
- * @param nMostRecentToKeep Number of most recent files to keep
- */
- static void deleteFilesExceptNMostRecent(Path basePath, int nMostRecentToKeep) throws IOException {
- if (nMostRecentToKeep < 1) {
- throw new IllegalArgumentException("Number of files to keep must be a positive number");
- }
-
- List<Path> pathsInDeleteDir = listContentsOfDirectory(basePath).stream()
- .filter(Files::isRegularFile)
- .sorted(Comparator.comparing(FileHelper::getLastModifiedTime))
- .skip(nMostRecentToKeep)
- .collect(Collectors.toList());
-
- for (Path path : pathsInDeleteDir) {
- if (!Files.deleteIfExists(path)) {
- logger.warning("Could not delete file: " + path.toAbsolutePath());
- }
- }
- }
-
- static void deleteFilesLargerThan(Path basePath, long sizeInBytes) throws IOException {
- for (Path path : listContentsOfDirectory(basePath)) {
- if (Files.isDirectory(path)) {
- deleteFilesLargerThan(path, sizeInBytes);
- } else {
- if (Files.size(path) > sizeInBytes && !Files.deleteIfExists(path)) {
- logger.warning("Could not delete file: " + path.toAbsolutePath());
- }
- }
- }
- }
-
- /**
- * Deletes directories and their contents if they match all the criteria
- *
- * @param basePath Base path to delete the directories from
- * @param maxAge Delete directories older (last modified date) than maxAge
- * @param dirNameRegex Delete directories where directory name matches dirNameRegex
- */
- public static void deleteDirectories(Path basePath, Duration maxAge, Optional<String> dirNameRegex) throws IOException {
- Pattern dirNamePattern = dirNameRegex.map(Pattern::compile).orElse(null);
-
- for (Path path : listContentsOfDirectory(basePath)) {
- if (Files.isDirectory(path) && isPatternMatchingFilename(dirNamePattern, path)) {
- boolean mostRecentFileModifiedBeforeMaxAge = getMostRecentlyModifiedFileIn(path)
- .map(mostRecentlyModified -> isTimeSinceLastModifiedMoreThan(mostRecentlyModified, maxAge))
- .orElse(true);
-
- if (mostRecentFileModifiedBeforeMaxAge) {
- deleteFiles(path, Duration.ZERO, Optional.empty(), true);
- if (listContentsOfDirectory(path).isEmpty() && !Files.deleteIfExists(path)) {
- logger.warning("Could not delete directory: " + path.toAbsolutePath());
- }
- }
- }
- }
- }
-
- /**
- * Similar to rm -rf file:
- * - It's not an error if file doesn't exist
- * - If file is a directory, it and all content is removed
- * - For symlinks: Only the symlink is removed, not what the symlink points to
- */
- public static void recursiveDelete(Path basePath) throws IOException {
- if (Files.isDirectory(basePath)) {
- for (Path path : listContentsOfDirectory(basePath)) {
- recursiveDelete(path);
- }
- }
-
- Files.deleteIfExists(basePath);
- }
-
- public static void moveIfExists(Path from, Path to) throws IOException {
- if (Files.exists(from)) {
- Files.move(from, to);
- }
- }
-
- private static Optional<Path> getMostRecentlyModifiedFileIn(Path basePath) throws IOException {
- return Files.walk(basePath).max(Comparator.comparing(FileHelper::getLastModifiedTime));
- }
-
- private static boolean isTimeSinceLastModifiedMoreThan(Path path, Duration duration) {
- Instant nowMinusDuration = Instant.now().minus(duration);
- Instant lastModified = getLastModifiedTime(path).toInstant();
-
- // Return true also if they are equal for test stability
- // (lastModified <= nowMinusDuration) is the same as !(lastModified > nowMinusDuration)
- return !lastModified.isAfter(nowMinusDuration);
- }
-
- private static boolean isPatternMatchingFilename(Pattern pattern, Path path) {
- return pattern == null || pattern.matcher(path.getFileName().toString()).find();
- }
-
- /**
- * @return list all files in a directory, returns empty list if directory does not exist
- */
- public static List<Path> listContentsOfDirectory(Path basePath) {
- try (Stream<Path> directoryStream = Files.list(basePath)) {
- return directoryStream.collect(Collectors.toList());
- } catch (NoSuchFileException ignored) {
- return Collections.emptyList();
- } catch (IOException e) {
- throw new UncheckedIOException("Failed to list contents of directory " + basePath.toAbsolutePath(), e);
- }
- }
-
- static FileTime getLastModifiedTime(Path path) {
- try {
- return Files.getLastModifiedTime(path, LinkOption.NOFOLLOW_LINKS);
- } catch (IOException e) {
- throw new UncheckedIOException("Failed to get last modified time of " + path.toAbsolutePath(), e);
- }
- }
-}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
index 236415d1bcd..1affe890eee 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
@@ -1,23 +1,17 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.maintenance;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
import com.yahoo.collections.Pair;
import com.yahoo.config.provision.NodeType;
import com.yahoo.io.IOUtils;
import com.yahoo.system.ProcessExecuter;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
-import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper;
-import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions;
-import com.yahoo.vespa.hosted.dockerapi.metrics.GaugeWrapper;
-import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.docker.DockerNetworking;
import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
import com.yahoo.vespa.hosted.node.admin.logging.FilebeatConfigProvider;
import com.yahoo.vespa.hosted.node.admin.component.Environment;
+import com.yahoo.vespa.hosted.node.admin.task.util.file.FileHelper;
import com.yahoo.vespa.hosted.node.admin.task.util.file.IOExceptionUtil;
import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger;
import com.yahoo.vespa.hosted.node.admin.util.SecretAgentCheckConfig;
@@ -25,13 +19,10 @@ import com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoredumpHandler;
import java.io.IOException;
import java.io.InputStreamReader;
-import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.time.Clock;
import java.time.Duration;
-import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -39,7 +30,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import java.util.stream.Stream;
@@ -50,41 +40,18 @@ import static com.yahoo.vespa.defaults.Defaults.getDefaults;
* @author freva
*/
public class StorageMaintainer {
- private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
- private static final ObjectMapper objectMapper = new ObjectMapper();
- private final GaugeWrapper numberOfCoredumpsOnHost;
- private final CounterWrapper numberOfNodeAdminMaintenanceFails;
private final DockerOperations dockerOperations;
private final ProcessExecuter processExecuter;
private final Environment environment;
- private final Optional<CoredumpHandler> coredumpHandler;
- private final Clock clock;
-
- private final Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();
-
- public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
- MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
- this(dockerOperations, processExecuter, metricReceiver, environment, null, clock);
- }
+ private final CoredumpHandler coredumpHandler;
public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
- MetricReceiverWrapper metricReceiver, Environment environment,
- CoredumpHandler coredumpHandler, Clock clock) {
+ Environment environment, CoredumpHandler coredumpHandler) {
this.dockerOperations = dockerOperations;
this.processExecuter = processExecuter;
this.environment = environment;
- this.coredumpHandler = Optional.ofNullable(coredumpHandler);
- this.clock = clock;
-
- Dimensions dimensions = new Dimensions.Builder()
- .add("role", SecretAgentCheckConfig.nodeTypeToRole(environment.getNodeType()))
- .build();
- numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
- numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
-
- metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.running_on_host")
- .add(environment.isRunningOnHost() ? 1 : 0);
+ this.coredumpHandler = coredumpHandler;
}
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
@@ -241,17 +208,7 @@ public class StorageMaintainer {
* Deletes old log files for vespa, nginx, logstash, etc.
*/
public void removeOldFilesFromNode(ContainerName containerName) {
- if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;
-
- MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
- addRemoveOldFilesCommand(maintainerExecutor, containerName);
-
- maintainerExecutor.execute();
- getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
- }
-
- private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
- Path[] pathsToClean = {
+ Path[] logPaths = {
environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
environment.pathInNodeUnderVespaHome("logs/logstash2"),
environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
@@ -259,79 +216,42 @@ public class StorageMaintainer {
environment.pathInNodeUnderVespaHome("logs/vespa")
};
- for (Path pathToClean : pathsToClean) {
+ for (Path pathToClean : logPaths) {
Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
- if (Files.exists(path)) {
- maintainerExecutor.addJob("delete-files")
- .withArgument("basePath", path)
- .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
- .withArgument("fileNameRegex", ".*\\.log.+")
- .withArgument("recursive", false);
- }
+ FileHelper.streamFiles(path)
+ .filterFile(FileHelper.olderThan(Duration.ofDays(3))
+ .and(FileHelper.nameMatches(Pattern.compile(".*\\.log.+"))))
+ .delete();
}
Path qrsDir = environment.pathInNodeAdminFromPathInNode(
containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
- maintainerExecutor.addJob("delete-files")
- .withArgument("basePath", qrsDir)
- .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
- .withArgument("recursive", false);
+ FileHelper.streamFiles(qrsDir)
+ .filterFile(FileHelper.olderThan(Duration.ofDays(3)))
+ .delete();
Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
- maintainerExecutor.addJob("delete-files")
- .withArgument("basePath", logArchiveDir)
- .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
- .withArgument("recursive", false);
+ FileHelper.streamFiles(logArchiveDir)
+ .filterFile(FileHelper.olderThan(Duration.ofDays(31)))
+ .delete();
Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
- maintainerExecutor.addJob("delete-files")
- .withArgument("basePath", fileDistrDir)
- .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
- .withArgument("recursive", true);
+ FileHelper.streamFiles(fileDistrDir)
+ .filterFile(FileHelper.olderThan(Duration.ofDays(31)))
+ .recursive(true)
+ .delete();
}
/**
* Checks if container has any new coredumps, reports and archives them if so
*/
public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node) {
- // Sample number of coredumps on the host
- try (Stream<Path> files = Files.list(environment.pathInNodeAdminToDoneCoredumps())) {
- numberOfCoredumpsOnHost.sample(files.count());
- } catch (IOException e) {
- // Ignore for now - this is either test or a misconfiguration
- }
-
- MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
- addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
- maintainerExecutor.execute();
- }
-
- /**
- * Will either schedule coredump execution in the given maintainerExecutor or run coredump handling
- * directly if {@link #coredumpHandler} is set.
- */
- private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
final Path coredumpsPath = environment.pathInNodeAdminFromPathInNode(
containerName, environment.pathInNodeUnderVespaHome("var/crash"));
final Map<String, Object> nodeAttributes = getCoredumpNodeAttributes(node);
- if (coredumpHandler.isPresent()) {
- try {
- coredumpHandler.get().processAll(coredumpsPath, nodeAttributes);
- } catch (IOException e) {
- throw new UncheckedIOException("Failed to process coredumps", e);
- }
- } else {
- // Core dump handling is disabled.
- if (!environment.getCoredumpFeedEndpoint().isPresent()) return;
-
- maintainerExecutor.addJob("handle-core-dumps")
- .withArgument("coredumpsPath", coredumpsPath)
- .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
- .withArgument("attributes", nodeAttributes)
- .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get());
- }
+ coredumpHandler.processAll(coredumpsPath, nodeAttributes);
}
private Map<String, Object> getCoredumpNodeAttributes(NodeSpec node) {
@@ -354,60 +274,22 @@ public class StorageMaintainer {
}
/**
- * Deletes old
- * * archived app data
- * * Vespa logs
- * * Filedistribution files
- */
- public void cleanNodeAdmin() {
- if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;
-
- MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
- maintainerExecutor.addJob("delete-directories")
- .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
- .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
- .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));
-
- Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
- NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
- maintainerExecutor.addJob("delete-files")
- .withArgument("basePath", nodeAdminJDiskLogsPath)
- .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
- .withArgument("recursive", false);
-
- Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
- NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
- maintainerExecutor.addJob("delete-files")
- .withArgument("basePath", fileDistrDir)
- .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
- .withArgument("recursive", true);
-
- maintainerExecutor.execute();
- getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
- }
-
- /**
- * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
- * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
+ * Prepares the container-storage for the next container by archiving container logs to a new directory
+ * and deleting everything else owned by this container.
*/
public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
- MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
- addRemoveOldFilesCommand(maintainerExecutor, containerName);
- addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
- addArchiveNodeData(maintainerExecutor, containerName);
+ removeOldFilesFromNode(containerName);
- maintainerExecutor.execute();
- getMaintenanceThrottlerFor(containerName).reset();
- }
+ Path logsDirInContainer = environment.pathInNodeUnderVespaHome("logs");
+ Path containerLogsInArchiveDir = environment.pathInNodeAdminToNodeCleanup(containerName).resolve(logsDirInContainer);
+ Path containerLogsPathOnHost = environment.pathInNodeAdminFromPathInNode(containerName, logsDirInContainer);
- private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
- maintainerExecutor.addJob("recursive-delete")
- .withArgument("path", environment.pathInNodeAdminFromPathInNode(
- containerName, environment.pathInNodeUnderVespaHome("var")));
+ FileHelper.createDirectories(containerLogsInArchiveDir.getParent());
+ FileHelper.moveIfExists(containerLogsPathOnHost, containerLogsInArchiveDir);
- maintainerExecutor.addJob("move-files")
- .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
- .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
+ FileHelper.streamContents(environment.pathInHostFromPathInNode(containerName, Paths.get("/")))
+ .includeBase(true)
+ .delete();
}
/**
@@ -452,7 +334,6 @@ public class StorageMaintainer {
Pair<Integer, String> result = processExecuter.exec(command);
if (result.getFirst() != 0) {
- numberOfNodeAdminMaintenanceFails.add();
throw new RuntimeException(
String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
Arrays.toString(command), result.getFirst(), result.getSecond()));
@@ -462,69 +343,4 @@ public class StorageMaintainer {
throw new RuntimeException("Failed to execute maintainer", e);
}
}
-
- /**
- * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
- */
- private class MaintainerExecutor {
- private final List<MaintainerExecutorJob> jobs = new ArrayList<>();
-
- MaintainerExecutorJob addJob(String jobName) {
- MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
- jobs.add(job);
- return job;
- }
-
- void execute() {
- if (jobs.isEmpty()) return;
-
- String args;
- try {
- args = objectMapper.writeValueAsString(jobs);
- } catch (JsonProcessingException e) {
- throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
- }
-
- executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
- }
- }
-
- private class MaintainerExecutorJob {
- @JsonProperty(value="type")
- private final String type;
-
- @JsonProperty(value="arguments")
- private final Map<String, Object> arguments = new HashMap<>();
-
- MaintainerExecutorJob(String type) {
- this.type = type;
- }
-
- MaintainerExecutorJob withArgument(String argument, Object value) {
- // Transform Path to String, otherwise ObjectMapper wont encode/decode it properly on the other end
- arguments.put(argument, (value instanceof Path) ? value.toString() : value);
- return this;
- }
- }
-
- private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
- maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
- return maintenanceThrottlerByContainerName.get(containerName);
- }
-
- private class MaintenanceThrottler {
- private Instant nextRemoveOldFilesAt = Instant.EPOCH;
-
- void updateNextRemoveOldFilesTime() {
- nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
- }
-
- boolean shouldRemoveOldFilesNow() {
- return !nextRemoveOldFilesAt.isAfter(clock.instant());
- }
-
- void reset() {
- nextRemoveOldFilesAt = Instant.EPOCH;
- }
- }
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
index eb48086eb0f..e46b29cc078 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
@@ -3,19 +3,18 @@ package com.yahoo.vespa.hosted.node.admin.maintenance.coredump;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.yahoo.system.ProcessExecuter;
-import com.yahoo.vespa.hosted.node.admin.maintenance.FileHelper;
+import com.yahoo.vespa.hosted.node.admin.task.util.file.FileHelper;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.time.Duration;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
-import java.util.Optional;
import java.util.UUID;
import java.util.logging.Level;
import java.util.logging.Logger;
+import java.util.regex.Pattern;
/**
* Finds coredumps, collects metadata and reports them
@@ -24,6 +23,7 @@ import java.util.logging.Logger;
*/
public class CoredumpHandler {
+ private static final Pattern JAVA_COREDUMP_PATTERN = Pattern.compile("java_pid.*\\.hprof");
private static final String PROCESSING_DIRECTORY_NAME = "processing";
static final String METADATA_FILE_NAME = "metadata.json";
@@ -44,23 +44,11 @@ public class CoredumpHandler {
this.doneCoredumpsPath = doneCoredumpsPath;
}
- public void processAll(Path coredumpsPath, Map<String, Object> nodeAttributes) throws IOException {
- removeJavaCoredumps(coredumpsPath);
- handleNewCoredumps(coredumpsPath, nodeAttributes);
- removeOldCoredumps();
- }
-
- private void removeJavaCoredumps(Path coredumpsPath) throws IOException {
- if (! coredumpsPath.toFile().isDirectory()) return;
- FileHelper.deleteFiles(coredumpsPath, Duration.ZERO, Optional.of("^java_pid.*\\.hprof$"), false);
- }
-
- private void removeOldCoredumps() throws IOException {
- if (! doneCoredumpsPath.toFile().isDirectory()) return;
- FileHelper.deleteDirectories(doneCoredumpsPath, Duration.ofDays(10), Optional.empty());
- }
+ public void processAll(Path coredumpsPath, Map<String, Object> nodeAttributes) {
+ FileHelper.streamFiles(coredumpsPath)
+ .filterFile(FileHelper.nameMatches(JAVA_COREDUMP_PATTERN))
+ .delete();
- private void handleNewCoredumps(Path coredumpsPath, Map<String, Object> nodeAttributes) {
enqueueCoredumps(coredumpsPath);
processAndReportCoredumps(coredumpsPath, nodeAttributes);
}
@@ -72,12 +60,14 @@ public class CoredumpHandler {
*/
void enqueueCoredumps(Path coredumpsPath) {
Path processingCoredumpsPath = getProcessingCoredumpsPath(coredumpsPath);
- processingCoredumpsPath.toFile().mkdirs();
- if (!FileHelper.listContentsOfDirectory(processingCoredumpsPath).isEmpty()) return;
-
- FileHelper.listContentsOfDirectory(coredumpsPath).stream()
- .filter(path -> path.toFile().isFile() && ! path.getFileName().toString().startsWith("."))
- .min((Comparator.comparingLong(o -> o.toFile().lastModified())))
+ FileHelper.createDirectories(processingCoredumpsPath);
+ if (!FileHelper.streamDirectories(processingCoredumpsPath).list().isEmpty()) return;
+
+ FileHelper.streamFiles(coredumpsPath)
+ .filterFile(FileHelper.nameStartsWith(".").negate())
+ .stream()
+ .min(Comparator.comparing(FileHelper.FileAttributes::lastModifiedTime))
+ .map(FileHelper.FileAttributes::path)
.ifPresent(coredumpPath -> {
try {
enqueueCoredumpForProcessing(coredumpPath, processingCoredumpsPath);
@@ -89,11 +79,10 @@ public class CoredumpHandler {
void processAndReportCoredumps(Path coredumpsPath, Map<String, Object> nodeAttributes) {
Path processingCoredumpsPath = getProcessingCoredumpsPath(coredumpsPath);
- doneCoredumpsPath.toFile().mkdirs();
+ FileHelper.createDirectories(doneCoredumpsPath);
- FileHelper.listContentsOfDirectory(processingCoredumpsPath).stream()
- .filter(path -> path.toFile().isDirectory())
- .forEach(coredumpDirectory -> processAndReportSingleCoredump(coredumpDirectory, nodeAttributes));
+ FileHelper.streamDirectories(processingCoredumpsPath)
+ .forEachPath(coredumpDirectory -> processAndReportSingleCoredump(coredumpDirectory, nodeAttributes));
}
private void processAndReportSingleCoredump(Path coredumpDirectory, Map<String, Object> nodeAttributes) {
@@ -109,19 +98,20 @@ public class CoredumpHandler {
}
private void enqueueCoredumpForProcessing(Path coredumpPath, Path processingCoredumpsPath) throws IOException {
- // Make coredump readable
- coredumpPath.toFile().setReadable(true, false);
-
// Create new directory for this coredump and move it into it
Path folder = processingCoredumpsPath.resolve(UUID.randomUUID().toString());
- folder.toFile().mkdirs();
+
+ FileHelper.createDirectories(folder);
Files.move(coredumpPath, folder.resolve(coredumpPath.getFileName()));
}
String collectMetadata(Path coredumpDirectory, Map<String, Object> nodeAttributes) throws IOException {
Path metadataPath = coredumpDirectory.resolve(METADATA_FILE_NAME);
if (!Files.exists(metadataPath)) {
- Path coredumpPath = FileHelper.listContentsOfDirectory(coredumpDirectory).stream().findFirst()
+ Path coredumpPath = FileHelper.streamFiles(coredumpDirectory)
+ .stream()
+ .map(FileHelper.FileAttributes::path)
+ .findFirst()
.orElseThrow(() -> new RuntimeException("No coredump file found in processing directory " + coredumpDirectory));
Map<String, Object> metadata = coreCollector.collect(coredumpPath);
metadata.putAll(nodeAttributes);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
index 2621156487d..519d83bd7d4 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
@@ -9,7 +9,6 @@ import com.yahoo.vespa.hosted.dockerapi.metrics.GaugeWrapper;
import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
-import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent;
import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger;
@@ -42,7 +41,6 @@ public class NodeAdminImpl implements NodeAdmin {
private final DockerOperations dockerOperations;
private final Function<String, NodeAgent> nodeAgentFactory;
- private final StorageMaintainer storageMaintainer;
private final Runnable aclMaintainer;
private final Clock clock;
@@ -57,13 +55,11 @@ public class NodeAdminImpl implements NodeAdmin {
public NodeAdminImpl(DockerOperations dockerOperations,
Function<String, NodeAgent> nodeAgentFactory,
- StorageMaintainer storageMaintainer,
Runnable aclMaintainer,
MetricReceiverWrapper metricReceiver,
Clock clock) {
this.dockerOperations = dockerOperations;
this.nodeAgentFactory = nodeAgentFactory;
- this.storageMaintainer = storageMaintainer;
this.aclMaintainer = aclMaintainer;
this.clock = clock;
@@ -82,7 +78,6 @@ public class NodeAdminImpl implements NodeAdmin {
.map(NodeSpec::getHostname)
.collect(Collectors.toSet());
- storageMaintainer.cleanNodeAdmin();
synchronizeNodesToNodeAgents(hostnamesOfContainersToRun);
updateNodeAgentMetrics();
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileHelper.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileHelper.java
new file mode 100644
index 00000000000..a4b1a66c71b
--- /dev/null
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileHelper.java
@@ -0,0 +1,263 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.task.util.file;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Stack;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+import java.util.regex.Pattern;
+import java.util.stream.Stream;
+
+import static com.yahoo.vespa.hosted.node.admin.task.util.file.IOExceptionUtil.uncheck;
+
+/**
+ * @author freva
+ */
+public class FileHelper {
+
+ private final Path basePath;
+ private Predicate<FileAttributes> fileFilter;
+ private Predicate<FileAttributes> directoryFilter;
+ private boolean includeBase = false;
+ private boolean recursive = false;
+
+ public FileHelper(Path basePath, boolean includeFiles, boolean includeDirectories) {
+ this.basePath = basePath;
+ this.fileFilter = path -> includeFiles;
+ this.directoryFilter = path -> includeDirectories;
+ }
+
+ /**
+ * Creates a {@link FileHelper} that will by default match all files and all directories
+ * under the given basePath.
+ */
+ public static FileHelper streamContents(Path basePath) {
+ return new FileHelper(basePath, true, true);
+ }
+
+ /**
+ * Creates a {@link FileHelper} that will by default match all files and no directories
+ * under the given basePath.
+ */
+ public static FileHelper streamFiles(Path basePath) {
+ return new FileHelper(basePath, true, false);
+ }
+
+ /**
+ * Creates a {@link FileHelper} that will by default match all directories and no files
+ * under the given basePath.
+ */
+ public static FileHelper streamDirectories(Path basePath) {
+ return new FileHelper(basePath, false, true);
+ }
+
+
+ /**
+ * Filter that will be used to match files under the base path. Files include everything that
+ * is not a directory (such as symbolic links)
+ */
+ public FileHelper filterFile(Predicate<FileAttributes> fileFilter) {
+ this.fileFilter = fileFilter;
+ return this;
+ }
+
+ /**
+ * Filter that will be used to match directories under the base path.
+ *
+ * NOTE: When a directory is matched, all of its sub-directories and files are also matched
+ */
+ public FileHelper filterDirectory(Predicate<FileAttributes> directoryFilter) {
+ this.directoryFilter = directoryFilter;
+ return this;
+ }
+
+ /**
+ * Whether the search should be recursive.
+ *
+ * WARNING: When using {@link #delete()} and matching directories, make sure that the directories
+ * either are already empty or that recursive is set
+ */
+ public FileHelper recursive(boolean recursive) {
+ this.recursive = recursive;
+ return this;
+ }
+
+ /**
+     * Whether the base path should also be considered (i.e. checked against the corresponding filter).
+ * When using {@link #delete()} with directories, this is the difference between
+ * `rm -rf basePath` (true) and `rm -rf basePath/*` (false)
+ */
+ public FileHelper includeBase(boolean includeBase) {
+ this.includeBase = includeBase;
+ return this;
+ }
+
+ public int delete() {
+ int[] numDeletions = { 0 }; // :(
+ forEach(attributes -> {
+ if (deleteIfExists(attributes.path()))
+ numDeletions[0]++;
+ });
+
+ return numDeletions[0];
+ }
+
+ public List<FileAttributes> list() {
+ LinkedList<FileAttributes> list = new LinkedList<>();
+ forEach(list::add);
+ return list;
+ }
+
+ public Stream<FileAttributes> stream() {
+ return list().stream();
+ }
+
+ public void forEachPath(Consumer<Path> action) {
+ forEach(attributes -> action.accept(attributes.path()));
+ }
+
+ /** Applies a given consumer to all the matching {@link FileHelper.FileAttributes} */
+ public void forEach(Consumer<FileAttributes> action) {
+ applyForEachToMatching(basePath, fileFilter, directoryFilter, recursive, includeBase, action);
+ }
+
+
+ /**
+ * <p> This method walks a file tree rooted at a given starting file. The file tree traversal is
+ * <em>depth-first</em>: The filter function is applied in pre-order (NLR), but the given
+ * {@link Consumer} will be called in post-order (LRN).
+ */
+ private void applyForEachToMatching(Path basePath, Predicate<FileAttributes> fileFilter, Predicate<FileAttributes> directoryFilter,
+ boolean recursive, boolean includeBase, Consumer<FileAttributes> action) {
+ try {
+ Files.walkFileTree(basePath, Collections.emptySet(), recursive ? Integer.MAX_VALUE : 1, new SimpleFileVisitor<Path>() {
+ private Stack<FileAttributes> matchingDirectoryStack = new Stack<>();
+ private int currentLevel = -1;
+
+ @Override
+ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
+ currentLevel++;
+
+ FileAttributes attributes = new FileAttributes(dir, attrs);
+ if (!matchingDirectoryStack.empty() || directoryFilter.test(attributes))
+ matchingDirectoryStack.push(attributes);
+
+ return FileVisitResult.CONTINUE;
+ }
+
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
+ // When we find a directory at the max depth given to Files.walkFileTree, the directory
+ // will be passed to visitFile() rather than (pre|post)VisitDirectory
+ if (attrs.isDirectory()) {
+ preVisitDirectory(file, attrs);
+ return postVisitDirectory(file, null);
+ }
+
+ FileAttributes attributes = new FileAttributes(file, attrs);
+ if (!matchingDirectoryStack.isEmpty() || fileFilter.test(attributes))
+ action.accept(attributes);
+
+ return FileVisitResult.CONTINUE;
+ }
+
+ @Override
+ public FileVisitResult postVisitDirectory(Path dir, IOException exc) {
+ if (!matchingDirectoryStack.isEmpty()) {
+ FileAttributes attributes = matchingDirectoryStack.pop();
+ if (currentLevel != 0 || includeBase)
+ action.accept(attributes);
+ }
+
+ currentLevel--;
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ } catch (NoSuchFileException ignored) {
+
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+
+ // Ideally, we would reuse the FileAttributes in this package, but unfortunately we only get
+ // BasicFileAttributes and not PosixFileAttributes from FileVisitor
+ public static class FileAttributes {
+ private final Path path;
+ private final BasicFileAttributes attributes;
+
+ FileAttributes(Path path, BasicFileAttributes attributes) {
+ this.path = path;
+ this.attributes = attributes;
+ }
+
+ public Path path() { return path; }
+ public String filename() { return path.getFileName().toString(); }
+ public Instant lastModifiedTime() { return attributes.lastModifiedTime().toInstant(); }
+ public boolean isRegularFile() { return attributes.isRegularFile(); }
+ public boolean isDirectory() { return attributes.isDirectory(); }
+ public long size() { return attributes.size(); }
+ }
+
+
+ // Filters
+ public static Predicate<FileAttributes> olderThan(Duration duration) {
+ return attrs -> Duration.between(attrs.lastModifiedTime(), Instant.now()).compareTo(duration) > 0;
+ }
+
+ public static Predicate<FileAttributes> youngerThan(Duration duration) {
+ return olderThan(duration).negate();
+ }
+
+ public static Predicate<FileAttributes> largerThan(long sizeInBytes) {
+ return attrs -> attrs.size() > sizeInBytes;
+ }
+
+ public static Predicate<FileAttributes> smallerThan(long sizeInBytes) {
+ return largerThan(sizeInBytes).negate();
+ }
+
+ public static Predicate<FileAttributes> nameMatches(Pattern pattern) {
+ return attrs -> pattern.matcher(attrs.filename()).matches();
+ }
+
+ public static Predicate<FileAttributes> nameStartsWith(String string) {
+ return attrs -> attrs.filename().startsWith(string);
+ }
+
+ public static Predicate<FileAttributes> nameEndsWith(String string) {
+ return attrs -> attrs.filename().endsWith(string);
+ }
+
+
+    // Other helpful methods that do not throw checked exceptions
+ public static boolean moveIfExists(Path from, Path to) {
+ try {
+ Files.move(from, to);
+ return true;
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ public static boolean deleteIfExists(Path path) {
+ return uncheck(() -> Files.deleteIfExists(path));
+ }
+
+ public static Path createDirectories(Path path) {
+ return uncheck(() -> Files.createDirectories(path));
+ }
+}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
index 15bb2825738..49a03c454c1 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
@@ -87,7 +87,7 @@ public class DockerTester implements AutoCloseable {
Clock clock = Clock.systemUTC();
DockerOperations dockerOperations = new DockerOperationsImpl(dockerMock, environment, processExecuter);
- StorageMaintainerMock storageMaintainer = new StorageMaintainerMock(dockerOperations, null, environment, callOrderVerifier, clock);
+ StorageMaintainerMock storageMaintainer = new StorageMaintainerMock(dockerOperations, null, environment, callOrderVerifier);
AclMaintainer aclMaintainer = mock(AclMaintainer.class);
AthenzCredentialsMaintainer athenzCredentialsMaintainer = mock(AthenzCredentialsMaintainer.class);
@@ -95,7 +95,7 @@ public class DockerTester implements AutoCloseable {
MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, nodeRepositoryMock,
orchestratorMock, dockerOperations, storageMaintainer, aclMaintainer, environment, clock, NODE_AGENT_SCAN_INTERVAL, athenzCredentialsMaintainer);
- nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, storageMaintainer, aclMaintainer, mr, Clock.systemUTC());
+ nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, aclMaintainer, mr, Clock.systemUTC());
nodeAdminStateUpdater = new NodeAdminStateUpdaterImpl(nodeRepositoryMock, orchestratorMock, storageMaintainer,
nodeAdmin, DOCKER_HOST_HOSTNAME, clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL,
Optional.of(new ClassLocking()));
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java
index a46defc991b..2a3171762bc 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java
@@ -245,7 +245,7 @@ public class RunInContainerTest {
private final Function<String, NodeAgent> nodeAgentFactory =
(hostName) -> new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, dockerOperationsMock,
storageMaintainer, aclMaintainer, environment, Clock.systemUTC(), NODE_AGENT_SCAN_INTERVAL, athenzCredentialsMaintainer);
- private final NodeAdmin nodeAdmin = new NodeAdminImpl(dockerOperationsMock, nodeAgentFactory, storageMaintainer, aclMaintainer, mr, Clock.systemUTC());
+ private final NodeAdmin nodeAdmin = new NodeAdminImpl(dockerOperationsMock, nodeAgentFactory, aclMaintainer, mr, Clock.systemUTC());
private final NodeAdminStateUpdaterImpl nodeAdminStateUpdater = new NodeAdminStateUpdaterImpl(nodeRepositoryMock,
orchestratorMock, storageMaintainer, nodeAdmin, "localhost.test.yahoo.com",
Clock.systemUTC(), NODE_ADMIN_CONVERGE_STATE_INTERVAL, Optional.of(new ClassLocking()));
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java
index 6b7d545c286..62f1a59ecf2 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java
@@ -1,16 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.integrationTests;
-import com.yahoo.metrics.simple.MetricReceiver;
import com.yahoo.system.ProcessExecuter;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
-import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.component.Environment;
-import java.time.Clock;
import java.util.Optional;
/**
@@ -19,8 +16,8 @@ import java.util.Optional;
public class StorageMaintainerMock extends StorageMaintainer {
private final CallOrderVerifier callOrderVerifier;
- public StorageMaintainerMock(DockerOperations dockerOperations, ProcessExecuter processExecuter, Environment environment, CallOrderVerifier callOrderVerifier, Clock clock) {
- super(dockerOperations, processExecuter, new MetricReceiverWrapper(MetricReceiver.nullImplementation), environment, clock);
+ public StorageMaintainerMock(DockerOperations dockerOperations, ProcessExecuter processExecuter, Environment environment, CallOrderVerifier callOrderVerifier) {
+ super(dockerOperations, processExecuter, environment, null);
this.callOrderVerifier = callOrderVerifier;
}
@@ -38,10 +35,6 @@ public class StorageMaintainerMock extends StorageMaintainer {
}
@Override
- public void cleanNodeAdmin() {
- }
-
- @Override
public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
callOrderVerifier.add("DeleteContainerStorage with " + containerName);
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/FileHelperTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/FileHelperTest.java
deleted file mode 100644
index 6b53bc217c4..00000000000
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/FileHelperTest.java
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.node.admin.maintenance;
-
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Optional;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-/**
- * @author freva
- */
-public class FileHelperTest {
- @Rule
- public TemporaryFolder folder = new TemporaryFolder();
-
- @Before
- public void initFiles() throws IOException {
- for (int i=0; i<10; i++) {
- File temp = folder.newFile("test_" + i + ".json");
- temp.setLastModified(System.currentTimeMillis() - i*Duration.ofSeconds(130).toMillis());
- }
-
- for (int i=0; i<7; i++) {
- File temp = folder.newFile("test_" + i + "_file.test");
- temp.setLastModified(System.currentTimeMillis() - i*Duration.ofSeconds(250).toMillis());
- }
-
- for (int i=0; i<5; i++) {
- File temp = folder.newFile(i + "-abc" + ".json");
- temp.setLastModified(System.currentTimeMillis() - i*Duration.ofSeconds(80).toMillis());
- }
-
- File temp = folder.newFile("week_old_file.json");
- temp.setLastModified(System.currentTimeMillis() - Duration.ofDays(8).toMillis());
- }
-
- @Test
- public void testDeleteAll() throws IOException {
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ZERO, Optional.empty(), false);
-
- assertEquals(0, getContentsOfDirectory(folder.getRoot()).length);
- }
-
- @Test
- public void testDeletePrefix() throws IOException {
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ZERO, Optional.of("^test_"), false);
-
- assertEquals(6, getContentsOfDirectory(folder.getRoot()).length); // 5 abc files + 1 week_old_file
- }
-
- @Test
- public void testDeleteSuffix() throws IOException {
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ZERO, Optional.of(".json$"), false);
-
- assertEquals(7, getContentsOfDirectory(folder.getRoot()).length);
- }
-
- @Test
- public void testDeletePrefixAndSuffix() throws IOException {
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ZERO, Optional.of("^test_.*\\.json$"), false);
-
- assertEquals(13, getContentsOfDirectory(folder.getRoot()).length); // 5 abc files + 7 test_*_file.test files + week_old_file
- }
-
- @Test
- public void testDeleteOld() throws IOException {
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ofSeconds(600), Optional.empty(), false);
-
- assertEquals(13, getContentsOfDirectory(folder.getRoot()).length); // All 23 - 6 (from test_*_.json) - 3 (from test_*_file.test) - 1 week old file
- }
-
- @Test
- public void testDeleteWithAllParameters() throws IOException {
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ofSeconds(200), Optional.of("^test_.*\\.json$"), false);
-
- assertEquals(15, getContentsOfDirectory(folder.getRoot()).length); // All 23 - 8 (from test_*_.json)
- }
-
- @Test
- public void testDeleteWithSubDirectoriesNoRecursive() throws IOException {
- initSubDirectories();
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ZERO, Optional.of("^test_.*\\.json$"), false);
-
- // 6 test_*.json from test_folder1/
- // + 9 test_*.json and 4 abc_*.json from test_folder2/
- // + 13 test_*.json from test_folder2/subSubFolder2/
- // + 7 test_*_file.test and 5 *-abc.json and 1 week_old_file from root
- // + test_folder1/ and test_folder2/ and test_folder2/subSubFolder2/ themselves
- assertEquals(48, getNumberOfFilesAndDirectoriesIn(folder.getRoot()));
- }
-
- @Test
- public void testDeleteWithSubDirectoriesRecursive() throws IOException {
- initSubDirectories();
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ZERO, Optional.of("^test_.*\\.json$"), true);
-
- // 4 abc_*.json from test_folder2/
- // + 7 test_*_file.test and 5 *-abc.json and 1 week_old_file from root
- // + test_folder2/ itself
- assertEquals(18, getNumberOfFilesAndDirectoriesIn(folder.getRoot()));
- }
-
- @Test
- public void testDeleteFilesWhereFilenameRegexAlsoMatchesDirectories() throws IOException {
- initSubDirectories();
-
- FileHelper.deleteFiles(folder.getRoot().toPath(), Duration.ZERO, Optional.of("^test_"), false);
-
- assertEquals(8, getContentsOfDirectory(folder.getRoot()).length); // 5 abc files + 1 week_old_file + 2 directories
- }
-
- @Test
- public void testGetContentsOfNonExistingDirectory() {
- Path fakePath = Paths.get("/some/made/up/dir/");
- assertEquals(Collections.emptyList(), FileHelper.listContentsOfDirectory(fakePath));
- }
-
- @Test(expected=IllegalArgumentException.class)
- public void testDeleteFilesExceptNMostRecentWithNegativeN() throws IOException {
- FileHelper.deleteFilesExceptNMostRecent(folder.getRoot().toPath(), -5);
- }
-
- @Test
- public void testDeleteFilesExceptFiveMostRecent() throws IOException {
- FileHelper.deleteFilesExceptNMostRecent(folder.getRoot().toPath(), 5);
-
- assertEquals(5, getContentsOfDirectory(folder.getRoot()).length);
-
- String[] oldestFiles = {"test_5_file.test", "test_6_file.test", "test_8.json", "test_9.json", "week_old_file.json"};
- String[] remainingFiles = Arrays.stream(getContentsOfDirectory(folder.getRoot()))
- .map(File::getName)
- .sorted()
- .toArray(String[]::new);
-
- assertArrayEquals(oldestFiles, remainingFiles);
- }
-
- @Test
- public void testDeleteFilesExceptNMostRecentWithLargeN() throws IOException {
- String[] filesPreDelete = folder.getRoot().list();
-
- FileHelper.deleteFilesExceptNMostRecent(folder.getRoot().toPath(), 50);
-
- assertArrayEquals(filesPreDelete, folder.getRoot().list());
- }
-
- @Test
- public void testDeleteFilesLargerThan10B() throws IOException {
- initSubDirectories();
-
- File temp1 = new File(folder.getRoot(), "small_file");
- writeNBytesToFile(temp1, 50);
-
- File temp2 = new File(folder.getRoot(), "some_file");
- writeNBytesToFile(temp2, 20);
-
- File temp3 = new File(folder.getRoot(), "test_folder1/some_other_file");
- writeNBytesToFile(temp3, 75);
-
- FileHelper.deleteFilesLargerThan(folder.getRoot().toPath(), 10);
-
- assertEquals(58, getNumberOfFilesAndDirectoriesIn(folder.getRoot()));
- assertFalse(temp1.exists() || temp2.exists() || temp3.exists());
- }
-
- @Test
- public void testDeleteDirectories() throws IOException {
- initSubDirectories();
-
- FileHelper.deleteDirectories(folder.getRoot().toPath(), Duration.ZERO, Optional.of(".*folder2"));
-
- //23 files in root
- // + 6 in test_folder1 + test_folder1 itself
- assertEquals(30, getNumberOfFilesAndDirectoriesIn(folder.getRoot()));
- }
-
- @Test
- public void testDeleteDirectoriesBasedOnAge() throws IOException {
- initSubDirectories();
- // Create folder3 which is older than maxAge, inside have a single directory, subSubFolder3, inside it which is
- // also older than maxAge inside the sub directory, create some files which are newer than maxAge.
- // deleteDirectories() should NOT delete folder3
- File subFolder3 = folder.newFolder("test_folder3");
- File subSubFolder3 = folder.newFolder("test_folder3", "subSubFolder3");
-
- for (int j=0; j<11; j++) {
- File.createTempFile("test_", ".json", subSubFolder3);
- }
-
- subFolder3.setLastModified(System.currentTimeMillis() - Duration.ofHours(1).toMillis());
- subSubFolder3.setLastModified(System.currentTimeMillis() - Duration.ofHours(3).toMillis());
-
- FileHelper.deleteDirectories(folder.getRoot().toPath(), Duration.ofSeconds(50), Optional.of(".*folder.*"));
-
- //23 files in root
- // + 13 in test_folder2
- // + 13 in subSubFolder2
- // + 11 in subSubFolder3
- // + test_folder2 + subSubFolder2 + folder3 + subSubFolder3 itself
- assertEquals(64, getNumberOfFilesAndDirectoriesIn(folder.getRoot()));
- }
-
- @Test
- public void testRecursivelyDeleteDirectory() throws IOException {
- initSubDirectories();
- FileHelper.recursiveDelete(folder.getRoot().toPath());
- assertFalse(folder.getRoot().exists());
- }
-
- @Test
- public void testRecursivelyDeleteRegularFile() throws IOException {
- File file = folder.newFile();
- assertTrue(file.exists());
- assertTrue(file.isFile());
- FileHelper.recursiveDelete(file.toPath());
- assertFalse(file.exists());
- }
-
- @Test
- public void testRecursivelyDeleteNonExistingFile() throws IOException {
- File file = folder.getRoot().toPath().resolve("non-existing-file.json").toFile();
- assertFalse(file.exists());
- FileHelper.recursiveDelete(file.toPath());
- assertFalse(file.exists());
- }
-
- @Test
- public void testInitSubDirectories() throws IOException {
- initSubDirectories();
- assertTrue(folder.getRoot().exists());
- assertTrue(folder.getRoot().isDirectory());
-
- Path test_folder1 = folder.getRoot().toPath().resolve("test_folder1");
- assertTrue(test_folder1.toFile().exists());
- assertTrue(test_folder1.toFile().isDirectory());
-
- Path test_folder2 = folder.getRoot().toPath().resolve("test_folder2");
- assertTrue(test_folder2.toFile().exists());
- assertTrue(test_folder2.toFile().isDirectory());
-
- Path subSubFolder2 = test_folder2.resolve("subSubFolder2");
- assertTrue(subSubFolder2.toFile().exists());
- assertTrue(subSubFolder2.toFile().isDirectory());
- }
-
- @Test
- public void testDoesNotFailOnLastModifiedOnSymLink() throws IOException {
- Path symPath = folder.getRoot().toPath().resolve("symlink");
- Path fakePath = Paths.get("/some/not/existant/file");
-
- Files.createSymbolicLink(symPath, fakePath);
- assertTrue(Files.isSymbolicLink(symPath));
- assertFalse(Files.exists(fakePath));
-
- // Not possible to set modified time on symlink in java, so just check that it doesn't crash
- FileHelper.getLastModifiedTime(symPath).toInstant();
- }
-
- private void initSubDirectories() throws IOException {
- File subFolder1 = folder.newFolder("test_folder1");
- File subFolder2 = folder.newFolder("test_folder2");
- File subSubFolder2 = folder.newFolder("test_folder2", "subSubFolder2");
-
- for (int j=0; j<6; j++) {
- File temp = File.createTempFile("test_", ".json", subFolder1);
- temp.setLastModified(System.currentTimeMillis() - (j+1)*Duration.ofSeconds(60).toMillis());
- }
-
- for (int j=0; j<9; j++) {
- File.createTempFile("test_", ".json", subFolder2);
- }
-
- for (int j=0; j<4; j++) {
- File.createTempFile("abc_", ".txt", subFolder2);
- }
-
- for (int j=0; j<13; j++) {
- File temp = File.createTempFile("test_", ".json", subSubFolder2);
- temp.setLastModified(System.currentTimeMillis() - (j+1)*Duration.ofSeconds(40).toMillis());
- }
-
- //Must be after all the files have been created
- subFolder1.setLastModified(System.currentTimeMillis() - Duration.ofHours(2).toMillis());
- subFolder2.setLastModified(System.currentTimeMillis() - Duration.ofHours(1).toMillis());
- subSubFolder2.setLastModified(System.currentTimeMillis() - Duration.ofHours(3).toMillis());
- }
-
- private static int getNumberOfFilesAndDirectoriesIn(File folder) {
- int total = 0;
- for (File file : getContentsOfDirectory(folder)) {
- if (file.isDirectory()) {
- total += getNumberOfFilesAndDirectoriesIn(file);
- }
- total++;
- }
-
- return total;
- }
-
- private static void writeNBytesToFile(File file, int nBytes) throws IOException {
- Files.write(file.toPath(), new byte[nBytes]);
- }
-
- private static File[] getContentsOfDirectory(File directory) {
- File[] directoryContents = directory.listFiles();
-
- return directoryContents == null ? new File[0] : directoryContents;
- }
-}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
index d9cce7f80a0..7722354a633 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
@@ -1,20 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.maintenance;
-import com.yahoo.collections.Pair;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.metrics.simple.MetricReceiver;
import com.yahoo.system.ProcessExecuter;
-import com.yahoo.test.ManualClock;
-import com.yahoo.vespa.hosted.dockerapi.ContainerName;
-import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
-import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.config.ConfigServerConfig;
import com.yahoo.vespa.hosted.node.admin.docker.DockerNetworking;
import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
import com.yahoo.vespa.hosted.node.admin.component.Environment;
import com.yahoo.vespa.hosted.node.admin.component.PathResolver;
-import com.yahoo.vespa.hosted.provision.Node;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
@@ -22,21 +14,15 @@ import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
-import java.time.Duration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
/**
* @author dybis
*/
public class StorageMaintainerTest {
- private final ManualClock clock = new ManualClock();
private final Environment environment = new Environment.Builder()
.configServerConfig(new ConfigServerConfig(new ConfigServerConfig.Builder()))
.region("us-east-1")
@@ -50,7 +36,7 @@ public class StorageMaintainerTest {
private final DockerOperations docker = mock(DockerOperations.class);
private final ProcessExecuter processExecuter = mock(ProcessExecuter.class);
private final StorageMaintainer storageMaintainer = new StorageMaintainer(docker, processExecuter,
- new MetricReceiverWrapper(MetricReceiver.nullImplementation), environment, clock);
+ environment, null);
@Rule
public TemporaryFolder folder = new TemporaryFolder();
@@ -71,76 +57,7 @@ public class StorageMaintainerTest {
assertEquals(0L, usedBytes);
}
- @Test
- public void testMaintenanceThrottlingAfterSuccessfulMaintenance() {
- String hostname = "node-123.us-north-3.test.yahoo.com";
- ContainerName containerName = ContainerName.fromHostname(hostname);
- NodeSpec node = new NodeSpec.Builder()
- .hostname(hostname)
- .state(Node.State.ready)
- .nodeType(NodeType.tenant)
- .flavor("docker")
- .minCpuCores(1)
- .minMainMemoryAvailableGb(1)
- .minDiskAvailableGb(1)
- .build();
-
- try {
- when(processExecuter.exec(any(String[].class))).thenReturn(new Pair<>(0, ""));
- } catch (IOException ignored) { }
- storageMaintainer.removeOldFilesFromNode(containerName);
- verifyProcessExecuterCalled(1);
- // Will not actually run maintenance job until an hour passes
- storageMaintainer.removeOldFilesFromNode(containerName);
- verifyProcessExecuterCalled(1);
-
- clock.advance(Duration.ofMinutes(61));
- storageMaintainer.removeOldFilesFromNode(containerName);
- verifyProcessExecuterCalled(2);
-
- // Coredump handling is unthrottled
- storageMaintainer.handleCoreDumpsForContainer(containerName, node);
- verifyProcessExecuterCalled(3);
-
- storageMaintainer.handleCoreDumpsForContainer(containerName, node);
- verifyProcessExecuterCalled(4);
-
- // cleanupNodeStorage is unthrottled and it should reset previous times
- storageMaintainer.cleanupNodeStorage(containerName, node);
- verifyProcessExecuterCalled(5);
- storageMaintainer.cleanupNodeStorage(containerName, node);
- verifyProcessExecuterCalled(6);
- }
-
- @Test
- public void testMaintenanceThrottlingAfterFailedMaintenance() {
- String hostname = "node-123.us-north-3.test.yahoo.com";
- ContainerName containerName = ContainerName.fromHostname(hostname);
-
- try {
- when(processExecuter.exec(any(String[].class)))
- .thenThrow(new RuntimeException("Something went wrong"))
- .thenReturn(new Pair<>(0, ""));
- } catch (IOException ignored) { }
-
- try {
- storageMaintainer.removeOldFilesFromNode(containerName);
- fail("Maintenance job should've failed!");
- } catch (RuntimeException ignored) { }
- verifyProcessExecuterCalled(1);
-
- // Maintenance job failed, we should be able to immediately re-run it
- storageMaintainer.removeOldFilesFromNode(containerName);
- verifyProcessExecuterCalled(2);
- }
-
private static void writeNBytesToFile(File file, int nBytes) throws IOException {
Files.write(file.toPath(), new byte[nBytes]);
}
-
- private void verifyProcessExecuterCalled(int times) {
- try {
- verify(processExecuter, times(times)).exec(any(String[].class));
- } catch (IOException ignored) { }
- }
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
index efd35cce00e..065b039c7fd 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
@@ -5,7 +5,6 @@ import com.yahoo.metrics.simple.MetricReceiver;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
-import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl;
import org.junit.Test;
@@ -40,11 +39,10 @@ public class NodeAdminImplTest {
private interface NodeAgentFactory extends Function<String, NodeAgent> {}
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final Function<String, NodeAgent> nodeAgentFactory = mock(NodeAgentFactory.class);
- private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
private final Runnable aclMaintainer = mock(Runnable.class);
private final ManualClock clock = new ManualClock();
- private final NodeAdminImpl nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, storageMaintainer, aclMaintainer,
+ private final NodeAdminImpl nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, aclMaintainer,
new MetricReceiverWrapper(MetricReceiver.nullImplementation), clock);
@Test
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileHelperTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileHelperTest.java
new file mode 100644
index 00000000000..a3569853122
--- /dev/null
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileHelperTest.java
@@ -0,0 +1,201 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.task.util.file;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.runners.Enclosed;
+import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * @author freva
+ */
+@RunWith(Enclosed.class)
+public class FileHelperTest {
+
+ public static class GeneralLogicTests {
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+
+ @Test
+ public void delete_all_files_non_recursive() {
+ int numDeleted = FileHelper.streamFiles(testRoot())
+ .delete();
+
+ assertEquals(3, numDeleted);
+ assertRecursiveContents("test", "test/file.txt", "test/data.json", "test/subdir-1", "test/subdir-1/file", "test/subdir-2");
+ }
+
+ @Test
+ public void delete_all_files_recursive() {
+ int numDeleted = FileHelper.streamFiles(testRoot())
+ .recursive(true)
+ .delete();
+
+ assertEquals(6, numDeleted);
+ assertRecursiveContents("test", "test/subdir-1", "test/subdir-2");
+ }
+
+ @Test
+ public void delete_with_filter_recursive() {
+ int numDeleted = FileHelper.streamFiles(testRoot())
+ .filterFile(FileHelper.nameEndsWith(".json"))
+ .recursive(true)
+ .delete();
+
+ assertEquals(3, numDeleted);
+ assertRecursiveContents("test.txt", "test", "test/file.txt", "test/subdir-1", "test/subdir-1/file", "test/subdir-2");
+ }
+
+ @Test
+ public void delete_directory_with_filter() {
+ int numDeleted = FileHelper.streamDirectories(testRoot())
+ .filterDirectory(FileHelper.nameStartsWith("subdir"))
+ .recursive(true)
+ .delete();
+
+ assertEquals(3, numDeleted);
+ assertRecursiveContents("file-1.json", "test.json", "test.txt", "test", "test/file.txt", "test/data.json");
+ }
+
+ @Test
+ public void delete_all_contents() {
+ int numDeleted = FileHelper.streamContents(testRoot())
+ .recursive(true)
+ .delete();
+
+ assertEquals(9, numDeleted);
+ assertTrue(Files.exists(testRoot()));
+ assertRecursiveContents();
+ }
+
+ @Test
+ public void delete_everything() {
+ int numDeleted = FileHelper.streamContents(testRoot())
+ .includeBase(true)
+ .recursive(true)
+ .delete();
+
+ assertEquals(10, numDeleted);
+ assertFalse(Files.exists(testRoot()));
+ }
+
+ @Before
+ public void setup() throws IOException {
+ Path root = testRoot();
+
+ Files.createFile(root.resolve("file-1.json"));
+ Files.createFile(root.resolve("test.json"));
+ Files.createFile(root.resolve("test.txt"));
+
+ Files.createDirectories(root.resolve("test"));
+ Files.createFile(root.resolve("test/file.txt"));
+ Files.createFile(root.resolve("test/data.json"));
+
+ Files.createDirectories(root.resolve("test/subdir-1"));
+ Files.createFile(root.resolve("test/subdir-1/file"));
+
+ Files.createDirectories(root.resolve("test/subdir-2"));
+ }
+
+ private Path testRoot() {
+ return folder.getRoot().toPath();
+ }
+
+ private void assertRecursiveContents(String... relativePaths) {
+ Set<String> expectedPaths = new HashSet<>(Arrays.asList(relativePaths));
+ Set<String> actualPaths = recursivelyListContents(testRoot()).stream()
+ .map(testRoot()::relativize)
+ .map(Path::toString)
+ .collect(Collectors.toSet());
+
+ assertEquals(expectedPaths, actualPaths);
+ }
+
+ private List<Path> recursivelyListContents(Path basePath) {
+ try (Stream<Path> pathStream = Files.list(basePath)) {
+ List<Path> paths = new LinkedList<>();
+ pathStream.forEach(path -> {
+ paths.add(path);
+ if (Files.isDirectory(path))
+ paths.addAll(recursivelyListContents(path));
+ });
+ return paths;
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+ }
+
+ public static class FilterUnitTests {
+
+ private final BasicFileAttributes attributes = mock(BasicFileAttributes.class);
+
+ @Test
+ public void age_filter_test() {
+ Path path = Paths.get("/my/fake/path");
+ when(attributes.lastModifiedTime()).thenReturn(FileTime.from(Instant.now().minus(Duration.ofHours(1))));
+ FileHelper.FileAttributes fileAttributes = new FileHelper.FileAttributes(path, attributes);
+
+ assertFalse(FileHelper.olderThan(Duration.ofMinutes(61)).test(fileAttributes));
+ assertTrue(FileHelper.olderThan(Duration.ofMinutes(59)).test(fileAttributes));
+
+ assertTrue(FileHelper.youngerThan(Duration.ofMinutes(61)).test(fileAttributes));
+ assertFalse(FileHelper.youngerThan(Duration.ofMinutes(59)).test(fileAttributes));
+ }
+
+ @Test
+ public void size_filters() {
+ Path path = Paths.get("/my/fake/path");
+ when(attributes.size()).thenReturn(100L);
+ FileHelper.FileAttributes fileAttributes = new FileHelper.FileAttributes(path, attributes);
+
+ assertFalse(FileHelper.largerThan(101).test(fileAttributes));
+ assertTrue(FileHelper.largerThan(99).test(fileAttributes));
+
+ assertTrue(FileHelper.smallerThan(101).test(fileAttributes));
+ assertFalse(FileHelper.smallerThan(99).test(fileAttributes));
+ }
+
+ @Test
+ public void filename_filters() {
+ Path path = Paths.get("/my/fake/path/some-12352-file.json");
+ FileHelper.FileAttributes fileAttributes = new FileHelper.FileAttributes(path, attributes);
+
+ assertTrue(FileHelper.nameStartsWith("some-").test(fileAttributes));
+ assertFalse(FileHelper.nameStartsWith("som-").test(fileAttributes));
+
+ assertTrue(FileHelper.nameEndsWith(".json").test(fileAttributes));
+ assertFalse(FileHelper.nameEndsWith("file").test(fileAttributes));
+
+ assertTrue(FileHelper.nameMatches(Pattern.compile("some-[0-9]+-file.json")).test(fileAttributes));
+ assertTrue(FileHelper.nameMatches(Pattern.compile("^some-[0-9]+-file.json$")).test(fileAttributes));
+ assertFalse(FileHelper.nameMatches(Pattern.compile("some-[0-9]-file.json")).test(fileAttributes));
+ }
+ }
+}
diff --git a/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp
index 58e662a2b1d..d62285da6d3 100644
--- a/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp
+++ b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp
@@ -33,21 +33,22 @@ using std::make_unique;
Impl::ResultMetrics::~ResultMetrics() { }
Impl::ResultMetrics::ResultMetrics(const char* opName)
- : metrics::MetricSet(opName, "", ""),
+ : metrics::MetricSet(opName, {}, ""),
_metric(Result::ERROR_COUNT)
{
- _metric[Result::NONE] = make_unique<DoubleAverageMetric>("success", "", "", this);
- _metric[Result::TRANSIENT_ERROR] = make_unique<DoubleAverageMetric>("transient_error", "", "", this);
- _metric[Result::PERMANENT_ERROR] = make_unique<DoubleAverageMetric>("permanent_error", "", "", this);
- _metric[Result::TIMESTAMP_EXISTS] = make_unique<DoubleAverageMetric>("timestamp_exists", "", "", this);
- _metric[Result::FATAL_ERROR] = make_unique<DoubleAverageMetric>("fatal_error", "", "", this);
- _metric[Result::RESOURCE_EXHAUSTED] = make_unique<DoubleAverageMetric>("resource_exhausted", "", "", this);
+ metrics::Metric::Tags noTags;
+ _metric[Result::NONE] = make_unique<DoubleAverageMetric>("success", noTags, "", this);
+ _metric[Result::TRANSIENT_ERROR] = make_unique<DoubleAverageMetric>("transient_error", noTags, "", this);
+ _metric[Result::PERMANENT_ERROR] = make_unique<DoubleAverageMetric>("permanent_error", noTags, "", this);
+ _metric[Result::TIMESTAMP_EXISTS] = make_unique<DoubleAverageMetric>("timestamp_exists", noTags, "", this);
+ _metric[Result::FATAL_ERROR] = make_unique<DoubleAverageMetric>("fatal_error", noTags, "", this);
+ _metric[Result::RESOURCE_EXHAUSTED] = make_unique<DoubleAverageMetric>("resource_exhausted", noTags, "", this);
// Assert that the above initialized all entries in vector
for (size_t i=0; i<_metric.size(); ++i) assert(_metric[i].get());
}
Impl::MetricPersistenceProvider(PersistenceProvider& next)
- : metrics::MetricSet("spi", "", ""),
+ : metrics::MetricSet("spi", {}, ""),
_next(&next),
_functionMetrics(23)
{
diff --git a/searchcore/src/apps/fdispatch/fdispatch.cpp b/searchcore/src/apps/fdispatch/fdispatch.cpp
index cd3ef3b7550..0aa16260737 100644
--- a/searchcore/src/apps/fdispatch/fdispatch.cpp
+++ b/searchcore/src/apps/fdispatch/fdispatch.cpp
@@ -113,9 +113,6 @@ FastS_FDispatchApp::Main()
} catch (std::exception &e) {
LOG(error, "got exception during init: %s", e.what());
exitCode = 1;
- } catch (...) {
- LOG(error, "got exception during init");
- exitCode = 1;
}
LOG(debug, "Deleting fdispatch");
diff --git a/searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp b/searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp
index 1aaeb1707b3..e10fd41ffed 100644
--- a/searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp
+++ b/searchcore/src/tests/proton/metrics/metrics_engine/metrics_engine_test.cpp
@@ -16,7 +16,7 @@ LOG_SETUP("metrics_engine_test");
using namespace proton;
struct DummyMetricSet : public metrics::MetricSet {
- DummyMetricSet(const vespalib::string &name) : metrics::MetricSet(name, "", "", nullptr) {}
+ DummyMetricSet(const vespalib::string &name) : metrics::MetricSet(name, {}, "", nullptr) {}
};
struct AttributeMetricsFixture {
diff --git a/searchcore/src/tests/proton/server/documentretriever_test.cpp b/searchcore/src/tests/proton/server/documentretriever_test.cpp
index f8ec0d20d33..abf595f87eb 100644
--- a/searchcore/src/tests/proton/server/documentretriever_test.cpp
+++ b/searchcore/src/tests/proton/server/documentretriever_test.cpp
@@ -114,11 +114,24 @@ const int64_t static_zcurve_value = 1118035438880ll;
const int64_t dynamic_zcurve_value = 6145423666930817152ll;
struct MyDocumentStore : proton::test::DummyDocumentStore {
+ mutable std::unique_ptr<Document> _testDoc;
+
+ MyDocumentStore()
+ : proton::test::DummyDocumentStore(),
+ _testDoc()
+ {
+ }
+
+ ~MyDocumentStore() override;
+
virtual Document::UP read(DocumentIdT lid,
const DocumentTypeRepo &r) const override {
if (lid == 0) {
return Document::UP();
}
+ if (_testDoc) {
+ return std::move(_testDoc);
+ }
const DocumentType *doc_type = r.getDocumentType(doc_type_name);
Document::UP doc(new Document(*doc_type, doc_id));
ASSERT_TRUE(doc.get());
@@ -145,6 +158,8 @@ struct MyDocumentStore : proton::test::DummyDocumentStore {
}
};
+MyDocumentStore::~MyDocumentStore() = default;
+
document::DocumenttypesConfig getRepoConfig() {
const int32_t doc_type_id = 787121340;
@@ -317,6 +332,14 @@ struct Fixture {
dyn_wset_field_n, DataType::FLOAT, ct);
_retriever = std::make_unique<DocumentRetriever>(_dtName, repo, schema, meta_store, attr_manager, doc_store);
}
+
+ void clearAttributes(std::vector<vespalib::string> names) {
+ for (const auto &name : names) {
+ auto guard = *attr_manager.getAttribute(name);
+ guard->clearDoc(lid);
+ guard->commit();
+ }
+ }
};
TEST_F("require that document retriever can retrieve document meta data",
@@ -451,6 +474,21 @@ TEST_F("require that predicate attributes can be retrieved", Fixture) {
ASSERT_TRUE(predicate_value);
}
+TEST_F("require that zero values in multivalue attribute removes fields", Fixture)
+{
+ auto meta_data = f._retriever->getDocumentMetaData(doc_id);
+ auto doc = f._retriever->getDocument(meta_data.lid);
+ ASSERT_TRUE(doc);
+ const Document *docPtr = doc.get();
+ ASSERT_TRUE(doc->hasValue(dyn_arr_field_i));
+ ASSERT_TRUE(doc->hasValue(dyn_wset_field_i));
+ f.doc_store._testDoc = std::move(doc);
+ f.clearAttributes({ dyn_arr_field_i, dyn_wset_field_i });
+ doc = f._retriever->getDocument(meta_data.lid);
+ EXPECT_EQUAL(docPtr, doc.get());
+ ASSERT_FALSE(doc->hasValue(dyn_arr_field_i));
+ ASSERT_FALSE(doc->hasValue(dyn_wset_field_i));
+}
} // namespace
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp
index 7b1ed5d6522..5ea89860307 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/content_proton_metrics.cpp
@@ -5,7 +5,7 @@
namespace proton {
ContentProtonMetrics::ProtonExecutorMetrics::ProtonExecutorMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("executor", "", "Metrics for top-level executors shared among all document databases", parent),
+ : metrics::MetricSet("executor", {}, "Metrics for top-level executors shared among all document databases", parent),
proton("proton", this),
flush("flush", this),
match("match", this),
@@ -18,7 +18,7 @@ ContentProtonMetrics::ProtonExecutorMetrics::ProtonExecutorMetrics(metrics::Metr
ContentProtonMetrics::ProtonExecutorMetrics::~ProtonExecutorMetrics() = default;
ContentProtonMetrics::ContentProtonMetrics()
- : metrics::MetricSet("content.proton", "", "Search engine metrics", nullptr),
+ : metrics::MetricSet("content.proton", {}, "Search engine metrics", nullptr),
transactionLog(this),
resourceUsage(this),
executor(this)
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
index c618614ea52..9561c605bd8 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
@@ -9,27 +9,27 @@ namespace proton {
using matching::MatchingStats;
DocumentDBTaggedMetrics::JobMetrics::JobMetrics(metrics::MetricSet* parent)
- : MetricSet("job", "", "Job load average for various jobs in a document database", parent),
- attributeFlush("attribute_flush", "", "Flushing of attribute vector(s) to disk", this),
- memoryIndexFlush("memory_index_flush", "", "Flushing of memory index to disk", this),
- diskIndexFusion("disk_index_fusion", "", "Fusion of disk indexes", this),
- documentStoreFlush("document_store_flush", "", "Flushing of document store to disk", this),
- documentStoreCompact("document_store_compact", "",
+ : MetricSet("job", {}, "Job load average for various jobs in a document database", parent),
+ attributeFlush("attribute_flush", {}, "Flushing of attribute vector(s) to disk", this),
+ memoryIndexFlush("memory_index_flush", {}, "Flushing of memory index to disk", this),
+ diskIndexFusion("disk_index_fusion", {}, "Fusion of disk indexes", this),
+ documentStoreFlush("document_store_flush", {}, "Flushing of document store to disk", this),
+ documentStoreCompact("document_store_compact", {},
"Compaction of document store on disk", this),
- bucketMove("bucket_move", "",
+ bucketMove("bucket_move", {},
"Moving of buckets between 'ready' and 'notready' sub databases", this),
- lidSpaceCompact("lid_space_compact", "",
+ lidSpaceCompact("lid_space_compact", {},
"Compaction of lid space in document meta store and attribute vectors", this),
- removedDocumentsPrune("removed_documents_prune", "",
+ removedDocumentsPrune("removed_documents_prune", {},
"Pruning of removed documents in 'removed' sub database", this),
- total("total", "", "The job load average total of all job metrics", this)
+ total("total", {}, "The job load average total of all job metrics", this)
{
}
DocumentDBTaggedMetrics::JobMetrics::~JobMetrics() = default;
DocumentDBTaggedMetrics::SubDBMetrics::SubDBMetrics(const vespalib::string &name, MetricSet *parent)
- : MetricSet(name, "", "Sub database metrics", parent),
+ : MetricSet(name, {}, "Sub database metrics", parent),
lidSpace(this),
documentStore(this),
attributes(this)
@@ -39,14 +39,14 @@ DocumentDBTaggedMetrics::SubDBMetrics::SubDBMetrics(const vespalib::string &name
DocumentDBTaggedMetrics::SubDBMetrics::~SubDBMetrics() = default;
DocumentDBTaggedMetrics::SubDBMetrics::LidSpaceMetrics::LidSpaceMetrics(MetricSet *parent)
- : MetricSet("lid_space", "", "Local document id (lid) space metrics for this document sub DB", parent),
- lidLimit("lid_limit", "", "The size of the allocated lid space", this),
- usedLids("used_lids", "", "The number of lids used", this),
- lowestFreeLid("lowest_free_lid", "", "The lowest free lid", this),
- highestUsedLid("highest_used_lid", "", "The highest used lid", this),
- lidBloatFactor("lid_bloat_factor", "", "The bloat factor of this lid space, indicating the total amount of holes in the allocated lid space "
+ : MetricSet("lid_space", {}, "Local document id (lid) space metrics for this document sub DB", parent),
+ lidLimit("lid_limit", {}, "The size of the allocated lid space", this),
+ usedLids("used_lids", {}, "The number of lids used", this),
+ lowestFreeLid("lowest_free_lid", {}, "The lowest free lid", this),
+ highestUsedLid("highest_used_lid", {}, "The highest used lid", this),
+ lidBloatFactor("lid_bloat_factor", {}, "The bloat factor of this lid space, indicating the total amount of holes in the allocated lid space "
"((lid_limit - used_lids) / lid_limit)", this),
- lidFragmentationFactor("lid_fragmentation_factor", "",
+ lidFragmentationFactor("lid_fragmentation_factor", {},
"The fragmentation factor of this lid space, indicating the amount of holes in the currently used part of the lid space "
"((highest_used_lid - used_lids) / highest_used_lid)", this)
{
@@ -55,22 +55,22 @@ DocumentDBTaggedMetrics::SubDBMetrics::LidSpaceMetrics::LidSpaceMetrics(MetricSe
DocumentDBTaggedMetrics::SubDBMetrics::LidSpaceMetrics::~LidSpaceMetrics() = default;
DocumentDBTaggedMetrics::SubDBMetrics::DocumentStoreMetrics::CacheMetrics::CacheMetrics(MetricSet *parent)
- : MetricSet("cache", "", "Document store cache metrics", parent),
- memoryUsage("memory_usage", "", "Memory usage of the cache (in bytes)", this),
- elements("elements", "", "Number of elements in the cache", this),
- hitRate("hit_rate", "", "Rate of hits in the cache compared to number of lookups", this),
- lookups("lookups", "", "Number of lookups in the cache (hits + misses)", this),
- invalidations("invalidations", "", "Number of invalidations (erased elements) in the cache. ", this)
+ : MetricSet("cache", {}, "Document store cache metrics", parent),
+ memoryUsage("memory_usage", {}, "Memory usage of the cache (in bytes)", this),
+ elements("elements", {}, "Number of elements in the cache", this),
+ hitRate("hit_rate", {}, "Rate of hits in the cache compared to number of lookups", this),
+ lookups("lookups", {}, "Number of lookups in the cache (hits + misses)", this),
+ invalidations("invalidations", {}, "Number of invalidations (erased elements) in the cache. ", this)
{
}
DocumentDBTaggedMetrics::SubDBMetrics::DocumentStoreMetrics::CacheMetrics::~CacheMetrics() = default;
DocumentDBTaggedMetrics::SubDBMetrics::DocumentStoreMetrics::DocumentStoreMetrics(MetricSet *parent)
- : MetricSet("document_store", "", "Document store metrics for this document sub DB", parent),
- diskUsage("disk_usage", "", "Disk space usage in bytes", this),
- diskBloat("disk_bloat", "", "Disk space bloat in bytes", this),
- maxBucketSpread("max_bucket_spread", "", "Max bucket spread in underlying files (sum(unique buckets in each chunk)/unique buckets in file)", this),
+ : MetricSet("document_store", {}, "Document store metrics for this document sub DB", parent),
+ diskUsage("disk_usage", {}, "Disk space usage in bytes", this),
+ diskBloat("disk_bloat", {}, "Disk space bloat in bytes", this),
+ maxBucketSpread("max_bucket_spread", {}, "Max bucket spread in underlying files (sum(unique buckets in each chunk)/unique buckets in file)", this),
memoryUsage(this),
cache(this)
{
@@ -79,7 +79,7 @@ DocumentDBTaggedMetrics::SubDBMetrics::DocumentStoreMetrics::DocumentStoreMetric
DocumentDBTaggedMetrics::SubDBMetrics::DocumentStoreMetrics::~DocumentStoreMetrics() = default;
DocumentDBTaggedMetrics::AttributeMetrics::AttributeMetrics(MetricSet *parent)
- : MetricSet("attribute", "", "Attribute vector metrics for this document db", parent),
+ : MetricSet("attribute", {}, "Attribute vector metrics for this document db", parent),
resourceUsage(this),
totalMemoryUsage(this)
{
@@ -88,22 +88,22 @@ DocumentDBTaggedMetrics::AttributeMetrics::AttributeMetrics(MetricSet *parent)
DocumentDBTaggedMetrics::AttributeMetrics::~AttributeMetrics() = default;
DocumentDBTaggedMetrics::AttributeMetrics::ResourceUsageMetrics::ResourceUsageMetrics(MetricSet *parent)
- : MetricSet("resource_usage", "", "Usage metrics for various attribute vector resources", parent),
- enumStore("enum_store", "", "The highest relative amount of enum store address space used among "
+ : MetricSet("resource_usage", {}, "Usage metrics for various attribute vector resources", parent),
+ enumStore("enum_store", {}, "The highest relative amount of enum store address space used among "
"all enumerated attribute vectors in this document db (value in the range [0, 1])", this),
- multiValue("multi_value", "", "The highest relative amount of multi-value address space used among "
+ multiValue("multi_value", {}, "The highest relative amount of multi-value address space used among "
"all multi-value attribute vectors in this document db (value in the range [0, 1])", this),
- feedingBlocked("feeding_blocked", "", "Whether feeding is blocked due to attribute resource limits being reached (value is either 0 or 1)", this)
+ feedingBlocked("feeding_blocked", {}, "Whether feeding is blocked due to attribute resource limits being reached (value is either 0 or 1)", this)
{
}
DocumentDBTaggedMetrics::AttributeMetrics::ResourceUsageMetrics::~ResourceUsageMetrics() = default;
DocumentDBTaggedMetrics::IndexMetrics::IndexMetrics(MetricSet *parent)
- : MetricSet("index", "", "Index metrics (memory and disk) for this document db", parent),
- diskUsage("disk_usage", "", "Disk space usage in bytes", this),
+ : MetricSet("index", {}, "Index metrics (memory and disk) for this document db", parent),
+ diskUsage("disk_usage", {}, "Disk space usage in bytes", this),
memoryUsage(this),
- docsInMemory("docs_in_memory", "", "Number of documents in memory index", this)
+ docsInMemory("docs_in_memory", {}, "Number of documents in memory index", this)
{
}
@@ -124,14 +124,14 @@ DocumentDBTaggedMetrics::MatchingMetrics::update(const MatchingStats &stats)
}
DocumentDBTaggedMetrics::MatchingMetrics::MatchingMetrics(MetricSet *parent)
- : MetricSet("matching", "", "Matching metrics", parent),
- docsMatched("docs_matched", "", "Number of documents matched", this),
- docsRanked("docs_ranked", "", "Number of documents ranked (first phase)", this),
- docsReRanked("docs_reranked", "", "Number of documents re-ranked (second phase)", this),
- queries("queries", "", "Number of queries executed", this),
- softDoomFactor("soft_doom_factor", "", "Factor used to compute soft-timeout", this),
- queryCollateralTime("query_collateral_time", "", "Average time (sec) spent setting up and tearing down queries", this),
- queryLatency("query_latency", "", "Average latency (sec) when matching a query", this)
+ : MetricSet("matching", {}, "Matching metrics", parent),
+ docsMatched("docs_matched", {}, "Number of documents matched", this),
+ docsRanked("docs_ranked", {}, "Number of documents ranked (first phase)", this),
+ docsReRanked("docs_reranked", {}, "Number of documents re-ranked (second phase)", this),
+ queries("queries", {}, "Number of queries executed", this),
+ softDoomFactor("soft_doom_factor", {}, "Factor used to compute soft-timeout", this),
+ queryCollateralTime("query_collateral_time", {}, "Average time (sec) spent setting up and tearing down queries", this),
+ queryLatency("query_latency", {}, "Average latency (sec) when matching a query", this)
{
}
@@ -141,16 +141,16 @@ DocumentDBTaggedMetrics::MatchingMetrics::RankProfileMetrics::RankProfileMetrics
size_t numDocIdPartitions,
MetricSet *parent)
: MetricSet("rank_profile", {{"rankProfile", name}}, "Rank profile metrics", parent),
- docsMatched("docs_matched", "", "Number of documents matched", this),
- docsRanked("docs_ranked", "", "Number of documents ranked (first phase)", this),
- docsReRanked("docs_reranked", "", "Number of documents re-ranked (second phase)", this),
- queries("queries", "", "Number of queries executed", this),
- limitedQueries("limited_queries", "", "Number of queries limited in match phase", this),
- matchTime("match_time", "", "Average time (sec) for matching a query", this),
- groupingTime("grouping_time", "", "Average time (sec) spent on grouping", this),
- rerankTime("rerank_time", "", "Average time (sec) spent on 2nd phase ranking", this),
- queryCollateralTime("query_collateral_time", "", "Average time (sec) spent setting up and tearing down queries", this),
- queryLatency("query_latency", "", "Average latency (sec) when matching a query", this)
+ docsMatched("docs_matched", {}, "Number of documents matched", this),
+ docsRanked("docs_ranked", {}, "Number of documents ranked (first phase)", this),
+ docsReRanked("docs_reranked", {}, "Number of documents re-ranked (second phase)", this),
+ queries("queries", {}, "Number of queries executed", this),
+ limitedQueries("limited_queries", {}, "Number of queries limited in match phase", this),
+ matchTime("match_time", {}, "Average time (sec) for matching a query", this),
+ groupingTime("grouping_time", {}, "Average time (sec) spent on grouping", this),
+ rerankTime("rerank_time", {}, "Average time (sec) spent on 2nd phase ranking", this),
+ queryCollateralTime("query_collateral_time", {}, "Average time (sec) spent setting up and tearing down queries", this),
+ queryLatency("query_latency", {}, "Average latency (sec) when matching a query", this)
{
for (size_t i = 0; i < numDocIdPartitions; ++i) {
vespalib::string partition(vespalib::make_string("docid_part%02ld", i));
@@ -162,11 +162,11 @@ DocumentDBTaggedMetrics::MatchingMetrics::RankProfileMetrics::~RankProfileMetric
DocumentDBTaggedMetrics::MatchingMetrics::RankProfileMetrics::DocIdPartition::DocIdPartition(const vespalib::string &name, MetricSet *parent) :
MetricSet("docid_partition", {{"docidPartition", name}}, "DocId Partition profile metrics", parent),
- docsMatched("docs_matched", "", "Number of documents matched", this),
- docsRanked("docs_ranked", "", "Number of documents ranked (first phase)", this),
- docsReRanked("docs_reranked", "", "Number of documents re-ranked (second phase)", this),
- activeTime("active_time", "", "Time (sec) spent doing actual work", this),
- waitTime("wait_time", "", "Time (sec) spent waiting for other external threads and resources", this)
+ docsMatched("docs_matched", {}, "Number of documents matched", this),
+ docsRanked("docs_ranked", {}, "Number of documents ranked (first phase)", this),
+ docsReRanked("docs_reranked", {}, "Number of documents re-ranked (second phase)", this),
+ activeTime("active_time", {}, "Time (sec) spent doing actual work", this),
+ waitTime("wait_time", {}, "Time (sec) spent waiting for other external threads and resources", this)
{ }
DocumentDBTaggedMetrics::MatchingMetrics::RankProfileMetrics::DocIdPartition::~DocIdPartition() = default;
@@ -215,7 +215,7 @@ DocumentDBTaggedMetrics::MatchingMetrics::RankProfileMetrics::update(const Match
}
DocumentDBTaggedMetrics::SessionCacheMetrics::SessionCacheMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("session_cache", "", "Metrics for session caches (search / grouping requests)", parent),
+ : metrics::MetricSet("session_cache", {}, "Metrics for session caches (search / grouping requests)", parent),
search("search", this),
grouping("grouping", this)
{
@@ -224,11 +224,11 @@ DocumentDBTaggedMetrics::SessionCacheMetrics::SessionCacheMetrics(metrics::Metri
DocumentDBTaggedMetrics::SessionCacheMetrics::~SessionCacheMetrics() = default;
DocumentDBTaggedMetrics::DocumentsMetrics::DocumentsMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("documents", "", "Metrics for various document counts in this document db", parent),
- active("active", "", "The number of active / searchable documents in this document db", this),
- ready("ready", "", "The number of ready documents in this document db", this),
- total("total", "", "The total number of documents in this documents db (ready + not-ready)", this),
- removed("removed", "", "The number of removed documents in this document db", this)
+ : metrics::MetricSet("documents", {}, "Metrics for various document counts in this document db", parent),
+ active("active", {}, "The number of active / searchable documents in this document db", this),
+ ready("ready", {}, "The number of ready documents in this document db", this),
+ total("total", {}, "The total number of documents in this documents db (ready + not-ready)", this),
+ removed("removed", {}, "The number of removed documents in this document db", this)
{
}
@@ -247,7 +247,7 @@ DocumentDBTaggedMetrics::DocumentDBTaggedMetrics(const vespalib::string &docType
sessionCache(this),
documents(this),
totalMemoryUsage(this),
- totalDiskUsage("disk_usage", "", "The total disk usage (in bytes) for this document db", this)
+ totalDiskUsage("disk_usage", {}, "The total disk usage (in bytes) for this document db", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/executor_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/executor_metrics.cpp
index c5296aa3b1f..710c072aa53 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/executor_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/executor_metrics.cpp
@@ -13,10 +13,10 @@ ExecutorMetrics::update(const vespalib::ThreadStackExecutorBase::Stats &stats)
}
ExecutorMetrics::ExecutorMetrics(const std::string &name, metrics::MetricSet *parent)
- : metrics::MetricSet(name, "", "Instance specific thread executor metrics", parent),
- maxPending("maxpending", "", "Maximum number of pending (active + queued) tasks", this),
- accepted("accepted", "", "Number of accepted tasks", this),
- rejected("rejected", "", "Number of rejected tasks", this)
+ : metrics::MetricSet(name, {}, "Instance specific thread executor metrics", parent),
+ maxPending("maxpending", {}, "Maximum number of pending (active + queued) tasks", this),
+ accepted("accepted", {}, "Number of accepted tasks", this),
+ rejected("rejected", {}, "Number of rejected tasks", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp
index 2b7908633c9..e7c11bd7dd5 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/executor_threading_service_metrics.cpp
@@ -6,7 +6,7 @@
namespace proton {
ExecutorThreadingServiceMetrics::ExecutorThreadingServiceMetrics(const std::string &name, metrics::MetricSet *parent)
- : metrics::MetricSet(name, "", "Instance specific threading service metrics", parent),
+ : metrics::MetricSet(name, {}, "Instance specific threading service metrics", parent),
master("master", this),
index("index", this),
summary("summary", this),
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/legacy_attribute_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/legacy_attribute_metrics.cpp
index 25dfb3d71ab..7d182e5ccc5 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/legacy_attribute_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/legacy_attribute_metrics.cpp
@@ -5,9 +5,9 @@
namespace proton {
LegacyAttributeMetrics::List::Entry::Entry(const std::string &name)
- : metrics::MetricSet(name, "", "Attribute vector metrics", 0),
- memoryUsage("memoryusage", "", "Memory usage", this),
- bitVectors("bitvectors", "", "Number of bitvectors", this)
+ : metrics::MetricSet(name, {}, "Attribute vector metrics", 0),
+ memoryUsage("memoryusage", {}, "Memory usage", this),
+ bitVectors("bitvectors", {}, "Number of bitvectors", this)
{
}
@@ -56,7 +56,7 @@ LegacyAttributeMetrics::List::release()
}
LegacyAttributeMetrics::List::List(metrics::MetricSet *parent)
- : metrics::MetricSet("list", "", "Metrics per attribute vector", parent),
+ : metrics::MetricSet("list", {}, "Metrics per attribute vector", parent),
metrics()
{
}
@@ -64,10 +64,10 @@ LegacyAttributeMetrics::List::List(metrics::MetricSet *parent)
LegacyAttributeMetrics::List::~List() = default;
LegacyAttributeMetrics::LegacyAttributeMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("attributes", "", "Attribute metrics", parent),
+ : metrics::MetricSet("attributes", {}, "Attribute metrics", parent),
list(this),
- memoryUsage("memoryusage", "", "Memory usage for attributes", this),
- bitVectors("bitvectors", "", "Number of bitvectors for attributes", this)
+ memoryUsage("memoryusage", {}, "Memory usage for attributes", this),
+ bitVectors("bitvectors", {}, "Number of bitvectors for attributes", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/legacy_documentdb_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/legacy_documentdb_metrics.cpp
index 3b24dcdc1d1..d4180c4ec5f 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/legacy_documentdb_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/legacy_documentdb_metrics.cpp
@@ -13,21 +13,21 @@ namespace proton {
using matching::MatchingStats;
LegacyDocumentDBMetrics::IndexMetrics::IndexMetrics(MetricSet *parent)
- : MetricSet("index", "", "Index metrics", parent),
- memoryUsage("memoryusage", "", "Memory usage for memory indexes", this),
- docsInMemory("docsinmemory", "", "Number of documents in memory", this),
- diskUsage("diskusage", "", "Disk usage for disk indexes", this)
+ : MetricSet("index", {}, "Index metrics", parent),
+ memoryUsage("memoryusage", {}, "Memory usage for memory indexes", this),
+ docsInMemory("docsinmemory", {}, "Number of documents in memory", this),
+ diskUsage("diskusage", {}, "Disk usage for disk indexes", this)
{ }
LegacyDocumentDBMetrics::IndexMetrics::~IndexMetrics() {}
LegacyDocumentDBMetrics::DocstoreMetrics::DocstoreMetrics(MetricSet *parent)
- : MetricSet("docstore", "", "Document store metrics", parent),
- memoryUsage("memoryusage", "", "Memory usage for docstore", this),
- cacheLookups("cachelookups", "", "Number of lookups in summary cache", this),
- cacheHitRate("cachehitrate", "", "Rate of cache hits in summary cache", this),
- cacheElements("cacheelements", "", "Number of elements in summary cache", this),
- cacheMemoryUsed("cachememoryused", "", "Memory used by summary cache", this)
+ : MetricSet("docstore", {}, "Document store metrics", parent),
+ memoryUsage("memoryusage", {}, "Memory usage for docstore", this),
+ cacheLookups("cachelookups", {}, "Number of lookups in summary cache", this),
+ cacheHitRate("cachehitrate", {}, "Rate of cache hits in summary cache", this),
+ cacheElements("cacheelements", {}, "Number of elements in summary cache", this),
+ cacheMemoryUsed("cachememoryused", {}, "Memory used by summary cache", this)
{ }
LegacyDocumentDBMetrics::DocstoreMetrics::~DocstoreMetrics() {}
@@ -47,26 +47,26 @@ LegacyDocumentDBMetrics::MatchingMetrics::update(const MatchingStats &stats)
}
LegacyDocumentDBMetrics::MatchingMetrics::MatchingMetrics(MetricSet *parent)
- : MetricSet("matching", "", "Matching metrics", parent),
- docsMatched("docsmatched", "", "Number of documents matched", this),
- docsRanked("docsranked", "", "Number of documents ranked (first phase)", this),
- docsReRanked("docsreranked", "", "Number of documents re-ranked (second phase)", this),
- queries("queries", "", "Number of queries executed", this),
- softDoomFactor("softdoomfactor", "", "Factor used to compute soft-timeout", this),
- queryCollateralTime("querycollateraltime", "", "Average time spent setting up and tearing down queries", this),
- queryLatency("querylatency", "", "Average latency when matching a query", this)
+ : MetricSet("matching", {}, "Matching metrics", parent),
+ docsMatched("docsmatched", {}, "Number of documents matched", this),
+ docsRanked("docsranked", {}, "Number of documents ranked (first phase)", this),
+ docsReRanked("docsreranked", {}, "Number of documents re-ranked (second phase)", this),
+ queries("queries", {}, "Number of queries executed", this),
+ softDoomFactor("softdoomfactor", {}, "Factor used to compute soft-timeout", this),
+ queryCollateralTime("querycollateraltime", {}, "Average time spent setting up and tearing down queries", this),
+ queryLatency("querylatency", {}, "Average latency when matching a query", this)
{ }
LegacyDocumentDBMetrics::MatchingMetrics::~MatchingMetrics() {}
LegacyDocumentDBMetrics::MatchingMetrics::RankProfileMetrics::RankProfileMetrics(
const std::string &name, size_t numDocIdPartitions, MetricSet *parent)
- : MetricSet(name, "", "Rank profile metrics", parent),
- queries("queries", "", "Number of queries executed", this),
- limited_queries("limitedqueries", "", "Number of queries limited in match phase", this),
- matchTime("match_time", "", "Average time for matching a query", this),
- groupingTime("grouping_time", "", "Average time spent on grouping", this),
- rerankTime("rerank_time", "", "Average time spent on 2nd phase ranking", this)
+ : MetricSet(name, {}, "Rank profile metrics", parent),
+ queries("queries", {}, "Number of queries executed", this),
+ limited_queries("limitedqueries", {}, "Number of queries limited in match phase", this),
+ matchTime("match_time", {}, "Average time for matching a query", this),
+ groupingTime("grouping_time", {}, "Average time spent on grouping", this),
+ rerankTime("rerank_time", {}, "Average time spent on 2nd phase ranking", this)
{
for (size_t i=0; i < numDocIdPartitions; i++) {
vespalib::string s(make_string("docid_part%02ld", i));
@@ -77,12 +77,12 @@ LegacyDocumentDBMetrics::MatchingMetrics::RankProfileMetrics::RankProfileMetrics
LegacyDocumentDBMetrics::MatchingMetrics::RankProfileMetrics::~RankProfileMetrics() {}
LegacyDocumentDBMetrics::MatchingMetrics::RankProfileMetrics::DocIdPartition::DocIdPartition(const std::string &name, MetricSet *parent) :
- MetricSet(name, "", "DocId Partition profile metrics", parent),
- docsMatched("docsmatched", "", "Number of documents matched", this),
- docsRanked("docsranked", "", "Number of documents ranked (first phase)", this),
- docsReRanked("docsreranked", "", "Number of documents re-ranked (second phase)", this),
- active_time("activetime", "", "Time spent doing actual work", this),
- wait_time("waittime", "", "Time spent waiting for other external threads and resources", this)
+ MetricSet(name, {}, "DocId Partition profile metrics", parent),
+ docsMatched("docsmatched", {}, "Number of documents matched", this),
+ docsRanked("docsranked", {}, "Number of documents ranked (first phase)", this),
+ docsReRanked("docsreranked", {}, "Number of documents re-ranked (second phase)", this),
+ active_time("activetime", {}, "Time spent doing actual work", this),
+ wait_time("waittime", {}, "Time spent waiting for other external threads and resources", this)
{ }
LegacyDocumentDBMetrics::MatchingMetrics::RankProfileMetrics::DocIdPartition::~DocIdPartition() {}
@@ -126,14 +126,14 @@ LegacyDocumentDBMetrics::MatchingMetrics::RankProfileMetrics::update(const Match
}
LegacyDocumentDBMetrics::SubDBMetrics::DocumentMetaStoreMetrics::DocumentMetaStoreMetrics(MetricSet *parent)
- : MetricSet("docmetastore", "", "Document meta store metrics", parent),
- lidLimit("lidlimit", "", "The size of the allocated lid space", this),
- usedLids("usedlids", "", "The number of lids used", this),
- lowestFreeLid("lowestfreelid", "", "The lowest free lid", this),
- highestUsedLid("highestusedlid", "", "The highest used lid", this),
- lidBloatFactor("lidbloatfactor", "", "The bloat factor of this lid space, indicating the total amount of holes in the allocated lid space "
+ : MetricSet("docmetastore", {}, "Document meta store metrics", parent),
+ lidLimit("lidlimit", {}, "The size of the allocated lid space", this),
+ usedLids("usedlids", {}, "The number of lids used", this),
+ lowestFreeLid("lowestfreelid", {}, "The lowest free lid", this),
+ highestUsedLid("highestusedlid", {}, "The highest used lid", this),
+ lidBloatFactor("lidbloatfactor", {}, "The bloat factor of this lid space, indicating the total amount of holes in the allocated lid space "
"((lidlimit - usedlids) / lidlimit)", this),
- lidFragmentationFactor("lid_fragmentation_factor", "",
+ lidFragmentationFactor("lid_fragmentation_factor", {},
"The fragmentation factor of this lid space, indicating the amount of holes in the currently used part of the lid space "
"((highestusedlid - usedlids) / highestusedlid)", this)
{
@@ -142,7 +142,7 @@ LegacyDocumentDBMetrics::SubDBMetrics::DocumentMetaStoreMetrics::DocumentMetaSto
LegacyDocumentDBMetrics::SubDBMetrics::DocumentMetaStoreMetrics::~DocumentMetaStoreMetrics() {}
LegacyDocumentDBMetrics::SubDBMetrics::SubDBMetrics(const vespalib::string &name, MetricSet *parent)
- : MetricSet(name, "", "Sub database metrics", parent),
+ : MetricSet(name, {}, "Sub database metrics", parent),
attributes(this),
docMetaStore(this)
{ }
@@ -150,7 +150,7 @@ LegacyDocumentDBMetrics::SubDBMetrics::SubDBMetrics(const vespalib::string &name
LegacyDocumentDBMetrics::SubDBMetrics::~SubDBMetrics() {}
LegacyDocumentDBMetrics::LegacyDocumentDBMetrics(const std::string &docTypeName, size_t maxNumThreads)
- : MetricSet(make_string("%s", docTypeName.c_str()), "", "Document DB Metrics", 0),
+ : MetricSet(make_string("%s", docTypeName.c_str()), {}, "Document DB Metrics", 0),
index(this),
attributes(this),
docstore(this),
@@ -162,13 +162,13 @@ LegacyDocumentDBMetrics::LegacyDocumentDBMetrics(const std::string &docTypeName,
ready("ready", this),
notReady("notready", this),
removed("removed", this),
- memoryUsage("memoryusage", "", "Memory usage for this Document DB", this),
- numDocs("numdocs", "", "Number of ready/indexed documents in this Document DB (aka number of documents in the 'ready' sub db)", this),
- numActiveDocs("numactivedocs", "", "Number of active/searchable documents in this Document DB (aka number of active/searchable documents in the 'ready' sub db)", this),
- numIndexedDocs("numindexeddocs", "", "Number of ready/indexed documents in this Document DB (aka number of documents in the 'ready' sub db)", this),
- numStoredDocs("numstoreddocs", "", "Total number of documents stored in this Document DB (aka number of documents in the 'ready' and 'notready' sub dbs)", this),
- numRemovedDocs("numremoveddocs", "", "Number of removed documents in this Document DB (aka number of documents in the 'removed' sub db)", this),
- numBadConfigs("numBadConfigs", "", "Number of bad configs for this Document DB", this),
+ memoryUsage("memoryusage", {}, "Memory usage for this Document DB", this),
+ numDocs("numdocs", {}, "Number of ready/indexed documents in this Document DB (aka number of documents in the 'ready' sub db)", this),
+ numActiveDocs("numactivedocs", {}, "Number of active/searchable documents in this Document DB (aka number of active/searchable documents in the 'ready' sub db)", this),
+ numIndexedDocs("numindexeddocs", {}, "Number of ready/indexed documents in this Document DB (aka number of documents in the 'ready' sub db)", this),
+ numStoredDocs("numstoreddocs", {}, "Total number of documents stored in this Document DB (aka number of documents in the 'ready' and 'notready' sub dbs)", this),
+ numRemovedDocs("numremoveddocs", {}, "Number of removed documents in this Document DB (aka number of documents in the 'removed' sub db)", this),
+ numBadConfigs("numBadConfigs", {}, "Number of bad configs for this Document DB", this),
_maxNumThreads(maxNumThreads)
{
memoryUsage.addMetricToSum(index.memoryUsage);
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/legacy_proton_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/legacy_proton_metrics.cpp
index 9b7015a9f61..5c0285967e9 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/legacy_proton_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/legacy_proton_metrics.cpp
@@ -5,34 +5,34 @@
namespace proton {
LegacyProtonMetrics::DocumentTypeMetrics::DocumentTypeMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("doctypes", "", "Metrics per document type", parent)
+ : metrics::MetricSet("doctypes", {}, "Metrics per document type", parent)
{
}
LegacyProtonMetrics::DocumentTypeMetrics::~DocumentTypeMetrics() { }
LegacyProtonMetrics::LegacyProtonMetrics()
- : metrics::MetricSet("proton", "", "Search engine metrics", 0),
+ : metrics::MetricSet("proton", {}, "Search engine metrics", 0),
docTypes(this),
executor("executor", this),
flushExecutor("flushexecutor", this),
matchExecutor("matchexecutor", this),
summaryExecutor("summaryexecutor", this),
- memoryUsage("memoryusage", "logdefault", "Total tracked memory usage", this),
- diskUsage("diskusage", "logdefault", "Total tracked disk usage for disk indexes", this),
- docsInMemory("docsinmemory", "logdefault", "Total Number of documents in memory", this),
- numDocs("numdocs", "logdefault", "Total number of ready/indexed documents among all document dbs (equal as numindexeddocs)", this),
- numActiveDocs("numactivedocs", "logdefault",
+ memoryUsage("memoryusage", {{"logdefault"}}, "Total tracked memory usage", this),
+ diskUsage("diskusage", {{"logdefault"}}, "Total tracked disk usage for disk indexes", this),
+ docsInMemory("docsinmemory", {{"logdefault"}}, "Total Number of documents in memory", this),
+ numDocs("numdocs", {{"logdefault"}}, "Total number of ready/indexed documents among all document dbs (equal as numindexeddocs)", this),
+ numActiveDocs("numactivedocs", {{"logdefault"}},
"Total number of active/searchable documents among all document dbs", this),
- numIndexedDocs("numindexeddocs", "logdefault",
+ numIndexedDocs("numindexeddocs", {{"logdefault"}},
"Total number of ready/indexed documents among all document dbs (equal as numdocs)", this),
- numStoredDocs("numstoreddocs", "logdefault",
+ numStoredDocs("numstoreddocs", {{"logdefault"}},
"Total number of stored documents among all document dbs", this),
- numRemovedDocs("numremoveddocs", "logdefault",
+ numRemovedDocs("numremoveddocs", {{"logdefault"}},
"Total number of removed documents among all document dbs", this)
{
// supply start value to support sum without any document types
- metrics::LongValueMetric start("start", "", "", 0);
+ metrics::LongValueMetric start("start", {}, "", 0);
memoryUsage.setStartValue(start);
diskUsage.setStartValue(start);
docsInMemory.setStartValue(start);
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/legacy_sessionmanager_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/legacy_sessionmanager_metrics.cpp
index 097398f42ad..6d780739848 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/legacy_sessionmanager_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/legacy_sessionmanager_metrics.cpp
@@ -5,12 +5,12 @@
namespace proton {
LegacySessionManagerMetrics::LegacySessionManagerMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("sessionmanager", "", "Grouping session manager metrics", parent),
- numInsert("numinsert", "", "Number of inserted sessions", this),
- numPick("numpick", "", "Number if picked sessions", this),
- numDropped("numdropped", "", "Number of dropped cached sessions", this),
- numCached("numcached", "", "Number of currently cached sessions", this),
- numTimedout("numtimedout", "", "Number of timed out sessions", this)
+ : metrics::MetricSet("sessionmanager", {}, "Grouping session manager metrics", parent),
+ numInsert("numinsert", {}, "Number of inserted sessions", this),
+ numPick("numpick", {}, "Number of picked sessions", this),
+ numDropped("numdropped", {}, "Number of dropped cached sessions", this),
+ numCached("numcached", {}, "Number of currently cached sessions", this),
+ numTimedout("numtimedout", {}, "Number of timed out sessions", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/memory_usage_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/memory_usage_metrics.cpp
index 7a5d62b52f7..b675e80920f 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/memory_usage_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/memory_usage_metrics.cpp
@@ -6,11 +6,11 @@
namespace proton {
MemoryUsageMetrics::MemoryUsageMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("memory_usage", "", "The memory usage for a given component", parent),
- _allocatedBytes("allocated_bytes", "", "The number of allocated bytes", this),
- _usedBytes("used_bytes", "", "The number of used bytes (<= allocatedbytes)", this),
- _deadBytes("dead_bytes", "", "The number of dead bytes (<= usedbytes)", this),
- _onHoldBytes("onhold_bytes", "", "The number of bytes on hold", this)
+ : metrics::MetricSet("memory_usage", {}, "The memory usage for a given component", parent),
+ _allocatedBytes("allocated_bytes", {}, "The number of allocated bytes", this),
+ _usedBytes("used_bytes", {}, "The number of used bytes (<= allocatedbytes)", this),
+ _deadBytes("dead_bytes", {}, "The number of dead bytes (<= usedbytes)", this),
+ _onHoldBytes("onhold_bytes", {}, "The number of bytes on hold", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/resource_usage_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/resource_usage_metrics.cpp
index d944287243a..43e4e4d3f75 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/resource_usage_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/resource_usage_metrics.cpp
@@ -5,14 +5,14 @@
namespace proton {
ResourceUsageMetrics::ResourceUsageMetrics(metrics::MetricSet *parent)
- : MetricSet("resource_usage", "", "Usage metrics for various resources in this search engine", parent),
- disk("disk", "", "The relative amount of disk space used on this machine (value in the range [0, 1])", this),
- diskUtilization("disk_utilization", "", "The relative amount of disk used compared to the disk resource limit", this),
- memory("memory", "", "The relative amount of memory used by this process (value in the range [0, 1])", this),
- memoryUtilization("memory_utilization", "", "The relative amount of memory used compared to the memory resource limit", this),
- memoryMappings("memory_mappings", "", "The number of mapped memory areas", this),
- openFileDescriptors("open_file_descriptors", "", "The number of open files", this),
- feedingBlocked("feeding_blocked", "", "Whether feeding is blocked due to resource limits being reached (value is either 0 or 1)", this)
+ : MetricSet("resource_usage", {}, "Usage metrics for various resources in this search engine", parent),
+ disk("disk", {}, "The relative amount of disk space used on this machine (value in the range [0, 1])", this),
+ diskUtilization("disk_utilization", {}, "The relative amount of disk used compared to the disk resource limit", this),
+ memory("memory", {}, "The relative amount of memory used by this process (value in the range [0, 1])", this),
+ memoryUtilization("memory_utilization", {}, "The relative amount of memory used compared to the memory resource limit", this),
+ memoryMappings("memory_mappings", {}, "The number of mapped memory areas", this),
+ openFileDescriptors("open_file_descriptors", {}, "The number of open files", this),
+ feedingBlocked("feeding_blocked", {}, "Whether feeding is blocked due to resource limits being reached (value is either 0 or 1)", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/sessionmanager_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/sessionmanager_metrics.cpp
index 5fc41acf33c..82ec8ad485b 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/sessionmanager_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/sessionmanager_metrics.cpp
@@ -6,12 +6,12 @@
namespace proton {
SessionManagerMetrics::SessionManagerMetrics(const vespalib::string &name, metrics::MetricSet *parent)
- : metrics::MetricSet(name, "", vespalib::make_string("Session manager cache metrics for %s", name.c_str()), parent),
- numInsert("num_insert", "", "Number of inserted sessions", this),
- numPick("num_pick", "", "Number if picked sessions", this),
- numDropped("num_dropped", "", "Number of dropped cached sessions", this),
- numCached("num_cached", "", "Number of currently cached sessions", this),
- numTimedout("num_timedout", "", "Number of timed out sessions", this)
+ : metrics::MetricSet(name, {}, vespalib::make_string("Session manager cache metrics for %s", name.c_str()), parent),
+ numInsert("num_insert", {}, "Number of inserted sessions", this),
+ numPick("num_pick", {}, "Number of picked sessions", this),
+ numDropped("num_dropped", {}, "Number of dropped cached sessions", this),
+ numCached("num_cached", {}, "Number of currently cached sessions", this),
+ numTimedout("num_timedout", {}, "Number of timed out sessions", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/trans_log_server_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/trans_log_server_metrics.cpp
index c2624719d81..a6e8ac54e86 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/trans_log_server_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/trans_log_server_metrics.cpp
@@ -11,9 +11,9 @@ TransLogServerMetrics::DomainMetrics::DomainMetrics(metrics::MetricSet *parent,
const vespalib::string &documentType)
: metrics::MetricSet("transactionlog", {{"documenttype", documentType}},
"Transaction log metrics for a document type", parent),
- entries("entries", "", "The current number of entries in the transaction log", this),
- diskUsage("disk_usage", "", "The disk usage (in bytes) of the transaction log", this),
- replayTime("replay_time", "", "The replay time (in seconds) of the transaction log during start-up", this)
+ entries("entries", {}, "The current number of entries in the transaction log", this),
+ diskUsage("disk_usage", {}, "The disk usage (in bytes) of the transaction log", this),
+ replayTime("replay_time", {}, "The replay time (in seconds) of the transaction log during start-up", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.cpp b/searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.cpp
index 7497ae0bf2f..9744b24a74c 100644
--- a/searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.cpp
+++ b/searchcore/src/vespa/searchcore/proton/summaryengine/summaryengine.cpp
@@ -47,10 +47,10 @@ uint32_t getNumDocs(const DocsumReply &reply) {
namespace proton {
SummaryEngine::DocsumMetrics::DocsumMetrics()
- : metrics::MetricSet("docsum", "", "Docsum metrics", nullptr),
- count("count", "logdefault", "Docsum requests handled", this),
- docs("docs", "logdefault", "Total docsums returned", this),
- latency("latency", "logdefault", "Docsum request latency", this)
+ : metrics::MetricSet("docsum", {}, "Docsum metrics", nullptr),
+ count("count", {{"logdefault"}}, "Docsum requests handled", this),
+ docs("docs", {{"logdefault"}}, "Total docsums returned", this),
+ latency("latency", {{"logdefault"}}, "Docsum request latency", this)
{
}
diff --git a/searchlib/src/vespa/searchlib/engine/transport_metrics.cpp b/searchlib/src/vespa/searchlib/engine/transport_metrics.cpp
index d291f968379..62d2b9d5489 100644
--- a/searchlib/src/vespa/searchlib/engine/transport_metrics.cpp
+++ b/searchlib/src/vespa/searchlib/engine/transport_metrics.cpp
@@ -5,26 +5,26 @@
namespace search::engine {
TransportMetrics::QueryMetrics::QueryMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("query", "", "Query metrics", parent),
- count("count", "logdefault", "Query requests handled", this),
- latency("latency", "logdefault", "Query request latency", this)
+ : metrics::MetricSet("query", {}, "Query metrics", parent),
+ count("count", {{"logdefault"}}, "Query requests handled", this),
+ latency("latency", {{"logdefault"}}, "Query request latency", this)
{
}
TransportMetrics::QueryMetrics::~QueryMetrics() = default;
TransportMetrics::DocsumMetrics::DocsumMetrics(metrics::MetricSet *parent)
- : metrics::MetricSet("docsum", "", "Docsum metrics", parent),
- count("count", "logdefault", "Docsum requests handled", this),
- docs("docs", "logdefault", "Total docsums returned", this),
- latency("latency", "logdefault", "Docsum request latency", this)
+ : metrics::MetricSet("docsum", {}, "Docsum metrics", parent),
+ count("count", {{"logdefault"}}, "Docsum requests handled", this),
+ docs("docs", {{"logdefault"}}, "Total docsums returned", this),
+ latency("latency", {{"logdefault"}}, "Docsum request latency", this)
{
}
TransportMetrics::DocsumMetrics::~DocsumMetrics() = default;
TransportMetrics::TransportMetrics()
- : metrics::MetricSet("transport", "", "Transport server metrics", nullptr),
+ : metrics::MetricSet("transport", {}, "Transport server metrics", nullptr),
updateLock(),
query(this),
docsum(this)
diff --git a/slobrok/src/apps/slobrok/slobrok.cpp b/slobrok/src/apps/slobrok/slobrok.cpp
index e69f2df53f0..0588c90d383 100644
--- a/slobrok/src/apps/slobrok/slobrok.cpp
+++ b/slobrok/src/apps/slobrok/slobrok.cpp
@@ -94,10 +94,6 @@ App::Main()
LOG(error, "unknown exception during construction : %s", e.what());
EV_STOPPING("slobrok", "unknown exception during construction");
return 2;
- } catch (...) {
- LOG(error, "unknown exception during construction");
- EV_STOPPING("slobrok", "unknown exception during construction");
- return 3;
}
mainobj.reset();
return res;
diff --git a/slobrok/src/vespa/slobrok/server/sbenv.cpp b/slobrok/src/vespa/slobrok/server/sbenv.cpp
index 163113de7b9..4e510b61e70 100644
--- a/slobrok/src/vespa/slobrok/server/sbenv.cpp
+++ b/slobrok/src/vespa/slobrok/server/sbenv.cpp
@@ -190,9 +190,9 @@ SBEnv::MainLoop()
LOG(error, "invalid config: %s", e.what());
EV_STOPPING("slobrok", "invalid config");
return 1;
- } catch (...) {
- LOG(error, "unknown exception while configuring");
- EV_STOPPING("slobrok", "unknown config exception");
+ } catch (std::exception &e) {
+ LOG(error, "Unexpected std::exception : %s", e.what());
+ EV_STOPPING("slobrok", "Unexpected std::exception");
return 1;
}
EV_STOPPING("slobrok", "clean shutdown");
diff --git a/staging_vespalib/src/vespa/vespalib/metrics/handle.h b/staging_vespalib/src/vespa/vespalib/metrics/handle.h
index ddae0b1b6d2..3e32945ceed 100644
--- a/staging_vespalib/src/vespa/vespalib/metrics/handle.h
+++ b/staging_vespalib/src/vespa/vespalib/metrics/handle.h
@@ -15,9 +15,12 @@ template <typename T>
class Handle {
private:
size_t _id;
+ constexpr Handle() : _id(0) {}
public:
explicit Handle(size_t id) : _id(id) {}
size_t id() const { return _id; }
+
+ static constexpr Handle empty_handle = Handle();
};
template <typename T>
diff --git a/staging_vespalib/src/vespa/vespalib/metrics/name_collection.h b/staging_vespalib/src/vespa/vespalib/metrics/name_collection.h
index 6a91c2d4a9d..6e939cd372d 100644
--- a/staging_vespalib/src/vespa/vespalib/metrics/name_collection.h
+++ b/staging_vespalib/src/vespa/vespalib/metrics/name_collection.h
@@ -23,6 +23,8 @@ public:
NameCollection();
~NameCollection() {}
+
+ static constexpr size_t empty_id = 0;
};
} // namespace vespalib::metrics
diff --git a/storage/src/tests/common/metricstest.cpp b/storage/src/tests/common/metricstest.cpp
index 9d2f566f770..867d132031e 100644
--- a/storage/src/tests/common/metricstest.cpp
+++ b/storage/src/tests/common/metricstest.cpp
@@ -97,7 +97,7 @@ void MetricsTest::setUp() {
_metricManager.reset(new metrics::MetricManager(
std::unique_ptr<metrics::MetricManager::Timer>(
new MetricClock(*_clock))));
- _topSet.reset(new metrics::MetricSet("vds", "", ""));
+ _topSet.reset(new metrics::MetricSet("vds", {}, ""));
{
metrics::MetricLockGuard guard(_metricManager->getMetricLock());
_metricManager->registerMetric(guard, *_topSet);
diff --git a/storage/src/tests/storageserver/statereportertest.cpp b/storage/src/tests/storageserver/statereportertest.cpp
index f4bf7685225..d0cdf41823b 100644
--- a/storage/src/tests/storageserver/statereportertest.cpp
+++ b/storage/src/tests/storageserver/statereportertest.cpp
@@ -88,7 +88,7 @@ void StateReporterTest::setUp() {
_metricManager.reset(new metrics::MetricManager(
std::unique_ptr<metrics::MetricManager::Timer>(
new MetricClock(*_clock))));
- _topSet.reset(new metrics::MetricSet("vds", "", ""));
+ _topSet.reset(new metrics::MetricSet("vds", {}, ""));
{
metrics::MetricLockGuard guard(_metricManager->getMetricLock());
_metricManager->registerMetric(guard, *_topSet);
diff --git a/storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp b/storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp
index b95fb6b814c..a0f5f2f7ec9 100644
--- a/storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp
+++ b/storage/src/vespa/storage/bucketdb/bucketmanagermetrics.cpp
@@ -10,12 +10,12 @@ using vespalib::IllegalStateException;
using vespalib::make_string;
DataStoredMetrics::DataStoredMetrics(const std::string& name, metrics::MetricSet* owner)
- : metrics::MetricSet(name, "partofsum yamasdefault", "", owner),
- buckets("buckets", "", "buckets managed", this),
- docs("docs", "", "documents stored", this),
- bytes("bytes", "", "bytes stored", this),
- active("activebuckets", "", "Number of active buckets on the node", this),
- ready("readybuckets", "", "Number of ready buckets on the node", this)
+ : metrics::MetricSet(name, {{"partofsum"},{"yamasdefault"}}, "", owner),
+ buckets("buckets", {}, "buckets managed", this),
+ docs("docs", {}, "documents stored", this),
+ bytes("bytes", {}, "bytes stored", this),
+ active("activebuckets", {}, "Number of active buckets on the node", this),
+ ready("readybuckets", {}, "Number of ready buckets on the node", this)
{
docs.logOnlyIfSet();
bytes.logOnlyIfSet();
@@ -26,15 +26,15 @@ DataStoredMetrics::DataStoredMetrics(const std::string& name, metrics::MetricSet
DataStoredMetrics::~DataStoredMetrics() { }
BucketManagerMetrics::BucketManagerMetrics()
- : metrics::MetricSet("datastored", "", ""),
+ : metrics::MetricSet("datastored", {}, ""),
disks(),
- total("alldisks", "sum", "Sum of data stored metrics for all disks", this),
- simpleBucketInfoRequestSize("simplebucketinforeqsize", "",
+ total("alldisks", {{"sum"}}, "Sum of data stored metrics for all disks", this),
+ simpleBucketInfoRequestSize("simplebucketinforeqsize", {},
"Amount of buckets returned in simple bucket info requests",
this),
- fullBucketInfoRequestSize("fullbucketinforeqsize", "",
+ fullBucketInfoRequestSize("fullbucketinforeqsize", {},
"Amount of distributors answered at once in full bucket info requests.", this),
- fullBucketInfoLatency("fullbucketinfolatency", "",
+ fullBucketInfoLatency("fullbucketinfolatency", {},
"Amount of time spent to process a full bucket info request", this)
{ }
diff --git a/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp b/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp
index d553b41de7c..84352df5ec9 100644
--- a/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp
+++ b/storage/src/vespa/storage/bucketdb/storagebucketdbinitializer.cpp
@@ -93,26 +93,26 @@ StorageBucketDBInitializer::System::getBucketDatabase(document::BucketSpace buck
}
StorageBucketDBInitializer::Metrics::Metrics(framework::Component& component)
- : metrics::MetricSet("dbinit", "",
+ : metrics::MetricSet("dbinit", {},
"Metrics for the storage bucket database initializer"),
- _wrongDisk("wrongdisk", "",
+ _wrongDisk("wrongdisk", {},
"Number of buckets found on non-ideal disk.", this),
- _insertedCount("insertedcount", "",
+ _insertedCount("insertedcount", {},
"Number of buckets inserted into database in list step.", this),
- _joinedCount("joinedcount", "",
+ _joinedCount("joinedcount", {},
"Number of buckets found in list step already found "
"(added from other disks).", this),
- _infoReadCount("infocount", "",
+ _infoReadCount("infocount", {},
"Number of buckets we have read bucket information from.", this),
- _infoSetByLoad("infosetbyload", "",
+ _infoSetByLoad("infosetbyload", {},
"Number of buckets we did not need to request bucket info for "
"due to load already having updated them.", this),
- _dirsListed("dirslisted", "",
+ _dirsListed("dirslisted", {},
"Directories listed in list step of initialization.", this),
_startTime(component.getClock()),
- _listLatency("listlatency", "",
+ _listLatency("listlatency", {},
"Time used until list phase is done. (in ms)", this),
- _initLatency("initlatency", "",
+ _initLatency("initlatency", {},
"Time used until initialization is complete. (in ms)", this)
{
component.registerMetric(*this);
diff --git a/storage/src/vespa/storage/distributor/distributormetricsset.cpp b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
index 761c5c45889..b7725559b1d 100644
--- a/storage/src/vespa/storage/distributor/distributormetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
@@ -8,7 +8,7 @@ namespace storage {
using metrics::MetricSet;
DistributorMetricSet::DistributorMetricSet(const metrics::LoadTypeSet& lt)
- : MetricSet("distributor", "distributor", ""),
+ : MetricSet("distributor", {{"distributor"}}, ""),
puts(lt, PersistenceOperationMetricSet("puts"), this),
updates(lt, PersistenceOperationMetricSet("updates"), this),
update_puts(lt, PersistenceOperationMetricSet("update_puts"), this),
@@ -19,18 +19,20 @@ DistributorMetricSet::DistributorMetricSet(const metrics::LoadTypeSet& lt)
stats(lt, PersistenceOperationMetricSet("stats"), this),
multioperations(lt, PersistenceOperationMetricSet("multioperations"), this),
visits(lt, VisitorMetricSet(), this),
- stateTransitionTime("state_transition_time", "",
+ stateTransitionTime("state_transition_time", {},
"Time it takes to complete a cluster state transition. If a "
"state transition is preempted before completing, its elapsed "
"time is counted as part of the total time spent for the final, "
"completed state transition", this),
- recoveryModeTime("recoverymodeschedulingtime", "",
+ recoveryModeTime("recoverymodeschedulingtime", {},
"Time spent scheduling operations in recovery mode "
"after receiving new cluster state", this),
- docsStored("docsstored", "logdefault yamasdefault",
+ docsStored("docsstored",
+ {{"logdefault"},{"yamasdefault"}},
"Number of documents stored in all buckets controlled by "
"this distributor", this),
- bytesStored("bytesstored", "logdefault yamasdefault",
+ bytesStored("bytesstored",
+ {{"logdefault"},{"yamasdefault"}},
"Number of bytes stored in all buckets controlled by "
"this distributor", this)
{
diff --git a/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp b/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
index 6d9607f2cd9..d72f4a80ef4 100644
--- a/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
@@ -5,11 +5,17 @@ namespace storage {
namespace distributor {
-OperationMetricSet::OperationMetricSet(const std::string& name, const std::string& tags, const std::string& description, MetricSet* owner)
+OperationMetricSet::OperationMetricSet(const std::string& name, metrics::Metric::Tags tags, const std::string& description, MetricSet* owner)
: MetricSet(name, tags, description, owner),
- pending("pending", "logdefault yamasdefault", "The number of operations pending", this),
- ok("done_ok", "logdefault yamasdefault", "The number of operations successfully performed", this),
- failed("done_failed", "logdefault yamasdefault", "The number of operations that failed", this)
+ pending("pending",
+ {{"logdefault"},{"yamasdefault"}},
+ "The number of operations pending", this),
+ ok("done_ok",
+ {{"logdefault"},{"yamasdefault"}},
+ "The number of operations successfully performed", this),
+ failed("done_failed",
+ {{"logdefault"},{"yamasdefault"}},
+ "The number of operations that failed", this)
{ }
OperationMetricSet::~OperationMetricSet() { }
@@ -19,47 +25,58 @@ IdealStateMetricSet::createOperationMetrics() {
typedef IdealStateOperation ISO;
operations.resize(ISO::OPERATION_COUNT);
operations[ISO::DELETE_BUCKET] = std::shared_ptr<OperationMetricSet>(
- new OperationMetricSet("delete_bucket", "logdefault yamasdefault",
+ new OperationMetricSet("delete_bucket",
+ {{"logdefault"},{"yamasdefault"}},
"Operations to delete excess buckets on storage nodes", this));
operations[ISO::MERGE_BUCKET] = std::shared_ptr<OperationMetricSet>(
- new OperationMetricSet("merge_bucket", "logdefault yamasdefault",
+ new OperationMetricSet("merge_bucket",
+ {{"logdefault"},{"yamasdefault"}},
"Operations to merge buckets that are out of sync", this));
operations[ISO::SPLIT_BUCKET] = std::shared_ptr<OperationMetricSet>(
- new OperationMetricSet("split_bucket", "logdefault yamasdefault",
+ new OperationMetricSet("split_bucket",
+ {{"logdefault"},{"yamasdefault"}},
"Operations to split buckets that are larger than the configured size", this));
operations[ISO::JOIN_BUCKET] = std::shared_ptr<OperationMetricSet>(
- new OperationMetricSet("join_bucket", "logdefault yamasdefault",
+ new OperationMetricSet("join_bucket",
+ {{"logdefault"},{"yamasdefault"}},
"Operations to join buckets that in sum are smaller than the configured size", this));
operations[ISO::SET_BUCKET_STATE] = std::shared_ptr<OperationMetricSet>(
new OperationMetricSet("set_bucket_state",
- "logdefault yamasdefault",
+ {{"logdefault"},{"yamasdefault"}},
"Operations to set active/ready state for bucket copies", this));
operations[ISO::GARBAGE_COLLECTION] = std::shared_ptr<OperationMetricSet>(
new OperationMetricSet("garbage_collection",
- "logdefault yamasdefault",
+ {{"logdefault"},{"yamasdefault"}},
"Operations to garbage collect data from buckets", this));
}
IdealStateMetricSet::IdealStateMetricSet()
- : MetricSet("idealstate", "idealstate", "Statistics for ideal state generation"),
- idealstate_diff("idealstate_diff", "logdefault yamasdefault",
+ : MetricSet("idealstate", {{"idealstate"}}, "Statistics for ideal state generation"),
+ idealstate_diff("idealstate_diff",
+ {{"logdefault"},{"yamasdefault"}},
"A number representing the current difference from the ideal "
"state. This is a number that decreases steadily as the system "
"is getting closer to the ideal state", this),
- buckets_toofewcopies("buckets_toofewcopies", "logdefault yamasdefault",
+ buckets_toofewcopies("buckets_toofewcopies",
+ {{"logdefault"},{"yamasdefault"}},
"The number of buckets the distributor controls that have less "
"than the desired redundancy", this),
- buckets_toomanycopies("buckets_toomanycopies", "logdefault yamasdefault",
+ buckets_toomanycopies("buckets_toomanycopies",
+ {{"logdefault"},{"yamasdefault"}},
"The number of buckets the distributor controls that have more "
"than the desired redundancy", this),
- buckets("buckets", "logdefault yamasdefault", "The number of buckets the distributor controls", this),
- buckets_notrusted("buckets_notrusted", "logdefault yamasdefault",
+ buckets("buckets",
+ {{"logdefault"},{"yamasdefault"}},
+ "The number of buckets the distributor controls", this),
+ buckets_notrusted("buckets_notrusted",
+ {{"logdefault"},{"yamasdefault"}},
"The number of buckets that have no trusted copies.", this),
- buckets_rechecking("buckets_rechecking", "logdefault yamasdefault",
+ buckets_rechecking("buckets_rechecking",
+ {{"logdefault"},{"yamasdefault"}},
"The number of buckets that we are rechecking for "
"ideal state operations", this),
- startOperationsLatency("start_operations_latency", "", "Time used in startOperations()", this),
- nodesPerMerge("nodes_per_merge", "", "The number of nodes involved in a single merge operation.", this)
+ startOperationsLatency("start_operations_latency", {}, "Time used in startOperations()", this),
+ nodesPerMerge("nodes_per_merge", {}, "The number of nodes involved in a single merge operation.", this)
{
createOperationMetrics();
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemetricsset.h b/storage/src/vespa/storage/distributor/idealstatemetricsset.h
index cd88ab672d3..7bb472b4a2c 100644
--- a/storage/src/vespa/storage/distributor/idealstatemetricsset.h
+++ b/storage/src/vespa/storage/distributor/idealstatemetricsset.h
@@ -15,7 +15,7 @@ public:
metrics::LongCountMetric ok;
metrics::LongCountMetric failed;
- OperationMetricSet(const std::string& name, const std::string& tags, const std::string& description, MetricSet* owner);
+ OperationMetricSet(const std::string& name, metrics::Metric::Tags tags, const std::string& description, MetricSet* owner);
~OperationMetricSet();
};
diff --git a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
index 30fa2797ef4..d3ae5d547ed 100644
--- a/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
+++ b/storage/src/vespa/storage/distributor/persistence_operation_metric_set.cpp
@@ -10,24 +10,24 @@ namespace storage {
using metrics::MetricSet;
PersistenceFailuresMetricSet::PersistenceFailuresMetricSet(MetricSet* owner)
- : MetricSet("failures", "", "Detailed failure statistics", owner),
- sum("total", "logdefault yamasdefault", "Sum of all failures", this),
- notready("notready", "", "The number of operations discarded because distributor was not ready", this),
- notconnected("notconnected", "", "The number of operations discarded because there were no available storage nodes to send to", this),
- wrongdistributor("wrongdistributor", "", "The number of operations discarded because they were sent to the wrong distributor", this),
- safe_time_not_reached("safe_time_not_reached", "",
+ : MetricSet("failures", {}, "Detailed failure statistics", owner),
+ sum("total", {{"logdefault"},{"yamasdefault"}}, "Sum of all failures", this),
+ notready("notready", {}, "The number of operations discarded because distributor was not ready", this),
+ notconnected("notconnected", {}, "The number of operations discarded because there were no available storage nodes to send to", this),
+ wrongdistributor("wrongdistributor", {}, "The number of operations discarded because they were sent to the wrong distributor", this),
+ safe_time_not_reached("safe_time_not_reached", {},
"The number of operations that were transiently"
" failed due to them arriving before the safe "
"time point for bucket ownership handovers has "
"passed", this),
- storagefailure("storagefailure", "", "The number of operations that failed in storage", this),
- timeout("timeout", "", "The number of operations that failed because the operation timed out towards storage", this),
- busy("busy", "", "The number of messages from storage that failed because the storage node was busy", this),
- inconsistent_bucket("inconsistent_bucket", "",
+ storagefailure("storagefailure", {}, "The number of operations that failed in storage", this),
+ timeout("timeout", {}, "The number of operations that failed because the operation timed out towards storage", this),
+ busy("busy", {}, "The number of messages from storage that failed because the storage node was busy", this),
+ inconsistent_bucket("inconsistent_bucket", {},
"The number of operations failed due to buckets "
"being in an inconsistent state or not found", this),
- notfound("notfound", "", "The number of operations that failed because the document did not exist", this),
- concurrent_mutations("concurrent_mutations", "", "The number of operations that were transiently failed due "
+ notfound("notfound", {}, "The number of operations that failed because the document did not exist", this),
+ concurrent_mutations("concurrent_mutations", {}, "The number of operations that were transiently failed due "
"to a mutating operation already being in progress for its document ID", this)
{
sum.addMetricToSum(notready);
@@ -55,9 +55,9 @@ PersistenceFailuresMetricSet::clone(std::vector<Metric::UP>& ownerList, CopyType
}
PersistenceOperationMetricSet::PersistenceOperationMetricSet(const std::string& name, MetricSet* owner)
- : MetricSet(name, "", vespalib::make_string("Statistics for the %s command", name.c_str()), owner),
- latency("latency", "yamasdefault", vespalib::make_string("The average latency of %s operations", name.c_str()), this),
- ok("ok", "logdefault yamasdefault", vespalib::make_string("The number of successful %s operations performed", name.c_str()), this),
+ : MetricSet(name, {}, vespalib::make_string("Statistics for the %s command", name.c_str()), owner),
+ latency("latency", {{"yamasdefault"}}, vespalib::make_string("The average latency of %s operations", name.c_str()), this),
+ ok("ok", {{"logdefault"},{"yamasdefault"}}, vespalib::make_string("The number of successful %s operations performed", name.c_str()), this),
failures(this)
{ }
diff --git a/storage/src/vespa/storage/distributor/visitormetricsset.cpp b/storage/src/vespa/storage/distributor/visitormetricsset.cpp
index 999ab2d577f..84b1174962c 100644
--- a/storage/src/vespa/storage/distributor/visitormetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/visitormetricsset.cpp
@@ -10,13 +10,13 @@ using metrics::MetricSet;
VisitorMetricSet::VisitorMetricSet(MetricSet* owner)
: PersistenceOperationMetricSet("visitor", owner),
- buckets_per_visitor("buckets_per_visitor", "",
+ buckets_per_visitor("buckets_per_visitor", {},
"The number of sub buckets visited as part of a "
"single client visitor command", this),
- docs_per_visitor("docs_per_visitor", "",
+ docs_per_visitor("docs_per_visitor", {},
"The number of documents visited on content nodes as "
"part of a single client visitor command", this),
- bytes_per_visitor("bytes_per_visitor", "",
+ bytes_per_visitor("bytes_per_visitor", {},
"The number of bytes visited on content nodes as part "
"of a single client visitor command", this)
{
diff --git a/storage/src/vespa/storage/frameworkimpl/status/statuswebserver.cpp b/storage/src/vespa/storage/frameworkimpl/status/statuswebserver.cpp
index 483cffaf751..c7d8bf24e82 100644
--- a/storage/src/vespa/storage/frameworkimpl/status/statuswebserver.cpp
+++ b/storage/src/vespa/storage/frameworkimpl/status/statuswebserver.cpp
@@ -258,10 +258,6 @@ StatusWebServer::handlePage(const framework::HttpUrlPath& urlpath, std::ostream&
HttpErrorWriter writer(out, "500 Internal Server Error");
writer << "<pre>" << e.what() << "</pre>";
pageExisted = true;
- } catch (...) {
- HttpErrorWriter writer(out, "500 Internal Server Error");
- writer << "Unknown exception";
- pageExisted = true;
}
if (pageExisted) {
LOG(spam, "Status finished request");
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp b/storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp
index 52d95e9a3ed..0be046b2e9e 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestormetrics.cpp
@@ -10,11 +10,11 @@ using metrics::MetricSet;
using metrics::LoadTypeSet;
FileStorThreadMetrics::Op::Op(const std::string& id, const std::string& name, MetricSet* owner)
- : MetricSet(id, "", name + " load in filestor thread", owner),
+ : MetricSet(id, {}, name + " load in filestor thread", owner),
_name(name),
- count("count", "yamasdefault", "Number of requests processed.", this),
- latency("latency", "yamasdefault", "Latency of successful requests.", this),
- failed("failed", "yamasdefault", "Number of failed requests.", this)
+ count("count", {{"yamasdefault"}}, "Number of requests processed.", this),
+ latency("latency", {{"yamasdefault"}}, "Latency of successful requests.", this),
+ failed("failed", {{"yamasdefault"}}, "Number of failed requests.", this)
{ }
FileStorThreadMetrics::Op::~Op() = default;
@@ -34,7 +34,7 @@ FileStorThreadMetrics::Op::clone(std::vector<Metric::UP>& ownerList,
template <typename BaseOp>
FileStorThreadMetrics::OpWithRequestSize<BaseOp>::OpWithRequestSize(const std::string& id, const std::string& name, MetricSet* owner)
: BaseOp(id, name, owner),
- request_size("request_size", "", "Size of requests, in bytes", this)
+ request_size("request_size", {}, "Size of requests, in bytes", this)
{
}
@@ -59,8 +59,9 @@ FileStorThreadMetrics::OpWithRequestSize<BaseOp>::clone(
FileStorThreadMetrics::OpWithNotFound::OpWithNotFound(const std::string& id, const std::string& name, MetricSet* owner)
: Op(id, name, owner),
- notFound("not_found", "", "Number of requests that could not be "
- "completed due to source document not found.", this)
+ notFound("not_found", {},
+ "Number of requests that could not be completed due to source document not found.",
+ this)
{ }
FileStorThreadMetrics::OpWithNotFound::~OpWithNotFound() = default;
@@ -81,7 +82,7 @@ FileStorThreadMetrics::OpWithNotFound::clone(std::vector<Metric::UP>& ownerList,
FileStorThreadMetrics::Update::Update(MetricSet* owner)
: OpWithRequestSize("update", "Update", owner),
- latencyRead("latency_read", "", "Latency of the source read in the request.", this)
+ latencyRead("latency_read", {}, "Latency of the source read in the request.", this)
{ }
FileStorThreadMetrics::Update::~Update() = default;
@@ -100,7 +101,7 @@ FileStorThreadMetrics::Update::clone(std::vector<Metric::UP>& ownerList,
FileStorThreadMetrics::Visitor::Visitor(MetricSet* owner)
: Op("visit", "Visit", owner),
- documentsPerIterate("docs", "", "Number of entries read per iterate call", this)
+ documentsPerIterate("docs", {}, "Number of entries read per iterate call", this)
{ }
FileStorThreadMetrics::Visitor::~Visitor() = default;
@@ -118,9 +119,9 @@ FileStorThreadMetrics::Visitor::clone(std::vector<Metric::UP>& ownerList,
}
FileStorThreadMetrics::FileStorThreadMetrics(const std::string& name, const std::string& desc, const LoadTypeSet& lt)
- : MetricSet(name, "filestor partofsum", desc),
- operations("operations", "", "Number of operations processed.", this),
- failedOperations("failedoperations", "", "Number of operations throwing exceptions.", this),
+ : MetricSet(name, {{"filestor"},{"partofsum"}}, desc),
+ operations("operations", {}, "Number of operations processed.", this),
+ failedOperations("failedoperations", {}, "Number of operations throwing exceptions.", this),
put(lt, OpWithRequestSize<Op>("put", "Put"), this),
get(lt, OpWithRequestSize<OpWithNotFound>("get", "Get"), this),
remove(lt, OpWithRequestSize<OpWithNotFound>("remove", "Remove"), this),
@@ -128,13 +129,13 @@ FileStorThreadMetrics::FileStorThreadMetrics(const std::string& name, const std:
statBucket(lt, Op("stat_bucket", "Stat bucket"), this),
update(lt, Update(), this),
revert(lt, OpWithNotFound("revert", "Revert"), this),
- createIterator("createiterator", "", this),
+ createIterator("createiterator", {}, this),
visit(lt, Visitor(), this),
multiOp(lt, Op("multioperations", "The number of multioperations that have been created"), this),
createBuckets("createbuckets", "Number of buckets that has been created.", this),
deleteBuckets("deletebuckets", "Number of buckets that has been deleted.", this),
repairs("bucketverified", "Number of times buckets have been checked.", this),
- repairFixed("bucketfixed", "", "Number of times bucket has been fixed because of corruption", this),
+ repairFixed("bucketfixed", {}, "Number of times bucket has been fixed because of corruption", this),
recheckBucketInfo("recheckbucketinfo",
"Number of times bucket info has been explicitly "
"rechecked due to buckets being marked modified by "
@@ -151,24 +152,24 @@ FileStorThreadMetrics::FileStorThreadMetrics(const std::string& name, const std:
mergeBuckets("mergebuckets", "Number of times buckets have been merged.", this),
getBucketDiff("getbucketdiff", "Number of getbucketdiff commands that have been processed.", this),
applyBucketDiff("applybucketdiff", "Number of applybucketdiff commands that have been processed.", this),
- bytesMerged("bytesmerged", "", "Total number of bytes merged into this node.", this),
- getBucketDiffReply("getbucketdiffreply", "", "Number of getbucketdiff replies that have been processed.", this),
- applyBucketDiffReply("applybucketdiffreply", "", "Number of applybucketdiff replies that have been processed.", this),
- mergeLatencyTotal("mergelatencytotal", "",
+ bytesMerged("bytesmerged", {}, "Total number of bytes merged into this node.", this),
+ getBucketDiffReply("getbucketdiffreply", {}, "Number of getbucketdiff replies that have been processed.", this),
+ applyBucketDiffReply("applybucketdiffreply", {}, "Number of applybucketdiff replies that have been processed.", this),
+ mergeLatencyTotal("mergelatencytotal", {},
"Latency of total merge operation, from master node receives "
"it, until merge is complete and master node replies.", this),
- mergeMetadataReadLatency("mergemetadatareadlatency", "",
+ mergeMetadataReadLatency("mergemetadatareadlatency", {},
"Latency of time used in a merge step to check metadata of "
"current node to see what data it has.", this),
- mergeDataReadLatency("mergedatareadlatency", "",
+ mergeDataReadLatency("mergedatareadlatency", {},
"Latency of time used in a merge step to read data other "
"nodes need.", this),
- mergeDataWriteLatency("mergedatawritelatency", "",
+ mergeDataWriteLatency("mergedatawritelatency", {},
"Latency of time used in a merge step to write data needed to "
"current node.", this),
- mergeAverageDataReceivedNeeded("mergeavgdatareceivedneeded", "", "Amount of data transferred from previous node "
+ mergeAverageDataReceivedNeeded("mergeavgdatareceivedneeded", {}, "Amount of data transferred from previous node "
"in chain that we needed to apply locally.", this),
- batchingSize("batchingsize", "", "Number of operations batched per bucket (only counts "
+ batchingSize("batchingsize", {}, "Number of operations batched per bucket (only counts "
"batches of size > 1)", this)
{ }
@@ -176,9 +177,9 @@ FileStorThreadMetrics::~FileStorThreadMetrics() = default;
FileStorStripeMetrics::FileStorStripeMetrics(const std::string& name, const std::string& description,
const LoadTypeSet& loadTypes)
- : MetricSet(name, "partofsum", description),
+ : MetricSet(name, {{"partofsum"}}, description),
averageQueueWaitingTime(loadTypes,
- metrics::DoubleAverageMetric("averagequeuewait", "",
+ metrics::DoubleAverageMetric("averagequeuewait", {},
"Average time an operation spends in input queue."),
this)
{
@@ -188,19 +189,19 @@ FileStorStripeMetrics::~FileStorStripeMetrics() = default;
FileStorDiskMetrics::FileStorDiskMetrics(const std::string& name, const std::string& description,
const metrics::LoadTypeSet& loadTypes, MetricSet* owner)
- : MetricSet(name, "partofsum", description, owner),
- sumThreads("allthreads", "sum", "", this),
- sumStripes("allstripes", "sum", "", this),
+ : MetricSet(name, {{"partofsum"}}, description, owner),
+ sumThreads("allthreads", {{"sum"}}, "", this),
+ sumStripes("allstripes", {{"sum"}}, "", this),
averageQueueWaitingTime(loadTypes,
- metrics::DoubleAverageMetric("averagequeuewait", "",
+ metrics::DoubleAverageMetric("averagequeuewait", {},
"Average time an operation spends in input queue."),
this),
- queueSize("queuesize", "", "Size of input message queue.", this),
- pendingMerges("pendingmerge", "", "Number of buckets currently being merged.", this),
- waitingForLockHitRate("waitingforlockrate", "",
+ queueSize("queuesize", {}, "Size of input message queue.", this),
+ pendingMerges("pendingmerge", {}, "Number of buckets currently being merged.", this),
+ waitingForLockHitRate("waitingforlockrate", {},
"Amount of times a filestor thread has needed to wait for "
"lock to take next message in queue.", this),
- lockWaitTime("lockwaittime", "", "Amount of time waiting used waiting for lock.", this)
+ lockWaitTime("lockwaittime", {}, "Amount of time waiting used waiting for lock.", this)
{
pendingMerges.unsetOnZeroValue();
waitingForLockHitRate.unsetOnZeroValue();
@@ -236,11 +237,11 @@ FileStorDiskMetrics::initDiskMetrics(const LoadTypeSet& loadTypes, uint32_t numS
}
FileStorMetrics::FileStorMetrics(const LoadTypeSet&)
- : MetricSet("filestor", "filestor", ""),
- sum("alldisks", "sum", "", this),
- directoryEvents("directoryevents", "", "Number of directory events received.", this),
- partitionEvents("partitionevents", "", "Number of partition events received.", this),
- diskEvents("diskevents", "", "Number of disk events received.", this)
+ : MetricSet("filestor", {{"filestor"}}, ""),
+ sum("alldisks", {{"sum"}}, "", this),
+ directoryEvents("directoryevents", {}, "Number of directory events received.", this),
+ partitionEvents("partitionevents", {}, "Number of partition events received.", this),
+ diskEvents("diskevents", {}, "Number of disk events received.", this)
{ }
FileStorMetrics::~FileStorMetrics() = default;
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index 26cfb0e6566..1e9cc7c0cde 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -1361,11 +1361,8 @@ MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply,
} catch (std::exception& e) {
_env._fileStorHandler.clearMergeStatus(
bucket.getBucket(),
- api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE,
- e.what()));
+ api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE, e.what()));
throw;
- } catch (...) {
- assert(false);
}
if (clearState) {
@@ -1586,8 +1583,6 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE,
e.what()));
throw;
- } catch (...) {
- assert(false);
}
if (clearState) {
diff --git a/storage/src/vespa/storage/storageserver/bouncer_metrics.cpp b/storage/src/vespa/storage/storageserver/bouncer_metrics.cpp
index 2ee39ad22ec..c0fac35263e 100644
--- a/storage/src/vespa/storage/storageserver/bouncer_metrics.cpp
+++ b/storage/src/vespa/storage/storageserver/bouncer_metrics.cpp
@@ -5,12 +5,12 @@
namespace storage {
BouncerMetrics::BouncerMetrics()
- : MetricSet("bouncer", "", "Metrics for Bouncer component", nullptr),
- clock_skew_aborts("clock_skew_aborts", "", "Number of client operations that were aborted due to "
+ : MetricSet("bouncer", {}, "Metrics for Bouncer component", nullptr),
+ clock_skew_aborts("clock_skew_aborts", {}, "Number of client operations that were aborted due to "
"clock skew between sender and receiver exceeding acceptable range", this)
{
}
BouncerMetrics::~BouncerMetrics() = default;
-} \ No newline at end of file
+}
diff --git a/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp b/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
index 166aa25bc68..66e0209d4b7 100644
--- a/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
+++ b/storage/src/vespa/storage/storageserver/changedbucketownershiphandler.cpp
@@ -89,10 +89,10 @@ allDistributorsDownInState(const lib::ClusterState& state) {
}
ChangedBucketOwnershipHandler::Metrics::Metrics(metrics::MetricSet* owner)
- : metrics::MetricSet("changedbucketownershiphandler", "", "", owner),
- averageAbortProcessingTime("avg_abort_processing_time", "", "Average time spent aborting operations for changed buckets", this),
- idealStateOpsAborted("ideal_state_ops_aborted", "", "Number of outdated ideal state operations aborted", this),
- externalLoadOpsAborted("external_load_ops_aborted", "", "Number of outdated external load operations aborted", this)
+ : metrics::MetricSet("changedbucketownershiphandler", {}, "", owner),
+ averageAbortProcessingTime("avg_abort_processing_time", {}, "Average time spent aborting operations for changed buckets", this),
+ idealStateOpsAborted("ideal_state_ops_aborted", {}, "Number of outdated ideal state operations aborted", this),
+ externalLoadOpsAborted("external_load_ops_aborted", {}, "Number of outdated external load operations aborted", this)
{}
ChangedBucketOwnershipHandler::Metrics::~Metrics() { }
diff --git a/storage/src/vespa/storage/storageserver/communicationmanager.cpp b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
index 783bde40981..eee688b1fb2 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanager.cpp
+++ b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
@@ -481,9 +481,6 @@ CommunicationManager::process(const std::shared_ptr<api::StorageMessage>& msg)
LOGBP(error, "When running command %s, caught exception %s. Discarding message",
msg->toString().c_str(), e.what());
_metrics.exceptionMessageProcessTime[msg->getLoadType()].addValue(startTime.getElapsedTimeAsDouble());
- } catch (...) {
- LOG(fatal, "Caught fatal exception in communication manager");
- throw;
}
}
diff --git a/storage/src/vespa/storage/storageserver/communicationmanagermetrics.cpp b/storage/src/vespa/storage/storageserver/communicationmanagermetrics.cpp
index 5d2caddb200..5f2bed07f66 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanagermetrics.cpp
+++ b/storage/src/vespa/storage/storageserver/communicationmanagermetrics.cpp
@@ -7,24 +7,24 @@ using namespace metrics;
namespace storage {
CommunicationManagerMetrics::CommunicationManagerMetrics(const LoadTypeSet& loadTypes, MetricSet* owner)
- : MetricSet("communication", "", "Metrics for the communication manager", owner),
- queueSize("messagequeue", "", "Size of input message queue.", this),
+ : MetricSet("communication", {}, "Metrics for the communication manager", owner),
+ queueSize("messagequeue", {}, "Size of input message queue.", this),
messageProcessTime(loadTypes,
- DoubleAverageMetric("messageprocesstime", "",
+ DoubleAverageMetric("messageprocesstime", {},
"Time transport thread uses to process a single message"),
this),
exceptionMessageProcessTime(loadTypes,
- DoubleAverageMetric("exceptionmessageprocesstime", "",
+ DoubleAverageMetric("exceptionmessageprocesstime", {},
"Time transport thread uses to process a single message "
"that fails with an exception thrown into communication manager"),
this),
- failedDueToTooLittleMemory("toolittlememory", "", "Number of messages failed due to too little memory available", this),
- convertToStorageAPIFailures("convertfailures", "",
+ failedDueToTooLittleMemory("toolittlememory", {}, "Number of messages failed due to too little memory available", this),
+ convertToStorageAPIFailures("convertfailures", {},
"Number of messages that failed to get converted to storage API messages", this),
- bucketSpaceMappingFailures("bucket_space_mapping_failures", "",
+ bucketSpaceMappingFailures("bucket_space_mapping_failures", {},
"Number of messages that could not be resolved to a known bucket space", this),
- sendCommandLatency("sendcommandlatency", "", "Average ms used to send commands to MBUS", this),
- sendReplyLatency("sendreplylatency", "", "Average ms used to send replies to MBUS", this)
+ sendCommandLatency("sendcommandlatency", {}, "Average ms used to send commands to MBUS", this),
+ sendReplyLatency("sendreplylatency", {}, "Average ms used to send replies to MBUS", this)
{
}
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.cpp b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
index a9d54c196c9..92ed01ca5d9 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.cpp
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
@@ -67,26 +67,26 @@ MergeThrottler::ChainedMergeState::ChainedMergeState(const api::StorageMessage::
MergeThrottler::ChainedMergeState::~ChainedMergeState() {}
MergeThrottler::Metrics::Metrics(metrics::MetricSet* owner)
- : metrics::MetricSet("mergethrottler", "", "", owner),
- averageQueueWaitingTime("averagequeuewaitingtime", "", "Average time a merge spends in the throttler queue", this),
- bounced_due_to_back_pressure("bounced_due_to_back_pressure", "", "Number of merges bounced due to resource exhaustion back-pressure", this),
+ : metrics::MetricSet("mergethrottler", {}, "", owner),
+ averageQueueWaitingTime("averagequeuewaitingtime", {}, "Average time a merge spends in the throttler queue", this),
+ bounced_due_to_back_pressure("bounced_due_to_back_pressure", {}, "Number of merges bounced due to resource exhaustion back-pressure", this),
chaining("mergechains", this),
local("locallyexecutedmerges", this)
{ }
MergeThrottler::Metrics::~Metrics() {}
MergeThrottler::MergeFailureMetrics::MergeFailureMetrics(metrics::MetricSet* owner)
- : metrics::MetricSet("failures", "", "Detailed failure statistics", owner),
- sum("total", "", "Sum of all failures", this),
- notready("notready", "", "The number of merges discarded because distributor was not ready", this),
- timeout("timeout", "", "The number of merges that failed because they timed out towards storage", this),
- aborted("aborted", "", "The number of merges that failed because the storage node was (most likely) shutting down", this),
- wrongdistribution("wrongdistribution", "", "The number of merges that were discarded (flushed) because they were initiated at an older cluster state than the current", this),
- bucketnotfound("bucketnotfound", "", "The number of operations that failed because the bucket did not exist", this),
- busy("busy", "", "The number of merges that failed because the storage node was busy", this),
- exists("exists", "", "The number of merges that were rejected due to a merge operation for their bucket already being processed", this),
- rejected("rejected", "", "The number of merges that were rejected", this),
- other("other", "", "The number of other failures", this)
+ : metrics::MetricSet("failures", {}, "Detailed failure statistics", owner),
+ sum("total", {}, "Sum of all failures", this),
+ notready("notready", {}, "The number of merges discarded because distributor was not ready", this),
+ timeout("timeout", {}, "The number of merges that failed because they timed out towards storage", this),
+ aborted("aborted", {}, "The number of merges that failed because the storage node was (most likely) shutting down", this),
+ wrongdistribution("wrongdistribution", {}, "The number of merges that were discarded (flushed) because they were initiated at an older cluster state than the current", this),
+ bucketnotfound("bucketnotfound", {}, "The number of operations that failed because the bucket did not exist", this),
+ busy("busy", {}, "The number of merges that failed because the storage node was busy", this),
+ exists("exists", {}, "The number of merges that were rejected due to a merge operation for their bucket already being processed", this),
+ rejected("rejected", {}, "The number of merges that were rejected", this),
+ other("other", {}, "The number of other failures", this)
{
sum.addMetricToSum(notready);
sum.addMetricToSum(timeout);
@@ -102,8 +102,8 @@ MergeThrottler::MergeFailureMetrics::~MergeFailureMetrics() { }
MergeThrottler::MergeOperationMetrics::MergeOperationMetrics(const std::string& name, metrics::MetricSet* owner)
- : metrics::MetricSet(name, "", vespalib::make_string("Statistics for %s", name.c_str()), owner),
- ok("ok", "", vespalib::make_string("The number of successful merges for '%s'", name.c_str()), this),
+ : metrics::MetricSet(name, {}, vespalib::make_string("Statistics for %s", name.c_str()), owner),
+ ok("ok", {}, vespalib::make_string("The number of successful merges for '%s'", name.c_str()), this),
failures(this)
{
}
diff --git a/storage/src/vespa/storage/storageserver/storagemetricsset.cpp b/storage/src/vespa/storage/storageserver/storagemetricsset.cpp
index fde33d524b8..4ea9a9f9296 100644
--- a/storage/src/vespa/storage/storageserver/storagemetricsset.cpp
+++ b/storage/src/vespa/storage/storageserver/storagemetricsset.cpp
@@ -6,39 +6,39 @@
namespace storage {
MessageMemoryUseMetricSet::MessageMemoryUseMetricSet(metrics::MetricSet* owner)
- : metrics::MetricSet("message_memory_use", "memory", "Message use from storage messages", owner),
- total("total", "memory", "Message use from storage messages", this),
- lowpri("lowpri", "memory", "Message use from low priority storage messages", this),
- normalpri("normalpri", "memory", "Message use from normal priority storage messages", this),
- highpri("highpri", "memory", "Message use from high priority storage messages", this),
- veryhighpri("veryhighpri", "memory", "Message use from very high priority storage messages", this)
+ : metrics::MetricSet("message_memory_use", {{"memory"}}, "Message use from storage messages", owner),
+ total("total", {{"memory"}}, "Message use from storage messages", this),
+ lowpri("lowpri", {{"memory"}}, "Message use from low priority storage messages", this),
+ normalpri("normalpri", {{"memory"}}, "Message use from normal priority storage messages", this),
+ highpri("highpri", {{"memory"}}, "Message use from high priority storage messages", this),
+ veryhighpri("veryhighpri", {{"memory"}}, "Message use from very high priority storage messages", this)
{ }
MessageMemoryUseMetricSet::~MessageMemoryUseMetricSet() {}
DocumentSerializationMetricSet::DocumentSerializationMetricSet(metrics::MetricSet* owner)
- : metrics::MetricSet("document_serialization", "docserialization",
+ : metrics::MetricSet("document_serialization", {{"docserialization"}},
"Counts of document serialization of various types", owner),
usedCachedSerializationCount(
- "cached_serialization_count", "docserialization",
+ "cached_serialization_count", {{"docserialization"}},
"Number of times we didn't need to serialize the document as "
"we already had serialized version cached", this),
compressedDocumentCount(
- "compressed_serialization_count", "docserialization",
+ "compressed_serialization_count", {{"docserialization"}},
"Number of times we compressed document when serializing",
this),
compressionDidntHelpCount(
- "compressed_didnthelp_count", "docserialization",
+ "compressed_didnthelp_count", {{"docserialization"}},
"Number of times we compressed document when serializing, but "
"the compressed version was bigger, so it was dumped", this),
uncompressableCount(
- "uncompressable_serialization_count", "docserialization",
+ "uncompressable_serialization_count", {{"docserialization"}},
"Number of times we didn't attempt compression as document "
"had already been tagged uncompressable", this),
serializedUncompressed(
- "uncompressed_serialization_count", "docserialization",
+ "uncompressed_serialization_count", {{"docserialization"}},
"Number of times we serialized a document uncompressed", this),
inputWronglySerialized(
- "input_wrongly_serialized_count", "docserialization",
+ "input_wrongly_serialized_count", {{"docserialization"}},
"Number of times we reserialized a document because the "
"compression it had in cache did not match what was configured",
this)
@@ -46,11 +46,11 @@ DocumentSerializationMetricSet::DocumentSerializationMetricSet(metrics::MetricSe
DocumentSerializationMetricSet::~DocumentSerializationMetricSet() { }
StorageMetricSet::StorageMetricSet()
- : metrics::MetricSet("server", "memory",
+ : metrics::MetricSet("server", {{"memory"}},
"Metrics for VDS applications"),
- memoryUse("memoryusage", "memory", "", this),
+ memoryUse("memoryusage", {{"memory"}}, "", this),
memoryUse_messages(this),
- memoryUse_visiting("memoryusage_visiting", "memory",
+ memoryUse_visiting("memoryusage_visiting", {{"memory"}},
"Message use from visiting", this),
documentSerialization(this)
{ }
diff --git a/storage/src/vespa/storage/visiting/visitormetrics.cpp b/storage/src/vespa/storage/visiting/visitormetrics.cpp
index e6903dc6f11..191f9fabc60 100644
--- a/storage/src/vespa/storage/visiting/visitormetrics.cpp
+++ b/storage/src/vespa/storage/visiting/visitormetrics.cpp
@@ -7,26 +7,26 @@
namespace storage {
VisitorMetrics::VisitorMetrics()
- : metrics::MetricSet("visitor", "visitor", ""),
- queueSize("cv_queuesize", "", "Size of create visitor queue", this),
- queueSkips("cv_skipqueue", "",
+ : metrics::MetricSet("visitor", {{"visitor"}}, ""),
+ queueSize("cv_queuesize", {}, "Size of create visitor queue", this),
+ queueSkips("cv_skipqueue", {},
"Number of times we could skip queue as we had free visitor "
"spots", this),
- queueFull("cv_queuefull", "",
+ queueFull("cv_queuefull", {},
"Number of create visitor messages failed as queue is full",
this),
- queueWaitTime("cv_queuewaittime", "",
+ queueWaitTime("cv_queuewaittime", {},
"Milliseconds waiting in create visitor queue, for visitors "
"that was added to visitor queue but scheduled later", this),
- queueTimeoutWaitTime("cv_queuetimeoutwaittime", "",
+ queueTimeoutWaitTime("cv_queuetimeoutwaittime", {},
"Milliseconds waiting in create visitor queue, for visitors "
"that timed out while in the visitor quueue", this),
- queueEvictedWaitTime("cv_queueevictedwaittime", "",
+ queueEvictedWaitTime("cv_queueevictedwaittime", {},
"Milliseconds waiting in create visitor queue, for visitors "
"that was evicted from queue due to higher priority visitors "
"coming", this),
threads(),
- sum("allthreads", "sum", "", this)
+ sum("allthreads", {{"sum"}}, "", this)
{
queueSize.unsetOnZeroValue();
}
diff --git a/storage/src/vespa/storage/visiting/visitorthreadmetrics.h b/storage/src/vespa/storage/visiting/visitorthreadmetrics.h
index 991674eaec1..c21fe09cdb4 100644
--- a/storage/src/vespa/storage/visiting/visitorthreadmetrics.h
+++ b/storage/src/vespa/storage/visiting/visitorthreadmetrics.h
@@ -33,68 +33,68 @@ struct VisitorThreadMetrics : public metrics::MetricSet
VisitorThreadMetrics(const std::string& name,
const std::string& desc,
const metrics::LoadTypeSet& loadTypes)
- : metrics::MetricSet(name, "visitor partofsum thread", desc),
- queueSize("queuesize", "",
+ : metrics::MetricSet(name, {{"visitor"},{"partofsum"},{"thread"}}, desc),
+ queueSize("queuesize", {},
"Size of input message queue.", this),
averageQueueWaitingTime(
loadTypes,
DOUBLE("averagequeuewait",
- "",
+ {},
"Average time an operation spends in input queue."),
this),
averageVisitorLifeTime(
loadTypes,
DOUBLE("averagevisitorlifetime",
- "",
+ {},
"Average lifetime of a visitor"),
this),
averageVisitorCreationTime(
loadTypes,
DOUBLE("averagevisitorcreationtime",
- "",
+ {},
"Average time spent creating a visitor instance"),
this),
averageMessageSendTime(
loadTypes,
DOUBLE("averagemessagesendtime",
- "",
+ {},
"Average time it takes for messages to be sent to "
"their target (and be replied to)"),
this),
averageProcessingTime(
loadTypes,
DOUBLE("averageprocessingtime",
- "",
+ {},
"Average time visitor uses in handleDocuments() call"),
this),
createdVisitors(
loadTypes,
COUNT("created",
- "",
+ {},
"Number of visitors created."),
this),
abortedVisitors(
loadTypes,
COUNT("aborted",
- "",
+ {},
"Number of visitors aborted."),
this),
completedVisitors(
loadTypes,
COUNT("completed",
- "",
+ {},
"Number of visitors completed"),
this),
failedVisitors(
loadTypes,
COUNT("failed",
- "",
+ {},
"Number of visitors failed"),
this),
visitorDestinationFailureReplies(
loadTypes,
COUNT("destination_failure_replies",
- "",
+ {},
"Number of failure replies received from "
"the visitor destination"),
this)
diff --git a/storageframework/src/vespa/storageframework/defaultimplementation/component/componentregisterimpl.cpp b/storageframework/src/vespa/storageframework/defaultimplementation/component/componentregisterimpl.cpp
index ae448664f14..fd16f43050a 100644
--- a/storageframework/src/vespa/storageframework/defaultimplementation/component/componentregisterimpl.cpp
+++ b/storageframework/src/vespa/storageframework/defaultimplementation/component/componentregisterimpl.cpp
@@ -10,7 +10,7 @@ namespace storage::framework::defaultimplementation {
ComponentRegisterImpl::ComponentRegisterImpl()
: _componentLock(),
_components(),
- _topMetricSet("vds", "", ""),
+ _topMetricSet("vds", {}, ""),
_hooks(),
_metricManager(nullptr),
_clock(nullptr),
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
index 2d127eb86cf..fb55b2d5014 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java
@@ -162,6 +162,15 @@ public class IndexedTensor implements Tensor {
@Override
public TensorType type() { return type; }
+ @Override
+ public IndexedTensor withType(TensorType type) {
+ if (!this.type.isRenamableTo(type)) {
+ throw new IllegalArgumentException("IndexedTensor.withType: types are not compatible. Current type: '" +
+ this.type.toString() + "', requested type: '" + type.toString() + "'");
+ }
+ return new IndexedTensor(type, dimensionSizes, values);
+ }
+
public DimensionSizes dimensionSizes() {
return dimensionSizes;
}
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java
index ef19ef2e96c..ec3020a1a4e 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/MappedTensor.java
@@ -42,6 +42,15 @@ public class MappedTensor implements Tensor {
public Map<TensorAddress, Double> cells() { return cells; }
@Override
+ public Tensor withType(TensorType other) {
+ if (!this.type.isRenamableTo(other)) {
+ throw new IllegalArgumentException("MappedTensor.withType: types are not compatible. Current type: '" +
+ this.type.toString() + "', requested type: '" + other.toString() + "'");
+ }
+ return new MappedTensor(other, cells);
+ }
+
+ @Override
public int hashCode() { return cells.hashCode(); }
@Override
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java
index 5ff33aa340b..17e33c58a13 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/MixedTensor.java
@@ -99,6 +99,15 @@ public class MixedTensor implements Tensor {
}
@Override
+ public Tensor withType(TensorType other) {
+ if (!this.type.isRenamableTo(other)) {
+ throw new IllegalArgumentException("MixedTensor.withType: types are not compatible. Current type: '" +
+ this.type.toString() + "', requested type: '" + other.toString() + "'");
+ }
+ return new MixedTensor(other, cells, index);
+ }
+
+ @Override
public int hashCode() { return cells.hashCode(); }
@Override
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java b/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
index 59d5ee72372..483ccd330e0 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
@@ -86,6 +86,13 @@ public interface Tensor {
return valueIterator().next();
}
+ /**
+ * Returns this tensor with the given type if types are compatible
+ *
+ * @throws IllegalArgumentException if types are not compatible
+ */
+ Tensor withType(TensorType type);
+
// ----------------- Primitive tensor functions
default Tensor map(DoubleUnaryOperator mapper) {
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/TensorType.java b/vespajlib/src/main/java/com/yahoo/tensor/TensorType.java
index 1d447ed3eed..acba9eafd71 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/TensorType.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/TensorType.java
@@ -87,7 +87,7 @@ public class TensorType {
* i.e if the given type is a generalization of this type.
*/
public boolean isAssignableTo(TensorType generalization) {
- return isConvertibleOrAssignableTo(generalization, false);
+ return isConvertibleOrAssignableTo(generalization, false, true);
}
/**
@@ -98,16 +98,25 @@ public class TensorType {
* converted to the given type by zero padding.
*/
public boolean isConvertibleTo(TensorType generalization) {
- return isConvertibleOrAssignableTo(generalization, true);
+ return isConvertibleOrAssignableTo(generalization, true, true);
}
- private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible) {
+ /**
+ * Returns whether or not this type can simply be renamed to
+ * the given type. This is the same as being assignable, but disregarding
+ * dimension names.
+ */
+ public boolean isRenamableTo(TensorType other) {
+ return isConvertibleOrAssignableTo(other, false, false);
+ }
+
+ private boolean isConvertibleOrAssignableTo(TensorType generalization, boolean convertible, boolean considerName) {
if (generalization.dimensions().size() != this.dimensions().size()) return false;
for (int i = 0; i < generalization.dimensions().size(); i++) {
Dimension thisDimension = this.dimensions().get(i);
Dimension generalizationDimension = generalization.dimensions().get(i);
if (thisDimension.isIndexed() != generalizationDimension.isIndexed()) return false;
- if ( ! thisDimension.name().equals(generalizationDimension.name())) return false;
+ if (considerName && ! thisDimension.name().equals(generalizationDimension.name())) return false;
if (generalizationDimension.size().isPresent()) {
if ( ! thisDimension.size().isPresent()) return false;
if (convertible) {
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java b/vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java
index 53d774de329..e18af235d59 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/functions/Rename.java
@@ -95,6 +95,11 @@ public class Rename extends PrimitiveTensorFunction {
toIndexes[i] = renamedType.indexOfDimension(newDimensionName).get();
}
+ // avoid building a new tensor if dimensions can simply be renamed
+ if (simpleRenameIsPossible(toIndexes)) {
+ return tensor.withType(renamedType);
+ }
+
Tensor.Builder builder = Tensor.Builder.of(renamedType);
for (Iterator<Tensor.Cell> i = tensor.cellIterator(); i.hasNext(); ) {
Map.Entry<TensorAddress, Double> cell = i.next();
@@ -104,6 +109,18 @@ public class Rename extends PrimitiveTensorFunction {
return builder.build();
}
+ /**
+ * If none of the dimensions change order after rename we can do a simple rename.
+ */
+ private boolean simpleRenameIsPossible(int[] toIndexes) {
+ for (int i = 0; i < toIndexes.length; ++i) {
+ if (toIndexes[i] != i) {
+ return false;
+ }
+ }
+ return true;
+ }
+
private TensorAddress rename(TensorAddress address, int[] toIndexes) {
String[] reorderedLabels = new String[toIndexes.length];
for (int i = 0; i < toIndexes.length; i++)