summaryrefslogtreecommitdiffstats
path: root/node-repository
diff options
context:
space:
mode:
authorValerij Fredriksen <freva@users.noreply.github.com>2021-06-29 09:51:42 +0200
committerGitHub <noreply@github.com>2021-06-29 09:51:42 +0200
commitfcda3fe473cc3b5639feda0e474cb31549713134 (patch)
treebf7720ba77ee43028d9062614aa20375c9064525 /node-repository
parent7afae61c7200d9a1c0e4e066e8fe57c39613e57b (diff)
parent658fb7d7d770967ba8969285a3bd5ccd484485f9 (diff)
Merge pull request #18445 from vespa-engine/bratseth/serialize-getWriter
Serialize calls to table.getWriter
Diffstat (limited to 'node-repository')
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java76
1 file changed, 38 insertions, 38 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
index a2b8ae5b8cf..aa2a898d58b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
@@ -91,71 +91,71 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
- try (TableWriter writer = nodeTable.getWriter()) {
- addNodeMetrics(snapshots, writer);
+ try {
+ addNodeMetricsBody(snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
// This error seems non-recoverable
nodeTable.repair(e);
- try (TableWriter writer = nodeTable.getWriter()) {
- addNodeMetrics(snapshots, writer);
- }
+ addNodeMetricsBody(snapshots);
}
}
}
- private void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots, TableWriter writer) {
+ private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
synchronized (nodeTable.writeLock) {
- for (var snapshot : snapshots) {
- Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
- if (atMillis.isEmpty()) continue;
- TableWriter.Row row = writer.newRow(atMillis.get() * 1000); // in microseconds
- row.putStr(0, snapshot.getFirst());
- // (1 is timestamp)
- row.putFloat(2, (float) snapshot.getSecond().load().cpu());
- row.putFloat(3, (float) snapshot.getSecond().load().memory());
- row.putFloat(4, (float) snapshot.getSecond().load().disk());
- row.putLong(5, snapshot.getSecond().generation());
- row.putBool(6, snapshot.getSecond().inService());
- row.putBool(7, snapshot.getSecond().stable());
- row.putFloat(8, (float) snapshot.getSecond().queryRate());
- row.append();
+ try (TableWriter writer = nodeTable.getWriter()) {
+ for (var snapshot : snapshots) {
+ Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
+ if (atMillis.isEmpty()) continue;
+ TableWriter.Row row = writer.newRow(atMillis.get() * 1000); // in microseconds
+ row.putStr(0, snapshot.getFirst());
+ // (1 is timestamp)
+ row.putFloat(2, (float) snapshot.getSecond().load().cpu());
+ row.putFloat(3, (float) snapshot.getSecond().load().memory());
+ row.putFloat(4, (float) snapshot.getSecond().load().disk());
+ row.putLong(5, snapshot.getSecond().generation());
+ row.putBool(6, snapshot.getSecond().inService());
+ row.putBool(7, snapshot.getSecond().stable());
+ row.putFloat(8, (float) snapshot.getSecond().queryRate());
+ row.append();
+ }
+ writer.commit();
}
- writer.commit();
}
}
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
- try (TableWriter writer = clusterTable.getWriter()) {
- addClusterMetrics(application, snapshots, writer);
+ try {
+ addClusterMetricsBody(application, snapshots);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
// This error seems non-recoverable
clusterTable.repair(e);
- try (TableWriter writer = clusterTable.getWriter()) {
- addClusterMetrics(application, snapshots, writer);
- }
+ addClusterMetricsBody(application, snapshots);
}
}
}
- private void addClusterMetrics(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots, TableWriter writer) {
+ private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
synchronized (clusterTable.writeLock) {
- for (var snapshot : snapshots.entrySet()) {
- Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
- if (atMillis.isEmpty()) continue;
- TableWriter.Row row = writer.newRow(atMillis.get() * 1000); // in microseconds
- row.putStr(0, applicationId.serializedForm());
- row.putStr(1, snapshot.getKey().value());
- // (2 is timestamp)
- row.putFloat(3, (float) snapshot.getValue().queryRate());
- row.putFloat(4, (float) snapshot.getValue().writeRate());
- row.append();
+ try (TableWriter writer = clusterTable.getWriter()) {
+ for (var snapshot : snapshots.entrySet()) {
+ Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
+ if (atMillis.isEmpty()) continue;
+ TableWriter.Row row = writer.newRow(atMillis.get() * 1000); // in microseconds
+ row.putStr(0, applicationId.serializedForm());
+ row.putStr(1, snapshot.getKey().value());
+ // (2 is timestamp)
+ row.putFloat(3, (float) snapshot.getValue().queryRate());
+ row.putFloat(4, (float) snapshot.getValue().writeRate());
+ row.append();
+ }
+ writer.commit();
}
- writer.commit();
}
}