Diffstat (limited to 'node-repository/src/main/java')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java | 27
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java | 52
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterMetricSnapshot.java | 42
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java | 76
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java | 103
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java | 59
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricSnapshot.java (renamed from node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeMetricSnapshot.java) | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsDb.java | 17
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsResponse.java | 39
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java | 16
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java | 186
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Resource.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceTarget.java | 42
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java | 5
17 files changed, 179 insertions, 507 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index bddbcf43bd0..b16859fa6fb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -5,7 +5,6 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
-import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
@@ -128,32 +127,6 @@ public class Cluster {
return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
}
- /** The predicted duration of a rescaling of this cluster */
- public Duration scalingDuration(ClusterSpec clusterSpec) {
- int completedEventCount = 0;
- Duration totalDuration = Duration.ZERO;
- for (ScalingEvent event : scalingEvents()) {
- if (event.duration().isEmpty()) continue;
- completedEventCount++;
- totalDuration = totalDuration.plus(event.duration().get());
- }
-
- if (completedEventCount == 0) { // Use defaults
- if (clusterSpec.isStateful()) return Duration.ofHours(12);
- return Duration.ofMinutes(10);
- }
- else {
- Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
-
- // TODO: Remove when we have reliable completion for content clusters
- if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative())
- return Duration.ofHours(12);
-
- if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative()) return Duration.ofMinutes(5); // minimum
- return predictedDuration;
- }
- }
-
@Override
public int hashCode() { return id.hashCode(); }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 9794d92b04f..2d192fae11f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -2,12 +2,14 @@
package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.config.provision.ClusterResources;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
+import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import java.time.Duration;
import java.time.Instant;
@@ -21,9 +23,9 @@ import java.util.Optional;
*/
public class Autoscaler {
- /** What cost difference is worth a reallocation? */
+ /** What cost difference factor is worth a reallocation? */
private static final double costDifferenceWorthReallocation = 0.1;
- /** What resource difference is worth a reallocation? */
+ /** What difference factor for a resource is worth a reallocation? */
private static final double resourceDifferenceWorthReallocation = 0.1;
private final MetricsDb metricsDb;
@@ -62,27 +64,31 @@ public class Autoscaler {
if ( ! stable(clusterNodes, nodeRepository))
return Advice.none("Cluster change in progress");
- Duration scalingWindow = cluster.scalingDuration(clusterNodes.clusterSpec());
+ Duration scalingWindow = scalingWindow(clusterNodes.clusterSpec(), cluster);
if (scaledIn(scalingWindow, cluster))
return Advice.dontScale("Won't autoscale now: Less than " + scalingWindow + " since last rescaling");
- var clusterNodesTimeseries = new ClusterNodesTimeseries(scalingWindow, cluster, clusterNodes, metricsDb);
- var currentAllocation = new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive());
+ ClusterTimeseries clusterTimeseries =
+ new ClusterTimeseries(scalingWindow, cluster, clusterNodes, metricsDb);
+ AllocatableClusterResources currentAllocation =
+ new AllocatableClusterResources(clusterNodes.asList(), nodeRepository, cluster.exclusive());
- int measurementsPerNode = clusterNodesTimeseries.measurementsPerNode();
+ int measurementsPerNode = clusterTimeseries.measurementsPerNode();
if (measurementsPerNode < minimumMeasurementsPerNode(scalingWindow))
return Advice.none("Collecting more data before making new scaling decisions: " +
"Have " + measurementsPerNode + " measurements per node but require " +
minimumMeasurementsPerNode(scalingWindow));
- int nodesMeasured = clusterNodesTimeseries.nodesMeasured();
+ int nodesMeasured = clusterTimeseries.nodesMeasured();
if (nodesMeasured != clusterNodes.size())
return Advice.none("Collecting more data before making new scaling decisions: " +
"Have measurements from " + nodesMeasured + " but require from " + clusterNodes.size());
+ double cpuLoad = clusterTimeseries.averageLoad(Resource.cpu);
+ double memoryLoad = clusterTimeseries.averageLoad(Resource.memory);
+ double diskLoad = clusterTimeseries.averageLoad(Resource.disk);
- var clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id());
- var target = ResourceTarget.idealLoad(clusterTimeseries, clusterNodesTimeseries, currentAllocation, application);
+ var target = ResourceTarget.idealLoad(cpuLoad, memoryLoad, diskLoad, currentAllocation, application);
Optional<AllocatableClusterResources> bestAllocation =
allocationOptimizer.findBestAllocation(target, currentAllocation, limits);
@@ -122,6 +128,32 @@ public class Autoscaler {
.isAfter(nodeRepository.clock().instant().minus(delay));
}
+ /** The duration of the window we need to consider to make a scaling decision. See also minimumMeasurementsPerNode */
+ private Duration scalingWindow(ClusterSpec clusterSpec, Cluster cluster) {
+ int completedEventCount = 0;
+ Duration totalDuration = Duration.ZERO;
+ for (ScalingEvent event : cluster.scalingEvents()) {
+ if (event.duration().isEmpty()) continue;
+ completedEventCount++;
+ totalDuration = totalDuration.plus(event.duration().get());
+ }
+
+ if (completedEventCount == 0) { // Use defaults
+ if (clusterSpec.isStateful()) return Duration.ofHours(12);
+ return Duration.ofMinutes(10);
+ }
+ else {
+ Duration predictedDuration = totalDuration.dividedBy(completedEventCount);
+
+ // TODO: Remove when we have reliable completion for content clusters
+ if (clusterSpec.isStateful() && predictedDuration.minus(Duration.ofHours(12)).isNegative())
+ return Duration.ofHours(12);
+
+ if (predictedDuration.minus(Duration.ofMinutes(5)).isNegative()) return Duration.ofMinutes(5); // minimum
+ return predictedDuration;
+ }
+ }
+
static Duration maxScalingWindow() {
return Duration.ofHours(48);
}
@@ -181,7 +213,7 @@ public class Autoscaler {
private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); }
private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); }
private static Advice scaleTo(ClusterResources target) {
- return new Advice(Optional.of(target), true, "Scaling to " + target + " due to load changes");
+ return new Advice(Optional.of(target), true, "Scaling due to load changes");
}
@Override
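
For reference, a minimal standalone sketch of the window prediction this diff moves from Cluster.scalingDuration into Autoscaler.scalingWindow; the ScalingWindowSketch class and the plain list of optional durations standing in for ScalingEvent are illustrative assumptions, not code from this change.

import java.time.Duration;
import java.util.List;
import java.util.Optional;

/**
 * Illustrative sketch only: mirrors the averaging and clamping now done in
 * Autoscaler.scalingWindow. Completed events contribute their duration; with no
 * history, defaults of 12 hours (stateful) or 10 minutes apply.
 */
public class ScalingWindowSketch {

    static Duration scalingWindow(boolean stateful, List<Optional<Duration>> eventDurations) {
        int completed = 0;
        Duration total = Duration.ZERO;
        for (Optional<Duration> duration : eventDurations) {
            if (duration.isEmpty()) continue;                 // event not completed yet
            completed++;
            total = total.plus(duration.get());
        }

        if (completed == 0)                                   // no history: use defaults
            return stateful ? Duration.ofHours(12) : Duration.ofMinutes(10);

        Duration predicted = total.dividedBy(completed);
        if (stateful && predicted.compareTo(Duration.ofHours(12)) < 0)
            return Duration.ofHours(12);                      // content clusters lack reliable completion signals yet
        return predicted.compareTo(Duration.ofMinutes(5)) < 0 ? Duration.ofMinutes(5) : predicted;
    }

    public static void main(String[] args) {
        System.out.println(scalingWindow(false, List.of(Optional.of(Duration.ofMinutes(7)),
                                                        Optional.empty(),
                                                        Optional.of(Duration.ofMinutes(3)))));
        // prints PT5M: the 5-minute minimum applies to the 5-minute average of the two completed events
    }
}
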
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterMetricSnapshot.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterMetricSnapshot.java
deleted file mode 100644
index fd8e91584c4..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterMetricSnapshot.java
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.autoscale;
-
-import java.time.Instant;
-
-/**
- * Cluster level metrics.
- * These are aggregated at fetch time over the nodes in the cluster at that point in time.
- *
- * @author bratseth
- */
-public class ClusterMetricSnapshot implements Comparable<ClusterMetricSnapshot> {
-
- private final Instant at;
-
- private final double queryRate;
-
- public ClusterMetricSnapshot(Instant at, double queryRate) {
- this.at = at;
- this.queryRate = queryRate;
- }
-
- public Instant at() { return at; }
-
- /** Queries per second */
- public double queryRate() { return queryRate; }
-
- public ClusterMetricSnapshot withQueryRate(double queryRate) {
- return new ClusterMetricSnapshot(at, queryRate);
- }
-
- @Override
- public int compareTo(ClusterMetricSnapshot other) {
- return at.compareTo(other.at);
- }
-
- @Override
- public String toString() { return "metrics at " + at + ":" +
- " queryRate: " + queryRate;
- }
-
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java
deleted file mode 100644
index 173d76e4c26..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.autoscale;
-
-import com.yahoo.vespa.hosted.provision.NodeList;
-import com.yahoo.vespa.hosted.provision.applications.Cluster;
-
-import java.time.Duration;
-import java.util.List;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
-/**
- * A series of metric snapshots for the nodes of a cluster used to compute load
- *
- * @author bratseth
- */
-public class ClusterNodesTimeseries {
-
- private final Cluster cluster;
- private final NodeList clusterNodes;
-
- /** The measurements for all nodes in this snapshot */
- private final List<NodeTimeseries> timeseries;
-
- public ClusterNodesTimeseries(Duration period, Cluster cluster, NodeList clusterNodes, MetricsDb db) {
- this.cluster = cluster;
- this.clusterNodes = clusterNodes;
- var timeseries = db.getNodeTimeseries(period, clusterNodes);
-
- if (cluster.lastScalingEvent().isPresent())
- timeseries = filter(timeseries, snapshot -> snapshot.generation() < 0 || // Content nodes do not yet send generation
- snapshot.generation() >= cluster.lastScalingEvent().get().generation());
- timeseries = filter(timeseries, snapshot -> snapshot.inService() && snapshot.stable());
-
- this.timeseries = timeseries;
- }
-
- /** The cluster this is a timeseries for */
- public Cluster cluster() { return cluster; }
-
- /** The nodes of the cluster this is a timeseries for */
- public NodeList clusterNodes() { return clusterNodes; }
-
- /** Returns the average number of measurements per node */
- public int measurementsPerNode() {
- int measurementCount = timeseries.stream().mapToInt(m -> m.size()).sum();
- return measurementCount / clusterNodes.size();
- }
-
- /** Returns the number of nodes measured in this */
- public int nodesMeasured() {
- return timeseries.size();
- }
-
- /** Returns the average load of this resource in this */
- public double averageLoad(Resource resource) {
- int measurementCount = timeseries.stream().mapToInt(m -> m.size()).sum();
- if (measurementCount == 0) return 0;
- double measurementSum = timeseries.stream().flatMap(m -> m.asList().stream()).mapToDouble(m -> value(resource, m)).sum();
- return measurementSum / measurementCount;
- }
-
- private double value(Resource resource, NodeMetricSnapshot snapshot) {
- switch (resource) {
- case cpu: return snapshot.cpu();
- case memory: return snapshot.memory();
- case disk: return snapshot.disk();
- default: throw new IllegalArgumentException("Got an unknown resource " + resource);
- }
- }
-
- private List<NodeTimeseries> filter(List<NodeTimeseries> timeseries, Predicate<NodeMetricSnapshot> filter) {
- return timeseries.stream().map(nodeTimeseries -> nodeTimeseries.filter(filter)).collect(Collectors.toList());
- }
-
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java
index bc0fe528464..e359579117f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterTimeseries.java
@@ -1,103 +1,70 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.autoscale;
-import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import java.time.Duration;
import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
- * A list of metric snapshots from a cluster, sorted by increasing time (newest last).
+ * A series of metric snapshots for all nodes in a cluster
*
* @author bratseth
*/
public class ClusterTimeseries {
- private final ClusterSpec.Id cluster;
- private final List<ClusterMetricSnapshot> snapshots;
+ private final NodeList clusterNodes;
- ClusterTimeseries(ClusterSpec.Id cluster, List<ClusterMetricSnapshot> snapshots) {
- this.cluster = cluster;
- List<ClusterMetricSnapshot> sortedSnapshots = new ArrayList<>(snapshots);
- Collections.sort(sortedSnapshots);
- this.snapshots = Collections.unmodifiableList(sortedSnapshots);
- }
-
- public boolean isEmpty() { return snapshots.isEmpty(); }
-
- public int size() { return snapshots.size(); }
-
- public ClusterMetricSnapshot get(int index) { return snapshots.get(index); }
+ /** The measurements for all nodes in this snapshot */
+ private final List<NodeTimeseries> allTimeseries;
- public List<ClusterMetricSnapshot> asList() { return snapshots; }
+ public ClusterTimeseries(Duration period, Cluster cluster, NodeList clusterNodes, MetricsDb db) {
+ this.clusterNodes = clusterNodes;
+ var timeseries = db.getNodeTimeseries(period, clusterNodes);
- public ClusterSpec.Id cluster() { return cluster; }
+ if (cluster.lastScalingEvent().isPresent())
+ timeseries = filter(timeseries, snapshot -> snapshot.generation() < 0 || // Content nodes do not yet send generation
+ snapshot.generation() >= cluster.lastScalingEvent().get().generation());
+ timeseries = filter(timeseries, snapshot -> snapshot.inService() && snapshot.stable());
- public ClusterTimeseries add(ClusterMetricSnapshot snapshot) {
- List<ClusterMetricSnapshot> list = new ArrayList<>(snapshots);
- list.add(snapshot);
- return new ClusterTimeseries(cluster, list);
+ this.allTimeseries = timeseries;
}
- /** The max query growth rate we can predict from this time-series as a fraction of the current traffic per minute */
- public double maxQueryGrowthRate() {
- if (snapshots.isEmpty()) return 0.1;
-
- // Find the period having the highest growth rate, where total growth exceeds 30% increase
- double maxGrowthRate = 0; // In query rate per minute
- for (int start = 0; start < snapshots.size(); start++) {
- if (start > 0) { // Optimization: Skip this point when starting from the previous is better relative to the best rate so far
- Duration duration = durationBetween(start - 1, start);
- if (duration.toMinutes() != 0) {
- double growthRate = (queryRateAt(start - 1) - queryRateAt(start)) / duration.toMinutes();
- if (growthRate >= maxGrowthRate)
- continue;
- }
- }
- for (int end = start + 1; end < snapshots.size(); end++) {
- if (queryRateAt(end) >= queryRateAt(start) * 1.3) {
- Duration duration = durationBetween(start, end);
- if (duration.toMinutes() == 0) continue;
- double growthRate = (queryRateAt(end) - queryRateAt(start)) / duration.toMinutes();
- if (growthRate > maxGrowthRate)
- maxGrowthRate = growthRate;
- }
- }
- }
- if (maxGrowthRate == 0) { // No periods of significant growth
- if (durationBetween(0, snapshots.size() - 1).toHours() < 24)
- return 0.1; // ... because not much data
- else
- return 0.0; // ... because load is stable
- }
- if (queryRateNow() == 0) return 0.1; // Growth not expressible as a fraction of the current rate
- return maxGrowthRate / queryRateNow();
+ /** Returns the average number of measurements per node */
+ public int measurementsPerNode() {
+ int measurementCount = allTimeseries.stream().mapToInt(m -> m.size()).sum();
+ return measurementCount / clusterNodes.size();
}
- /** The current query rate as a fraction of the peak rate in this timeseries */
- public double currentQueryFractionOfMax() {
- if (snapshots.isEmpty()) return 0.5;
- var max = snapshots.stream().mapToDouble(ClusterMetricSnapshot::queryRate).max().getAsDouble();
- return snapshots.get(snapshots.size() - 1).queryRate() / max;
+ /** Returns the number of nodes measured in this */
+ public int nodesMeasured() {
+ return allTimeseries.size();
}
- private double queryRateAt(int index) {
- return snapshots.get(index).queryRate();
+ /** Returns the average load of this resource in this */
+ public double averageLoad(Resource resource) {
+ int measurementCount = allTimeseries.stream().mapToInt(m -> m.size()).sum();
+ if (measurementCount == 0) return 0;
+ double measurementSum = allTimeseries.stream().flatMap(m -> m.asList().stream()).mapToDouble(m -> value(resource, m)).sum();
+ return measurementSum / measurementCount;
}
- private double queryRateNow() {
- return queryRateAt(snapshots.size() - 1);
+ private double value(Resource resource, MetricSnapshot snapshot) {
+ switch (resource) {
+ case cpu: return snapshot.cpu();
+ case memory: return snapshot.memory();
+ case disk: return snapshot.disk();
+ default: throw new IllegalArgumentException("Got an unknown resource " + resource);
+ }
}
- private Duration durationBetween(int startIndex, int endIndex) {
- return Duration.between(snapshots.get(startIndex).at(), snapshots.get(endIndex).at());
+ private List<NodeTimeseries> filter(List<NodeTimeseries> timeseries, Predicate<MetricSnapshot> filter) {
+ return timeseries.stream().map(nodeTimeseries -> nodeTimeseries.filter(filter)).collect(Collectors.toList());
}
}
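
As an aside, the averageLoad aggregation above is a flat average over every snapshot of every node; a minimal sketch under that reading follows, with a simplified Snapshot record standing in for MetricSnapshot.

import java.util.List;

/** Illustrative sketch only: a flat per-snapshot average, as in ClusterTimeseries.averageLoad above. */
class AverageLoadSketch {

    record Snapshot(double cpu, double memory, double disk) { }  // simplified stand-in for MetricSnapshot

    static double averageCpu(List<List<Snapshot>> perNodeSnapshots) {
        long count = perNodeSnapshots.stream().mapToLong(List::size).sum();
        if (count == 0) return 0;                                 // no measurements yet: report zero load
        double sum = perNodeSnapshots.stream()
                                     .flatMap(List::stream)
                                     .mapToDouble(Snapshot::cpu)
                                     .sum();
        return sum / count;
    }

    public static void main(String[] args) {
        var nodeA = List.of(new Snapshot(0.2, 0.5, 0.3), new Snapshot(0.4, 0.5, 0.3));
        var nodeB = List.of(new Snapshot(0.6, 0.5, 0.3));
        System.out.println(averageCpu(List.of(nodeA, nodeB)));    // prints roughly 0.4 = (0.2 + 0.4 + 0.6) / 3
    }
}
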
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
index bf8d354665a..1b1e5933604 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
@@ -2,12 +2,9 @@
package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.collections.Pair;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
@@ -29,10 +26,8 @@ public class MemoryMetricsDb implements MetricsDb {
private final NodeRepository nodeRepository;
- /** Metric time series by node (hostname). Each list of metric snapshots is sorted by increasing timestamp */
- private final Map<String, NodeTimeseries> nodeTimeseries = new HashMap<>();
-
- private final Map<Pair<ApplicationId, ClusterSpec.Id>, ClusterTimeseries> clusterTimeseries = new HashMap<>();
+ /** Metric time series by node (hostname). Each list of metric snapshots is sorted by increasing timestamp */
+ private final Map<String, NodeTimeseries> db = new HashMap<>();
/** Lock all access for now since we modify lists inside a map */
private final Object lock = new Object();
@@ -42,10 +37,7 @@ public class MemoryMetricsDb implements MetricsDb {
}
@Override
- public Clock clock() { return nodeRepository.clock(); }
-
- @Override
- public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics) {
+ public void add(Collection<Pair<String, MetricSnapshot>> nodeMetrics) {
synchronized (lock) {
for (var value : nodeMetrics) {
add(value.getFirst(), value.getSecond());
@@ -54,48 +46,27 @@ public class MemoryMetricsDb implements MetricsDb {
}
@Override
- public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics) {
- synchronized (lock) {
- for (var value : clusterMetrics.entrySet()) {
- add(application, value.getKey(), value.getValue());
- }
- }
- }
-
- public void clearClusterMetrics(ApplicationId application, ClusterSpec.Id cluster) {
- synchronized (lock) {
- clusterTimeseries.remove(new Pair<>(application, cluster));
- }
- }
-
- @Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
Instant startTime = nodeRepository.clock().instant().minus(period);
synchronized (lock) {
return hostnames.stream()
- .map(hostname -> nodeTimeseries.getOrDefault(hostname, new NodeTimeseries(hostname, List.of())).justAfter(startTime))
+ .map(hostname -> db.getOrDefault(hostname, new NodeTimeseries(hostname, List.of())).justAfter(startTime))
.collect(Collectors.toList());
}
}
@Override
- public ClusterTimeseries getClusterTimeseries(ApplicationId application, ClusterSpec.Id cluster) {
- return clusterTimeseries.computeIfAbsent(new Pair<>(application, cluster),
- __ -> new ClusterTimeseries(cluster, new ArrayList<>()));
- }
-
- @Override
public void gc() {
synchronized (lock) {
// Each measurement is Object + long + float = 16 + 8 + 4 = 28 bytes
// 12 hours with 1k nodes and 3 resources and 1 measurement/sec is about 5Gb
- for (String hostname : nodeTimeseries.keySet()) {
- var timeseries = nodeTimeseries.get(hostname);
+ for (String hostname : db.keySet()) {
+ var timeseries = db.get(hostname);
timeseries = timeseries.justAfter(nodeRepository.clock().instant().minus(Autoscaler.maxScalingWindow()));
if (timeseries.isEmpty())
- nodeTimeseries.remove(hostname);
+ db.remove(hostname);
else
- nodeTimeseries.put(hostname, timeseries);
+ db.put(hostname, timeseries);
}
}
}
@@ -103,22 +74,16 @@ public class MemoryMetricsDb implements MetricsDb {
@Override
public void close() {}
- private void add(String hostname, NodeMetricSnapshot snapshot) {
- NodeTimeseries timeseries = nodeTimeseries.get(hostname);
+ private void add(String hostname, MetricSnapshot snapshot) {
+ NodeTimeseries timeseries = db.get(hostname);
if (timeseries == null) { // new node
Optional<Node> node = nodeRepository.nodes().node(hostname);
if (node.isEmpty()) return;
if (node.get().allocation().isEmpty()) return;
timeseries = new NodeTimeseries(hostname, new ArrayList<>());
- nodeTimeseries.put(hostname, timeseries);
+ db.put(hostname, timeseries);
}
- nodeTimeseries.put(hostname, timeseries.add(snapshot));
- }
-
- private void add(ApplicationId application, ClusterSpec.Id cluster, ClusterMetricSnapshot snapshot) {
- var key = new Pair<>(application, cluster);
- var existing = clusterTimeseries.computeIfAbsent(key, __ -> new ClusterTimeseries(cluster, new ArrayList<>()));
- clusterTimeseries.put(key, existing.add(snapshot));
+ db.put(hostname, timeseries.add(snapshot));
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeMetricSnapshot.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricSnapshot.java
index be9f7bd4819..82812592809 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeMetricSnapshot.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricSnapshot.java
@@ -8,7 +8,7 @@ import java.time.Instant;
*
* @author bratseth
*/
-public class NodeMetricSnapshot implements Comparable<NodeMetricSnapshot> {
+public class MetricSnapshot implements Comparable<MetricSnapshot> {
private final Instant at;
@@ -20,9 +20,9 @@ public class NodeMetricSnapshot implements Comparable<NodeMetricSnapshot> {
private final boolean stable;
private final double queryRate;
- public NodeMetricSnapshot(Instant at, double cpu, double memory, double disk,
- long generation, boolean inService, boolean stable,
- double queryRate) {
+ public MetricSnapshot(Instant at, double cpu, double memory, double disk,
+ long generation, boolean inService, boolean stable,
+ double queryRate) {
this.at = at;
this.cpu = cpu;
this.memory = memory;
@@ -48,7 +48,7 @@ public class NodeMetricSnapshot implements Comparable<NodeMetricSnapshot> {
public boolean stable() { return stable; }
@Override
- public int compareTo(NodeMetricSnapshot other) {
+ public int compareTo(MetricSnapshot other) {
return at.compareTo(other.at);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsDb.java
index 568c5f88661..6fdc87f2448 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsDb.java
@@ -2,17 +2,15 @@
package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.collections.Pair;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.time.Clock;
import java.time.Duration;
+import java.time.Instant;
import java.util.Collection;
import java.util.List;
-import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
@@ -23,12 +21,8 @@ import java.util.stream.Collectors;
*/
public interface MetricsDb {
- Clock clock();
-
- /** Adds node snapshots to this. */
- void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics);
-
- void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics);
+ /** Adds snapshots to this. */
+ void add(Collection<Pair<String, MetricSnapshot>> nodeMetrics);
/**
* Returns a list with one entry for each hostname containing
@@ -42,15 +36,12 @@ public interface MetricsDb {
return getNodeTimeseries(period, nodes.stream().map(Node::hostname).collect(Collectors.toSet()));
}
- /** Returns all cluster level metric snapshots for a given cluster */
- ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId);
-
/** Must be called intermittently (as long as add is called) to gc old data */
void gc();
void close();
- static MemoryMetricsDb createTestInstance(NodeRepository nodeRepository) {
+ static MetricsDb createTestInstance(NodeRepository nodeRepository) {
return new MemoryMetricsDb(nodeRepository);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsResponse.java
index 0fa7a0e0bb1..d6661b89536 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsResponse.java
@@ -11,6 +11,7 @@ import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.applications.Application;
import java.time.Instant;
import java.util.ArrayList;
@@ -27,21 +28,14 @@ import java.util.Optional;
*/
public class MetricsResponse {
- /** Node level metrics */
- private final Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics;
-
- /**
- * Cluster level metrics.
- * Must be aggregated at fetch time to avoid issues with nodes and nodes joining/leaving the cluster over time.
- */
- private final Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics = new HashMap<>();
+ private final Collection<Pair<String, MetricSnapshot>> nodeMetrics;
/** Creates this from a metrics/V2 response */
public MetricsResponse(String response, NodeList applicationNodes, NodeRepository nodeRepository) {
this(SlimeUtils.jsonToSlime(response), applicationNodes, nodeRepository);
}
- public MetricsResponse(Collection<Pair<String, NodeMetricSnapshot>> metrics) {
+ public MetricsResponse(Collection<Pair<String, MetricSnapshot>> metrics) {
this.nodeMetrics = metrics;
}
@@ -52,9 +46,7 @@ public class MetricsResponse {
nodes.traverse((ArrayTraverser)(__, node) -> consumeNode(node, applicationNodes, nodeRepository));
}
- public Collection<Pair<String, NodeMetricSnapshot>> nodeMetrics() { return nodeMetrics; }
-
- public Map<ClusterSpec.Id, ClusterMetricSnapshot> clusterMetrics() { return clusterMetrics; }
+ public Collection<Pair<String, MetricSnapshot>> metrics() { return nodeMetrics; }
private void consumeNode(Inspector node, NodeList applicationNodes, NodeRepository nodeRepository) {
String hostname = node.field("hostname").asString();
@@ -67,21 +59,14 @@ public class MetricsResponse {
if (node.isEmpty()) return; // Node is not part of this cluster any more
long timestampSecond = nodeData.field("timestamp").asLong();
Map<String, Double> values = consumeMetrics(nodeData.field("metrics"));
- Instant at = Instant.ofEpochMilli(timestampSecond * 1000);
-
- nodeMetrics.add(new Pair<>(hostname, new NodeMetricSnapshot(at,
- Metric.cpu.from(values),
- Metric.memory.from(values),
- Metric.disk.from(values),
- (long)Metric.generation.from(values),
- Metric.inService.from(values) > 0,
- clusterIsStable(node.get(), applicationNodes, nodeRepository),
- Metric.queryRate.from(values))));
-
- var cluster = node.get().allocation().get().membership().cluster().id();
- var metrics = clusterMetrics.getOrDefault(cluster, new ClusterMetricSnapshot(at, 0.0));
- metrics = metrics.withQueryRate(metrics.queryRate() + Metric.queryRate.from(values));
- clusterMetrics.put(cluster, metrics);
+ nodeMetrics.add(new Pair<>(hostname, new MetricSnapshot(Instant.ofEpochMilli(timestampSecond * 1000),
+ Metric.cpu.from(values),
+ Metric.memory.from(values),
+ Metric.disk.from(values),
+ (long)Metric.generation.from(values),
+ Metric.inService.from(values) > 0,
+ clusterIsStable(node.get(), applicationNodes, nodeRepository),
+ Metric.queryRate.from(values))));
}
private boolean clusterIsStable(Node node, NodeList applicationNodes, NodeRepository nodeRepository) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java
index cedc2edfe63..24876609f58 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java
@@ -16,11 +16,11 @@ import java.util.stream.Collectors;
public class NodeTimeseries {
private final String hostname;
- private final List<NodeMetricSnapshot> snapshots;
+ private final List<MetricSnapshot> snapshots;
- NodeTimeseries(String hostname, List<NodeMetricSnapshot> snapshots) {
+ NodeTimeseries(String hostname, List<MetricSnapshot> snapshots) {
this.hostname = hostname;
- List<NodeMetricSnapshot> sortedSnapshots = new ArrayList<>(snapshots);
+ List<MetricSnapshot> sortedSnapshots = new ArrayList<>(snapshots);
Collections.sort(sortedSnapshots);
this.snapshots = Collections.unmodifiableList(sortedSnapshots);
}
@@ -29,19 +29,19 @@ public class NodeTimeseries {
public int size() { return snapshots.size(); }
- public NodeMetricSnapshot get(int index) { return snapshots.get(index); }
+ public MetricSnapshot get(int index) { return snapshots.get(index); }
- public List<NodeMetricSnapshot> asList() { return snapshots; }
+ public List<MetricSnapshot> asList() { return snapshots; }
public String hostname() { return hostname; }
- public NodeTimeseries add(NodeMetricSnapshot snapshot) {
- List<NodeMetricSnapshot> list = new ArrayList<>(snapshots);
+ public NodeTimeseries add(MetricSnapshot snapshot) {
+ List<MetricSnapshot> list = new ArrayList<>(snapshots);
list.add(snapshot);
return new NodeTimeseries(hostname(), list);
}
- public NodeTimeseries filter(Predicate<NodeMetricSnapshot> filter) {
+ public NodeTimeseries filter(Predicate<MetricSnapshot> filter) {
return new NodeTimeseries(hostname, snapshots.stream().filter(filter).collect(Collectors.toList()));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
index efa1de6bb97..37e70e3539a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java
@@ -5,8 +5,6 @@ import com.google.inject.Inject;
import com.yahoo.collections.ListMap;
import com.yahoo.collections.Pair;
import com.yahoo.component.AbstractComponent;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.io.IOUtils;
import com.yahoo.vespa.defaults.Defaults;
import io.questdb.cairo.CairoConfiguration;
@@ -32,7 +30,6 @@ import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
-import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -48,8 +45,7 @@ import java.util.stream.Collectors;
public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
- private static final String nodeTable = "metrics";
- private static final String clusterTable = "clusterMetrics";
+ private static final String table = "metrics";
private final Clock clock;
private final String dataDir;
@@ -73,8 +69,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
private void initializeDb() {
- IOUtils.createDirectory(dataDir + "/" + nodeTable);
- IOUtils.createDirectory(dataDir + "/" + clusterTable);
+ IOUtils.createDirectory(dataDir + "/" + table);
// silence Questdb's custom logging system
IOUtils.writeFile(new File(dataDir, "quest-log.conf"), new byte[0]);
@@ -83,36 +78,32 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
CairoConfiguration configuration = new DefaultCairoConfiguration(dataDir);
engine = new CairoEngine(configuration);
- ensureTablesExist();
+ ensureExists(table);
}
@Override
- public Clock clock() { return clock; }
-
- @Override
- public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
- try (TableWriter writer = engine.getWriter(newContext().getCairoSecurityContext(), nodeTable)) {
- addNodeMetrics(snapshots, writer);
+ public void add(Collection<Pair<String, MetricSnapshot>> snapshots) {
+ try (TableWriter writer = engine.getWriter(newContext().getCairoSecurityContext(), table)) {
+ add(snapshots, writer);
}
catch (CairoException e) {
if (e.getMessage().contains("Cannot read offset")) {
// This error seems non-recoverable
repair(e);
- try (TableWriter writer = engine.getWriter(newContext().getCairoSecurityContext(), nodeTable)) {
- addNodeMetrics(snapshots, writer);
+ try (TableWriter writer = engine.getWriter(newContext().getCairoSecurityContext(), table)) {
+ add(snapshots, writer);
}
}
}
}
- private void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots, TableWriter writer) {
+ private void add(Collection<Pair<String, MetricSnapshot>> snapshots, TableWriter writer) {
for (var snapshot : snapshots) {
long atMillis = adjustIfRecent(snapshot.getSecond().at().toEpochMilli(), highestTimestampAdded);
if (atMillis < highestTimestampAdded) continue; // Ignore old data
highestTimestampAdded = atMillis;
TableWriter.Row row = writer.newRow(atMillis * 1000); // in microseconds
row.putStr(0, snapshot.getFirst());
- // (1 is timestamp)
row.putFloat(2, (float)snapshot.getSecond().cpu());
row.putFloat(3, (float)snapshot.getSecond().memory());
row.putFloat(4, (float)snapshot.getSecond().disk());
@@ -126,70 +117,23 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
@Override
- public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
- try (TableWriter writer = engine.getWriter(newContext().getCairoSecurityContext(), clusterTable)) {
- addClusterMetrics(application, snapshots, writer);
- }
- catch (CairoException e) {
- if (e.getMessage().contains("Cannot read offset")) {
- // This error seems non-recoverable
- repair(e);
- try (TableWriter writer = engine.getWriter(newContext().getCairoSecurityContext(), clusterTable)) {
- addClusterMetrics(application, snapshots, writer);
- }
- }
- }
- }
-
- private void addClusterMetrics(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots, TableWriter writer) {
- for (var snapshot : snapshots.entrySet()) {
- long atMillis = adjustIfRecent(snapshot.getValue().at().toEpochMilli(), highestTimestampAdded);
- if (atMillis < highestTimestampAdded) continue; // Ignore old data
- highestTimestampAdded = atMillis;
- TableWriter.Row row = writer.newRow(atMillis * 1000); // in microseconds
- row.putStr(0, applicationId.serializedForm());
- row.putStr(1, snapshot.getKey().value());
- // (2 is timestamp)
- row.putFloat(3, (float)snapshot.getValue().queryRate());
- row.append();
- }
- writer.commit();
- }
-
- @Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
try (SqlCompiler compiler = new SqlCompiler(engine)) {
SqlExecutionContext context = newContext();
- var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, compiler, context);
+ var snapshots = getSnapshots(clock.instant().minus(period), hostnames, compiler, context);
return snapshots.entrySet().stream()
.map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
.collect(Collectors.toList());
}
catch (SqlException e) {
- throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
- }
- }
-
- @Override
- public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
- SqlExecutionContext context = newContext();
- return getClusterSnapshots(applicationId, clusterId, compiler, context);
- }
- catch (SqlException e) {
- throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
+ throw new IllegalStateException("Could not read timeseries data in Quest stored in " + dataDir, e);
}
}
@Override
public void gc() {
- gc(nodeTable);
- gc(clusterTable);
- }
-
- private void gc(String table) {
- // We remove full days at once and we want to see at least three days to not every only see weekend data
- Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
+ // Since we remove full days at once we need to keep at least the scaling window + 1 day
+ Instant oldestToKeep = clock.instant().minus(Autoscaler.maxScalingWindow().plus(Duration.ofDays(1)));
SqlExecutionContext context = newContext();
int partitions = 0;
try (SqlCompiler compiler = new SqlCompiler(engine)) {
@@ -213,7 +157,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
context);
}
catch (SqlException e) {
- log.log(Level.WARNING, "Failed to gc old metrics data in " + dataDir + " table " + table, e);
+ log.log(Level.WARNING, "Failed to gc old metrics data in " + dataDir, e);
}
}
@@ -237,26 +181,18 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
initializeDb();
}
- private boolean exists(String table, SqlExecutionContext context) {
- return 0 == engine.getStatus(context.getCairoSecurityContext(), new Path(), table);
- }
-
- private void ensureTablesExist() {
+ private void ensureExists(String table) {
SqlExecutionContext context = newContext();
- if (exists(nodeTable, context))
- ensureNodeTableIsUpdated(context);
- else
- createNodeTable(context);
-
- if (exists(clusterTable, context))
- ensureClusterTableIsUpdated(context);
- else
- createClusterTable(context);
+ if (0 == engine.getStatus(context.getCairoSecurityContext(), new Path(), table)) { // table exists
+ ensureTableIsUpdated(table, context);
+ } else {
+ createTable(table, context);
+ }
}
- private void createNodeTable(SqlExecutionContext context) {
+ private void createTable(String table, SqlExecutionContext context) {
try (SqlCompiler compiler = new SqlCompiler(engine)) {
- compiler.compile("create table " + nodeTable +
+ compiler.compile("create table " + table +
" (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
" application_generation long, inService boolean, stable boolean, queries_rate float)" +
" timestamp(at)" +
@@ -266,39 +202,20 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
// compiler.compile("alter table " + tableName + " alter column hostname add index", context);
}
catch (SqlException e) {
- throw new IllegalStateException("Could not create Quest db table '" + nodeTable + "'", e);
- }
- }
-
- private void createClusterTable(SqlExecutionContext context) {
- try (SqlCompiler compiler = new SqlCompiler(engine)) {
- compiler.compile("create table " + clusterTable +
- " (application string, cluster string, at timestamp, queries_rate float)" +
- " timestamp(at)" +
- "PARTITION BY DAY;",
- context);
- // We should do this if we get a version where selecting on strings work embedded, see below
- // compiler.compile("alter table " + tableName + " alter column cluster add index", context);
- }
- catch (SqlException e) {
- throw new IllegalStateException("Could not create Quest db table '" + clusterTable + "'", e);
+ throw new IllegalStateException("Could not create Quest db table '" + table + "'", e);
}
}
- private void ensureNodeTableIsUpdated(SqlExecutionContext context) {
+ private void ensureTableIsUpdated(String table, SqlExecutionContext context) {
try (SqlCompiler compiler = new SqlCompiler(engine)) {
- if (0 == engine.getStatus(context.getCairoSecurityContext(), new Path(), nodeTable)) {
- ensureColumnExists("queries_rate", "float", nodeTable, compiler, context); // TODO: Remove after March 2021
+ if (0 == engine.getStatus(context.getCairoSecurityContext(), new Path(), table)) {
+ ensureColumnExists("queries_rate", "float", table, compiler, context); // TODO: Remove after March 2021
}
} catch (SqlException e) {
repair(e);
}
}
- private void ensureClusterTableIsUpdated(SqlExecutionContext context) {
- // Nothing to do for now
- }
-
private void ensureColumnExists(String column, String columnType,
String table, SqlCompiler compiler, SqlExecutionContext context) throws SqlException {
if (columnNamesOf(table, compiler, context).contains(column)) return;
@@ -329,34 +246,34 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
return timestamp;
}
- private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
- Set<String> hostnames,
- SqlCompiler compiler,
- SqlExecutionContext context) throws SqlException {
+ private ListMap<String, MetricSnapshot> getSnapshots(Instant startTime,
+ Set<String> hostnames,
+ SqlCompiler compiler,
+ SqlExecutionContext context) throws SqlException {
DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
- String sql = "select * from " + nodeTable + " where at in('" + from + "', '" + to + "');";
+ String sql = "select * from " + table + " where at in('" + from + "', '" + to + "');";
// WHERE clauses does not work:
// String sql = "select * from " + tableName + " where hostname in('host1', 'host2', 'host3');";
try (RecordCursorFactory factory = compiler.compile(sql, context).getRecordCursorFactory()) {
- ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
+ ListMap<String, MetricSnapshot> snapshots = new ListMap<>();
try (RecordCursor cursor = factory.getCursor(context)) {
Record record = cursor.getRecord();
while (cursor.hasNext()) {
String hostname = record.getStr(0).toString();
if (hostnames.contains(hostname)) {
snapshots.put(hostname,
- new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
- record.getFloat(2),
- record.getFloat(3),
- record.getFloat(4),
- record.getLong(5),
- record.getBool(6),
- record.getBool(7),
- record.getFloat(8)));
+ new MetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
+ record.getFloat(2),
+ record.getFloat(3),
+ record.getFloat(4),
+ record.getLong(5),
+ record.getBool(6),
+ record.getBool(7),
+ record.getFloat(8)));
}
}
}
@@ -364,29 +281,6 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb {
}
}
- private ClusterTimeseries getClusterSnapshots(ApplicationId application,
- ClusterSpec.Id cluster,
- SqlCompiler compiler,
- SqlExecutionContext context) throws SqlException {
- String sql = "select * from " + clusterTable;
- try (RecordCursorFactory factory = compiler.compile(sql, context).getRecordCursorFactory()) {
- List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
- try (RecordCursor cursor = factory.getCursor(context)) {
- Record record = cursor.getRecord();
- while (cursor.hasNext()) {
- String applicationIdString = record.getStr(0).toString();
- if ( ! application.serializedForm().equals(applicationIdString)) continue;
- String clusterId = record.getStr(1).toString();
- if (cluster.value().equals(clusterId)) {
- snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
- record.getFloat(3)));
- }
- }
- }
- return new ClusterTimeseries(cluster, snapshots);
- }
- }
-
private SqlExecutionContext newContext() {
return new SqlExecutionContextImpl(engine, 1);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Resource.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Resource.java
index b841b31833f..8353f56df91 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Resource.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Resource.java
@@ -12,7 +12,7 @@ public enum Resource {
/** Cpu utilization ratio */
cpu {
- public double idealAverageLoad() { return 0.8; }
+ public double idealAverageLoad() { return 0.4; }
double valueFrom(NodeResources resources) { return resources.vcpu(); }
},
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceTarget.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceTarget.java
index c7151e3ae7b..a2fbeb3b710 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceTarget.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceTarget.java
@@ -3,8 +3,6 @@ package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.vespa.hosted.provision.applications.Application;
-import java.time.Duration;
-
/**
* A resource target to hit for the allocation optimizer.
* The target is measured in cpu, memory and disk per node in the allocation given by current.
@@ -49,16 +47,11 @@ public class ResourceTarget {
}
/** Create a target of achieving ideal load given a current load */
- public static ResourceTarget idealLoad(ClusterTimeseries clusterTimeseries,
- ClusterNodesTimeseries clusterNodesTimeseries,
- AllocatableClusterResources current,
- Application application) {
- return new ResourceTarget(nodeUsage(Resource.cpu, clusterNodesTimeseries.averageLoad(Resource.cpu), current)
- / idealCpuLoad(clusterTimeseries, clusterNodesTimeseries, application),
- nodeUsage(Resource.memory, clusterNodesTimeseries.averageLoad(Resource.memory), current)
- / Resource.memory.idealAverageLoad(),
- nodeUsage(Resource.disk, clusterNodesTimeseries.averageLoad(Resource.disk), current)
- / Resource.disk.idealAverageLoad(),
+ public static ResourceTarget idealLoad(double currentCpuLoad, double currentMemoryLoad, double currentDiskLoad,
+ AllocatableClusterResources current, Application application) {
+ return new ResourceTarget(nodeUsage(Resource.cpu, currentCpuLoad, current) / idealCpuLoad(application),
+ nodeUsage(Resource.memory, currentMemoryLoad, current) / Resource.memory.idealAverageLoad(),
+ nodeUsage(Resource.disk, currentDiskLoad, current) / Resource.disk.idealAverageLoad(),
true);
}
@@ -71,27 +64,16 @@ public class ResourceTarget {
}
/** Ideal cpu load must take the application traffic fraction into account */
- private static double idealCpuLoad(ClusterTimeseries clusterTimeseries,
- ClusterNodesTimeseries clusterNodesTimeseries,
- Application application) {
- // What's needed to have headroom for growth during scale-up as a fraction of current resources?
- double maxGrowthRate = clusterTimeseries.maxQueryGrowthRate(); // in fraction per minute of the current traffic
- Duration scalingDuration = clusterNodesTimeseries.cluster().scalingDuration(clusterNodesTimeseries.clusterNodes().clusterSpec());
- double growthRateHeadroom = 1 + maxGrowthRate * scalingDuration.toMinutes();
- // Cap headroom at 10% above the historical observed peak
- growthRateHeadroom = Math.min(growthRateHeadroom, 1 / clusterTimeseries.currentQueryFractionOfMax() + 0.1);
-
- // How much headroom is needed to handle sudden arrival of additional traffic due to another zone going down?
- double trafficShiftHeadroom;
+ private static double idealCpuLoad(Application application) {
+ double trafficFactor;
if (application.status().maxReadShare() == 0) // No traffic fraction data
- trafficShiftHeadroom = 2.0; // assume we currently get half of the global share of traffic
+ trafficFactor = 0.5; // assume we currently get half of the global share of traffic
else
- trafficShiftHeadroom = application.status().maxReadShare() / application.status().currentReadShare();
-
- if (trafficShiftHeadroom > 2.0) // The expectation that we have almost no load with almost no queries is incorrect due
- trafficShiftHeadroom = 2.0; // to write traffic; once that is separated we can increase this threshold
+ trafficFactor = application.status().currentReadShare() / application.status().maxReadShare();
- return 1 / growthRateHeadroom * 1 / trafficShiftHeadroom * Resource.cpu.idealAverageLoad();
+ if (trafficFactor < 0.5) // The expectation that we have almost no load with almost no queries is incorrect due
+ trafficFactor = 0.5; // to write traffic; once that is separated we can lower this threshold (but not to 0)
+ return trafficFactor * Resource.cpu.idealAverageLoad();
}
}
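
Similarly, a worked sketch of the simplified idealCpuLoad above; the standalone method and the 0.4 constant (Resource.cpu.idealAverageLoad() after this diff) are spelled out here as assumptions for illustration.

/** Illustrative sketch only: the traffic-factor clamp used by ResourceTarget.idealCpuLoad above. */
class IdealCpuLoadSketch {

    static final double idealAverageCpuLoad = 0.4;                // Resource.cpu.idealAverageLoad() after this diff

    static double idealCpuLoad(double currentReadShare, double maxReadShare) {
        double trafficFactor = maxReadShare == 0 ? 0.5             // no traffic fraction data available
                                                 : currentReadShare / maxReadShare;
        if (trafficFactor < 0.5) trafficFactor = 0.5;              // floor, since write traffic is not yet separated out
        return trafficFactor * idealAverageCpuLoad;
    }

    public static void main(String[] args) {
        System.out.println(idealCpuLoad(0.3, 1.0));                // clamped to 0.5 -> prints 0.2
        System.out.println(idealCpuLoad(1.0, 1.0));                // full read share -> prints 0.4
    }
}
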
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
index 9d910df01d9..bcfdaefb305 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
@@ -14,7 +14,7 @@ import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.autoscale.AllocatableClusterResources;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
-import com.yahoo.vespa.hosted.provision.autoscale.NodeMetricSnapshot;
+import com.yahoo.vespa.hosted.provision.autoscale.MetricSnapshot;
import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import com.yahoo.vespa.hosted.provision.autoscale.NodeTimeseries;
import com.yahoo.vespa.hosted.provision.node.History;
@@ -110,7 +110,7 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
// - 2. all nodes have switched to the right config generation
for (NodeTimeseries nodeTimeseries : metricsDb.getNodeTimeseries(Duration.between(event.at(), clock().instant()),
clusterNodes)) {
- Optional<NodeMetricSnapshot> firstOnNewGeneration =
+ Optional<MetricSnapshot> firstOnNewGeneration =
nodeTimeseries.asList().stream()
.filter(snapshot -> snapshot.generation() >= event.generation()).findFirst();
if (firstOnNewGeneration.isEmpty()) return cluster; // Not completed
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java
index 01ab73c20b2..b8548c4c3f4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java
@@ -8,6 +8,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.autoscale.MetricsFetcher;
import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import com.yahoo.vespa.hosted.provision.autoscale.MetricsResponse;
+import com.yahoo.yolean.Exceptions;
import java.time.Duration;
import java.util.Set;
@@ -73,8 +74,7 @@ public class NodeMetricsDbMaintainer extends NodeRepositoryMaintainer {
warnings.add(1);
}
else if (response != null) {
- metricsDb.addNodeMetrics(response.nodeMetrics());
- metricsDb.addClusterMetrics(application, response.clusterMetrics());
+ metricsDb.add(response.metrics());
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
index 10db9a08eeb..e0a11aa5dac 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
@@ -59,7 +59,7 @@ public class RetiredExpirer extends NodeRepositoryMaintainer {
List<Node> retiredNodes = entry.getValue();
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
- if ( ! deployment.isValid()) continue;
+ if ( ! deployment.isValid()) continue; // this will be done at another config server
List<Node> nodesToRemove = retiredNodes.stream().filter(this::canRemove).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
index 4235bae6850..ceaf88dd7d9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
@@ -9,7 +9,8 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
-import com.yahoo.vespa.hosted.provision.autoscale.ClusterNodesTimeseries;
+import com.yahoo.vespa.hosted.provision.autoscale.AllocatableClusterResources;
+import com.yahoo.vespa.hosted.provision.autoscale.ClusterTimeseries;
import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import com.yahoo.vespa.hosted.provision.autoscale.Resource;
@@ -73,7 +74,7 @@ public class ApplicationSerializer {
}
private static void clusterUtilizationToSlime(Cluster cluster, NodeList nodes, MetricsDb metricsDb, Cursor utilizationObject) {
- var timeseries = new ClusterNodesTimeseries(Duration.ofHours(1), cluster, nodes, metricsDb);
+ var timeseries = new ClusterTimeseries(Duration.ofHours(1), cluster, nodes, metricsDb);
utilizationObject.setDouble("cpu", timeseries.averageLoad(Resource.cpu));
utilizationObject.setDouble("memory", timeseries.averageLoad(Resource.memory));