author    Jon Bratseth <bratseth@gmail.com>  2022-08-01 21:55:28 +0200
committer Jon Bratseth <bratseth@gmail.com>  2022-08-01 21:55:28 +0200
commit    dd0645d48a9386b10446bb6c3b56d6d8b38e35e7 (patch)
tree      2cf8b87c60de0d1e2862c49fddd89026e2a8fc00
parent    3bb080297cfb21fea5d17f3e0079ff5d4179ec00 (diff)
Separate loading methods
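
The load/measurement helpers (applyCpuLoad, applyMemLoad, applyLoad, addCpuMeasurements, addLoadMeasurements, addQueryRateMeasurements) move out of AutoscalingTester and Fixture into a new Loader test helper, reached through fixture.loader(); Fixture also gains a clusterModel() accessor and renames its application/cluster fields to applicationId/clusterSpec. As a minimal sketch of the new call pattern (not part of this commit; the class name LoaderUsageSketch is made up, and the builder defaults are the ones AutoscalingTest already uses):

package com.yahoo.vespa.hosted.provision.autoscale;

import java.time.Duration;
import org.junit.Test;

public class LoaderUsageSketch {

    @Test
    public void applies_load_through_the_loader() {
        var fixture = AutoscalingTester.fixture().build();   // default builder, as in AutoscalingTest
        fixture.tester().clock().advance(Duration.ofDays(1));

        // Previously: fixture.applyCpuLoad(0.7, 120);
        fixture.loader().applyCpuLoad(0.7, 120);              // load generation now lives on Loader

        Autoscaler.Advice advice = fixture.autoscale();       // autoscaling advice within the deployed capacity
    }

}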
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java    124
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java  205
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java             91
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Loader.java             158
4 files changed, 254 insertions, 324 deletions
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index d9198452b23..e6873e7118f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -39,11 +39,11 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(1));
assertTrue("No measurements -> No change", fixture.autoscale().isEmpty());
- fixture.applyCpuLoad(0.7f, 59);
+ fixture.loader().applyCpuLoad(0.7f, 59);
assertTrue("Too few measurements -> No change", fixture.autoscale().isEmpty());
fixture.tester().clock().advance(Duration.ofDays(1));
- fixture.applyCpuLoad(0.7f, 120);
+ fixture.loader().applyCpuLoad(0.7f, 120);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high",
9, 1, 2.8, 5.0, 50.0,
fixture.autoscale());
@@ -54,14 +54,14 @@ public class AutoscalingTest {
fixture.deactivateRetired(Capacity.from(scaledResources));
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyCpuLoad(0.8f, 3);
+ fixture.loader().applyCpuLoad(0.8f, 3);
assertTrue("Load change is large, but insufficient measurements for new config -> No change",
fixture.autoscale().isEmpty());
- fixture.applyCpuLoad(0.19f, 100);
+ fixture.loader().applyCpuLoad(0.19f, 100);
assertEquals("Load change is small -> No change", Optional.empty(), fixture.autoscale().target());
- fixture.applyCpuLoad(0.1f, 120);
+ fixture.loader().applyCpuLoad(0.1f, 120);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
9, 1, 1.0, 5.0, 50.0,
fixture.autoscale());
@@ -72,7 +72,7 @@ public class AutoscalingTest {
public void test_autoscaling_up_is_fast_TODO() {
var fixture = AutoscalingTester.fixture().build();
fixture.tester().clock().advance(Duration.ofDays(1)); // TODO: Remove the need for this
- fixture.applyLoad(1.0, 1.0, 1.0, 120); // TODO: Make this low
+ fixture.loader().applyLoad(1.0, 1.0, 1.0, 120); // TODO: Make this low
fixture.tester().assertResources("Scaling up since resource usage is too high",
10, 1, 9.4, 8.5, 92.6,
fixture.autoscale());
@@ -82,12 +82,12 @@ public class AutoscalingTest {
@Test
public void test_autoscaling_single_container_group() {
var fixture = AutoscalingTester.fixture().clusterType(ClusterSpec.Type.container).build();
- fixture.applyCpuLoad(0.25f, 120);
+ fixture.loader().applyCpuLoad(0.25f, 120);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up since cpu usage is too high",
5, 1, 3.8, 8.0, 50.5,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
- fixture.applyCpuLoad(0.1f, 120);
+ fixture.loader().applyCpuLoad(0.1f, 120);
fixture.tester().assertResources("Scaling down since cpu usage has gone down",
4, 1, 2.5, 6.4, 25.5,
fixture.autoscale());
@@ -102,11 +102,11 @@ public class AutoscalingTest {
.capacity(Capacity.from(new ClusterResources(5, 1, resources)))
.build();
- assertTrue(fixture.tester().nodeRepository().nodes().list().owner(fixture.application).stream()
- .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == slow));
+ assertTrue(fixture.tester().nodeRepository().nodes().list().owner(fixture.applicationId).stream()
+ .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == slow));
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyCpuLoad(0.25, 120);
+ fixture.loader().applyCpuLoad(0.25, 120);
// Changing min and max from slow to any
ClusterResources min = new ClusterResources( 2, 1,
@@ -135,14 +135,14 @@ public class AutoscalingTest {
.build();
// Redeployment without target: Uses current resource numbers with *requested* non-numbers (i.e disk-speed any)
- assertTrue(fixture.tester().nodeRepository().applications().get(fixture.application).get().cluster(fixture.cluster.id()).get().targetResources().isEmpty());
+ assertTrue(fixture.tester().nodeRepository().applications().get(fixture.applicationId).get().cluster(fixture.clusterSpec.id()).get().targetResources().isEmpty());
fixture.deploy();
assertEquals(DiskSpeed.any, fixture.nodes().first().get().allocation().get().requestedResources().diskSpeed());
// Autoscaling: Uses disk-speed any as well
fixture.deactivateRetired(capacity);
fixture.tester().clock().advance(Duration.ofDays(1));
- fixture.applyCpuLoad(0.8, 120);
+ fixture.loader().applyCpuLoad(0.8, 120);
assertEquals(DiskSpeed.any, fixture.autoscale(capacity).target().get().nodeResources().diskSpeed());
}
@@ -156,7 +156,7 @@ public class AutoscalingTest {
.capacity(Capacity.from(min, max)).build();
fixture.tester().clock().advance(Duration.ofDays(1));
- fixture.applyLoad(0.25, 0.95, 0.95, 120);
+ fixture.loader().applyLoad(0.25, 0.95, 0.95, 120);
fixture.tester().assertResources("Scaling up to limit since resource usage is too high",
6, 1, 2.4, 78.0, 79.0,
fixture.autoscale());
@@ -170,7 +170,7 @@ public class AutoscalingTest {
// deploy
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(0.05f, 0.05f, 0.05f, 120);
+ fixture.loader().applyLoad(0.05f, 0.05f, 0.05f, 120);
fixture.tester().assertResources("Scaling down to limit since resource usage is low",
4, 1, 1.8, 7.4, 13.9,
fixture.autoscale());
@@ -186,13 +186,13 @@ public class AutoscalingTest {
.build();
NodeResources defaultResources =
- new CapacityPolicies(fixture.tester().nodeRepository()).defaultNodeResources(fixture.cluster, fixture.application, false);
+ new CapacityPolicies(fixture.tester().nodeRepository()).defaultNodeResources(fixture.clusterSpec, fixture.applicationId, false);
fixture.tester().assertResources("Min number of nodes and default resources",
2, 1, defaultResources,
fixture.nodes().toResources());
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(0.25, 0.95, 0.95, 120);
+ fixture.loader().applyLoad(0.25, 0.95, 0.95, 120);
fixture.tester().assertResources("Scaling up",
5, 1,
defaultResources.vcpu(), defaultResources.memoryGb(), defaultResources.diskGb(),
@@ -209,7 +209,7 @@ public class AutoscalingTest {
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyCpuLoad(0.3, 240);
+ fixture.loader().applyCpuLoad(0.3, 240);
fixture.tester().assertResources("Scaling up",
6, 6, 3.8, 8.0, 10.0,
fixture.autoscale());
@@ -222,7 +222,7 @@ public class AutoscalingTest {
// deploy
fixture.tester().clock().advance(Duration.ofDays(1));
- fixture.applyCpuLoad(0.25, 120);
+ fixture.loader().applyCpuLoad(0.25, 120);
assertTrue(fixture.autoscale().isEmpty());
}
@@ -240,7 +240,7 @@ public class AutoscalingTest {
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(0.01, 0.01, 0.01, 120);
+ fixture.loader().applyLoad(0.01, 0.01, 0.01, 120);
Autoscaler.Advice suggestion = fixture.suggest();
fixture.tester().assertResources("Choosing the remote disk flavor as it has less disk",
2, 1, 3.0, 100.0, 10.0,
@@ -263,7 +263,7 @@ public class AutoscalingTest {
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(0.01, 0.01, 0.01, 120);
+ fixture.loader().applyLoad(0.01, 0.01, 0.01, 120);
Autoscaler.Advice suggestion = fixture.suggest();
fixture.tester().assertResources("Always prefers local disk for content",
2, 1, 3.0, 100.0, 75.0,
@@ -277,17 +277,17 @@ public class AutoscalingTest {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
var fixture = AutoscalingTester.fixture().capacity(Capacity.from(min, min)).build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyCpuLoad(1.0, 120);
+ fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
8, 1, 9.3, 5.7, 57.1,
- fixture.tester().suggest(fixture.application, fixture.cluster.id(), min, min));
+ fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@Test
public void not_using_out_of_service_measurements() {
var fixture = AutoscalingTester.fixture().build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(0.9, 0.6, 0.7, 1, false, true, 120);
+ fixture.loader().applyLoad(0.9, 0.6, 0.7, 1, false, true, 120);
assertTrue("Not scaling up since nodes were measured while cluster was out of service",
fixture.autoscale().isEmpty());
}
@@ -296,7 +296,7 @@ public class AutoscalingTest {
public void not_using_unstable_measurements() {
var fixture = AutoscalingTester.fixture().build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(0.9, 0.6, 0.7, 1, true, false, 120);
+ fixture.loader().applyLoad(0.9, 0.6, 0.7, 1, true, false, 120);
assertTrue("Not scaling up since nodes were measured while cluster was out of service",
fixture.autoscale().isEmpty());
}
@@ -311,7 +311,7 @@ public class AutoscalingTest {
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyCpuLoad(0.9, 120);
+ fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling the number of groups, but nothing requires us to stay with 1 node per group",
10, 5, 7.7, 40.0, 40.0,
fixture.autoscale());
@@ -327,9 +327,9 @@ public class AutoscalingTest {
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- Duration timePassed = fixture.addCpuMeasurements(0.25, 120);
+ Duration timePassed = fixture.loader().addCpuMeasurements(0.25, 120);
fixture.tester().clock().advance(timePassed.negated());
- fixture.addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 1.0);
+ fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 1.0);
fixture.tester().assertResources("Scaling up since resource usage is too high, changing to 1 group is cheaper",
10, 1, 2.3, 27.8, 27.8,
fixture.autoscale());
@@ -346,9 +346,9 @@ public class AutoscalingTest {
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- Duration timePassed = fixture.addCpuMeasurements(0.25, 120);
+ Duration timePassed = fixture.loader().addCpuMeasurements(0.25, 120);
fixture.tester().clock().advance(timePassed.negated());
- fixture.addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
+ fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
6, 1, 1.0, 50.0, 50.0,
fixture.autoscale());
@@ -364,7 +364,7 @@ public class AutoscalingTest {
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(1));
- fixture.applyMemLoad(1.0, 1000);
+ fixture.loader().applyMemLoad(1.0, 1000);
fixture.tester().assertResources("Increase group size to reduce memory load",
8, 2, 6.5, 96.2, 62.5,
fixture.autoscale());
@@ -380,7 +380,7 @@ public class AutoscalingTest {
.capacity(Capacity.from(min, max))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyMemLoad(0.02, 120);
+ fixture.loader().applyMemLoad(0.02, 120);
fixture.tester().assertResources("Scaling down",
6, 1, 3.1, 4.0, 100.0,
fixture.autoscale());
@@ -389,10 +389,10 @@ public class AutoscalingTest {
@Test
public void scaling_down_only_after_delay() {
var fixture = AutoscalingTester.fixture().build();
- fixture.applyMemLoad(0.02, 120);
+ fixture.loader().applyMemLoad(0.02, 120);
assertTrue("Too soon after initial deployment", fixture.autoscale().target().isEmpty());
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyMemLoad(0.02, 120);
+ fixture.loader().applyMemLoad(0.02, 120);
fixture.tester().assertResources("Scaling down since enough time has passed",
6, 1, 1.2, 4.0, 80.0,
fixture.autoscale());
@@ -404,7 +404,7 @@ public class AutoscalingTest {
var fixture = AutoscalingTester.fixture()
.resourceCalculator(new OnlySubtractingWhenForecastingCalculator(0))
.build();
- fixture.applyLoad(1.0, 1.0, 0.7, 1000);
+ fixture.loader().applyLoad(1.0, 1.0, 0.7, 1000);
fixture.tester().assertResources("Scaling up",
9, 1, 5.0, 9.6, 72.9,
fixture.autoscale());
@@ -414,7 +414,7 @@ public class AutoscalingTest {
var fixture = AutoscalingTester.fixture()
.resourceCalculator(new OnlySubtractingWhenForecastingCalculator(3))
.build();
- fixture.applyLoad(1.0, 1.0, 0.7, 1000);
+ fixture.loader().applyLoad(1.0, 1.0, 0.7, 1000);
fixture.tester().assertResources("With 3Gb memory tax, we scale up memory more",
7, 1, 6.4, 15.8, 97.2,
fixture.autoscale());
@@ -437,7 +437,7 @@ public class AutoscalingTest {
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyMemLoad(0.9, 120);
+ fixture.loader().applyMemLoad(0.9, 120);
var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high.",
8, 1, 3, 80, 57.1,
fixture.autoscale());
@@ -445,9 +445,9 @@ public class AutoscalingTest {
fixture.deactivateRetired(Capacity.from(scaledResources));
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyMemLoad(0.3, 1000);
+ fixture.loader().applyMemLoad(0.3, 1000);
fixture.tester().assertResources("Scaling down since resource usage has gone down",
- 5, 1, 3, 80, 100,
+ 5, 1, 3, 80, 100.0,
fixture.autoscale());
}
@@ -459,7 +459,7 @@ public class AutoscalingTest {
.capacity(Capacity.from(min, max))
.build();
fixture.tester.clock().advance(Duration.ofDays(1));
- fixture.applyCpuLoad(0.25, 120);
+ fixture.loader().applyCpuLoad(0.25, 120);
// (no read share stored)
fixture.tester().assertResources("Advice to scale up since we set aside for bcp by default",
@@ -482,9 +482,9 @@ public class AutoscalingTest {
var fixture = AutoscalingTester.fixture().build();
fixture.tester().clock().advance(Duration.ofDays(2));
- Duration timeAdded = fixture.addLoadMeasurements(100, t -> t == 0 ? 20.0 : 10.0, t -> 0.0);
+ Duration timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 20.0 : 10.0, t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.addCpuMeasurements(0.25, 200);
+ fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no data scaling time data",
9, 1, 2.1, 5, 50,
@@ -492,20 +492,20 @@ public class AutoscalingTest {
fixture.setScalingDuration(Duration.ofMinutes(5));
fixture.tester().clock().advance(Duration.ofDays(2));
- timeAdded = fixture.addLoadMeasurements(100, t -> 10.0 + (t < 50 ? t : 100 - t), t -> 0.0);
+ timeAdded = fixture.loader().addLoadMeasurements(100, t -> 10.0 + (t < 50 ? t : 100 - t), t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.addCpuMeasurements(0.25, 200);
+ fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale down since observed growth is slower than scaling time",
9, 1, 1.8, 5, 50,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofMinutes(60));
fixture.tester().clock().advance(Duration.ofDays(2));
- timeAdded = fixture.addLoadMeasurements(100,
- t -> 10.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49)),
- t -> 0.0);
+ timeAdded = fixture.loader().addLoadMeasurements(100,
+ t -> 10.0 + (t < 50 ? t * t * t : 125000 - (t - 49) * (t - 49) * (t - 49)),
+ t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.addCpuMeasurements(0.25, 200);
+ fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since observed growth is faster than scaling time",
9, 1, 2.1, 5, 50,
fixture.autoscale());
@@ -515,48 +515,48 @@ public class AutoscalingTest {
public void test_autoscaling_considers_query_vs_write_rate() {
var fixture = AutoscalingTester.fixture().build();
- fixture.addCpuMeasurements(0.4, 220);
+ fixture.loader().addCpuMeasurements(0.4, 220);
// Why twice the query rate at time = 0?
// This makes headroom for queries doubling, which we want to observe the effect of here
fixture.tester().clock().advance(Duration.ofDays(2));
- var timeAdded = fixture.addLoadMeasurements(100, t -> t == 0 ? 20.0 : 10.0, t -> 10.0);
+ var timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 20.0 : 10.0, t -> 10.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.addCpuMeasurements(0.4, 200);
+ fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester.assertResources("Query and write load is equal -> scale up somewhat",
9, 1, 2.4, 5, 50,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
- timeAdded = fixture.addLoadMeasurements(100, t -> t == 0 ? 80.0 : 40.0, t -> 10.0);
+ timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 80.0 : 40.0, t -> 10.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.addCpuMeasurements(0.4, 200);
+ fixture.loader().addCpuMeasurements(0.4, 200);
// TODO: Ackhually, we scale down here - why?
fixture.tester().assertResources("Query load is 4x write load -> scale up more",
9, 1, 2.1, 5, 50,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
- timeAdded = fixture.addLoadMeasurements(100, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
+ timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.addCpuMeasurements(0.4, 200);
+ fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write load is 10x query load -> scale down",
9, 1, 1.1, 5, 50,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
- timeAdded = fixture.addLoadMeasurements(100, t -> t == 0 ? 20.0 : 10.0, t-> 0.0);
+ timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 20.0 : 10.0, t-> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.addCpuMeasurements(0.4, 200);
+ fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query only -> largest possible",
8, 1, 4.9, 5.7, 57.1,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
- timeAdded = fixture.addLoadMeasurements(100, t -> 0.0, t -> 10.0);
+ timeAdded = fixture.loader().addLoadMeasurements(100, t -> 0.0, t -> 10.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.addCpuMeasurements(0.4, 200);
+ fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write only -> smallest possible",
6, 1, 1.0, 8, 80,
fixture.autoscale());
@@ -568,7 +568,7 @@ public class AutoscalingTest {
.zone(new Zone(Environment.dev, RegionName.from("us-east")))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(1.0, 1.0, 1.0, 200);
+ fixture.loader().applyLoad(1.0, 1.0, 1.0, 200);
assertTrue("Not attempting to scale up because policies dictate we'll only get one node",
fixture.autoscale().target().isEmpty());
}
@@ -589,7 +589,7 @@ public class AutoscalingTest {
.zone(new Zone(Environment.dev, RegionName.from("us-east")))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(1.0, 1.0, 1.0, 200);
+ fixture.loader().applyLoad(1.0, 1.0, 1.0, 200);
fixture.tester().assertResources("We scale even in dev because resources are required",
3, 1, 1.0, 7.7, 83.3,
fixture.autoscale());
@@ -608,7 +608,7 @@ public class AutoscalingTest {
.zone(new Zone(Environment.dev, RegionName.from("us-east")))
.build();
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.applyLoad(1.0, 1.0, 1.0, 200);
+ fixture.loader().applyLoad(1.0, 1.0, 1.0, 200);
fixture.tester().assertResources("We scale even in dev because resources are required",
3, 1, 1.5, 8, 50,
fixture.autoscale());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index daab3b1d469..e21c57b3ef5 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -1,22 +1,18 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.autoscale;
-import com.yahoo.collections.Pair;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.Nodelike;
import com.yahoo.vespa.hosted.provision.applications.Application;
@@ -29,13 +25,9 @@ import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
import java.time.Duration;
-import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
-import java.util.Map;
import java.util.Set;
-import java.util.function.IntFunction;
-import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -50,15 +42,6 @@ class AutoscalingTester {
private final HostResourcesCalculator hostResourcesCalculator;
private final CapacityPolicies capacityPolicies;
- /** Creates an autoscaling tester with a single host type ready */
- public AutoscalingTester(NodeResources hostResources) {
- this(Environment.prod, hostResources);
- }
-
- public AutoscalingTester(Environment environment, NodeResources hostResources) {
- this(new Zone(environment, RegionName.from("us-east")), null, List.of(hostResources));
- }
-
public AutoscalingTester(Zone zone, HostResourcesCalculator resourcesCalculator, List<NodeResources> hostResources) {
this(zone, hostResources, resourcesCalculator, 20);
}
@@ -70,10 +53,6 @@ class AutoscalingTester {
provisioningTester.activateTenantHosts();
}
- public AutoscalingTester(Zone zone, List<Flavor> flavors) {
- this(zone, flavors, new MockHostResourcesCalculator(zone, 3));
- }
-
private AutoscalingTester(Zone zone, List<Flavor> flavors, HostResourcesCalculator resourcesCalculator) {
provisioningTester = new ProvisioningTester.Builder().zone(zone)
.flavors(flavors)
@@ -131,10 +110,6 @@ class AutoscalingTester {
nodeRepository().nodes().setReady(List.of(host), Agent.system, getClass().getSimpleName());
}
- public void deactivateRetired(ApplicationId application, ClusterSpec cluster, ClusterResources resources) {
- deactivateRetired(application, cluster, Capacity.from(resources));
- }
-
public void deactivateRetired(ApplicationId application, ClusterSpec cluster, Capacity capacity) {
try (Mutex lock = nodeRepository().nodes().lock(application)) {
for (Node node : nodeRepository().nodes().list(Node.State.active).owner(application)) {
@@ -145,145 +120,6 @@ class AutoscalingTester {
deploy(application, cluster, capacity);
}
- public ClusterModel clusterModel(ApplicationId applicationId, ClusterSpec clusterSpec) {
- var application = nodeRepository().applications().get(applicationId).get();
- return new ClusterModel(application,
- clusterSpec,
- application.cluster(clusterSpec.id()).get(),
- nodeRepository().nodes().list(Node.State.active).cluster(clusterSpec.id()),
- nodeRepository().metricsDb(),
- nodeRepository().clock());
- }
-
- /**
- * Adds measurements with the given resource value and ideal values for the other resources,
- * scaled to take one node redundancy into account.
- * (I.e we adjust to measure a bit lower load than "naively" wanted to offset for the autoscaler
- * wanting to see the ideal load with one node missing.)
- *
- * @param otherResourcesLoad the load factor relative to ideal to use for other resources
- * @param count the number of measurements
- * @param applicationId the application we're adding measurements for all nodes of
- */
- public Duration addCpuMeasurements(float value, float otherResourcesLoad,
- int count, ApplicationId applicationId) {
- NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
- float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
- Instant initialTime = clock().instant();
- for (int i = 0; i < count; i++) {
- clock().advance(Duration.ofSeconds(150));
- for (Node node : nodes) {
- Load load = new Load(value,
- ClusterModel.idealMemoryLoad * otherResourcesLoad,
- ClusterModel.idealContentDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor);
- nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
- new NodeMetricSnapshot(clock().instant(),
- load,
- 0,
- true,
- true,
- 0.0))));
- }
- }
- return Duration.between(initialTime, clock().instant());
- }
-
- /**
- * Adds measurements with the given resource value and ideal values for the other resources,
- * scaled to take one node redundancy into account.
- * (I.e we adjust to measure a bit lower load than "naively" wanted to offset for the autoscaler
- * wanting to see the ideal load with one node missing.)
- *
- * @param otherResourcesLoad the load factor relative to ideal to use for other resources
- * @param count the number of measurements
- * @param applicationId the application we're adding measurements for all nodes of
- * @return the duration added to the current time by this
- */
- public Duration addDiskMeasurements(float value, float otherResourcesLoad,
- int count, ApplicationId applicationId) {
- NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
- float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
- Instant initialTime = clock().instant();
- for (int i = 0; i < count; i++) {
- clock().advance(Duration.ofSeconds(150));
- for (Node node : nodes) {
- Load load = new Load(ClusterModel.idealQueryCpuLoad * otherResourcesLoad,
- ClusterModel.idealContentDiskLoad * otherResourcesLoad,
- value).multiply(oneExtraNodeFactor);
- nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
- new NodeMetricSnapshot(clock().instant(),
- load,
- 0,
- true,
- true,
- 0.0))));
- }
- }
- return Duration.between(initialTime, clock().instant());
- }
-
- /**
- * Adds measurements with the given resource value and ideal values for the other resources,
- * scaled to take one node redundancy into account.
- * (I.e we adjust to measure a bit lower load than "naively" wanted to offset for the autoscaler
- * wanting to see the ideal load with one node missing.)
- *
- * @param otherResourcesLoad the load factor relative to ideal to use for other resources
- * @param count the number of measurements
- * @param applicationId the application we're adding measurements for all nodes of
- */
- public void addMemMeasurements(float value, float otherResourcesLoad,
- int count, ApplicationId applicationId) {
- NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
- float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
- for (int i = 0; i < count; i++) {
- clock().advance(Duration.ofMinutes(1));
- for (Node node : nodes) {
- float cpu = (float) 0.2 * otherResourcesLoad * oneExtraNodeFactor;
- float memory = value * oneExtraNodeFactor;
- float disk = (float) ClusterModel.idealContentDiskLoad * otherResourcesLoad * oneExtraNodeFactor;
- Load load = new Load(0.2 * otherResourcesLoad,
- value,
- ClusterModel.idealContentDiskLoad * otherResourcesLoad).multiply(oneExtraNodeFactor);
- nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
- new NodeMetricSnapshot(clock().instant(),
- load,
- 0,
- true,
- true,
- 0.0))));
- }
- }
- }
-
- public void addMeasurements(float cpu, float memory, float disk, int count, ApplicationId applicationId) {
- addMeasurements(cpu, memory, disk, 0, true, true, count, applicationId);
- }
-
- public void addMeasurements(float cpu, float memory, float disk, int generation, boolean inService, boolean stable,
- int count, ApplicationId applicationId) {
- NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
- for (int i = 0; i < count; i++) {
- clock().advance(Duration.ofMinutes(1));
- for (Node node : nodes) {
- nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
- new NodeMetricSnapshot(clock().instant(),
- new Load(cpu, memory, disk),
- generation,
- inService,
- stable,
- 0.0))));
- }
- }
- }
-
- public void storeReadShare(double currentReadShare, double maxReadShare, ApplicationId applicationId) {
- Application application = nodeRepository().applications().require(applicationId);
- application = application.with(application.status().withCurrentReadShare(currentReadShare)
- .withMaxReadShare(maxReadShare));
- nodeRepository().applications().put(application, nodeRepository().nodes().lock(applicationId));
- }
-
/** Creates a single redeployment event with bogus data except for the given duration */
public void setScalingDuration(ApplicationId applicationId, ClusterSpec.Id clusterId, Duration duration) {
Application application = nodeRepository().applications().require(applicationId);
@@ -304,47 +140,6 @@ class AutoscalingTester {
nodeRepository().applications().put(application, nodeRepository().nodes().lock(applicationId));
}
- /** Creates the given number of measurements, spaced 5 minutes between, using the given function */
- public Duration addLoadMeasurements(ApplicationId application,
- ClusterSpec.Id cluster,
- int measurements,
- IntFunction<Double> queryRate,
- IntFunction<Double> writeRate) {
- Instant initialTime = clock().instant();
- for (int i = 0; i < measurements; i++) {
- nodeMetricsDb().addClusterMetrics(application,
- Map.of(cluster, new ClusterMetricSnapshot(clock().instant(),
- queryRate.apply(i),
- writeRate.apply(i))));
- clock().advance(Duration.ofMinutes(5));
- }
- return Duration.between(initialTime, clock().instant());
- }
-
- /** Creates the given number of measurements, spaced 5 minutes between, using the given function */
- public Duration addQueryRateMeasurements(ApplicationId application,
- ClusterSpec.Id cluster,
- int measurements,
- IntFunction<Double> queryRate) {
- return addQueryRateMeasurements(application, cluster, measurements, Duration.ofMinutes(5), queryRate);
- }
-
- public Duration addQueryRateMeasurements(ApplicationId application,
- ClusterSpec.Id cluster,
- int measurements,
- Duration samplingInterval,
- IntFunction<Double> queryRate) {
- Instant initialTime = clock().instant();
- for (int i = 0; i < measurements; i++) {
- nodeMetricsDb().addClusterMetrics(application,
- Map.of(cluster, new ClusterMetricSnapshot(clock().instant(),
- queryRate.apply(i),
- 0.0)));
- clock().advance(samplingInterval);
- }
- return Duration.between(initialTime, clock().instant());
- }
-
public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
capacity = capacityPolicies.applyOn(capacity, applicationId, capacityPolicies.decideExclusivity(capacity, cluster.isExclusive()));
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 56ffc4ef993..88ddb0ad9f8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.autoscale;
+import com.yahoo.collections.Pair;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.Cloud;
@@ -10,14 +11,17 @@ import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
import java.time.Duration;
+import java.time.Instant;
import java.util.Arrays;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import java.util.function.IntFunction;
import java.util.stream.Collectors;
@@ -30,38 +34,51 @@ import java.util.stream.Collectors;
public class Fixture {
final AutoscalingTester tester;
- final ApplicationId application;
- final ClusterSpec cluster;
+ final ApplicationId applicationId;
+ final ClusterSpec clusterSpec;
final Capacity capacity;
+ final Loader loader;
public Fixture(Fixture.Builder builder, Optional<ClusterResources> initialResources) {
- application = builder.application;
- cluster = builder.cluster;
+ applicationId = builder.application;
+ clusterSpec = builder.cluster;
capacity = builder.capacity;
tester = new AutoscalingTester(builder.zone, builder.resourceCalculator, builder.hostResources);
var deployCapacity = initialResources.isPresent() ? Capacity.from(initialResources.get()) : capacity;
tester.deploy(builder.application, builder.cluster, deployCapacity);
+ this.loader = new Loader(this);
}
public AutoscalingTester tester() { return tester; }
- public ApplicationId applicationId() { return application; }
+ public ApplicationId applicationId() { return applicationId; }
- public ClusterSpec.Id clusterId() { return cluster.id(); }
+ public ClusterSpec.Id clusterId() { return clusterSpec.id(); }
public Application application() {
- return tester().nodeRepository().applications().get(application).orElse(Application.empty(application));
+ return tester().nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId));
}
public Cluster cluster() {
return application().cluster(clusterId()).get();
}
+ public ClusterModel clusterModel() {
+ return new ClusterModel(application(),
+ clusterSpec,
+ cluster(),
+ nodes(),
+ tester.nodeRepository().metricsDb(),
+ tester.nodeRepository().clock());
+ }
+
/** Returns the nodes allocated to the fixture application cluster */
public NodeList nodes() {
- return tester().nodeRepository().nodes().list().owner(application).cluster(cluster.id());
+ return tester().nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterSpec.id());
}
+ public Loader loader() { return loader; }
+
/** Autoscale within the deployed capacity of this. */
public Autoscaler.Advice autoscale() {
return autoscale(capacity);
@@ -69,12 +86,12 @@ public class Fixture {
/** Autoscale within the given capacity. */
public Autoscaler.Advice autoscale(Capacity capacity) {
- return tester().autoscale(application, cluster, capacity);
+ return tester().autoscale(applicationId, clusterSpec, capacity);
}
/** Compute an autoscaling suggestion for this. */
public Autoscaler.Advice suggest() {
- return tester().suggest(application, cluster.id(), capacity.minResources(), capacity.maxResources());
+ return tester().suggest(applicationId, clusterSpec.id(), capacity.minResources(), capacity.maxResources());
}
/** Redeploy with the deployed capacity of this. */
@@ -84,62 +101,22 @@ public class Fixture {
/** Redeploy with the given capacity. */
public void deploy(Capacity capacity) {
- tester().deploy(application, cluster, capacity);
+ tester().deploy(applicationId, clusterSpec, capacity);
}
public void deactivateRetired(Capacity capacity) {
- tester().deactivateRetired(application, cluster, capacity);
+ tester().deactivateRetired(applicationId, clusterSpec, capacity);
}
public void setScalingDuration(Duration duration) {
- tester().setScalingDuration(application, cluster.id(), duration);
- }
-
- public Duration addCpuMeasurements(double cpuLoad, int measurements) {
- return tester().addCpuMeasurements((float)cpuLoad, 1.0f, measurements, application);
- }
-
- public Duration addLoadMeasurements(int measurements, IntFunction<Double> queryRate, IntFunction<Double> writeRate) {
- return tester().addLoadMeasurements(application, cluster.id(), measurements, queryRate, writeRate);
- }
-
- public void applyCpuLoad(double cpuLoad, int measurements) {
- Duration samplingInterval = Duration.ofSeconds(150L); // in addCpuMeasurements
- tester().addCpuMeasurements((float)cpuLoad, 1.0f, measurements, application);
- tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
- tester().addQueryRateMeasurements(application, cluster.id(), measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
- }
-
- public void applyMemLoad(double memLoad, int measurements) {
- Duration samplingInterval = Duration.ofSeconds(150L); // in addMemMeasurements
- tester().addMemMeasurements((float)memLoad, 1.0f, measurements, application);
- tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
- tester().addQueryRateMeasurements(application, cluster.id(), measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
- }
-
- public void applyDiskLoad(double diskLoad, int measurements) {
- Duration samplingInterval = Duration.ofSeconds(150L); // in addDiskMeasurements
- tester().addDiskMeasurements((float)diskLoad, 1.0f, measurements, application);
- tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
- tester().addQueryRateMeasurements(application, cluster.id(), measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
- }
-
- public void applyLoad(double cpuLoad, double memoryLoad, double diskLoad, int measurements) {
- Duration samplingInterval = Duration.ofSeconds(150L); // in addCpuMeasurements
- tester().addMeasurements((float)cpuLoad, (float)memoryLoad, (float)diskLoad, measurements, application);
- tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
- tester().addQueryRateMeasurements(application, cluster.id(), measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
- }
-
- public void applyLoad(double cpuLoad, double memoryLoad, double diskLoad, int generation, boolean inService, boolean stable, int measurements) {
- Duration samplingInterval = Duration.ofSeconds(150L); // in addCpuMeasurements
- tester().addMeasurements((float)cpuLoad, (float)memoryLoad, (float)diskLoad, generation, inService, stable, measurements, application);
- tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
- tester().addQueryRateMeasurements(application, cluster.id(), measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
+ tester().setScalingDuration(applicationId, clusterSpec.id(), duration);
}
public void storeReadShare(double currentReadShare, double maxReadShare) {
- tester().storeReadShare(currentReadShare, maxReadShare, application);
+ var application = application();
+ application = application.with(application.status().withCurrentReadShare(currentReadShare)
+ .withMaxReadShare(maxReadShare));
+ tester.nodeRepository().applications().put(application, tester.nodeRepository().nodes().lock(applicationId));
}
public static class Builder {
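
After the Fixture changes above, load generation goes through loader() while deployment, autoscaling and the new clusterModel() accessor stay on the fixture itself. A rough sketch of that division of labour, not from the commit (the helper class and method names here are hypothetical):

package com.yahoo.vespa.hosted.provision.autoscale;

import java.time.Duration;

class FixtureSurfaceSketch {

    /** Applies memory load through the Loader, then asks the fixture itself for autoscaling advice. */
    static Autoscaler.Advice adviceUnderMemoryPressure(Fixture fixture) {
        fixture.tester().clock().advance(Duration.ofDays(2));
        fixture.loader().applyMemLoad(0.9, 120);       // was: fixture.applyMemLoad(0.9, 120)
        ClusterModel model = fixture.clusterModel();   // new accessor, replaces AutoscalingTester.clusterModel(...)
        return fixture.autoscale();                    // unchanged: autoscale within the deployed capacity
    }

}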
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Loader.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Loader.java
new file mode 100644
index 00000000000..db4fe917b53
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Loader.java
@@ -0,0 +1,158 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.autoscale;
+
+import com.yahoo.collections.Pair;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.List;
+import java.util.Map;
+import java.util.function.IntFunction;
+
+/**
+ * A helper for applying load to an application represented by a fixture.
+ *
+ * @author bratseth
+ */
+public class Loader {
+
+ private final Fixture fixture;
+
+ public Loader(Fixture fixture) {
+ this.fixture = fixture;
+ }
+
+ /**
+ * Adds measurements with the given resource value and ideal values for the other resources,
+ * scaled to take one node redundancy into account.
+ * (I.e we adjust to measure a bit lower load than "naively" wanted to offset for the autoscaler
+ * wanting to see the ideal load with one node missing.)
+ *
+ * @param count the number of measurements
+ */
+ public Duration addCpuMeasurements(double value, int count) {
+ NodeList nodes = fixture.nodes();
+ float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
+ Instant initialTime = fixture.tester().clock().instant();
+ for (int i = 0; i < count; i++) {
+ fixture.tester().clock().advance(Duration.ofSeconds(150));
+ for (Node node : nodes) {
+ Load load = new Load(value,
+ ClusterModel.idealMemoryLoad,
+ ClusterModel.idealContentDiskLoad).multiply(oneExtraNodeFactor);
+ fixture.tester().nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
+ new NodeMetricSnapshot(fixture.tester().clock().instant(),
+ load,
+ 0,
+ true,
+ true,
+ 0.0))));
+ }
+ }
+ return Duration.between(initialTime, fixture.tester().clock().instant());
+ }
+
+ /** Creates the given number of measurements, spaced 5 minutes between, using the given function */
+ public Duration addLoadMeasurements(int measurements, IntFunction<Double> queryRate, IntFunction<Double> writeRate) {
+ Instant initialTime = fixture.tester().clock().instant();
+ for (int i = 0; i < measurements; i++) {
+ fixture.tester().nodeMetricsDb().addClusterMetrics(fixture.applicationId(),
+ Map.of(fixture.clusterId(), new ClusterMetricSnapshot(fixture.tester().clock().instant(),
+ queryRate.apply(i),
+ writeRate.apply(i))));
+ fixture.tester().clock().advance(Duration.ofMinutes(5));
+ }
+ return Duration.between(initialTime, fixture.tester().clock().instant());
+ }
+
+ public void applyCpuLoad(double cpuLoad, int measurements) {
+ Duration samplingInterval = Duration.ofSeconds(150L); // in addCpuMeasurements
+ addCpuMeasurements((float)cpuLoad, measurements);
+ fixture.tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
+ addQueryRateMeasurements(measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
+ }
+
+ public void applyMemLoad(double memLoad, int measurements) {
+ Duration samplingInterval = Duration.ofSeconds(150L); // in addMemMeasurements
+ addMemMeasurements(memLoad, measurements);
+ fixture.tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
+ addQueryRateMeasurements(measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
+ }
+
+ /**
+ * Adds measurements with the given resource value and ideal values for the other resources,
+ * scaled to take one node redundancy into account.
+ * (I.e we adjust to measure a bit lower load than "naively" wanted to offset for the autoscaler
+ * wanting to see the ideal load with one node missing.)
+ */
+ public void addMemMeasurements(double value, int count) {
+ NodeList nodes = fixture.nodes();
+ float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
+ for (int i = 0; i < count; i++) {
+ fixture.tester().clock().advance(Duration.ofMinutes(1));
+ for (Node node : nodes) {
+ Load load = new Load(0.2,
+ value,
+ ClusterModel.idealContentDiskLoad).multiply(oneExtraNodeFactor);
+ fixture.tester().nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
+ new NodeMetricSnapshot(fixture.tester().clock().instant(),
+ load,
+ 0,
+ true,
+ true,
+ 0.0))));
+ }
+ }
+ }
+
+ public Duration addMeasurements(double cpu, double memory, double disk, int count) {
+ return addMeasurements(cpu, memory, disk, 0, true, true, count);
+ }
+
+ public Duration addMeasurements(double cpu, double memory, double disk, int generation, boolean inService, boolean stable,
+ int count) {
+ Instant initialTime = fixture.tester().clock().instant();
+ for (int i = 0; i < count; i++) {
+ fixture.tester().clock().advance(Duration.ofMinutes(1));
+ for (Node node : fixture.nodes()) {
+ fixture.tester().nodeMetricsDb().addNodeMetrics(List.of(new Pair<>(node.hostname(),
+ new NodeMetricSnapshot(fixture.tester().clock().instant(),
+ new Load(cpu, memory, disk),
+ generation,
+ inService,
+ stable,
+ 0.0))));
+ }
+ }
+ return Duration.between(initialTime, fixture.tester().clock().instant());
+ }
+
+ public void applyLoad(double cpuLoad, double memoryLoad, double diskLoad, int measurements) {
+ Duration samplingInterval = Duration.ofSeconds(150L); // in addCpuMeasurements
+ addMeasurements(cpuLoad, memoryLoad, diskLoad, measurements);
+ fixture.tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
+ addQueryRateMeasurements(measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
+ }
+
+ public void applyLoad(double cpuLoad, double memoryLoad, double diskLoad, int generation, boolean inService, boolean stable, int measurements) {
+ Duration samplingInterval = Duration.ofSeconds(150L); // in addCpuMeasurements
+ addMeasurements(cpuLoad, memoryLoad, diskLoad, generation, inService, stable, measurements);
+ fixture.tester().clock().advance(samplingInterval.negated().multipliedBy(measurements));
+ addQueryRateMeasurements(measurements, samplingInterval, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
+ }
+
+ public Duration addQueryRateMeasurements(int measurements, Duration samplingInterval, IntFunction<Double> queryRate) {
+ Instant initialTime = fixture.tester().clock().instant();
+ for (int i = 0; i < measurements; i++) {
+ fixture.tester().nodeMetricsDb().addClusterMetrics(fixture.applicationId(),
+ Map.of(fixture.clusterId(), new ClusterMetricSnapshot(fixture.tester().clock().instant(),
+ queryRate.apply(i),
+ 0.0)));
+ fixture.tester().clock().advance(samplingInterval);
+ }
+ return Duration.between(initialTime, fixture.tester().clock().instant());
+ }
+
+}
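
For reference, the one-extra-node scaling used by addCpuMeasurements and addMemMeasurements above records slightly less load than requested, so the autoscaler still sees roughly the ideal load with one node missing. A tiny arithmetic sketch of that factor (the 5-node cluster size is hypothetical):

public class OneExtraNodeFactorSketch {
    public static void main(String[] args) {
        int nodes = 5;                                                  // hypothetical cluster size
        double oneExtraNodeFactor = (nodes - 1.0) / nodes;              // 4/5 = 0.8, as computed in Loader
        double requestedCpuLoad = 0.5;                                  // value passed to addCpuMeasurements
        double recordedCpuLoad = requestedCpuLoad * oneExtraNodeFactor; // 0.4 stored in each NodeMetricSnapshot
        System.out.println(oneExtraNodeFactor + " -> " + recordedCpuLoad);
    }
}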