aboutsummaryrefslogtreecommitdiffstats
path: root/node-repository/src
diff options
context:
space:
mode:
authorJon Bratseth <bratseth@vespa.ai>2023-07-07 13:25:29 +0200
committerJon Bratseth <bratseth@vespa.ai>2023-07-07 13:25:29 +0200
commiteb73976f20d1c1b8e0c1e04fa7397a69b44b7e69 (patch)
tree22d563f4c0de4c1b0d00eb560b81fa8a3f7beff5 /node-repository/src
parent348909c64f5968f31c32530f2aa517d9deda65aa (diff)
Don't pick resources which are against recommendations
Diffstat (limited to 'node-repository/src')
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java17
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java23
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java88
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java8
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java40
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java8
9 files changed, 127 insertions, 67 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
index a2ef76e84d0..40d1d50e0e8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
@@ -195,6 +195,7 @@ public class AllocatableClusterResources {
else { // Return the cheapest flavor satisfying the requested resources, if any
NodeResources cappedWantedResources = applicationLimits.cap(wantedResources.nodeResources());
Optional<AllocatableClusterResources> best = Optional.empty();
+ Optional<AllocatableClusterResources> bestDisregardingDiskLimit = Optional.empty();
for (Flavor flavor : nodeRepository.flavors().getFlavors()) {
// Flavor decide resources: Real resources are the worst case real resources we'll get if we ask for these advertised resources
NodeResources advertisedResources = nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor);
@@ -202,7 +203,9 @@ public class AllocatableClusterResources {
// Adjust where we don't need exact match to the flavor
if (flavor.resources().storageType() == NodeResources.StorageType.remote) {
- double diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive).diskGb();
+ double diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive, true).diskGb();
+ if (diskGb > applicationLimits.max().nodeResources().diskGb() || diskGb < applicationLimits.min().nodeResources().diskGb()) // TODO: Remove when disk limit is enforced
+ diskGb = systemLimits.enlargeToLegal(cappedWantedResources, applicationId, clusterSpec, exclusive, false).diskGb();
advertisedResources = advertisedResources.withDiskGb(diskGb);
realResources = realResources.withDiskGb(diskGb);
}
@@ -213,14 +216,24 @@ public class AllocatableClusterResources {
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)) continue;
+
var candidate = new AllocatableClusterResources(wantedResources.with(realResources),
advertisedResources,
wantedResources,
clusterSpec);
+
+ if ( ! systemLimits.isWithinAdvertisedDiskLimits(advertisedResources, clusterSpec)) { // TODO: Remove when disk limit is enforced
+ if (bestDisregardingDiskLimit.isEmpty() || candidate.preferableTo(bestDisregardingDiskLimit.get())) {
+ bestDisregardingDiskLimit = Optional.of(candidate);
+ }
+ continue;
+ }
if (best.isEmpty() || candidate.preferableTo(best.get())) {
best = Optional.of(candidate);
}
}
+ if (best.isEmpty())
+ best = bestDisregardingDiskLimit;
return best;
}
}
@@ -234,7 +247,7 @@ public class AllocatableClusterResources {
boolean bestCase) {
var systemLimits = new NodeResourceLimits(nodeRepository);
var advertisedResources = nodeRepository.resourcesCalculator().realToRequest(wantedResources.nodeResources(), exclusive, bestCase);
- advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive); // Ask for something legal
+ advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive, true); // Ask for something legal
advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail
var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive, bestCase); // What we'll really get
if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index b56e8d1b247..2287b768dee 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits;
import java.util.Optional;
@@ -63,9 +64,8 @@ public class AllocationOptimizer {
availableRealHostResources,
nodeRepository);
if (allocatableResources.isEmpty()) continue;
- if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get())) {
+ if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get()))
bestAllocation = allocatableResources;
- }
}
}
return bestAllocation;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index 61975ca0dc1..8c5a7b6c61e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -38,15 +38,10 @@ public class NodeResourceLimits {
illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, cluster.isExclusive()));
}
- // TODO: Move this check into the above when we are ready to fail, not just warn on this. */
+ // TODO: Remove this when we are ready to fail, not just warn on this.
public boolean isWithinAdvertisedDiskLimits(NodeResources requested, ClusterSpec cluster) {
if (requested.diskGbIsUnspecified() || requested.memoryGbIsUnspecified()) return true;
- double minDiskGb = requested.memoryGb() * switch (cluster.type()) {
- case combined, content -> 3;
- case container -> 2;
- default -> 0; // No constraint on other types
- };
- return requested.diskGb() >= minDiskGb;
+ return requested.diskGb() >= minAdvertisedDiskGb(requested, cluster);
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
@@ -66,9 +61,12 @@ public class NodeResourceLimits {
return true;
}
- public NodeResources enlargeToLegal(NodeResources requested, ApplicationId applicationId, ClusterSpec cluster, boolean exclusive) {
+ public NodeResources enlargeToLegal(NodeResources requested, ApplicationId applicationId, ClusterSpec cluster, boolean exclusive, boolean followRecommendations) {
if (requested.isUnspecified()) return requested;
+ if (followRecommendations) // TODO: Do unconditionally when we enforce this limit
+ requested = requested.withDiskGb(Math.max(minAdvertisedDiskGb(requested, cluster), requested.diskGb()));
+
return requested.withVcpu(Math.max(minAdvertisedVcpu(applicationId, cluster), requested.vcpu()))
.withMemoryGb(Math.max(minAdvertisedMemoryGb(cluster), requested.memoryGb()))
.withDiskGb(Math.max(minAdvertisedDiskGb(requested, exclusive), requested.diskGb()));
@@ -92,6 +90,15 @@ public class NodeResourceLimits {
return minRealDiskGb() + reservedDiskSpaceGb(requested.storageType(), exclusive);
}
+ // TODO: Move this check into the above when we are ready to fail, not just warn on this.
+ private double minAdvertisedDiskGb(NodeResources requested, ClusterSpec cluster) {
+ return requested.memoryGb() * switch (cluster.type()) {
+ case combined, content -> 3;
+ case container -> 2;
+ default -> 0; // No constraint on other types
+ };
+ }
+
// Note: Assumes node type 'host'
private long reservedDiskSpaceGb(NodeResources.StorageType storageType, boolean exclusive) {
if (storageType == NodeResources.StorageType.local && ! zone().cloud().allowHostSharing())
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index b6c7324c75c..42b9e53dd8a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.yolean.Exceptions;
import java.util.ArrayList;
import java.util.List;
@@ -39,9 +40,10 @@ class Preparer {
return nodes;
}
catch (NodeAllocationException e) {
+ e.printStackTrace();
throw new NodeAllocationException("Could not satisfy " + requestedNodes +
( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
- " in " + application + " " + cluster + ": " + e.getMessage(),
+ " in " + application + " " + cluster + ": " + Exceptions.toMessageString(e),
e.retryable());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index f49537c3ec8..bd31c7578b9 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -57,11 +57,15 @@ public class AutoscalingTest {
@Test
public void test_autoscaling_single_content_group() {
- var fixture = DynamicProvisioningTester.fixture().awsProdSetup(true).build();
+ var now = new ClusterResources(5, 1, new NodeResources(2, 16, 750, 1));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(true)
+ .initialResources(Optional.of(now))
+ .build();
fixture.loader().applyCpuLoad(0.7f, 10);
var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high",
- 8, 1, 4.0, 9.3, 36.2,
+ 9, 1, 3.6, 8.5, 360.9,
fixture.autoscale());
fixture.deploy(Capacity.from(scaledResources));
@@ -83,7 +87,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
- 7, 1, 1.1, 8.7, 25.4,
+ 8, 1, 1.0, 8.3, 338.4,
fixture.autoscale());
}
@@ -210,7 +214,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 9, 1, 4, 16.0, 25.5,
+ 9, 1, 4, 16.0, 150,
fixture.autoscale());
}
@@ -227,9 +231,9 @@ public class AutoscalingTest {
@Test
public void test_autoscaling_without_traffic_exclusive() {
- var min = new ClusterResources(1, 1, new NodeResources(0.5, 4, 10, 0.3));
- var now = new ClusterResources(4, 1, new NodeResources(8, 16, 10, 0.3));
- var max = new ClusterResources(4, 1, new NodeResources(16, 32, 50, 0.3));
+ var min = new ClusterResources(1, 1, new NodeResources(0.5, 4, 100, 0.3));
+ var now = new ClusterResources(4, 1, new NodeResources(8, 16, 100, 0.3));
+ var max = new ClusterResources(4, 1, new NodeResources(16, 32, 500, 0.3));
var fixture = DynamicProvisioningTester.fixture(min, now, max)
.clusterType(ClusterSpec.Type.container)
.awsProdSetup(false)
@@ -238,7 +242,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(duration.negated());
fixture.loader().zeroTraffic(20, 1);
fixture.tester().assertResources("Scaled down",
- 2, 1, 2, 16, 10,
+ 2, 1, 2, 16, 100,
fixture.autoscale());
}
@@ -256,7 +260,7 @@ public class AutoscalingTest {
fixture.completeLastScaling();
fixture.loader().applyCpuLoad(0.1f, 120);
fixture.tester().assertResources("Scaling down since cpu usage has gone down",
- 3, 1, 2, 16, 27.2,
+ 3, 1, 2, 16, 75.0,
fixture.autoscale());
}
@@ -283,7 +287,7 @@ public class AutoscalingTest {
new NodeResources(100, 1000, 1000, 1, DiskSpeed.any));
var capacity = Capacity.from(min, max);
ClusterResources scaledResources = fixture.tester().assertResources("Scaling up",
- 13, 1, 1.5, 29.1, 26.7,
+ 13, 1, 1.5, 29.1, 87.3,
fixture.autoscale(capacity));
assertEquals("Disk speed from new capacity is used",
DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
@@ -383,15 +387,15 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.4, 240);
fixture.tester().assertResources("Scaling cpu up",
- 6, 6, 5.0, 7.4, 10.0,
+ 6, 6, 5.0, 7.4, 22.3,
fixture.autoscale());
}
@Test
public void autoscaling_respects_group_size_limit() {
- var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
- var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 10, 1));
- var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1));
+ var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 10, 1));
+ var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 100, 1));
+ var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 10000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
.initialResources(Optional.of(now))
@@ -400,7 +404,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.4, 240);
fixture.tester().assertResources("Scaling cpu up",
- 8, 4, 4.6, 4.2, 10.0,
+ 12, 6, 2.8, 4.2, 27.5,
fixture.autoscale());
}
@@ -456,7 +460,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
- 13, 1, 4, 8, 13.6,
+ 13, 1, 4, 8, 100.0,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@@ -491,9 +495,49 @@ public class AutoscalingTest {
}
@Test
+ public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory() {
+ var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1));
+ var now = new ClusterResources(10, 1, new NodeResources(5, 50, 150, 1));
+ var max = new ClusterResources(10, 1, new NodeResources(10, 100, 200, 1));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(true)
+ .initialResources(Optional.of(now))
+ .capacity(Capacity.from(min, max))
+ .build();
+ fixture.tester().clock().advance(Duration.ofDays(2));
+ fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
+ fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
+ 11, 1, 13.0, 60.0, 179.9,
+ fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
+ fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
+ 10, 1, 10.0, 66.2, 198.6,
+ fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
+ }
+
+ @Test
+ public void autoscaling_shouldnt_choose_too_small_disk_compared_to_memory_exclusive() {
+ var min = new ClusterResources(10, 1, new NodeResources(1, 10, 19, 1, DiskSpeed.any, StorageType.remote));
+ var now = new ClusterResources(10, 1, new NodeResources(16, 64, 192, 1, DiskSpeed.any, StorageType.remote));
+ var max = new ClusterResources(10, 1, new NodeResources(30, 200, 500, 1, DiskSpeed.any, StorageType.remote));
+ var fixture = DynamicProvisioningTester.fixture()
+ .awsProdSetup(false)
+ .initialResources(Optional.of(now))
+ .capacity(Capacity.from(min, max))
+ .build();
+ fixture.tester().clock().advance(Duration.ofDays(2));
+ fixture.loader().applyLoad(new Load(0.5, 0.8, 0.1), 120);
+ fixture.tester().assertResources("Suggesting resources where disk is 3x memory (this is a content cluster)",
+ 13, 1, 36.0, 72.0, 900.0,
+ fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
+ fixture.tester().assertResources("Autoscaling to resources where disk is 3x memory (this is a content cluster)",
+ 10, 1, 16.0, 64, 247.5,
+ fixture.tester().autoscale(fixture.applicationId, fixture.clusterSpec, Capacity.from(min, max)));
+ }
+
+ @Test
public void test_autoscaling_group_size_unconstrained() {
var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
- var now = new ClusterResources(5, 5, new NodeResources(3, 100, 100, 1));
+ var now = new ClusterResources(5, 5, new NodeResources(3, 100, 300, 1));
var max = new ClusterResources(20, 20, new NodeResources(10, 1000, 1000, 1));
var fixture = DynamicProvisioningTester.fixture()
.awsProdSetup(true)
@@ -503,7 +547,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
- 10, 5, 7.7, 41.5, 38.5,
+ 10, 5, 7.7, 41.5, 124.6,
fixture.autoscale());
}
@@ -520,7 +564,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(0.9, 120);
fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time",
- 7, 7, 9.4, 78.6, 77.0,
+ 7, 7, 9.4, 78.6, 235.8,
fixture.autoscale());
}
@@ -539,7 +583,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
fixture.tester().assertResources("Scaling up cpu, others down, changing to 1 group is cheaper",
- 7, 1, 3.2, 43.3, 40.1,
+ 7, 1, 3.2, 43.3, 129.8,
fixture.autoscale());
}
@@ -559,7 +603,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
- 5, 1, 1.0, 62.6, 60.1,
+ 5, 1, 1.0, 62.6, 187.7,
fixture.autoscale());
}
@@ -576,7 +620,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(1));
fixture.loader().applyMemLoad(1.0, 1000);
fixture.tester().assertResources("Increase group size to reduce memory load",
- 8, 2, 13.9, 96.3, 60.1,
+ 8, 2, 13.9, 96.3, 288.8,
fixture.autoscale());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 1b677224295..8aaf0eb20e7 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -75,7 +75,7 @@ public class ScalingSuggestionsMaintainerTest {
assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
- assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 11.8 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 14.2 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app2, cluster2, tester).resources().get().toString());
// Utilization goes way down
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
index df0654de8d5..c99728b714b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
@@ -9,6 +9,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeResources.Architecture;
@@ -316,13 +317,6 @@ public class DynamicProvisioningTest {
tester.assertNodes("Allocation specifies memory in the advertised amount",
2, 1, 2, 20, 40,
app1, cluster1);
-
- // Redeploy the same
- tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 20, 40),
- resources(4, 1, 2, 20, 40)));
- tester.assertNodes("Allocation specifies memory in the advertised amount",
- 2, 1, 2, 20, 40,
- app1, cluster1);
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 477101e10e2..a76b576e430 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -498,7 +498,7 @@ public class ProvisioningTest {
@Test
public void test_changing_limits() {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
+ Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 1000, 4));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
.flavors(List.of(hostFlavor))
.build();
@@ -508,52 +508,52 @@ public class ProvisioningTest {
ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
// Initial deployment
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20),
- resources(8, 4, 4, 20, 40)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200),
+ resources(8, 4, 4, 20, 400)));
tester.assertNodes("Initial allocation at min",
- 4, 2, 2, 10, 20,
+ 4, 2, 2, 10, 200,
app1, cluster1);
// Move window above current allocation
- tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 4, 21, 40),
- resources(10, 5, 5, 25, 50)));
+ tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 4, 21, 400),
+ resources(10, 5, 5, 25, 500)));
tester.assertNodes("New allocation at new min",
- 8, 4, 4, 21, 40,
+ 8, 4, 4, 21, 400,
app1, cluster1);
// Move window below current allocation
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20),
- resources(6, 3, 3, 15, 25)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200),
+ resources(6, 3, 3, 15, 250)));
tester.assertNodes("Allocation preserving resources within new limits",
- 6, 2, 3, 14.57, 25,
+ 6, 2, 3, 14.57, 250,
app1, cluster1);
// Widening window does not change allocation
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 15),
- resources(8, 4, 4, 21, 30)));
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 150),
+ resources(8, 4, 4, 21, 300)));
tester.assertNodes("Same allocation",
- 6, 2, 3, 14.57, 25,
+ 6, 2, 3, 14.57, 250,
app1, cluster1);
// Changing limits in opposite directions cause a mixture of min and max
- tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 10, 30, 10),
- resources(4, 2, 14, 40, 13)));
+ tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 10, 30, 100),
+ resources(4, 2, 14, 40, 130)));
tester.assertNodes("A mix of min and max",
- 4, 1, 10, 30, 13,
+ 4, 1, 10, 30, 130,
app1, cluster1);
// Changing group size
- tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 8, 25, 10),
- resources(9, 3, 12, 35, 15)));
+ tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 8, 25, 100),
+ resources(9, 3, 12, 35, 150)));
tester.assertNodes("Groups changed",
- 9, 3, 8, 30, 13,
+ 9, 3, 8, 30, 130,
app1, cluster1);
// Stop specifying node resources
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(6, 3, NodeResources.unspecified()),
new ClusterResources(9, 3, NodeResources.unspecified())));
tester.assertNodes("No change",
- 9, 3, 8, 30, 13,
+ 9, 3, 8, 30, 130,
app1, cluster1);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index 62f42b0d035..a6a988052e6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -513,18 +513,18 @@ public class VirtualNodeProvisioningTest {
2, 1, 20, 16, 50, 1.0,
app1, cluster1);
- var newMinResources = new NodeResources( 5, 6, 11, 1);
- var newMaxResources = new NodeResources(20, 10, 30, 1);
+ var newMinResources = new NodeResources( 5, 6, 18, 1);
+ var newMaxResources = new NodeResources(20, 10, 90, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("New allocation preserves total (redundancy adjusted) resources",
- 7, 1, 5, 6.0, 11, 1.0,
+ 7, 1, 5, 6.0, 18, 1.0,
app1, cluster1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("Redeploying does not cause changes",
- 7, 1, 5, 6.0, 11, 1.0,
+ 7, 1, 5, 6.0, 18, 1.0,
app1, cluster1);
}