author     jonmv <venstad@gmail.com>  2023-05-30 12:00:20 +0200
committer  jonmv <venstad@gmail.com>  2023-05-30 12:00:20 +0200
commit     f20e878c588dcc45a0d3b9d1abeb9f1e43a01201 (patch)
tree       d7c1b064016baf9c149e62c1cbe65fda346514d9 /node-repository
parent     9d59de7b07d5476ed587df0c361406fb3ce4b368 (diff)
Reuse empty exclusive hosts when nodes fit exactly on the host
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java         | 14
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java | 52
2 files changed, 57 insertions(+), 9 deletions(-)
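In brief: when collecting candidates on existing hosts, a host that is exclusive to an application used to be skipped unconditionally; with this change it is reused when the requested node resources fit the host exactly. Below is a self-contained toy sketch of that rule. The Resources and Host types and the isCandidate helper are hypothetical stand-ins for the real node-repository classes, and plain equality stands in for NodeResources.compatibleWith, which the new fitsPerfectly helper uses.

    import java.util.Optional;

    // Toy model of the rule this commit introduces: an exclusive host is only a
    // candidate when the requested node would fill it exactly.
    public class ExclusiveHostReuse {

        record Resources(double vcpu, double memoryGb, double diskGb) { }
        record Host(Resources resources, Optional<String> exclusiveToApplicationId) { }

        // Mirrors the new guard in NodePrioritizer.addCandidatesOnExistingHosts();
        // exact equality stands in for NodeResources.compatibleWith.
        static boolean isCandidate(Host host, Resources requested) {
            if (host.exclusiveToApplicationId().isPresent() && ! requested.equals(host.resources()))
                return false; // exclusive host that the requested node would not fill completely
            return true;      // non-exclusive host, or a perfect fit on an exclusive one
        }

        public static void main(String[] args) {
            Host emptyExclusiveHost = new Host(new Resources(4, 80, 100), Optional.of("my-app"));
            System.out.println(isCandidate(emptyExclusiveHost, new Resources(4, 80, 100))); // true: perfect fit, reuse the host
            System.out.println(isCandidate(emptyExclusiveHost, new Resources(2, 20, 50)));  // false: would leave stranded capacity
        }
    }

The test added below exercises exactly these two cases: redeploying with the original, matching flavour reuses the empty exclusive hosts, while deploying a smaller flavour provisions new hosts instead.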
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 2334ff48aac..265040823b2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -38,8 +38,6 @@ public class NodePrioritizer {
private final NameResolver nameResolver;
private final Nodes nodes;
private final boolean dynamicProvisioning;
- /** Whether node specification allows new nodes to be allocated. */
- private final boolean canAllocateNew;
private final boolean canAllocateToSpareHosts;
private final boolean topologyChange;
private final int currentClusterSize;
@@ -81,9 +79,6 @@ public class NodePrioritizer {
// NodeCandidate::compareTo will ensure that they will not be used until there is no room elsewhere.
// In non-dynamically provisioned zones, we only allow allocating to spare hosts to replace failed nodes.
this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
- // Do not allocate new nodes for exclusive deployments in dynamically provisioned zones: provision new host instead.
- this.canAllocateNew = requestedNodes instanceof NodeSpec.CountNodeSpec
- && (!dynamicProvisioning || !requestedNodes.isExclusive());
}
/** Collects all node candidates for this application and returns them in the most-to-least preferred order */
@@ -136,15 +131,14 @@ public class NodePrioritizer {
/** Add a node on each host with enough capacity for the requested flavor */
private void addCandidatesOnExistingHosts() {
- if ( !canAllocateNew) return;
+ if (requestedNodes.resources().isEmpty()) return;
for (Node host : allNodes) {
if ( ! nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
if (nodes.suspended(host)) continue; // Hosts that are suspended may be down for some time, e.g. for OS upgrade
if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
- // TODO jonmv: allow reusing exclusive hosts after all, if the host matches what we'd provision for that node anyway.
- if (host.exclusiveToApplicationId().isPresent()) continue; // Never allocate new nodes to exclusive hosts
+ if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
if ( ! capacity.hasCapacity(host, requestedNodes.resources().get())) continue;
@@ -160,6 +154,10 @@ public class NodePrioritizer {
}
}
+ private boolean fitsPerfectly(Node host) {
+ return requestedNodes.resources().get().compatibleWith(host.resources());
+ }
+
/** Add existing nodes allocated to the application */
private void addApplicationNodes() {
EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved);
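For contrast, the entry guard of addCandidatesOnExistingHosts() before and after this change, as read from the hunks above, with explanatory comments added (everything else in the method is unchanged):

    // Before: a flag computed in the constructor; it required a count-based node spec and
    // disallowed new nodes for exclusive deployments in dynamically provisioned zones.
    if ( ! canAllocateNew) return;

    // After: only concrete requested resources are required up front; the exclusive-host
    // case is decided per host further down, via the new fitsPerfectly(host) check.
    if (requestedNodes.resources().isEmpty()) return;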
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
index 60ac30293a3..d4a911fa1ab 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
@@ -21,6 +21,7 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.Node.State;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
@@ -31,6 +32,7 @@ import org.junit.Test;
import java.time.Instant;
import java.util.Collection;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -85,7 +87,6 @@ public class DynamicProvisioningTest {
assertEquals(20, tester.nodeRepository().nodes().list().size());
assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
assertEquals(12, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
- assert false: "TODO: test with TTL";
}
@Test
@@ -117,6 +118,55 @@ public class DynamicProvisioningTest {
}
@Test
+ public void empty_exclusive_to_hosts_reused_iff_new_allocation_fits_perfectly() {
+ var tester = tester(true);
+
+ NodeResources highResources = new NodeResources(4, 80, 100, 1);
+ NodeResources lowResources = new NodeResources(2, 20, 50, 1);
+
+ ApplicationId application = ProvisioningTester.applicationId();
+ prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, highResources, tester);
+
+ // Total of 4 nodes should now be in node-repo, 2 active hosts and 2 active nodes.
+ assertEquals(4, tester.nodeRepository().nodes().list().size());
+ assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+
+ // Redeploying the application causes no changes at all.
+ prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, highResources, tester);
+ assertEquals(4, tester.nodeRepository().nodes().list().size());
+ assertEquals(2, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+
+ // Deploying with a smaller node flavour causes new, smaller hosts to be provisioned.
+ prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, lowResources, tester);
+
+ // Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes, of which 2 are retired.
+ NodeList nodes = tester.nodeRepository().nodes().list();
+ assertEquals(8, nodes.size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
+ assertEquals(4, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ assertEquals(2, nodes.retired().size());
+
+ // Remove the child nodes, and redeploy with the original flavour. This should reuse the existing hosts.
+ tester.nodeRepository().database().writeTo(State.deprovisioned, nodes.retired().asList(), Agent.operator, Optional.empty());
+ tester.nodeRepository().nodes().list().state(State.deprovisioned).forEach(tester.nodeRepository().nodes()::forget);
+
+ // Total of 6 nodes should now be in node-repo, 4 active hosts and 2 active nodes.
+ nodes = tester.nodeRepository().nodes().list();
+ assertEquals(6, nodes.size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
+ assertEquals(2, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ assertEquals(0, nodes.retired().size());
+
+ // Deploy again with high resources.
+ prepareAndActivate(application, clusterSpec("mycluster", true), 2, 1, highResources, tester);
+ // Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes.
+ nodes = tester.nodeRepository().nodes().list();
+ assertEquals(8, nodes.size());
+ assertEquals(4, nodes.nodeType(NodeType.host).state(Node.State.active).size());
+ assertEquals(4, nodes.nodeType(NodeType.tenant).state(Node.State.active).size());
+ }
+
+ @Test
public void avoids_allocating_to_empty_hosts() {
var tester = tester(false);
tester.makeReadyHosts(6, new NodeResources(12, 12, 200, 12));