about summary refs log tree commit diff stats
path: root/node-repository/src
diff options
context:
space:
mode:
authorJon Bratseth <bratseth@vespa.ai>2023-07-12 09:06:04 +0200
committerJon Bratseth <bratseth@vespa.ai>2023-07-12 09:06:04 +0200
commit746370b7ee46d6c11d78e81435b475dc415c4552 (patch)
treea7b9f122deb7d1b0efcd9fcbf667e6b493f4a47f /node-repository/src
parent54c45551d4deeb423e254e0f721806fd6b7f89c5 (diff)
Remove unnecessary tracking of surplus nodes
Diffstat (limited to 'node-repository/src')
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java25
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java16
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java20
4 files changed, 12 insertions(+), 51 deletions(-)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index 331759127e4..8213286639c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -273,7 +273,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
NodePrioritizer prioritizer = new NodePrioritizer(allNodes, applicationId, clusterSpec, nodeSpec,
true, nodeRepository().nameResolver(), nodeRepository().nodes(), nodeRepository().resourcesCalculator(),
nodeRepository().spareCount(), nodeSpec.cloudAccount().isExclave(nodeRepository().zone()));
- List<NodeCandidate> nodeCandidates = prioritizer.collect(List.of());
+ List<NodeCandidate> nodeCandidates = prioritizer.collect();
MutableInteger index = new MutableInteger(0);
return nodeCandidates
.stream()
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 8600be62aa7..283ce5f88c2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -46,33 +46,28 @@ public class GroupPreparer {
* @param application the application we are allocating to
* @param cluster the cluster and group we are allocating to
* @param requestedNodes a specification of the requested nodes
- * @param surplusActiveNodes currently active nodes which are available to be assigned to this group.
- * This method will remove from this list if it finds it needs additional nodes
* @param allNodes list of all nodes and hosts
* @return the list of nodes this cluster group will have allocated if activated
*/
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
- List<Node> surplusActiveNodes, LockedNodeList allNodes) {
+ public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, LockedNodeList allNodes) {
log.log(Level.FINE, () -> "Preparing " + cluster.type().name() + " " + cluster.id() + " with requested resources " +
requestedNodes.resources().orElse(NodeResources.unspecified()));
// Try preparing in memory without global unallocated lock. Most of the time there should be no changes,
// and we can return nodes previously allocated.
NodeIndices indices = new NodeIndices(cluster.id(), allNodes);
- NodeAllocation probeAllocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes,
- indices::probeNext, allNodes);
+ NodeAllocation probeAllocation = prepareAllocation(application, cluster, requestedNodes, indices::probeNext, allNodes);
if (probeAllocation.fulfilledAndNoChanges()) {
List<Node> acceptedNodes = probeAllocation.finalNodes();
- surplusActiveNodes.removeAll(acceptedNodes);
indices.commitProbe();
return acceptedNodes;
} else {
// There were some changes, so re-do the allocation with locks
indices.resetProbe();
- return prepareWithLocks(application, cluster, requestedNodes, surplusActiveNodes, indices);
+ return prepareWithLocks(application, cluster, requestedNodes, indices);
}
}
@@ -80,13 +75,11 @@ public class GroupPreparer {
LockedNodeList createUnlockedNodeList() { return nodeRepository.nodes().list(PROBE_LOCK); }
/// Note that this will write to the node repo.
- private List<Node> prepareWithLocks(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
- List<Node> surplusActiveNodes, NodeIndices indices) {
+ private List<Node> prepareWithLocks(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, NodeIndices indices) {
try (Mutex lock = nodeRepository.applications().lock(application);
Mutex allocationLock = nodeRepository.nodes().lockUnallocated()) {
LockedNodeList allNodes = nodeRepository.nodes().list(allocationLock);
- NodeAllocation allocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes,
- indices::next, allNodes);
+ NodeAllocation allocation = prepareAllocation(application, cluster, requestedNodes, indices::next, allNodes);
NodeType hostType = allocation.nodeType().hostType();
if (canProvisionDynamically(hostType) && allocation.hostDeficit().isPresent()) {
HostSharing sharing = hostSharing(cluster, hostType);
@@ -128,7 +121,7 @@ public class GroupPreparer {
// Non-dynamically provisioned zone with a deficit because we just now retired some nodes.
// Try again, but without retiring
indices.resetProbe();
- List<Node> accepted = prepareWithLocks(application, cluster, cns.withoutRetiring(), surplusActiveNodes, indices);
+ List<Node> accepted = prepareWithLocks(application, cluster, cns.withoutRetiring(), indices);
log.warning("Prepared " + application + " " + cluster.id() + " without retirement due to lack of capacity");
return accepted;
}
@@ -140,14 +133,12 @@ public class GroupPreparer {
List<Node> acceptedNodes = allocation.finalNodes();
nodeRepository.nodes().reserve(allocation.reservableNodes());
nodeRepository.nodes().addReservedNodes(new LockedNodeList(allocation.newNodes(), allocationLock));
- surplusActiveNodes.removeAll(acceptedNodes);
return acceptedNodes;
}
}
private NodeAllocation prepareAllocation(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
- List<Node> surplusActiveNodes, Supplier<Integer> nextIndex,
- LockedNodeList allNodes) {
+ Supplier<Integer> nextIndex, LockedNodeList allNodes) {
NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requestedNodes, nextIndex, nodeRepository);
NodePrioritizer prioritizer = new NodePrioritizer(allNodes,
@@ -160,7 +151,7 @@ public class GroupPreparer {
nodeRepository.resourcesCalculator(),
nodeRepository.spareCount(),
requestedNodes.cloudAccount().isExclave(nodeRepository.zone()));
- allocation.offer(prioritizer.collect(surplusActiveNodes));
+ allocation.offer(prioritizer.collect());
return allocation;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 9f00e5fdbba..3c33897af57 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -81,9 +81,8 @@ public class NodePrioritizer {
}
/** Collects all node candidates for this application and returns them in the most-to-least preferred order */
- public List<NodeCandidate> collect(List<Node> surplusActiveNodes) {
+ public List<NodeCandidate> collect() {
addApplicationNodes();
- addSurplusNodes(surplusActiveNodes);
addReadyNodes();
addCandidatesOnExistingHosts();
return prioritize();
@@ -115,19 +114,6 @@ public class NodePrioritizer {
return nodes;
}
- /**
- * Add nodes that have been previously reserved to the same application from
- * an earlier downsizing of a cluster
- */
- private void addSurplusNodes(List<Node> surplusNodes) {
- for (Node node : surplusNodes) {
- NodeCandidate candidate = candidateFrom(node, true);
- if (!candidate.violatesSpares || canAllocateToSpareHosts) {
- candidates.add(candidate);
- }
- }
- }
-
/** Add a node on each host with enough capacity for the requested flavor */
private void addCandidatesOnExistingHosts() {
if (requestedNodes.resources().isEmpty()) return;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 5b2578d7a12..e4a7e0326e2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -12,7 +12,6 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
-import java.util.stream.Collectors;
/**
* Performs preparation of node activation changes for an application.
@@ -54,15 +53,14 @@ class Preparer {
// active config model which is changed on activate
private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requested) {
LockedNodeList allNodes = groupPreparer.createUnlockedNodeList();
- List<Node> surplusNodes = findNodesInRemovableGroups(application, requested.groups(), allNodes);
- List<Node> accepted = groupPreparer.prepare(application, cluster, requested, surplusNodes, allNodes);
+ List<Node> accepted = groupPreparer.prepare(application, cluster, requested, allNodes);
if (requested.rejectNonActiveParent()) { // TODO: Move to NodeAllocation
NodeList activeHosts = allNodes.state(Node.State.active).parents().nodeType(requested.type().hostType());
accepted = accepted.stream()
.filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
.toList();
}
- return accepted.stream().filter(node -> ! surplusNodes.contains(node)).collect(Collectors.toList());
+ return new ArrayList<>(accepted);
}
/** Prepare a load balancer for given application and cluster */
@@ -70,18 +68,4 @@ class Preparer {
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
}
- /**
- * Returns a list of the nodes which are
- * in groups with index number above or equal the group count
- */
- private List<Node> findNodesInRemovableGroups(ApplicationId application, int wantedGroups, NodeList allNodes) {
- List<Node> surplusNodes = new ArrayList<>();
- for (Node node : allNodes.owner(application).state(Node.State.active)) {
- ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
- if (nodeCluster.group().get().index() >= wantedGroups)
- surplusNodes.add(node);
- }
- return surplusNodes;
- }
-
}