From 5ed5240a1617659e0a29cb2d574854866516340b Mon Sep 17 00:00:00 2001
From: Henning Baldersheim
Date: Thu, 30 Sep 2021 12:11:19 +0200
Subject: Read nodes once from node repo when starting prepare. Only re-read
 if node repo has been modified.

---
 .../provision/provisioning/GroupPreparer.java    | 33 +++++++++++++++++-----
 .../hosted/provision/provisioning/Preparer.java  | 19 +++++++------
 2 files changed, 37 insertions(+), 15 deletions(-)

diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 35fc02d7e5d..76ccac9c618 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -31,6 +31,18 @@ public class GroupPreparer {
     private final NodeRepository nodeRepository;
     private final Optional<HostProvisioner> hostProvisioner;
 
+    /**
+     * Contains the list of prepared nodes and the NodesAndHosts object to use for the next prepare call.
+     */
+    public static class PrepareResult {
+        public final List<Node> prepared;
+        public final NodesAndHosts<LockedNodeList> allNodesAndHosts;
+        PrepareResult(List<Node> prepared, NodesAndHosts<LockedNodeList> allNodesAndHosts) {
+            this.prepared = prepared;
+            this.allNodesAndHosts = allNodesAndHosts;
+        }
+    }
+
     public GroupPreparer(NodeRepository nodeRepository, Optional<HostProvisioner> hostProvisioner) {
         this.nodeRepository = nodeRepository;
         this.hostProvisioner = hostProvisioner;
@@ -46,32 +58,39 @@ public class GroupPreparer {
      *                            This method will remove from this list if it finds it needs additional nodes
      * @param indices             the next available node indices for this cluster.
      *                            This method will consume these when it allocates new nodes to the cluster.
-     * @return the list of nodes this cluster group will have allocated if activated
+     * @param allNodesAndHosts    list of all nodes and hosts. Use createNodesAndHostUnlocked to create this for the
+     *                            first invocation, then the previous PrepareResult.allNodesAndHosts for the following ones.
+     * @return the list of nodes this cluster group will have allocated if activated, and the NodesAndHosts to use for the next prepare call
      */
     // Note: This operation may make persisted changes to the set of reserved and inactive nodes,
    // but it may not change the set of active nodes, as the active nodes must stay in sync with the
    // active config model which is changed on activate
-    public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
-                              List<Node> surplusActiveNodes, NodeIndices indices, int wantedGroups) {
+    public PrepareResult prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
+                                 List<Node> surplusActiveNodes, NodeIndices indices, int wantedGroups,
+                                 NodesAndHosts<LockedNodeList> allNodesAndHosts) {
         // Try preparing in memory without global unallocated lock. Most of the time there should be no changes and we
         // can return nodes previously allocated.
-        NodesAndHosts<LockedNodeList> allNodesAndHosts = NodesAndHosts.create(nodeRepository.nodes().list(PROBE_LOCK));
         NodeAllocation probeAllocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes,
                                                            indices::probeNext, wantedGroups, allNodesAndHosts);
         if (probeAllocation.fulfilledAndNoChanges()) {
             List<Node> acceptedNodes = probeAllocation.finalNodes();
             surplusActiveNodes.removeAll(acceptedNodes);
             indices.commitProbe();
-            return acceptedNodes;
+            return new PrepareResult(acceptedNodes, allNodesAndHosts);
         } else {
             // There were some changes, so re-do the allocation with locks
             indices.resetProbe();
-            return prepareWithLocks(application, cluster, requestedNodes, surplusActiveNodes, indices, wantedGroups);
+            List<Node> prepared = prepareWithLocks(application, cluster, requestedNodes, surplusActiveNodes, indices, wantedGroups);
+            return new PrepareResult(prepared, createNodesAndHostUnlocked());
         }
     }
 
+    // Use this to create the allNodesAndHosts parameter for the first invocation of prepare
+    public NodesAndHosts<LockedNodeList> createNodesAndHostUnlocked() { return NodesAndHosts.create(nodeRepository.nodes().list(PROBE_LOCK)); }
+
+    // Note that this will write to the node repo.
     private List<Node> prepareWithLocks(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
-                                        List<Node> surplusActiveNodes, NodeIndices indices, int wantedGroups) {
+                                        List<Node> surplusActiveNodes, NodeIndices indices, int wantedGroups) {
         try (Mutex lock = nodeRepository.nodes().lock(application);
              Mutex allocationLock = nodeRepository.nodes().lockUnallocated()) {
             NodesAndHosts<LockedNodeList> allNodesAndHosts = NodesAndHosts.create(nodeRepository.nodes().list(allocationLock));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index d5b754117cb..a27cf252a3a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -5,9 +5,11 @@ import com.yahoo.config.provision.ApplicationId;
 import com.yahoo.config.provision.ClusterMembership;
 import com.yahoo.config.provision.ClusterSpec;
 import com.yahoo.config.provision.OutOfCapacityException;
+import com.yahoo.vespa.hosted.provision.LockedNodeList;
 import com.yahoo.vespa.hosted.provision.Node;
 import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.NodesAndHosts;
 import com.yahoo.vespa.hosted.provision.node.Nodes;
 
 import java.util.ArrayList;
@@ -57,22 +59,23 @@ class Preparer {
     // but it may not change the set of active nodes, as the active nodes must stay in sync with the
     // active config model which is changed on activate
     private List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
-        NodeList allNodes = nodeRepository.nodes().list();
-        NodeList appNodes = allNodes.owner(application);
+        NodesAndHosts<LockedNodeList> allNodesAndHosts = groupPreparer.createNodesAndHostUnlocked();
+        NodeList appNodes = allNodesAndHosts.nodes().owner(application);
         List<Node> surplusNodes = findNodesInRemovableGroups(appNodes, cluster, wantedGroups);
 
         List<Integer> usedIndices = appNodes.cluster(cluster.id()).mapToList(node -> node.allocation().get().membership().index());
         NodeIndices indices = new NodeIndices(usedIndices, ! cluster.type().isContent());
         List<Node> acceptedNodes = new ArrayList<>();
+
         for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
             ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
-            List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
-                                                        requestedNodes.fraction(wantedGroups), surplusNodes,
-                                                        indices, wantedGroups);
-
+            GroupPreparer.PrepareResult result = groupPreparer.prepare(
+                    application, clusterGroup, requestedNodes.fraction(wantedGroups),
+                    surplusNodes, indices, wantedGroups, allNodesAndHosts);
+            allNodesAndHosts = result.allNodesAndHosts; // Might have changed
+            List<Node> accepted = result.prepared;
             if (requestedNodes.rejectNonActiveParent()) {
-                Nodes nodes = nodeRepository.nodes();
-                NodeList activeHosts = nodes.list(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
+                NodeList activeHosts = allNodesAndHosts.nodes().state(Node.State.active).parents().nodeType(requestedNodes.type().hostType());
                 accepted = accepted.stream()
                                    .filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent())
                                    .collect(Collectors.toList());
--
cgit v1.2.3
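
Below is a minimal sketch of the call pattern this patch introduces, mirroring the loop in Preparer.prepareNodes: the node repo is listed once via createNodesAndHostUnlocked() before the first prepare call, and each PrepareResult hands back the NodesAndHosts to reuse, so the list is only re-read when a prepare call had to take the allocation locks and write to the node repo. The PrepareLoopSketch class and prepareGroups method are illustrative names only, not part of the node repository; the types and calls are taken from the patch above.

    package com.yahoo.vespa.hosted.provision.provisioning;

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Optional;

    import com.yahoo.config.provision.ApplicationId;
    import com.yahoo.config.provision.ClusterSpec;
    import com.yahoo.vespa.hosted.provision.LockedNodeList;
    import com.yahoo.vespa.hosted.provision.Node;
    import com.yahoo.vespa.hosted.provision.NodesAndHosts;

    // Hypothetical caller of the new GroupPreparer API; placed in the same package as GroupPreparer
    // so that NodeSpec and NodeIndices are visible without further imports.
    class PrepareLoopSketch {

        List<Node> prepareGroups(GroupPreparer groupPreparer, ApplicationId application, ClusterSpec cluster,
                                 NodeSpec requestedNodes, List<Node> surplusNodes, NodeIndices indices,
                                 int wantedGroups) {
            // Read the full node list once, without taking the global allocation lock.
            NodesAndHosts<LockedNodeList> allNodesAndHosts = groupPreparer.createNodesAndHostUnlocked();
            List<Node> acceptedNodes = new ArrayList<>();
            for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
                ClusterSpec clusterGroup = cluster.with(Optional.of(ClusterSpec.Group.from(groupIndex)));
                GroupPreparer.PrepareResult result = groupPreparer.prepare(application, clusterGroup,
                                                                           requestedNodes.fraction(wantedGroups),
                                                                           surplusNodes, indices, wantedGroups,
                                                                           allNodesAndHosts);
                // Carry the returned NodesAndHosts into the next iteration; prepare only re-reads the
                // node repo when it had to fall back to prepareWithLocks and persist changes.
                allNodesAndHosts = result.allNodesAndHosts;
                acceptedNodes.addAll(result.prepared);
            }
            return acceptedNodes;
        }
    }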