author | Valerij Fredriksen <valerijf@verizonmedia.com> | 2019-02-05 16:39:54 +0100
committer | Valerij Fredriksen <valerij92@gmail.com> | 2019-02-06 23:58:59 +0100
commit | d8a0298c58418f2eabeda23d09563ea0304cd9c8 (patch)
tree | a00c041728de0a5c473d5b1439a1e7711228d755
parent | 7c071ebfcce392c669756be87c5974595bafe0b1 (diff)
Get NodeList once
3 files changed, 33 insertions(+), 25 deletions(-)
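The pattern in this commit is to read the full node list once per prepare call and hand that snapshot to the collaborators, together with only the narrow dependencies they actually use (Zone and Clock), instead of letting each collaborator call back into NodeRepository. A minimal standalone sketch of the idea, using simplified stand-in types (Repository, Allocator, Preparer and Node below are illustrative, not the real node-repository classes):

    import java.time.Clock;
    import java.util.List;

    // Illustrative stand-ins for the real node-repository types.
    record Node(String hostname) {}

    interface Repository {
        List<Node> list();   // potentially expensive read of the full node set
        Clock clock();
    }

    // The collaborator receives an immutable snapshot plus only the narrow
    // dependencies it needs, and never calls back into the repository.
    class Allocator {
        private final List<Node> allNodes; // snapshot taken once by the caller
        private final Clock clock;

        Allocator(List<Node> allNodes, Clock clock) {
            this.allNodes = allNodes;
            this.clock = clock;
        }

        void allocate() {
            // Every decision below sees the same consistent view of the nodes.
            System.out.println("allocating among " + allNodes.size()
                    + " nodes at " + clock.instant());
        }
    }

    class Preparer {
        void prepare(Repository repository) {
            List<Node> nodes = repository.list();            // read once
            Allocator allocator = new Allocator(nodes, repository.clock());
            allocator.allocate();                            // reuse the snapshot
        }
    }

Besides saving repeated reads, every consumer within one prepare pass now works from the same consistent view of the nodes.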
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 03d2557d8b6..ee754b5d89c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.OutOfCapacityException;
 import com.yahoo.lang.MutableInteger;
 import com.yahoo.transaction.Mutex;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 
 import java.util.List;
@@ -48,12 +49,9 @@ public class GroupPreparer {
         try (Mutex allocationLock = nodeRepository.lockAllocation()) {
 
             // Create a prioritized set of nodes
-            NodePrioritizer prioritizer = new NodePrioritizer(nodeRepository.getNodes(),
-                                                              application,
-                                                              cluster,
-                                                              requestedNodes,
-                                                              spareCount,
-                                                              nodeRepository.nameResolver());
+            NodeList nodeList = nodeRepository.list();
+            NodePrioritizer prioritizer = new NodePrioritizer(
+                    nodeList, application, cluster, requestedNodes, spareCount, nodeRepository.nameResolver());
 
             prioritizer.addApplicationNodes();
             prioritizer.addSurplusNodes(surplusActiveNodes);
@@ -61,7 +59,8 @@ public class GroupPreparer {
             prioritizer.addNewDockerNodes(allocationLock);
 
             // Allocate from the prioritized list
-            NodeAllocation allocation = new NodeAllocation(application, cluster, requestedNodes, highestIndex, nodeRepository);
+            NodeAllocation allocation = new NodeAllocation(nodeList, application, cluster, requestedNodes,
+                                                           highestIndex, nodeRepository.zone(), nodeRepository.clock());
             allocation.offer(prioritizer.prioritize());
 
             if (! allocation.fulfilled() && requestedNodes.canFail())
                 throw new OutOfCapacityException("Could not satisfy " + requestedNodes + " for " + cluster +
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index 39ce9377969..7a00665523c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -6,12 +6,14 @@ import com.yahoo.config.provision.ClusterMembership;
 import com.yahoo.config.provision.ClusterSpec;
 import com.yahoo.config.provision.SystemName;
 import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.Zone;
 import com.yahoo.lang.MutableInteger;
 import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.node.Agent;
 import com.yahoo.vespa.hosted.provision.node.Allocation;
 
+import java.time.Clock;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Comparator;
@@ -30,6 +32,9 @@ import java.util.stream.Collectors;
  */
 class NodeAllocation {
 
+    /** List of all nodes in node-repository */
+    private final NodeList allNodes;
+
     /** The application this list is for */
     private final ApplicationId application;
 
@@ -60,15 +65,19 @@ class NodeAllocation {
     /** The next membership index to assign to a new node */
     private final MutableInteger highestIndex;
 
-    private final NodeRepository nodeRepository;
+    private final Zone zone;
+
+    private final Clock clock;
 
-    NodeAllocation(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, MutableInteger highestIndex,
-                   NodeRepository nodeRepository) {
+    NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
+                   MutableInteger highestIndex, Zone zone, Clock clock) {
+        this.allNodes = allNodes;
         this.application = application;
         this.cluster = cluster;
         this.requestedNodes = requestedNodes;
         this.highestIndex = highestIndex;
-        this.nodeRepository = nodeRepository;
+        this.zone = zone;
+        this.clock = clock;
     }
 
     /**
@@ -128,7 +137,7 @@ class NodeAllocation {
                 }
                 offeredPriority.node = offered.allocate(application,
                                                         ClusterMembership.from(cluster, highestIndex.add(1)),
-                                                        nodeRepository.clock().instant());
+                                                        clock.instant());
                 accepted.add(acceptNode(offeredPriority, false));
             }
         }
@@ -142,7 +151,7 @@ class NodeAllocation {
     }
 
     private boolean checkForClashingParentHost() {
-        return nodeRepository.zone().system() == SystemName.main && nodeRepository.zone().environment().isProduction();
+        return zone.system() == SystemName.main && zone.environment().isProduction();
     }
 
     private boolean offeredNodeHasParentHostnameAlreadyAccepted(Collection<PrioritizableNode> accepted, Node offered) {
@@ -161,7 +170,7 @@
      */
     private boolean exclusiveTo(TenantName tenant, Optional<String> parentHostname) {
         if ( ! parentHostname.isPresent()) return true;
-        for (Node nodeOnHost : nodeRepository.list().childrenOf(parentHostname.get())) {
+        for (Node nodeOnHost : allNodes.childrenOf(parentHostname.get())) {
             if ( ! nodeOnHost.allocation().isPresent()) continue;
             if ( nodeOnHost.allocation().get().membership().cluster().isExclusive() &&
@@ -174,7 +183,7 @@
     private boolean hostsOnly(TenantName tenant, Optional<String> parentHostname) {
         if ( ! parentHostname.isPresent()) return true; // yes, as host is exclusive
 
-        for (Node nodeOnHost : nodeRepository.list().childrenOf(parentHostname.get())) {
+        for (Node nodeOnHost : allNodes.childrenOf(parentHostname.get())) {
             if ( ! nodeOnHost.allocation().isPresent()) continue;
             if ( ! nodeOnHost.allocation().get().owner().tenant().equals(tenant)) return false;
@@ -221,7 +230,7 @@ class NodeAllocation {
             } else {
                 ++wasRetiredJustNow;
                 // Retire nodes which are of an unwanted flavor, retired flavor or have an overlapping parent host
-                node = node.retire(nodeRepository.clock().instant());
+                node = node.retire(clock.instant());
                 prioritizableNode.node = node;
             }
             if ( ! node.allocation().get().membership().cluster().equals(cluster)) {
@@ -278,7 +287,7 @@ class NodeAllocation {
         if (deltaRetiredCount > 0) { // retire until deltaRetiredCount is 0, prefer to retire higher indexes to minimize redistribution
             for (PrioritizableNode node : byDecreasingIndex(nodes)) {
                 if ( ! node.node.allocation().get().membership().retired() && node.node.state().equals(Node.State.active)) {
-                    node.node = node.node.retire(Agent.application, nodeRepository.clock().instant());
+                    node.node = node.node.retire(Agent.application, clock.instant());
                     surplusNodes.add(node.node); // offer this node to other groups
                     if (--deltaRetiredCount == 0) break;
                 }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 51f5eb982d4..a5e5473e513 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -31,7 +31,7 @@ import java.util.stream.Collectors;
  *
  * @author smorgrav
  */
-public class NodePrioritizer {
+class NodePrioritizer {
 
     private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName());
@@ -46,9 +46,9 @@ public class NodePrioritizer {
     private final boolean isAllocatingForReplacement;
     private final Set<Node> spareHosts;
 
-    NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec,
+    NodePrioritizer(NodeList allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec,
                     int spares, NameResolver nameResolver) {
-        this.allNodes = new NodeList(allNodes);
+        this.allNodes = allNodes;
         this.capacity = new DockerHostCapacity(allNodes);
         this.requestedNodes = nodeSpec;
         this.clusterSpec = clusterSpec;
@@ -57,14 +57,14 @@ public class NodePrioritizer {
 
         this.spareHosts = findSpareHosts(allNodes, capacity, spares);
 
-        int nofFailedNodes = (int) allNodes.stream()
+        int nofFailedNodes = (int) allNodes.asList().stream()
                 .filter(node -> node.state().equals(Node.State.failed))
                 .filter(node -> node.allocation().isPresent())
                 .filter(node -> node.allocation().get().owner().equals(appId))
                 .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                 .count();
 
-        int nofNodesInCluster = (int) allNodes.stream()
+        int nofNodesInCluster = (int) allNodes.asList().stream()
                 .filter(node -> node.allocation().isPresent())
                 .filter(node -> node.allocation().get().owner().equals(appId))
                 .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
@@ -80,8 +80,8 @@ public class NodePrioritizer {
      * We do not count retired or inactive nodes as used capacity (as they could have been
      * moved to create space for the spare node in the first place).
      */
-    private static Set<Node> findSpareHosts(List<Node> nodes, DockerHostCapacity capacity, int spares) {
-        return nodes.stream()
+    private static Set<Node> findSpareHosts(NodeList nodes, DockerHostCapacity capacity, int spares) {
+        return nodes.asList().stream()
                 .filter(node -> node.type().equals(NodeType.host))
                 .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
                 .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
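A side benefit worth noting: because NodeAllocation now takes Zone and Clock directly instead of the whole NodeRepository, it can be exercised with a fixed clock and an in-memory node list, without standing up a repository. A hedged sketch of that test wiring, reusing the illustrative Allocator and Node stand-ins from the sketch above (not the real test setup in this repository):

    import java.time.Clock;
    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.util.List;

    class AllocatorSketchTest {
        void allocatesWithFrozenTime() {
            // Clock.fixed makes allocation/retirement timestamps reproducible.
            Clock fixed = Clock.fixed(Instant.parse("2019-02-05T15:39:54Z"), ZoneOffset.UTC);
            Allocator allocator = new Allocator(List.of(new Node("host1.example")), fixed);
            allocator.allocate();
        }
    }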