diff options
author    | Jon Bratseth <bratseth@verizonmedia.com> | 2020-01-23 10:05:51 +0100
committer | Jon Bratseth <bratseth@verizonmedia.com> | 2020-01-23 10:05:51 +0100
commit    | 137173a4d6e4914497d10b68a89699b685d6b776 (patch)
tree      | 9f10fb8f14a9b0df6139f4de5fd3d79138c0811b /node-repository
parent    | 73bd08b8810794337a6c86f8cc32a031b6583242 (diff)
Use class field
Diffstat (limited to 'node-repository')
2 files changed, 9 insertions, 11 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java index 74d3ac73641..6686160982a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java @@ -84,8 +84,7 @@ public class GroupPreparer { prioritizer.addApplicationNodes(); prioritizer.addSurplusNodes(surplusActiveNodes); prioritizer.addReadyNodes(); - prioritizer.addNewDockerNodes(dynamicProvisioningEnabled && preprovisionCapacityFlag.value().isEmpty(), - application); + prioritizer.addNewDockerNodes(dynamicProvisioningEnabled && preprovisionCapacityFlag.value().isEmpty()); // Allocate from the prioritized list NodeAllocation allocation = new NodeAllocation(nodeList, application, cluster, requestedNodes, diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java index 2e241337949..a5fd37640d8 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java @@ -40,7 +40,7 @@ public class NodePrioritizer { private final LockedNodeList allNodes; private final DockerHostCapacity capacity; private final NodeSpec requestedNodes; - private final ApplicationId appId; + private final ApplicationId application; private final ClusterSpec clusterSpec; private final NameResolver nameResolver; private final boolean isDocker; @@ -50,19 +50,19 @@ public class NodePrioritizer { private final int currentClusterSize; private final Set<Node> spareHosts; - NodePrioritizer(LockedNodeList allNodes, ApplicationId appId, ClusterSpec clusterSpec, 
NodeSpec nodeSpec, + NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec, int spares, int wantedGroups, NameResolver nameResolver, HostResourcesCalculator hostResourcesCalculator, boolean inPlaceResizeEnabled) { this.allNodes = allNodes; this.capacity = new DockerHostCapacity(allNodes, hostResourcesCalculator); this.requestedNodes = nodeSpec; this.clusterSpec = clusterSpec; - this.appId = appId; + this.application = application; this.nameResolver = nameResolver; this.spareHosts = findSpareHosts(allNodes, capacity, spares); this.inPlaceResizeEnabled = inPlaceResizeEnabled; - NodeList nodesInCluster = allNodes.owner(appId).type(clusterSpec.type()).cluster(clusterSpec.id()); + NodeList nodesInCluster = allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id()); NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired(); long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream() .flatMap(node -> node.allocation() @@ -122,9 +122,8 @@ public class NodePrioritizer { * * @param exclusively whether the ready docker nodes should only be added on hosts that * already have nodes allocated to this tenant - * @param application the application we are adding nodes for */ - void addNewDockerNodes(boolean exclusively, ApplicationId application) { + void addNewDockerNodes(boolean exclusively) { if ( ! 
isDocker) return; LockedNodeList candidates = allNodes @@ -135,7 +134,7 @@ public class NodePrioritizer { Set<String> candidateHostnames = candidates.asList().stream() .filter(node -> node.type() == NodeType.tenant) .filter(node -> node.allocation() - .map(a -> a.owner().tenant().equals(appId.tenant())) + .map(a -> a.owner().tenant().equals(this.application.tenant())) .orElse(false)) .flatMap(node -> node.parentHostname().stream()) .collect(Collectors.toSet()); @@ -154,7 +153,7 @@ public class NodePrioritizer { if (host.status().wantToRetire()) continue; boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(host, wantedResources); - boolean conflictingCluster = allNodes.childrenOf(host).owner(appId).asList().stream() + boolean conflictingCluster = allNodes.childrenOf(host).owner(application).asList().stream() .anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id())); if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue; @@ -190,7 +189,7 @@ public class NodePrioritizer { .filter(node -> node.type() == requestedNodes.type()) .filter(node -> legalStates.contains(node.state())) .filter(node -> node.allocation().isPresent()) - .filter(node -> node.allocation().get().owner().equals(appId)) + .filter(node -> node.allocation().get().owner().equals(application)) .map(node -> toPrioritizable(node, false, false)) .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode)); } |