Diffstat (limited to 'node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java | 40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index dc6c0c7afa0..b92d6fb6d18 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -34,33 +34,39 @@ public class NodePrioritizer {
private final HostCapacity capacity;
private final HostResourcesCalculator calculator;
private final NodeSpec requested;
+ private final ApplicationId application;
+ private final ClusterSpec clusterSpec;
private final IP.Allocation.Context ipAllocationContext;
private final Nodes nodes;
private final boolean dynamicProvisioning;
private final boolean allowHostSharing;
- private final AllocationParams params;
+ private final boolean exclusiveAllocation;
+ private final boolean makeExclusive;
private final boolean canAllocateToSpareHosts;
private final boolean topologyChange;
private final int currentClusterSize;
private final Set<Node> spareHosts;

- public NodePrioritizer(AllocationParams params, LockedNodeList allNodes, NodeSpec nodeSpec,
+ public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
boolean dynamicProvisioning, boolean allowHostSharing, IP.Allocation.Context ipAllocationContext, Nodes nodes,
- HostResourcesCalculator hostResourcesCalculator, int spareCount) {
+ HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean exclusiveAllocation, boolean makeExclusive) {
this.allNodes = allNodes;
this.calculator = hostResourcesCalculator;
this.capacity = new HostCapacity(this.allNodes, hostResourcesCalculator);
this.requested = nodeSpec;
+ this.clusterSpec = clusterSpec;
+ this.application = application;
this.dynamicProvisioning = dynamicProvisioning;
this.allowHostSharing = allowHostSharing;
- this.params = params;
+ this.exclusiveAllocation = exclusiveAllocation;
+ this.makeExclusive = makeExclusive;
this.spareHosts = dynamicProvisioning ?
capacity.findSpareHostsInDynamicallyProvisionedZones(this.allNodes.asList()) :
capacity.findSpareHosts(this.allNodes.asList(), spareCount);
this.ipAllocationContext = ipAllocationContext;
this.nodes = nodes;

- NodeList nodesInCluster = this.allNodes.owner(params.application()).type(params.cluster().type()).cluster(params.cluster().id());
+ NodeList nodesInCluster = this.allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream()
.flatMap(node -> node.allocation()
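This first hunk replaces the AllocationParams carrier with explicitly passed values. A hypothetical call site sketching both styles; the variable names are illustrative, but the parameter order and the params accessors are taken from the lines changed here:

// Before: shared state travels in a single AllocationParams object.
NodePrioritizer before = new NodePrioritizer(params, allNodes, nodeSpec,
                                             dynamicProvisioning, allowHostSharing,
                                             ipAllocationContext, nodes,
                                             calculator, spareCount);

// After: the carrier is unpacked and each value is passed explicitly.
NodePrioritizer after = new NodePrioritizer(allNodes, params.application(), params.cluster(), nodeSpec,
                                            dynamicProvisioning, allowHostSharing,
                                            ipAllocationContext, nodes,
                                            calculator, spareCount,
                                            params.exclusiveAllocation(), params.makeExclusive());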
@@ -75,7 +81,7 @@ public class NodePrioritizer {
// In dynamically provisioned zones, we can always take spare hosts since we can provision new on-demand,
// NodeCandidate::compareTo will ensure that they will not be used until there is no room elsewhere.
// In non-dynamically provisioned zones, we only allow allocating to spare hosts to replace failed nodes.
- this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, params.cluster().group());
+ this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
}

/** Collects all node candidates for this application and returns them in the most-to-least preferred order */
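The comment above encodes a two-case spare-host policy. Restated as a standalone predicate for illustration (isReplacement mirrors the private helper called in the constructor; this is a sketch, not code from the change):

// Spare hosts are always usable when hosts can be provisioned on demand
// (NodeCandidate::compareTo still ranks them last); otherwise they are
// reserved for replacing failed nodes.
static boolean mayAllocateToSpareHosts(boolean dynamicProvisioning, boolean isReplacement) {
    return dynamicProvisioning || isReplacement;
}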
@@ -119,19 +125,19 @@ public class NodePrioritizer {
for (Node host : allNodes) {
if ( ! nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
if (nodes.suspended(host)) continue; // Hosts that are suspended may be down for some time, e.g. for OS upgrade
- if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(params.application().tenant())) continue;
- if (host.reservedTo().isPresent() && params.application().instance().isTester()) continue;
- if (params.makeExclusive()) {
- if ( ! allowHostSharing && params.exclusiveAllocation() && ! fitsPerfectly(host)) continue;
+ if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
+ if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
+ if (makeExclusive) {
+ if ( ! allowHostSharing && exclusiveAllocation && ! fitsPerfectly(host)) continue;
} else {
if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
}
- if ( ! host.provisionedForApplicationId().map(params.application()::equals).orElse(true)) continue;
- if ( ! host.exclusiveToApplicationId().map(params.application()::equals).orElse(true)) continue;
- if ( ! host.exclusiveToClusterType().map(params.cluster().type()::equals).orElse(true)) continue;
+ if ( ! host.provisionedForApplicationId().map(application::equals).orElse(true)) continue;
+ if ( ! host.exclusiveToApplicationId().map(application::equals).orElse(true)) continue;
+ if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
if ( ! capacity.hasCapacity(host, requested.resources().get())) continue;
- if ( ! allNodes.childrenOf(host).owner(params.application()).cluster(params.cluster().id()).isEmpty()) continue;
+ if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
if ( ! requested.cloudAccount().isUnspecified() && ! requested.cloudAccount().equals(host.cloudAccount())) continue;
candidates.add(NodeCandidate.createNewChild(requested.resources().get(),
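The exclusivity branch above rejects hosts via continue; inverted into a positive predicate it reads more directly. A sketch assuming the fields introduced by this diff, with fitsPerfectly(host) being the helper already referenced in the loop:

private boolean passesExclusivityCheck(Node host) {
    if (makeExclusive) {
        // An exclusive allocation on a non-shared host must fit the host's resources perfectly.
        return allowHostSharing || !exclusiveAllocation || fitsPerfectly(host);
    }
    // A host already dedicated to some application must be filled perfectly.
    return host.exclusiveToApplicationId().isEmpty() || fitsPerfectly(host);
}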
@@ -154,8 +160,8 @@ public class NodePrioritizer {
.filter(node -> node.type() == requested.type())
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
- .filter(node -> node.allocation().get().owner().equals(params.application()))
- .filter(node -> node.allocation().get().membership().cluster().id().equals(params.cluster().id()))
+ .filter(node -> node.allocation().get().owner().equals(application))
+ .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
.filter(node -> node.state() == Node.State.active || canStillAllocate(node))
.map(node -> candidateFrom(node, false))
.forEach(candidates::add);
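The two rewritten filters select nodes already allocated to this application and cluster. Expressed as a single predicate (a sketch; node.allocation() is present because the stream filters on it first):

private boolean belongsToThisCluster(Node node) {
    Allocation allocation = node.allocation().get(); // present: filtered above
    return allocation.owner().equals(application)
           && allocation.membership().cluster().id().equals(clusterSpec.id());
}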
@@ -185,7 +191,7 @@ public class NodePrioritizer {
parent.exclusiveToApplicationId().isEmpty()
&& requested.canResize(node.resources(),
capacity.unusedCapacityOf(parent),
- params.cluster().type(),
+ clusterSpec.type(),
topologyChange,
currentClusterSize));
} else {
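The last hunk swaps clusterSpec.type() into the resize check. Gathered into one expression for readability, a sketch of the decision with the names as they appear in this diff (no new logic):

// A child node is offered as resizable only when its parent host is not
// dedicated to an application and the requested spec permits resizing,
// given the parent's unused capacity and the cluster's current shape.
boolean resizable = parent.exclusiveToApplicationId().isEmpty()
                    && requested.canResize(node.resources(),
                                           capacity.unusedCapacityOf(parent),
                                           clusterSpec.type(),
                                           topologyChange,
                                           currentClusterSize);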