author     Jon Bratseth <bratseth@gmail.com>   2020-05-19 20:52:00 +0200
committer  Jon Bratseth <bratseth@gmail.com>   2020-05-19 20:52:00 +0200
commit     bebf9f618d5ca4f9798b067e05cf6d33c1faf7ca (patch)
tree       0167292c88d697c9220fbd7504c1b12245c6491b /node-repository
parent     7832bca16b1568a0b5481ac472b18f87f92dc123 (diff)
Nonfunctional cleanup
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java                        12
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java       10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java            18
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java          32
4 files changed, 32 insertions, 40 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index bb06db3e78b..490fed681f9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -792,13 +792,13 @@ public class NodeRepository extends AbstractComponent {
public boolean canAllocateTenantNodeTo(Node host) {
if ( ! host.type().canRun(NodeType.tenant)) return false;
+ if (host.status().wantToRetire()) return false;
+ if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
- // Do not allocate to hosts we want to retire or are currently retiring
- if (host.status().wantToRetire() || host.allocation().map(alloc -> alloc.membership().retired()).orElse(false))
- return false;
-
- if ( ! zone.getCloud().dynamicProvisioning()) return host.state() == State.active;
- else return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
+ if ( zone.getCloud().dynamicProvisioning())
+ return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
+ else
+ return host.state() == State.active;
}
/** Returns the time keeper of this system */
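The net effect of the hunk above is that canAllocateTenantNodeTo now reads as a chain of guard clauses followed by a single state check. A minimal stand-alone sketch of that shape, using simplified stand-in types rather than the real Node, Zone and State classes:

    import java.util.EnumSet;
    import java.util.Optional;

    // Simplified stand-ins; names and fields are illustrative, not Vespa's real API.
    public class GuardClauseSketch {
        enum State { active, ready, provisioned, dirty }
        record Host(boolean canRunTenant, boolean wantToRetire, Optional<Boolean> retired, State state) {}

        static boolean canAllocateTenantNodeTo(Host host, boolean dynamicProvisioning) {
            if ( ! host.canRunTenant()) return false;
            if (host.wantToRetire()) return false;
            if (host.retired().orElse(false)) return false;

            if (dynamicProvisioning)
                return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
            else
                return host.state() == State.active;
        }
    }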
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
index 672be25c5be..b508198db3a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
@@ -11,8 +11,8 @@ import java.util.Objects;
/**
* Capacity calculation for docker hosts.
* <p>
- * The calculations is based on an immutable copy of nodes that represents
- * all capacities in the system - i.e. all nodes in the node repo give or take.
+ * The calculations are based on an immutable copy of nodes that represents
+ * all capacities in the system - i.e. all nodes in the node repo.
*
* @author smorgrav
*/
@@ -30,7 +30,7 @@ public class DockerHostCapacity {
int result = compare(freeCapacityOf(hostB, true), freeCapacityOf(hostA, true));
if (result != 0) return result;
- // If resources are equal we want to assign to the one with the most IPaddresses free
+ // If resources are equal we want to assign to the one with the most IP addresses free
return freeIPs(hostB) - freeIPs(hostA);
}
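For reference, the ordering this comparator implements (most free capacity first, ties broken by the most free IP addresses) can be expressed with plain Java comparators. The types below are simplified stand-ins, not the real DockerHostCapacity API:

    import java.util.Comparator;
    import java.util.List;

    public class HostOrderingSketch {
        // Illustrative host with a single free-capacity score instead of full NodeResources.
        record Host(String name, double freeCapacity, int freeIps) {}

        public static void main(String[] args) {
            Comparator<Host> byAllocationPreference =
                    Comparator.comparingDouble(Host::freeCapacity)   // most free capacity first ...
                              .thenComparingInt(Host::freeIps)       // ... then most free IP addresses
                              .reversed();
            List<Host> hosts = List.of(new Host("host1", 8, 2), new Host("host2", 8, 5));
            System.out.println(hosts.stream().sorted(byAllocationPreference).toList()); // host2 before host1
        }
    }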
@@ -65,9 +65,9 @@ public class DockerHostCapacity {
NodeResources freeCapacityOf(Node host, boolean excludeInactive) {
// Only hosts have free capacity
- if (!host.type().canRun(NodeType.tenant)) return new NodeResources(0, 0, 0, 0);
- NodeResources hostResources = hostResourcesCalculator.advertisedResourcesOf(host.flavor());
+ if ( ! host.type().canRun(NodeType.tenant)) return new NodeResources(0, 0, 0, 0);
+ NodeResources hostResources = hostResourcesCalculator.advertisedResourcesOf(host.flavor());
return allNodes.childrenOf(host).asList().stream()
.filter(node -> !(excludeInactive && isInactiveOrRetired(node)))
.map(node -> node.flavor().resources().justNumbers())
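The hunk above cuts freeCapacityOf off mid-stream. The general shape of such a computation, subtracting the children's resources from the host's advertised resources while optionally ignoring inactive or retired children, can be sketched as follows; this is a sketch of the idea with simplified records, not the exact continuation of the method:

    import java.util.List;

    public class FreeCapacitySketch {
        // Simplified resource record; the real code uses NodeResources with more dimensions.
        record Resources(double vcpu, double memoryGb, double diskGb) {
            Resources subtract(Resources other) {
                return new Resources(vcpu - other.vcpu, memoryGb - other.memoryGb, diskGb - other.diskGb);
            }
        }
        record Child(Resources resources, boolean inactiveOrRetired) {}

        static Resources freeCapacityOf(Resources advertised, List<Child> children, boolean excludeInactive) {
            return children.stream()
                           .filter(child -> !(excludeInactive && child.inactiveOrRetired()))
                           .map(Child::resources)
                           .reduce(advertised, Resources::subtract); // start from the host's advertised resources
        }
    }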
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 30f9001093d..980cd628f3c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -67,24 +67,22 @@ public class GroupPreparer {
try (Mutex allocationLock = nodeRepository.lockUnallocated()) {
// Create a prioritized set of nodes
- LockedNodeList nodeList = nodeRepository.list(allocationLock);
- NodePrioritizer prioritizer = new NodePrioritizer(nodeList,
+ LockedNodeList allNodes = nodeRepository.list(allocationLock);
+ NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requestedNodes,
+ highestIndex, nodeRepository);
+
+ NodePrioritizer prioritizer = new NodePrioritizer(allNodes,
application,
cluster,
requestedNodes,
spareCount,
wantedGroups,
- nodeRepository.nameResolver(),
- nodeRepository.resourcesCalculator(),
- allocateFully);
-
+ allocateFully,
+ nodeRepository);
prioritizer.addApplicationNodes();
prioritizer.addSurplusNodes(surplusActiveNodes);
prioritizer.addReadyNodes();
- prioritizer.addNewDockerNodes(nodeRepository::canAllocateTenantNodeTo);
- // Allocate from the prioritized list
- NodeAllocation allocation = new NodeAllocation(nodeList, application, cluster, requestedNodes,
- highestIndex, nodeRepository);
+ prioritizer.addNewDockerNodes();
allocation.offer(prioritizer.prioritize());
if (dynamicProvisioningEnabled) {
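The cleanup in this file (and in NodePrioritizer below) follows one pattern: instead of handing the prioritizer each collaborator separately (NameResolver, HostResourcesCalculator, a canAllocateTenantNodeTo predicate), it is handed the NodeRepository and asks it for what it needs. A tiny illustration of that dependency change with hypothetical types, not Vespa's real classes:

    public class DependencySketch {
        interface NameResolver {}
        interface ResourcesCalculator {}

        static class Repository {
            NameResolver nameResolver() { return new NameResolver() {}; }
            ResourcesCalculator resourcesCalculator() { return new ResourcesCalculator() {}; }
            boolean canAllocateTenantNodeTo(String host) { return true; }
        }

        // Before: Prioritizer(NameResolver resolver, ResourcesCalculator calculator, Predicate<String> canAllocate)
        // After: the repository is passed in whole, and collaborators are looked up where they are used.
        static class Prioritizer {
            private final Repository repository;
            Prioritizer(Repository repository) { this.repository = repository; }

            boolean canUseAsHost(String hostname) { return repository.canAllocateTenantNodeTo(hostname); }
        }
    }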
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index a7d83bbfad9..d64188976ff 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -11,6 +11,7 @@ import java.util.logging.Level;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
@@ -42,7 +43,7 @@ public class NodePrioritizer {
private final NodeSpec requestedNodes;
private final ApplicationId application;
private final ClusterSpec clusterSpec;
- private final NameResolver nameResolver;
+ private final NodeRepository nodeRepository;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final boolean isTopologyChange;
@@ -52,16 +53,15 @@ public class NodePrioritizer {
private final Set<Node> spareHosts;
NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
- int spares, int wantedGroups, NameResolver nameResolver, HostResourcesCalculator hostResourcesCalculator,
- boolean allocateFully) {
+ int spares, int wantedGroups, boolean allocateFully, NodeRepository nodeRepository) {
this.allNodes = allNodes;
- this.capacity = new DockerHostCapacity(allNodes, hostResourcesCalculator);
+ this.capacity = new DockerHostCapacity(allNodes, nodeRepository.resourcesCalculator());
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.application = application;
- this.nameResolver = nameResolver;
this.spareHosts = findSpareHosts(allNodes, capacity, spares);
this.allocateFully = allocateFully;
+ this.nodeRepository = nodeRepository;
NodeList nodesInCluster = allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
@@ -78,9 +78,8 @@ public class NodePrioritizer {
.filter(clusterSpec.group()::equals)
.count();
- this.isAllocatingForReplacement = isReplacement(
- nodesInCluster.size(),
- nodesInCluster.state(Node.State.failed).size());
+ this.isAllocatingForReplacement = isReplacement(nodesInCluster.size(),
+ nodesInCluster.state(Node.State.failed).size());
this.isDocker = resources(requestedNodes) != null;
}
@@ -119,11 +118,11 @@ public class NodePrioritizer {
}
/** Add a node on each docker host with enough capacity for the requested flavor */
- void addNewDockerNodes(Predicate<Node> canAllocateTenantNodeTo) {
+ void addNewDockerNodes() {
if ( ! isDocker) return;
LockedNodeList candidates = allNodes
- .filter(node -> node.type() != NodeType.host || canAllocateTenantNodeTo.test(node))
+ .filter(node -> node.type() != NodeType.host || nodeRepository.canAllocateTenantNodeTo(node))
.filter(node -> node.reservedTo().isEmpty() || node.reservedTo().get().equals(application.tenant()));
if (allocateFully) {
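Read on its own, the candidate filter above keeps nodes that either are not hosts or are hosts allowed to take tenant nodes, and that are either unreserved or reserved to this application's tenant. A self-contained sketch of that filtering with simplified types:

    import java.util.List;
    import java.util.Optional;

    public class CandidateFilterSketch {
        // Simplified stand-in for a node; the real filter works on Vespa's Node and LockedNodeList.
        record Node(boolean isHost, boolean canTakeTenantNodes, Optional<String> reservedTo) {}

        static List<Node> dockerHostCandidates(List<Node> allNodes, String tenant) {
            return allNodes.stream()
                           .filter(node -> ! node.isHost() || node.canTakeTenantNodes())
                           .filter(node -> node.reservedTo().isEmpty() || node.reservedTo().get().equals(tenant))
                           .toList();
        }
    }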
@@ -142,25 +141,20 @@ public class NodePrioritizer {
}
private void addNewDockerNodesOn(LockedNodeList candidates) {
- NodeResources wantedResources = resources(requestedNodes);
-
for (Node host : candidates) {
- boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(host, wantedResources);
- boolean conflictingCluster = allNodes.childrenOf(host).owner(application).asList().stream()
- .anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id()));
-
- if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue;
+ if ( ! capacity.hasCapacity(host, resources(requestedNodes))) continue;
+ if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
- log.log(Level.FINE, "Trying to add new Docker node on " + host);
Optional<IP.Allocation> allocation;
try {
- allocation = host.ipConfig().pool().findAllocation(allNodes, nameResolver);
+ allocation = host.ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver());
if (allocation.isEmpty()) continue; // No free addresses in this pool
} catch (Exception e) {
log.log(Level.WARNING, "Failed allocating IP address on " + host.hostname(), e);
continue;
}
+ log.log(Level.FINE, "Creating new docker node on " + host);
Node newNode = Node.createDockerNode(allocation.get().addresses(),
allocation.get().hostname(),
host.hostname(),
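After the cleanup, the loop in addNewDockerNodesOn is a sequence of continue-guards: skip hosts without capacity for the requested resources, skip hosts that already run a node of this cluster, skip hosts whose IP pool has no free address, and only then create the new docker node. A simplified, self-contained sketch of that control flow; the findFreeAddress helper here is hypothetical, standing in for the IP pool lookup:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Optional;

    public class NewDockerNodesSketch {
        // Simplified host stand-in; the real loop inspects capacity, cluster ownership and the host's IP pool.
        record Host(String hostname, boolean hasCapacity, boolean alreadyRunsThisCluster) {}

        static List<String> addNewDockerNodesOn(List<Host> candidates) {
            List<String> newNodes = new ArrayList<>();
            for (Host host : candidates) {
                if ( ! host.hasCapacity()) continue;               // not enough free resources on this host
                if (host.alreadyRunsThisCluster()) continue;       // this cluster already has a node here
                Optional<String> address = findFreeAddress(host);  // hypothetical stand-in for the IP pool lookup
                if (address.isEmpty()) continue;                   // no free addresses in this pool
                newNodes.add(host.hostname() + " -> " + address.get());
            }
            return newNodes;
        }

        static Optional<String> findFreeAddress(Host host) { return Optional.of("10.0.0.1"); } // hypothetical
    }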