summaryrefslogtreecommitdiffstats
path: root/node-repository
diff options
context:
space:
mode:
authortoby <smorgrav@yahoo-inc.com>2017-05-22 13:35:52 +0200
committertoby <smorgrav@yahoo-inc.com>2017-05-24 14:37:29 +0200
commite6e222824a554ce92c0a7f64de346deefe531090 (patch)
tree4659cd48191dc06723de4b9754c48b1decd1bdbb /node-repository
parent0aec20ed8585ebe0399eb1f7173ebbd1419616d9 (diff)
Add dynamic docker allocation to hosted provisioning
Diffstat (limited to 'node-repository')
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java9
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerAllocator.java152
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerCapacityConstraints.java101
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java45
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java18
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java9
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java154
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSnapshot.java18
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationVisualizer.java143
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java21
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java1
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java139
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java37
18 files changed, 822 insertions, 45 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
index 09110c99fef..86a21806d26 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java
@@ -51,10 +51,10 @@ public final class Node {
Optional.empty(), History.empty(), type);
}
- /** Creates a node in the initial state (provisioned) */
+ /** Creates a node in the initial state (reserved for docker containers, provisioned otherwise) */
public static Node create(String openStackId, Set<String> ipAddresses, Set<String> additionalIpAddresses, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) {
return new Node(openStackId, ipAddresses, additionalIpAddresses, hostname, parentHostname, flavor, Status.initial(), State.provisioned,
- Optional.empty(), History.empty(), type);
+ Optional.empty(), History.empty(), type);
}
/** Do not use. Construct nodes by calling {@link NodeRepository#createNode} */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index 0b4d290a340..c3879bee10c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -241,12 +241,17 @@ public class NodeRepository extends AbstractComponent {
// ----------------- Node lifecycle -----------------------------------------------------------
/** Creates a new node object, without adding it to the node repo. If no IP address is given, it will be resolved */
- public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
+ public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Set<String> additionalIpAddresses, Optional<String> parentHostname,
Flavor flavor, NodeType type) {
if (ipAddresses.isEmpty()) {
ipAddresses = nameResolver.getAllByNameOrThrow(hostname);
}
- return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), Collections.emptySet(), hostname, parentHostname, flavor, type);
+ return Node.create(openStackId, ImmutableSet.copyOf(ipAddresses), additionalIpAddresses, hostname, parentHostname, flavor, type);
+ }
+
+ public Node createNode(String openStackId, String hostname, Set<String> ipAddresses, Optional<String> parentHostname,
+ Flavor flavor, NodeType type) {
+ return createNode(openStackId, hostname, ipAddresses, Collections.emptySet(), parentHostname, flavor, type);
}
public Node createNode(String openStackId, String hostname, Optional<String> parentHostname,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerAllocator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerAllocator.java
new file mode 100644
index 00000000000..a027b40ebad
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerAllocator.java
@@ -0,0 +1,152 @@
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeFlavors;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.vespa.hosted.provision.Node;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.stream.Collectors;
+
+/**
+ * Set of methods to allocate new docker nodes
+ * <p>
+ * The nodes are not added to the repository here - this is done by the caller.
+ *
+ * @author smorgrav
+ */
+public class DockerAllocator {
+
+ /**
+ * The docker container allocation algorithm
+ */
+ static List<Node> allocateNewDockerNodes(NodeAllocation allocation,
+ NodeSpec requestedNodes,
+ List<Node> allNodes,
+ List<Node> nodesBefore,
+ NodeFlavors flavors,
+ Flavor flavor,
+ int nofSpares,
+ BiConsumer<List<Node>, String> recorder) {
+ // Try allocate new nodes with all constraints in place
+ List<Node> nodesWithHeadroomAndSpares = DockerCapacityConstraints.addHeadroomAndSpareNodes(allNodes, flavors, nofSpares);
+ recorder.accept(nodesWithHeadroomAndSpares, "Headroom and spares");
+ List<Node> accepted = DockerAllocator.allocate(allocation, flavor, nodesWithHeadroomAndSpares);
+
+ List<Node> allNodesIncludingAccepted = new ArrayList<>(allNodes);
+ allNodesIncludingAccepted.addAll(accepted);
+ recorder.accept(allNodesIncludingAccepted, "1st dynamic docker allocation - fullfilled: " + allocation.fullfilled());
+
+ // If still not fully allocated - try to allocate the remaining nodes with only hard constraints
+ if (!allocation.fullfilled()) {
+ List<Node> nodesWithSpares = DockerCapacityConstraints.addSpareNodes(allNodesIncludingAccepted, nofSpares);
+ recorder.accept(nodesWithSpares, "Spares only");
+
+ List<Node> acceptedWithHard = DockerAllocator.allocate(allocation, flavor, nodesWithSpares);
+ accepted.addAll(acceptedWithHard);
+ allNodesIncludingAccepted.addAll(acceptedWithHard);
+ recorder.accept(allNodesIncludingAccepted, "2nd dynamic docker allocation - fullfilled: " + allocation.fullfilled());
+
+ // If still not fully allocated and this is a replacement - drop all constraints
+ boolean isReplacement = DockerAllocator.isReplacement(requestedNodes, nodesBefore, allNodes);
+ if (!allocation.fullfilled() && isReplacement) {
+ List<Node> finalTry = DockerAllocator.allocate(allocation, flavor, allNodesIncludingAccepted);
+ accepted.addAll(finalTry);
+ allNodesIncludingAccepted.addAll(finalTry);
+ recorder.accept(allNodesIncludingAccepted, "Final dynamic docker alloction - fullfilled: " + allocation.fullfilled());
+ }
+ }
+
+ return accepted;
+ }
+
+ /**
+ * Offer the node allocation a prioritized set of new nodes according to capacity constraints
+ *
+ * @param allocation The allocation we want to fulfill
+ * @param flavor Since we create nodes here we need to know the exact flavor
+ * @param nodes The nodes relevant for the allocation (all nodes from node repo give or take)
+ * @return Nodes accepted by the node allocation - these nodes do not exist in the node repo yet.
+ * @see DockerHostCapacity
+ */
+ public static List<Node> allocate(NodeAllocation allocation, Flavor flavor, List<Node> nodes) {
+
+ DockerHostCapacity dockerCapacity = new DockerHostCapacity(nodes);
+
+ // Get all active docker hosts with enough capacity and ip slots - sorted on free capacity
+ List<Node> dockerHosts = nodes.stream()
+ .filter(node -> node.type().equals(NodeType.host))
+ .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
+ .filter(dockerHost -> dockerCapacity.hasCapacity(dockerHost, flavor))
+ .sorted(dockerCapacity::compare)
+ .collect(Collectors.toList());
+
+ // Create one node per docker host that we can offer to the allocation
+ List<Node> offers = new LinkedList<>();
+ for (Node parentHost : dockerHosts) {
+ Set<String> ipAddresses = DockerHostCapacity.findFreeIps(parentHost, nodes);
+ if (ipAddresses.isEmpty()) continue;
+ String ipAddress = ipAddresses.stream().findFirst().get();
+ String hostname = lookupHostname(ipAddress);
+ if (hostname == null) continue;
+ Node node = Node.createDockerNode("fake-" + hostname, Collections.singleton(ipAddress),
+ Collections.emptySet(), hostname, Optional.of(parentHost.hostname()), flavor, NodeType.tenant);
+ offers.add(node);
+ }
+
+ return allocation.offer(offers, false);
+ }
+
+ /**
+ * From ipAddress - get hostname
+ *
+ * @return hostname or null if not able to do the lookup
+ */
+ private static String lookupHostname(String ipAddress) {
+ try {
+ return InetAddress.getByName(ipAddress).getHostName();
+ } catch (UnknownHostException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ /**
+ * This is a heuristic way to find if new nodes are to replace failing nodes
+ * or are to expand the cluster.
+ *
+ * The current implementation does not account for failed nodes that are not in the application
+ * anymore. The consequence is that we will ignore the spare capacity constraints too often - in
+ * particular when the number of failed nodes (not in the application anymore)
+ * for the cluster equal to the upscaling of the cluster.
+ *
+ * The deployment algorithm will still try to allocate the capacity outside the spare capacity if possible.
+ *
+ * TODO propagate this information either through the node object or from the configserver deployer
+ */
+ private static boolean isReplacement(NodeSpec nodeSpec, List<Node> nodesBefore, List<Node> nodesReserved) {
+ int wantedCount = 0;
+ if (nodeSpec instanceof NodeSpec.CountNodeSpec) {
+ NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) nodeSpec;
+ wantedCount = countSpec.getCount();
+ }
+
+ List<Node> failedNodes = new ArrayList<>();
+ for (Node node : nodesBefore) {
+ if (node.state() == Node.State.failed) {
+ failedNodes.add(node);
+ }
+ }
+
+ if (failedNodes.size() == 0) return false;
+ return (wantedCount <= nodesReserved.size() + failedNodes.size());
+ }
+}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerCapacityConstraints.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerCapacityConstraints.java
new file mode 100644
index 00000000000..d7cd8bb1e19
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerCapacityConstraints.java
@@ -0,0 +1,101 @@
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.cloud.config.ApplicationIdConfig;
+import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeFlavors;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.lang.MutableInteger;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+
+import java.time.Clock;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Enforce allocation constraints for docker by manipulating the NodeList we operate on.
+ *
+ * The constraints comes in two flavors: headroom and spare.
+ *
+ * <b>Headroom</b> is the number of docker nodes (of various flavors) we want to reserve for new applications.
+ * This is e.g. to make sure we don't smear out small docker flavors on all hosts
+ * starving allocations for bigger flavors.
+ *
+ * <b>Spares</b> is to make sure we have replacement for applications if one or more hosts go down.
+ * It is more important to safeguard already onboarded applications than accept new applications.
+ *
+ * For now - we will use spare also as a means to reserve capacity for future applications that
+ * have had a separate AI process.
+ *
+ * When using spares - we will rely on maintenance jobs to reclaim the spare capacity whenever the
+ * capacity has been recovered (e.g. when the dead docker host is replaced)
+ *
+ * @author smorgrav
+ */
+public class DockerCapacityConstraints {
+
+ /** This is a static utility class */
+ private DockerCapacityConstraints() {}
+
+ /**
+ * Spare nodes in first iteration is a node that fills up the two
+ * largest hosts (in terms of free capacity)
+ */
+ public static List<Node> addSpareNodes(List<Node> nodes, int spares) {
+ DockerHostCapacity capacity = new DockerHostCapacity(nodes);
+ List<Flavor> spareFlavors = nodes.stream()
+ .filter(node -> node.type().equals(NodeType.host))
+ .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
+ .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
+ .sorted(capacity::compare)
+ .limit(spares)
+ .map(dockerHost -> freeCapacityAsFlavor(dockerHost, nodes))
+ .collect(Collectors.toList());
+
+ return addNodes(nodes, spareFlavors, "spare");
+ }
+
+ public static List<Node> addHeadroomAndSpareNodes(List<Node> nodes, NodeFlavors flavors, int nofSpares) {
+ List<Node> sparesAndHeadroom = addSpareNodes(nodes, nofSpares);
+ return addNodes(sparesAndHeadroom, flavors.getFlavors(), "headroom");
+ }
+
+ private static List<Node> addNodes(List<Node> nodes, List<Flavor> flavors, String id) {
+ List<Node> headroom = new ArrayList<>(nodes);
+ for (Flavor flavor : flavors) {
+ int headroomCount = flavor.getIdealHeadroom();
+ if (headroomCount > 0) {
+ NodeAllocation allocation = createHeadRoomAllocation(flavor, headroomCount, id);
+ List<Node> acceptedNodes = DockerAllocator.allocate(allocation, flavor, headroom);
+ headroom.addAll(acceptedNodes);
+ }
+ }
+ return headroom;
+ }
+
+ private static Flavor freeCapacityAsFlavor(Node host, List<Node> nodes) {
+ ResourceCapacity hostCapacity = new ResourceCapacity(host);
+ for (Node container : new NodeList(nodes).childNodes(host).asList()) {
+ hostCapacity.subtract(container);
+ }
+ return hostCapacity.asFlavor();
+ }
+
+ private static NodeAllocation createHeadRoomAllocation(Flavor flavor, int count, String id) {
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container,
+ new ClusterSpec.Id(id), new Version());
+ ApplicationId appId = new ApplicationId(
+ new ApplicationIdConfig(
+ new ApplicationIdConfig.Builder()
+ .tenant(id)
+ .application(id + "-" + flavor.name())
+ .instance("temporarynode")));
+
+ return new NodeAllocation(appId, cluster, new NodeSpec.CountNodeSpec(count, flavor),
+ new MutableInteger(0), Clock.systemUTC());
+ }
+} \ No newline at end of file
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
index 9ab5d54bb54..eeb3149ee53 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
@@ -19,6 +19,9 @@ import java.util.Set;
*/
public class DockerHostCapacity {
+ /** Tenant name for headroom nodes - only used internally */
+ public static final String HEADROOM_TENANT = "-__!@#$$%THISisHEADroom";
+
/**
* An immutable list of nodes
*/
@@ -104,8 +107,7 @@ public class DockerHostCapacity {
ResourceCapacity hostCapacity = new ResourceCapacity(dockerHost);
for (Node container : allNodes.childNodes(dockerHost).asList()) {
- // Until we have migrated we might have docker containers unallocated - TODO check off if headroom tenant is safe
- if (includeHeadroom || !(container.allocation().isPresent() && container.allocation().get().owner().tenant().value().equals("headroom"))) {
+ if (includeHeadroom || !(container.allocation().isPresent() && container.allocation().get().owner().tenant().value().equals(HEADROOM_TENANT))) {
hostCapacity.subtract(container);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 1c6bf8881c6..474b79af957 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -4,6 +4,8 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.google.common.collect.ComparisonChain;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.lang.MutableInteger;
import com.yahoo.transaction.Mutex;
@@ -13,6 +15,8 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.time.Clock;
import java.util.Collections;
import java.util.List;
+import java.util.Optional;
+import java.util.function.BiConsumer;
/**
* Performs preparation of node activation changes for a single host group in an application.
@@ -41,14 +45,19 @@ class GroupPreparer {
* This method will remove from this list if it finds it needs additional nodes
* @param highestIndex the current highest node index among all active nodes in this cluster.
* This method will increase this number when it allocates new nodes to the cluster.
+ * @param nofSpares The number of spare docker hosts we want when dynamically allocating docker containers
+ * @param debugRecorder Debug facility to step through the allocation process after the fact
* @return the list of nodes this cluster group will have allocated if activated
*/
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
- List<Node> surplusActiveNodes, MutableInteger highestIndex) {
+ public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
+ List<Node> surplusActiveNodes, MutableInteger highestIndex, int nofSpares, BiConsumer<List<Node>, String> debugRecorder) {
try (Mutex lock = nodeRepository.lock(application)) {
+
+ // A snapshot of nodes before we start the process - used to determine if this is a replacement
+ List<Node> nodesBefore = nodeRepository.getNodes(application, Node.State.values());
NodeAllocation allocation = new NodeAllocation(application, cluster, requestedNodes, highestIndex, clock);
// Use active nodes
@@ -71,9 +80,31 @@ class GroupPreparer {
// Use new, ready nodes. Lock ready pool to ensure that nodes are not grabbed by others.
try (Mutex readyLock = nodeRepository.lockUnallocated()) {
+
+ // Check if we have ready nodes that we can allocate
List<Node> readyNodes = nodeRepository.getNodes(requestedNodes.type(), Node.State.ready);
accepted = allocation.offer(prioritizeNodes(readyNodes, requestedNodes), !canChangeGroup);
allocation.update(nodeRepository.reserve(accepted));
+
+ if(nodeRepository.dynamicAllocationEnabled()) {
+ // Check if we have available capacity on docker hosts that we can allocate
+ if (!allocation.fullfilled()) {
+ // The new dynamic allocation method
+ Optional<Flavor> flavor = getFlavor(requestedNodes);
+ if (flavor.isPresent() && flavor.get().getType().equals(Flavor.Type.DOCKER_CONTAINER)) {
+ List<Node> allNodes = nodeRepository.getNodes(Node.State.values());
+ NodeFlavors flavors = nodeRepository.getAvailableFlavors();
+ accepted = DockerAllocator.allocateNewDockerNodes(allocation, requestedNodes, allNodes,
+ nodesBefore, flavors, flavor.get(), nofSpares, debugRecorder);
+
+ // Add nodes to the node repository
+ if (allocation.fullfilled()) {
+ List<Node> nodesAddedToNodeRepo = nodeRepository.addDockerNodes(accepted);
+ allocation.update(nodesAddedToNodeRepo);
+ }
+ }
+ }
+ }
}
if (allocation.fullfilled())
@@ -83,7 +114,15 @@ class GroupPreparer {
outOfCapacityDetails(allocation));
}
}
-
+
+ private Optional<Flavor> getFlavor(NodeSpec nodeSpec) {
+ if (nodeSpec instanceof NodeSpec.CountNodeSpec) {
+ NodeSpec.CountNodeSpec countSpec = (NodeSpec.CountNodeSpec) nodeSpec;
+ return Optional.of(countSpec.getFlavor());
+ }
+ return Optional.empty();
+ }
+
private String outOfCapacityDetails(NodeAllocation allocation) {
if (allocation.wouldBeFulfilledWithClashingParentHost()) {
return ": Not enough nodes available on separate physical hosts.";
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 1feb12ba9ab..bac99b2bab4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -5,6 +5,7 @@ import com.google.inject.Inject;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostFilter;
import com.yahoo.config.provision.HostSpec;
@@ -17,7 +18,6 @@ import com.yahoo.log.LogLevel;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.hosted.provision.node.filter.ApplicationFilter;
import com.yahoo.vespa.hosted.provision.node.filter.NodeHostFilter;
@@ -27,6 +27,7 @@ import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
+import java.util.function.BiConsumer;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -39,24 +40,30 @@ import java.util.logging.Logger;
public class NodeRepositoryProvisioner implements Provisioner {
private static Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName());
+ private static final int SPARE_CAPACITY_PROD = 2;
+ private static final int SPARE_CAPACITY_NONPROD = 0;
private final NodeRepository nodeRepository;
private final CapacityPolicies capacityPolicies;
private final Zone zone;
private final Preparer preparer;
private final Activator activator;
+ private final BiConsumer<List<Node>, String> debugRecorder;
@Inject
public NodeRepositoryProvisioner(NodeRepository nodeRepository, NodeFlavors flavors, Zone zone) {
- this(nodeRepository, flavors, zone, Clock.systemUTC());
+ this(nodeRepository, flavors, zone, Clock.systemUTC(), (x,y) -> {});
}
- public NodeRepositoryProvisioner(NodeRepository nodeRepository, NodeFlavors flavors, Zone zone, Clock clock) {
+ public NodeRepositoryProvisioner(NodeRepository nodeRepository, NodeFlavors flavors, Zone zone, Clock clock, BiConsumer<List<Node>, String> debugRecorder) {
this.nodeRepository = nodeRepository;
this.capacityPolicies = new CapacityPolicies(zone, flavors);
this.zone = zone;
- this.preparer = new Preparer(nodeRepository, clock);
+ this.preparer = new Preparer(nodeRepository, clock, zone.environment().equals(Environment.prod)
+ ? SPARE_CAPACITY_PROD
+ : SPARE_CAPACITY_NONPROD);
this.activator = new Activator(nodeRepository, clock);
+ this.debugRecorder = debugRecorder;
}
/**
@@ -92,7 +99,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
effectiveGroups = 1; // type request with multiple groups is not supported
}
- return asSortedHosts(preparer.prepare(application, cluster, requestedNodes, effectiveGroups));
+ return asSortedHosts(preparer.prepare(application, cluster, requestedNodes, effectiveGroups, debugRecorder));
}
@Override
@@ -121,5 +128,4 @@ public class NodeRepositoryProvisioner implements Provisioner {
}
return hosts;
}
-
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
index cb6786270e7..ab4012b7d3e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
@@ -58,6 +58,12 @@ public interface NodeSpec {
this.flavor = flavor;
}
+ public Flavor getFlavor() {
+ return flavor;
+ }
+
+ public int getCount() { return count; }
+
@Override
public NodeType type() { return NodeType.tenant; }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 57db9707049..dfd7be7804a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -14,6 +14,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
import java.util.Optional;
+import java.util.function.BiConsumer;
/**
* Performs preparation of node activation changes for an application.
@@ -25,10 +26,12 @@ class Preparer {
private final NodeRepository nodeRepository;
private final Clock clock;
private final GroupPreparer groupPreparer;
+ private final int nofSpares;
- public Preparer(NodeRepository nodeRepository, Clock clock) {
+ public Preparer(NodeRepository nodeRepository, Clock clock, int nofSpares) {
this.nodeRepository = nodeRepository;
this.clock = clock;
+ this.nofSpares = nofSpares;
groupPreparer = new GroupPreparer(nodeRepository, clock);
}
@@ -40,7 +43,7 @@ class Preparer {
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
+ public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups, BiConsumer<List<Node>, String> debugRecorder) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
MutableInteger highestIndex = new MutableInteger(findHighestIndex(application, cluster));
@@ -48,7 +51,7 @@ class Preparer {
for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) {
ClusterSpec clusterGroup = cluster.changeGroup(Optional.of(ClusterSpec.Group.from(groupIndex)));
List<Node> accepted = groupPreparer.prepare(application, clusterGroup,
- requestedNodes.fraction(wantedGroups), surplusNodes, highestIndex);
+ requestedNodes.fraction(wantedGroups), surplusNodes, highestIndex, nofSpares, debugRecorder);
replace(acceptedNodes, accepted);
}
moveToActiveGroup(surplusNodes, wantedGroups, cluster.group());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
index 6aa8520b1f5..ed4f45ece63 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
@@ -85,7 +85,7 @@ public class FailedExpirerTest {
NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", flavorName);
NodeRepository nodeRepository = new NodeRepository(nodeFlavors, curator, clock, Zone.defaultZone(),
new MockNameResolver().mockAnyLookup());
- NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, Zone.defaultZone(), clock);
+ NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, Zone.defaultZone(), clock, (x,y) -> {});
Flavor defaultFlavor = nodeFlavors.getFlavorOrThrow("default");
List<Node> hostNodes = new ArrayList<>(3);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
index f78dc031b0d..1b9371d44ac 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
@@ -40,7 +40,7 @@ public class ReservationExpirerTest {
NodeFlavors flavors = FlavorConfigBuilder.createDummies("default");
NodeRepository nodeRepository = new NodeRepository(flavors, curator, clock, Zone.defaultZone(),
new MockNameResolver().mockAnyLookup());
- NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, flavors, Zone.defaultZone(), clock);
+ NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, flavors, Zone.defaultZone(), clock, (x,y) -> {});
List<Node> nodes = new ArrayList<>(2);
nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java
new file mode 100644
index 00000000000..7a24e4eaeaf
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java
@@ -0,0 +1,154 @@
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterMembership;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeFlavors;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.lang.MutableInteger;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.node.Allocation;
+import com.yahoo.vespa.hosted.provision.node.Generation;
+import com.yahoo.vespa.hosted.provision.node.History;
+import com.yahoo.vespa.hosted.provision.node.Status;
+import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder;
+
+import javax.swing.JFrame;
+import java.time.Clock;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+
+
+/**
+ * Graphically run allocation procedure to ease manual comprehension.
+ *
+ * Extremely useful when trying to understand test cases and build corner cases.
+ */
+public class AllocationSimulator {
+
+ private AllocationVisualizer visualizer;
+ private NodeList nodes = new NodeList(new ArrayList<>());
+ private NodeFlavors flavors;
+
+ private AllocationSimulator(AllocationVisualizer visualizer) {
+ this.visualizer = visualizer;
+
+ //
+ // Setup flavors
+ //
+ FlavorConfigBuilder b = new FlavorConfigBuilder();
+ b.addFlavor("host-large", 8., 8., 8, Flavor.Type.BARE_METAL).idealHeadroom(1);
+ b.addFlavor("host-small", 5., 5., 5, Flavor.Type.BARE_METAL).idealHeadroom(2);
+ b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-2", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-3", 3, 3., 3, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-3-disk", 3, 3., 5, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-3-mem", 3, 5., 3, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-3-cpu", 5, 3., 3, Flavor.Type.DOCKER_CONTAINER);
+ flavors = new NodeFlavors(b.build());
+
+ //
+ // Initiate nodes in system
+ //
+ List<Node> initialNodes = new ArrayList<>();
+ initialNodes.add(host("host1", flavors.getFlavorOrThrow("host-large")));
+ initialNodes.add(host("host2", flavors.getFlavorOrThrow("host-large")));
+ initialNodes.add(host("host3", flavors.getFlavorOrThrow("host-large")));
+ initialNodes.add(host("host4", flavors.getFlavorOrThrow("host-large")));
+ initialNodes.add(host("host5", flavors.getFlavorOrThrow("host-large")));
+ initialNodes.add(host("host6", flavors.getFlavorOrThrow("host-large")));
+ initialNodes.add(host("host7", flavors.getFlavorOrThrow("host-small")));
+ initialNodes.add(host("host8", flavors.getFlavorOrThrow("host-small")));
+ initialNodes.add(host("host9", flavors.getFlavorOrThrow("host-small")));
+ initialNodes.add(host("host10", flavors.getFlavorOrThrow("host-small")));
+ initialNodes.add(node("node1", flavors.getFlavorOrThrow("d-2"), Optional.of("host1"), Optional.of("test")));
+ nodes = new NodeList(initialNodes);
+
+ visualizer.addStep(nodes.asList(), "Initial state", "");
+ }
+
+ /* ------------ Create node and flavor methods ----------------*/
+
+ private Node host(String hostname, Flavor flavor) {
+ return node(hostname, flavor, Optional.empty(), Optional.empty());
+ }
+
+ private Node node(String hostname, Flavor flavor, Optional<String> parent, Optional<String> tenant) {
+ return new Node("fake", Collections.singleton("127.0.0.1"),
+ parent.isPresent() ? Collections.emptySet() : getAdditionalIP(), hostname, parent, flavor, Status.initial(),
+ parent.isPresent() ? Node.State.ready : Node.State.active, allocation(tenant), History.empty(), parent.isPresent() ? NodeType.tenant : NodeType.host);
+ }
+
+ private Set<String> getAdditionalIP() {
+ Set<String> h = new HashSet<String>();
+ Collections.addAll(h, "::1", "::2", "::3", "::4", "::5", "::6", "::7", "::8");
+ return h;
+ }
+
+ private Optional<Allocation> allocation(Optional<String> tenant) {
+ if (tenant.isPresent()) {
+ Allocation allocation = new Allocation(app(tenant.get()), ClusterMembership.from("container/id1/3", new Version()), Generation.inital(), false);
+ return Optional.of(allocation);
+ }
+ return Optional.empty();
+ }
+
+ private ApplicationId app(String tenant) {
+ return new ApplicationId.Builder()
+ .tenant(tenant)
+ .applicationName("test")
+ .instanceName("default").build();
+ }
+
+ private ClusterSpec cluster() {
+ return ClusterSpec.from(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), ClusterSpec.Group.from(1), Version.fromString("6.41"));
+ }
+
+ /* ------------ Methods to add events to the system ----------------*/
+
+ public void addCluster(String task, int count, Flavor flavor, String id) {
+ NodeSpec.CountNodeSpec nodeSpec = new NodeSpec.CountNodeSpec(count, flavor);
+ NodeAllocation allocation = new NodeAllocation(app(id), cluster(), nodeSpec, new MutableInteger(0), Clock.systemUTC());
+
+ List<Node> accepted = DockerAllocator.allocateNewDockerNodes(allocation,
+ nodeSpec,
+ new ArrayList<>(nodes.asList()),
+ new ArrayList<>(nodes.asList()),
+ flavors,
+ flavor,
+ 2,
+ (nodes, message)-> visualizer.addStep(nodes, id, message));
+
+ accepted.addAll(nodes.asList());
+ nodes = new NodeList(accepted);
+ }
+
+
+ public static void main(String[] args) {
+
+ AllocationVisualizer visualisator = new AllocationVisualizer();
+
+ javax.swing.SwingUtilities.invokeLater(() -> {
+ JFrame frame = new JFrame("Allocation Simulator");
+ frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+ frame.setContentPane(visualisator);
+ frame.pack();
+ frame.setVisible(true);
+ });
+
+ AllocationSimulator simulator = new AllocationSimulator(visualisator);
+ simulator.addCluster("App1 : 3 * d-1 nodes", 3, simulator.flavors.getFlavorOrThrow("d-1"), "App1");
+ simulator.addCluster("App2 : 2 * d-2 nodes", 2, simulator.flavors.getFlavorOrThrow("d-2"), "App2");
+ simulator.addCluster("App3 : 3 * d-2 nodes", 3, simulator.flavors.getFlavorOrThrow("d-2"), "App3");
+ simulator.addCluster("App4 : 3 * d-3 nodes", 3, simulator.flavors.getFlavorOrThrow("d-3"), "App4");
+ simulator.addCluster("App5 : 3 * d-3 nodes", 3, simulator.flavors.getFlavorOrThrow("d-3"), "App5");
+ simulator.addCluster("App6 : 5 * d-2 nodes", 5, simulator.flavors.getFlavorOrThrow("d-2"), "App6");
+ }
+} \ No newline at end of file
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSnapshot.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSnapshot.java
new file mode 100644
index 00000000000..80e9670546d
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSnapshot.java
@@ -0,0 +1,18 @@
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.vespa.hosted.provision.NodeList;
+
+/**
+ * @author smorgrav
+ */
+public class AllocationSnapshot {
+ NodeList nodes;
+ String message;
+ String task;
+
+ AllocationSnapshot(NodeList nodes, String task, String message) {
+ this.nodes = nodes;
+ this.message = message;
+ this.task = task;
+ }
+}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationVisualizer.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationVisualizer.java
new file mode 100644
index 00000000000..ba9a7f2924f
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationVisualizer.java
@@ -0,0 +1,143 @@
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+
+import javax.swing.JButton;
+import javax.swing.JFrame;
+import javax.swing.JPanel;
+import java.awt.Color;
+import java.awt.Dimension;
+import java.awt.Font;
+import java.awt.Graphics;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * @author smorgrav
+ */
+public class AllocationVisualizer extends JPanel {
+ // Container box's width and height
+ private static final int BOX_WIDTH = 1024;
+ private static final int BOX_HEIGHT = 480;
+
+ // node properties
+ private int nodeWidth = BOX_WIDTH / 15;
+ private int nodeHeight = nodeWidth / 2;
+ private int nodeSpacing = nodeWidth / 3;
+
+ private final List<AllocationSnapshot> steps;
+ int step = 0;
+
+ public AllocationVisualizer() {
+ this(new ArrayList<>());
+ }
+
+ public AllocationVisualizer(List<AllocationSnapshot> steps) {
+ this.steps = steps;
+ this.setPreferredSize(new Dimension(BOX_WIDTH, BOX_HEIGHT));
+
+ JButton back = new JButton("Back");
+ back.addActionListener(e -> {
+ if (step > 0) step -= 1;
+ repaint();
+ });
+ JButton forward = new JButton("Forward");
+ forward.addActionListener(e -> {
+ if (step < steps.size() - 1) step += 1;
+ repaint();
+ });
+ this.add(back);
+ this.add(forward);
+ }
+
+
+ public void addStep(List<Node> nodes, String task, String message) {
+ steps.add(new AllocationSnapshot(new NodeList(nodes), task, message));
+ }
+
+ @Override
+ public void paintComponent(Graphics g) {
+ super.paintComponent(g);
+ System.out.println("PAINTING");
+ if (steps.size() == 0) return;
+
+ int nodeX = 40;
+ int nodeY = BOX_HEIGHT - 20; //Start at the bottom
+
+ // Draw the box
+ g.setColor(Color.WHITE);
+ g.fillRect(0, 0, BOX_WIDTH, BOX_HEIGHT);
+
+ // Find the number of docker hosts (to calculate the starting position and width of each)
+ // Draw the docker hosts - and color each container according to application
+ AllocationSnapshot simStep = steps.get(step);
+ NodeList hosts = simStep.nodes.nodeType(NodeType.host);
+ for (Node host : hosts.asList()) {
+
+ // Paint the host
+ paintNode(host, g, nodeX, nodeY, true);
+
+ // Paint containers
+ NodeList containers = simStep.nodes.childNodes(host);
+ for (Node container : containers.asList()) {
+ nodeY = paintNode(container, g, nodeX, nodeY, false);
+ }
+
+ // Next host
+ nodeX += nodeWidth + nodeSpacing;
+ nodeY = BOX_HEIGHT - 20;
+ }
+
+ // Display messages
+ g.setColor(Color.BLACK);
+ g.setFont(new Font("Courier New", Font.BOLD, 15));
+ g.drawString(simStep.task, 20, 30);
+ g.drawString(simStep.message, 20, 50);
+ }
+
+ private int paintNode(Node node, Graphics g, int x, int y, boolean isHost) {
+
+ if (isHost) {
+ g.setColor(Color.GRAY);
+ for (int i = 0; i < node.flavor().getMinMainMemoryAvailableGb(); i++) {
+ g.fillRect(x, y - nodeHeight, nodeWidth, nodeHeight);
+ y = y - (nodeHeight + 2);
+ }
+ } else {
+ g.setColor(Color.YELLOW);
+ int multi = (int) node.flavor().getMinMainMemoryAvailableGb();
+ int height = multi * nodeHeight + ((multi - 1) * 2);
+ g.fillRect(x, y - height, nodeWidth, height);
+
+ // Write tenant name in allocation
+ String tenantName = node.allocation().get().owner().tenant().value();
+ g.setColor(Color.BLACK);
+ g.setFont(new Font("Courier New", Font.PLAIN, 12));
+ g.drawString(tenantName, x + nodeWidth / 2 - 20, y - height / 2);
+
+ y = y - height - 2;
+ }
+ return y;
+ }
+
+ public static void visualize(List<AllocationSnapshot> snaps) {
+ AllocationVisualizer visualisator = new AllocationVisualizer(snaps);
+ javax.swing.SwingUtilities.invokeLater(() -> {
+ JFrame frame = new JFrame("Allocation Simulator");
+ frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+ frame.setContentPane(visualisator);
+ frame.pack();
+ frame.setVisible(true);
+ });
+
+ while(true) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java
index 2b843929b6e..464dd83211a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java
@@ -29,8 +29,6 @@ import static org.junit.Assert.assertFalse;
*/
public class DockerHostCapacityTest {
- private static final String HEADROOM_TENANT = "-__!@#$$%THISisHEADroom";
-
private DockerHostCapacity capacity;
private List<Node> nodes;
private Node host1, host2, host3;
@@ -74,24 +72,17 @@ public class DockerHostCapacityTest {
assertEquals(host1, nodes.get(1));
assertEquals(host2, nodes.get(2));
- // Add a headroom node to host1 and the host2 should be prioritized
- Allocation allocation = new Allocation(app(HEADROOM_TENANT), ClusterMembership.from("container/id1/3", new Version()), Generation.inital(), false);
- Node nodeF = Node.create("nodeF", Collections.singleton("::6"), Collections.emptySet(), "nodeF", Optional.of("host1"), flavorDocker, NodeType.tenant);
- nodeF.with(allocation);
- nodes.add(nodeF);
+ // Replace a node on host2 with a headroom node - host2 should then be prioritized
+ Allocation allocation = new Allocation(app(DockerHostCapacity.HEADROOM_TENANT), ClusterMembership.from("container/id1/3", new Version()), Generation.inital(), false);
+ Node nodeF = Node.create("nodeF", Collections.singleton("::6"), Collections.emptySet(), "nodeF", Optional.of("host2"), flavorDocker, NodeType.tenant);
+ Node nodeFWithAllocation = nodeF.with(allocation);
+ nodes.add(nodeFWithAllocation);
+ nodes.remove(nodeC);
capacity = new DockerHostCapacity(nodes);
Collections.sort(nodes, capacity::compare);
assertEquals(host3, nodes.get(0));
assertEquals(host2, nodes.get(1));
assertEquals(host1, nodes.get(2));
-
- // Remove a node from host1 and it should be prioritized again
- nodes.remove(nodeA);
- capacity = new DockerHostCapacity(nodes);
- Collections.sort(nodes, capacity::compare);
- assertEquals(host3, nodes.get(0));
- assertEquals(host1, nodes.get(1));
- assertEquals(host2, nodes.get(2));
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
index 89119441a93..24e9736906d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
@@ -14,7 +14,6 @@ import org.junit.Test;
import java.util.HashSet;
import java.util.List;
-import java.util.Optional;
import static org.junit.Assert.assertEquals;
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java
new file mode 100644
index 00000000000..4dff3cff60c
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java
@@ -0,0 +1,139 @@
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.google.common.collect.ImmutableSet;
+import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.OutOfCapacityException;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.config.provisioning.FlavorsConfig;
+import com.yahoo.path.Path;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * @author mortent
+ */
+public class DynamicDockerProvisioningTest {
+
+ @Test
+ public void spare_capacity_used_only_when_replacement() {
+ // Use spare capacity only when a node is being replaced (i.e. one node has failed)
+ // Test should allocate as much capacity as possible, verify that it is not possible to allocate one more unit
+ // Verify that there is still capacity (available spare)
+ // Fail one node and redeploy, Verify that one less node is empty.
+
+ // Setup test
+ ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
+ enableDynamicAllocation(tester);
+ ApplicationId application1 = tester.makeApplicationId();
+ tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
+ deployZoneApp(tester);
+ Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3");
+
+ // Deploy initial state (can max deploy 3 nodes due to redundancy requirements)
+ List<HostSpec> hosts = tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100")),
+ 3, 1, flavor.canonicalName());
+ tester.activate(application1, ImmutableSet.copyOf(hosts));
+
+ DockerHostCapacity capacity = new DockerHostCapacity(tester.nodeRepository().getNodes(Node.State.values()));
+ assertThat(capacity.freeCapacityInFlavorEquivalence(flavor), greaterThan(0));
+
+ List<Node> initialSpareCapacity = findSpareCapacity(tester);
+ assertThat(initialSpareCapacity.size(), is(2));
+
+ try {
+ hosts = tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100")),
+ 4, 1, flavor.canonicalName());
+ fail("Was able to deploy with 4 nodes, should not be able to use spare capacity");
+ } catch (OutOfCapacityException e) {
+ }
+
+ tester.fail(hosts.get(0));
+ hosts = tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100")),
+ 3, 1, flavor.canonicalName());
+ tester.activate(application1, ImmutableSet.copyOf(hosts));
+
+ List<Node> finalSpareCapacity = findSpareCapacity(tester);
+
+ assertThat(finalSpareCapacity.size(), is(1));
+
+ // Uncomment the statement below to walk through the allocation events visually
+ //AllocationVisualizer.visualize(tester.getAllocationSnapshots());
+ }
+
+ @Test
+ public void non_prod_do_not_have_spares() {
+ ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig());
+ enableDynamicAllocation(tester);
+ tester.makeReadyNodes(3, "host-small", NodeType.host, 32);
+ deployZoneApp(tester);
+ Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-3");
+
+ ApplicationId application1 = tester.makeApplicationId();
+ List<HostSpec> hosts = tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100")),
+ 3, 1, flavor.canonicalName());
+ tester.activate(application1, ImmutableSet.copyOf(hosts));
+
+ List<Node> initialSpareCapacity = findSpareCapacity(tester);
+ assertThat(initialSpareCapacity.size(), is(0));
+ }
+
+ private List<Node> findSpareCapacity(ProvisioningTester tester) {
+ List<Node> nodes = tester.nodeRepository().getNodes(Node.State.values());
+ NodeList nl = new NodeList(nodes);
+ return nodes.stream()
+ .filter(n -> n.type() == NodeType.host)
+ .filter(n -> nl.childNodes(n).size() == 0) // Nodes without children
+ .collect(Collectors.toList());
+ }
+
+ private FlavorsConfig flavorsConfig() {
+ FlavorConfigBuilder b = new FlavorConfigBuilder();
+ b.addFlavor("host-large", 6., 6., 6, Flavor.Type.BARE_METAL);
+ b.addFlavor("host-small", 3., 3., 3, Flavor.Type.BARE_METAL);
+ b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-2", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-3", 3, 3., 3, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-3-disk", 3, 3., 5, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-3-mem", 3, 5., 3, Flavor.Type.DOCKER_CONTAINER);
+ b.addFlavor("d-3-cpu", 5, 3., 3, Flavor.Type.DOCKER_CONTAINER);
+ return b.build();
+ }
+
+ private List<HostSpec> deployZoneApp(ProvisioningTester tester) {
+ ApplicationId applicationId = tester.makeApplicationId();
+ List<HostSpec> list = tester.prepare(applicationId,
+ ClusterSpec.request(ClusterSpec.Type.container,
+ ClusterSpec.Id.from("node-admin"),
+ Version.fromString("6.42")),
+ Capacity.fromRequiredNodeType(NodeType.host),
+ 1);
+ tester.activate(applicationId, ImmutableSet.copyOf(list));
+ return list;
+ }
+
+ private void enableDynamicAllocation(ProvisioningTester tester) {
+ tester.getCurator().set(Path.fromString("/provision/v1/dynamicDockerAllocation"), new byte[0]);
+ }
+}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index bbf8967c254..5af13833a7a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -5,16 +5,18 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostFilter;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.ProvisionLogger;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.config.provisioning.FlavorsConfig;
import com.yahoo.test.ManualClock;
import com.yahoo.transaction.NestedTransaction;
-import com.yahoo.config.provisioning.FlavorsConfig;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.curator.transaction.CuratorTransaction;
@@ -23,8 +25,6 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.vespa.hosted.provision.node.filter.NodeHostFilter;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder;
@@ -34,6 +34,7 @@ import java.io.IOException;
import java.time.temporal.TemporalAmount;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -42,6 +43,7 @@ import java.util.Set;
import java.util.UUID;
import java.util.logging.Level;
import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -60,6 +62,7 @@ public class ProvisioningTester implements AutoCloseable {
private final NodeRepositoryProvisioner provisioner;
private final CapacityPolicies capacityPolicies;
private final ProvisionLogger provisionLogger;
+ private final List<AllocationSnapshot> allocationSnapshots = new ArrayList<>();
public ProvisioningTester(Zone zone) {
this(zone, createConfig());
@@ -75,7 +78,8 @@ public class ProvisioningTester implements AutoCloseable {
this.clock = new ManualClock();
this.curator = curator;
this.nodeRepository = new NodeRepository(nodeFlavors, curator, clock, zone, nameResolver);
- this.provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, clock);
+ this.provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, clock,
+ (x,y) -> allocationSnapshots.add(new AllocationSnapshot(new NodeList(x), "Provision tester", y)));
this.capacityPolicies = new CapacityPolicies(zone, nodeFlavors);
this.provisionLogger = new NullProvisionLogger();
}
@@ -111,6 +115,10 @@ public class ProvisioningTester implements AutoCloseable {
//testingServer.close();
}
+ public List<AllocationSnapshot> getAllocationSnapshots() {
+ return allocationSnapshots;
+ }
+
public void advanceTime(TemporalAmount duration) { clock.advance(duration); }
public NodeRepository nodeRepository() { return nodeRepository; }
public ManualClock clock() { return clock; }
@@ -213,13 +221,24 @@ public class ProvisioningTester implements AutoCloseable {
}
public List<Node> makeReadyNodes(int n, String flavor, NodeType type) {
+ return makeReadyNodes(n, flavor, type, 0);
+ }
+
+ public List<Node> makeReadyNodes(int n, String flavor, NodeType type, int additionalIps) {
List<Node> nodes = new ArrayList<>(n);
- for (int i = 0; i < n; i++)
+ for (int i = 0; i < n; i++) {
+ Set<String> ips = IntStream.range(additionalIps * i, additionalIps * (i+1))
+ .mapToObj(j -> String.format("127.0.0.%d", j))
+ .collect(Collectors.toSet());
+
nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(),
- UUID.randomUUID().toString(),
- Optional.empty(),
- nodeFlavors.getFlavorOrThrow(flavor),
- type));
+ UUID.randomUUID().toString(),
+ Collections.emptySet(),
+ ips,
+ Optional.empty(),
+ nodeFlavors.getFlavorOrThrow(flavor),
+ type));
+ }
nodes = nodeRepository.addNodes(nodes);
nodes = nodeRepository.setDirty(nodes);
return nodeRepository.setReady(nodes);