summaryrefslogtreecommitdiffstats
path: root/node-repository
diff options
context:
space:
mode:
authortoby <smorgrav@yahoo-inc.com>2017-07-28 12:36:13 +0200
committertoby <smorgrav@yahoo-inc.com>2017-08-14 11:27:09 +0200
commit1d6a71127d2c5cdc939e455a6c4ace7420507033 (patch)
treeb5b53b5adb0915e6c66058d34d5234f0ebd1c19b /node-repository
parent038f412be82f91594119ec0f9440eb4213a7940a (diff)
More unit testing
Diffstat (limited to 'node-repository')
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java37
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePriority.java10
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java17
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java71
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java22
5 files changed, 113 insertions, 44 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 64093d69907..efa46dedce5 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -22,10 +22,10 @@ import java.util.Set;
import java.util.stream.Collectors;
/**
- * Builds up a priority queue of which nodes should be offered to the allocation.
- * <p>
- * Builds up a list of NodePriority objects and sorts them according to the
- * NodePriority::compare method.
+ * Builds up data structures necessary for node prioritization. It wraps each node
+ * up in a NodePriority object with attributes used in sorting.
+ *
+ * The actual sorting/prioritization is implemented in the NodePriority class as a compare method.
*
* @author smorgrav
*/
@@ -85,6 +85,12 @@ public class NodePrioritizer {
return null;
}
+ /**
+ * Spare hosts are the two hosts in the system with the most free capacity.
+ *
+ * We do not count retired or inactive nodes as used capacity (as they could have been
+ * moved to create space for the spare node in the first place).
+ */
private static Set<Node> findSpareHosts(List<Node> nodes, int spares) {
DockerHostCapacity capacity = new DockerHostCapacity(new ArrayList<>(nodes));
return nodes.stream()
@@ -96,6 +102,12 @@ public class NodePrioritizer {
.collect(Collectors.toSet());
}
+ /**
+ * Headroom are the nodes with the least but sufficient space for the requested headroom.
+ *
+ * If there is not enough headroom, the headroom-violating hosts are the ones that are closest to fulfilling
+ * a headroom request.
+ */
private static Map<Node, Boolean> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
DockerHostCapacity capacity = new DockerHostCapacity(nodes);
Map<Node, Boolean> headroomNodesToViolation = new HashMap<>();
@@ -132,7 +144,6 @@ public class NodePrioritizer {
.limit(flavor.getIdealHeadroom() - tempHeadroom.size())
.collect(Collectors.toList());
- // TODO should we be selective on which application on the node that violates the headroom?
for (Node nodeViolatingHeadrom : violations) {
headroomNodesToViolation.put(nodeViolatingHeadrom, true);
}
@@ -143,12 +154,19 @@ public class NodePrioritizer {
return headroomNodesToViolation;
}
+ /**
+ * @return The list of nodes sorted by NodePriority::compare
+ */
List<NodePriority> prioritize() {
List<NodePriority> priorityList = new ArrayList<>(nodes.values());
Collections.sort(priorityList, (a, b) -> NodePriority.compare(a, b));
return priorityList;
}
+ /**
+ * Add nodes that have been previously reserved to the same application from
+ * an earlier downsizing of a cluster
+ */
void addSurplusNodes(List<Node> surplusNodes) {
for (Node node : surplusNodes) {
NodePriority nodePri = toNodePriority(node, true, false);
@@ -158,6 +176,9 @@ public class NodePrioritizer {
}
}
+ /**
+ * Add a node on each docker host with enough capacity for the requested flavor
+ */
void addNewDockerNodes() {
if (!isDocker) return;
DockerHostCapacity capacity = new DockerHostCapacity(allNodes);
@@ -192,6 +213,9 @@ public class NodePrioritizer {
}
}
+ /**
+ * Add existing nodes allocated to the application
+ */
void addApplicationNodes() {
List<Node.State> legalStates = Arrays.asList(Node.State.active, Node.State.inactive, Node.State.reserved);
allNodes.stream()
@@ -203,6 +227,9 @@ public class NodePrioritizer {
.forEach(nodePriority -> nodes.put(nodePriority.node, nodePriority));
}
+ /**
+ * Add nodes already provisioned, but not allocated to any application
+ */
void addReadyNodes() {
allNodes.stream()
.filter(node -> node.type().equals(requestedNodes.type()))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePriority.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePriority.java
index 27a9f8f08b6..c9afa19c161 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePriority.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePriority.java
@@ -5,11 +5,11 @@ import com.yahoo.vespa.hosted.provision.Node;
import java.util.Optional;
/**
- * Encapsulates all the information nessesary to prioritize node for allocation.
+ * Encapsulates all the information necessary to prioritize node for allocation.
*
* @author smorgrav
*/
-public class NodePriority {
+class NodePriority {
Node node;
@@ -19,10 +19,10 @@ public class NodePriority {
/** The parent host (docker or hypervisor) */
Optional<Node> parent = Optional.empty();
- /** True if the node is allocated to a host that should be spare (without allocations */
+ /** True if the node is allocated to a host that should be dedicated as a spare */
boolean violatesSpares;
- /** True if the node is allocated on slots that should be headroom */
+ /** True if the node is allocated on slots that should be dedicated to headroom */
boolean violatesHeadroom;
/** True if this is a node that has been retired earlier in the allocation process */
@@ -37,8 +37,6 @@ public class NodePriority {
/**
* Compare two node priorities.
*
- * Sorting order accending - thus negative if higher priority
- *
* @return negative if first priority is higher than second node
*/
static int compare(NodePriority n1, NodePriority n2) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java
index eae846abb2a..55e1ff8de9f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacityTest.java
@@ -1,15 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;
-import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.node.Allocation;
-import com.yahoo.vespa.hosted.provision.node.Generation;
import org.junit.Before;
import org.junit.Test;
@@ -67,22 +63,11 @@ public class DockerHostCapacityTest {
@Test
public void compare_used_to_sort_in_decending_order() {
assertEquals(host1, nodes.get(0)); //Make sure it is unsorted here
+
Collections.sort(nodes, capacity::compare);
assertEquals(host3, nodes.get(0));
assertEquals(host1, nodes.get(1));
assertEquals(host2, nodes.get(2));
-
- // Replace a node for host 2 with a headroom node - host2 should then be prioritized
- Allocation allocation = new Allocation(app(DockerHostCapacity.HEADROOM_TENANT), ClusterMembership.from("container/id1/3", new Version()), Generation.inital(), false);
- Node nodeF = Node.create("nodeF", Collections.singleton("::6"), Collections.emptySet(), "nodeF", Optional.of("host2"), flavorDocker, NodeType.tenant);
- Node nodeFWithAllocation = nodeF.with(allocation);
- nodes.add(nodeFWithAllocation);
- nodes.remove(nodeC);
- capacity = new DockerHostCapacity(nodes);
- Collections.sort(nodes, capacity::compare);
- assertEquals(host3, nodes.get(0));
- assertEquals(host2, nodes.get(1));
- assertEquals(host1, nodes.get(2));
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java
index 290e8436b1a..8254cd23030 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java
@@ -20,19 +20,23 @@ import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.node.Agent;
import org.junit.Assert;
import org.junit.Test;
import java.time.Instant;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.greaterThan;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
@@ -78,7 +82,7 @@ public class DynamicDockerProvisioningTest {
addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), flavor, 1, tester);
// Redeploy one of the applications
- redeply(application1, clusterSpec1, flavor, tester);
+ deployapp(application1, clusterSpec1, flavor, tester, 2);
// Assert that the nodes are spread across all hosts (to allow headroom)
Set<String> hostsWithChildren = new HashSet<>();
@@ -127,8 +131,8 @@ public class DynamicDockerProvisioningTest {
addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), flavor, 1, tester);
// Redeploy both applications (to be agnostic on which hosts are picked as spares)
- redeply(application1, clusterSpec1, flavor, tester);
- redeply(application2, clusterSpec2, flavor, tester);
+ deployapp(application1, clusterSpec1, flavor, tester, 2);
+ deployapp(application2, clusterSpec2, flavor, tester, 2);
// Assert that we have two spare nodes (two hosts that don't have allocations)
Set<String> hostsWithChildren = new HashSet<>();
@@ -141,6 +145,61 @@ public class DynamicDockerProvisioningTest {
}
/**
+ * Test an allocation workflow:
+ *
+ * 5 Hosts of capacity 3 (2 spares)
+ * - Allocate app with 3 nodes
+ * - Allocate app with 2 nodes
+ * - Fail host and check redistribution
+ */
+ @Test
+ public void reloacte_failed_nodes() {
+ ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
+ enableDynamicAllocation(tester);
+ tester.makeReadyNodes(5, "host", "host-small", NodeType.host, 32);
+ deployZoneApp(tester);
+ List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
+ Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");
+
+ // Application 1
+ ApplicationId application1 = makeApplicationId("t1", "a1");
+ ClusterSpec clusterSpec1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"));
+ deployapp(application1, clusterSpec1, flavor, tester, 3);
+
+ // Application 2
+ ApplicationId application2 = makeApplicationId("t2", "a2");
+ ClusterSpec clusterSpec2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"));
+ deployapp(application2, clusterSpec2, flavor, tester, 2);
+
+ // Application 3
+ ApplicationId application3 = makeApplicationId("t3", "a3");
+ ClusterSpec clusterSpec3 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Version.fromString("6.100"));
+ deployapp(application3, clusterSpec3, flavor, tester, 2);
+
+ // App 2 and 3 should have been allocated to the same nodes - fail one of the parent hosts from there
+ String parent = tester.nodeRepository().getNodes(application2).stream().findAny().get().parentHostname().get();
+ tester.nodeRepository().failRecursively(parent, Agent.system, "Testing");
+
+ // Redeploy all applications
+ deployapp(application1, clusterSpec1, flavor, tester, 3);
+ deployapp(application2, clusterSpec2, flavor, tester, 2);
+ deployapp(application3, clusterSpec3, flavor, tester, 2);
+
+ Map<Integer, Integer> numberOfChildrenStat = new HashMap<>();
+ for (Node node : dockerHosts) {
+ int nofChildren = tester.nodeRepository().getChildNodes(node.hostname()).size();
+ if (!numberOfChildrenStat.containsKey(nofChildren)) {
+ numberOfChildrenStat.put(nofChildren, 0);
+ }
+ numberOfChildrenStat.put(nofChildren, numberOfChildrenStat.get(nofChildren) + 1);
+ }
+
+ assertEquals(3l, (long)numberOfChildrenStat.get(3));
+ assertEquals(1l, (long)numberOfChildrenStat.get(0));
+ assertEquals(1l, (long)numberOfChildrenStat.get(1));
+ }
+
+ /**
* Test redeployment of nodes that violates spare headroom - but without alternatives
*
* Setup 2 docker hosts and allocate one app with a container on each
@@ -169,7 +228,7 @@ public class DynamicDockerProvisioningTest {
addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), flavor, 1, tester);
// Redeploy both applications (to be agnostic on which hosts are picked as spares)
- redeply(application1, clusterSpec1, flavor, tester);
+ deployapp(application1, clusterSpec1, flavor, tester, 2);
// Assert that we have two spare nodes (two hosts that don't have allocations)
Set<String> hostsWithChildren = new HashSet<>();
@@ -266,8 +325,8 @@ public class DynamicDockerProvisioningTest {
return ApplicationId.from(tenant, appName, "default");
}
- private void redeply(ApplicationId id, ClusterSpec spec, Flavor flavor, ProvisioningTester tester) {
- List<HostSpec> hostSpec = tester.prepare(id, spec, 2,1, flavor.canonicalName());
+ private void deployapp(ApplicationId id, ClusterSpec spec, Flavor flavor, ProvisioningTester tester, int nodecount) {
+ List<HostSpec> hostSpec = tester.prepare(id, spec, nodecount,1, flavor.canonicalName());
tester.activate(id, new HashSet<>(hostSpec));
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
index 2a820308874..8a74f3aff24 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
@@ -312,33 +312,33 @@ public class RestApiTest {
assertResponse(new Request("http://localhost:8080/nodes/v2/state/ready/" + hostname,
new byte[0], Request.Method.PUT),
"{\"message\":\"Moved foo.yahoo.com to ready\"}");
- Pattern responsePattern = Pattern.compile("\\{\"trustedNodes\":\\[" +
+ Pattern responsePattern = Pattern.compile("\\{\"trustedNodes\":\\[.*" +
"\\{\"hostname\":\"cfg1\",\"ipAddress\":\".+?\",\"trustedBy\":\"foo.yahoo.com\"}," +
"\\{\"hostname\":\"cfg2\",\"ipAddress\":\".+?\",\"trustedBy\":\"foo.yahoo.com\"}," +
"\\{\"hostname\":\"cfg3\",\"ipAddress\":\".+?\",\"trustedBy\":\"foo.yahoo.com\"}" +
- "],\"trustedNetworks\":\\[\\]}");
+ ".*],\"trustedNetworks\":\\[\\]}");
assertResponseMatches(new Request("http://localhost:8080/nodes/v2/acl/" + hostname), responsePattern);
}
@Test
public void acl_request_by_config_server() throws Exception {
- Pattern responsePattern = Pattern.compile("\\{\"trustedNodes\":\\[" +
+ Pattern responsePattern = Pattern.compile("\\{\"trustedNodes\":\\[.*" +
"\\{\"hostname\":\"cfg1\",\"ipAddress\":\".+?\",\"trustedBy\":\"cfg1\"}," +
"\\{\"hostname\":\"cfg2\",\"ipAddress\":\".+?\",\"trustedBy\":\"cfg1\"}," +
"\\{\"hostname\":\"cfg3\",\"ipAddress\":\".+?\",\"trustedBy\":\"cfg1\"}" +
- "],\"trustedNetworks\":\\[\\]}");
+ ".*],\"trustedNetworks\":\\[\\]}");
assertResponseMatches(new Request("http://localhost:8080/nodes/v2/acl/cfg1"), responsePattern);
}
@Test
public void acl_request_by_docker_host() throws Exception {
Pattern responsePattern = Pattern.compile("\\{\"trustedNodes\":\\[" +
- "\\{\"hostname\":\"cfg1\",\"ipAddress\":\".+?\",\"trustedBy\":\"parent1.yahoo.com\"}," +
- "\\{\"hostname\":\"cfg2\",\"ipAddress\":\".+?\",\"trustedBy\":\"parent1.yahoo.com\"}," +
- "\\{\"hostname\":\"cfg3\",\"ipAddress\":\".+?\",\"trustedBy\":\"parent1.yahoo.com\"}]," +
+ "\\{\"hostname\":\"cfg1\",\"ipAddress\":\".+?\",\"trustedBy\":\"dockerhost1.yahoo.com\"}," +
+ "\\{\"hostname\":\"cfg2\",\"ipAddress\":\".+?\",\"trustedBy\":\"dockerhost1.yahoo.com\"}," +
+ "\\{\"hostname\":\"cfg3\",\"ipAddress\":\".+?\",\"trustedBy\":\"dockerhost1.yahoo.com\"}]," +
"\"trustedNetworks\":\\[" +
- "\\{\"network\":\"172.17.0.0/16\",\"trustedBy\":\"parent1.yahoo.com\"}]}");
- assertResponseMatches(new Request("http://localhost:8080/nodes/v2/acl/parent1.yahoo.com"), responsePattern);
+ "\\{\"network\":\"172.17.0.0/16\",\"trustedBy\":\"dockerhost1.yahoo.com\"}]}");
+ assertResponseMatches(new Request("http://localhost:8080/nodes/v2/acl/dockerhost1.yahoo.com"), responsePattern);
}
@Test
@@ -349,8 +349,8 @@ public class RestApiTest {
"\\{\"hostname\":\"cfg3\",\"ipAddress\":\".+?\",\"trustedBy\":\"host1.yahoo.com\"}," +
"\\{\"hostname\":\"host1.yahoo.com\",\"ipAddress\":\"::1\",\"trustedBy\":\"host1.yahoo.com\"}," +
"\\{\"hostname\":\"host1.yahoo.com\",\"ipAddress\":\"127.0.0.1\",\"trustedBy\":\"host1.yahoo.com\"}," +
- "\\{\"hostname\":\"host2.yahoo.com\",\"ipAddress\":\"::1\",\"trustedBy\":\"host1.yahoo.com\"}," +
- "\\{\"hostname\":\"host2.yahoo.com\",\"ipAddress\":\"127.0.0.1\",\"trustedBy\":\"host1.yahoo.com\"}" +
+ "\\{\"hostname\":\"host10.yahoo.com\",\"ipAddress\":\"::1\",\"trustedBy\":\"host1.yahoo.com\"}," +
+ "\\{\"hostname\":\"host10.yahoo.com\",\"ipAddress\":\"127.0.0.1\",\"trustedBy\":\"host1.yahoo.com\"}" +
"],\"trustedNetworks\":\\[\\]}");
assertResponseMatches(new Request("http://localhost:8080/nodes/v2/acl/host1.yahoo.com"), responsePattern);
}