author    Harald Musum <musum@oath.com>  2018-09-15 09:04:01 +0200
committer GitHub <noreply@github.com>    2018-09-15 09:04:01 +0200
commit    71a9b65e8c7f0f24a96a6779432806d10a3d8984 (patch)
tree      96761d501a40d43738275e4f920168e5ccc1cea2
parent    68a41c0ddd5af11d16e6bcbc7b6930e4187a3892 (diff)
parent    fc36939b93ab83e8f1082019f8d80f6c9e671b17 (diff)
Merge pull request #6748 from vespa-engine/hmusum/remove-ideal-headroom-for-docker-hosts
Remove use of ideal headroom when allocating, has not been used for a…
-rw-r--r--  config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java                                |   2
-rw-r--r--  config-provisioning/src/main/resources/configdefinitions/flavors.def                                    |   1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java         |   1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java    |   7
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java          |   1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java        |  72
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java      |   7
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/MetricsReporterTest.java      |   1
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java    |   4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java | 189
10 files changed, 11 insertions, 274 deletions
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java
index b0c04cea1b0..78507779585 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java
@@ -26,7 +26,7 @@ public class Flavor {
private final String description;
private final boolean retired;
private List<Flavor> replacesFlavors;
- private int idealHeadroom;
+ private int idealHeadroom; // Note: Not used after Vespa 6.282
/**
* Creates a Flavor, but does not set the replacesFlavors.
diff --git a/config-provisioning/src/main/resources/configdefinitions/flavors.def b/config-provisioning/src/main/resources/configdefinitions/flavors.def
index 57affc2f104..63b22958487 100644
--- a/config-provisioning/src/main/resources/configdefinitions/flavors.def
+++ b/config-provisioning/src/main/resources/configdefinitions/flavors.def
@@ -47,4 +47,5 @@ flavor[].description string default=""
flavor[].retired bool default=false
# The free capacity we would like to preserve for this flavor
+# Note: Not used after Vespa 6.282
flavor[].idealHeadroom int default=0
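
Note: the def change above only documents the deprecation; the value still parses, so existing flavors.def files remain valid. A minimal, hedged sketch of that "keep parsing, stop consulting" pattern in Java — all names below are illustrative stand-ins, not Vespa's generated config classes:

// Hedged sketch, not Vespa's actual code.
final class FlavorSpec {
    private final String name;
    private final int idealHeadroom; // still read (default 0) so old configs deserialize

    FlavorSpec(String name, int idealHeadroom) {
        this.name = name;
        this.idealHeadroom = idealHeadroom;
    }

    String name() { return name; }

    // Retained for config compatibility; after this change nothing in the
    // allocation path consults it (see the NodePrioritizer diff below).
    int idealHeadroom() { return idealHeadroom; }
}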
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
index e387fb2d0ed..39a2787ca9b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
@@ -234,7 +234,6 @@ public class MetricsReporter extends Maintainer {
for (Flavor flavor : dockerFlavors) {
Metric.Context context = getContextAt("flavor", flavor.name());
metric.set("hostedVespa.docker.freeCapacityFlavor", capacity.freeCapacityInFlavorEquivalence(flavor), context);
- metric.set("hostedVespa.docker.idealHeadroomFlavor", flavor.getIdealHeadroom(), context);
metric.set("hostedVespa.docker.hostsAvailableFlavor", capacity.getNofHostsAvailableFor(flavor), context);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java
index b52506c268c..cff62508ec6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java
@@ -19,7 +19,7 @@ public class FlavorConfigBuilder {
return new FlavorsConfig(builder);
}
- public FlavorsConfig.Flavor.Builder addFlavor(String flavorName, double cpu, double mem, double disk, Flavor.Type type, int headRoom) {
+ public FlavorsConfig.Flavor.Builder addFlavor(String flavorName, double cpu, double mem, double disk, Flavor.Type type) {
FlavorsConfig.Flavor.Builder flavor = new FlavorsConfig.Flavor.Builder();
flavor.name(flavorName);
flavor.description("Flavor-name-is-" + flavorName);
@@ -27,15 +27,10 @@ public class FlavorConfigBuilder {
flavor.minCpuCores(cpu);
flavor.minMainMemoryAvailableGb(mem);
flavor.environment(type.name());
- flavor.idealHeadroom(headRoom);
builder.flavor(flavor);
return flavor;
}
- public FlavorsConfig.Flavor.Builder addFlavor(String flavorName, double cpu, double mem, double disk, Flavor.Type type) {
- return addFlavor(flavorName, cpu, mem, disk, type, 0);
- }
-
public FlavorsConfig.Flavor.Builder addNonStockFlavor(String flavorName, double cpu, double mem, double disk, Flavor.Type type) {
FlavorsConfig.Flavor.Builder flavor = new FlavorsConfig.Flavor.Builder();
flavor.name(flavorName);
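
Call sites migrate mechanically. A hedged before/after sketch, assembled only from signatures and calls visible elsewhere in this diff (the AllocationSimulator hunk below shows the real migration):

FlavorConfigBuilder b = new FlavorConfigBuilder();

// Before this change, headroom could be passed as a trailing argument
// or set on the returned builder:
//   b.addFlavor("host-small", 5., 5., 5, Flavor.Type.BARE_METAL, 2);
//   b.addFlavor("host-large", 8., 8., 8, Flavor.Type.BARE_METAL).idealHeadroom(1);

// After, only the headroom-free signature remains:
b.addFlavor("host-small", 5., 5., 5, Flavor.Type.BARE_METAL);
b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER);
FlavorsConfig config = b.build();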
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index dff6378a19a..eb3be9e6d80 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -52,7 +52,6 @@ public class GroupPreparer {
application,
cluster,
requestedNodes,
- nodeRepository.getAvailableFlavors(),
spareCount,
nodeRepository.nameResolver());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index b48d69447a8..a74f3b2d116 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeType;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.hosted.provision.Node;
@@ -15,7 +14,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -44,17 +42,15 @@ public class NodePrioritizer {
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final Set<Node> spareHosts;
- private final Map<Node, ResourceCapacity> headroomHosts;
NodePrioritizer(List<Node> allNodes, ApplicationId appId, ClusterSpec clusterSpec, NodeSpec nodeSpec,
- NodeFlavors nodeFlavors, int spares, NameResolver nameResolver) {
+ int spares, NameResolver nameResolver) {
this.allNodes = Collections.unmodifiableList(allNodes);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.appId = appId;
this.nameResolver = nameResolver;
this.spareHosts = findSpareHosts(allNodes, spares);
- this.headroomHosts = findHeadroomHosts(allNodes, spareHosts, nodeFlavors);
this.capacity = new DockerHostCapacity(allNodes);
@@ -93,62 +89,6 @@ public class NodePrioritizer {
}
/**
- * Headroom hosts are the hosts with the least, but still sufficient, capacity for the requested headroom.
- *
- * If there is not enough headroom, the headroom-violating hosts are the ones that are closest to fulfilling
- * a headroom request.
- */
- private static Map<Node, ResourceCapacity> findHeadroomHosts(List<Node> nodes, Set<Node> spareNodes, NodeFlavors flavors) {
- DockerHostCapacity capacity = new DockerHostCapacity(nodes);
- Map<Node, ResourceCapacity> headroomHosts = new HashMap<>();
-
- List<Node> hostsSortedOnLeastCapacity = nodes.stream()
- .filter(n -> !spareNodes.contains(n))
- .filter(node -> node.type().equals(NodeType.host))
- .filter(dockerHost -> dockerHost.state().equals(Node.State.active))
- .filter(dockerHost -> capacity.freeIPs(dockerHost) > 0)
- .sorted((a, b) -> capacity.compareWithoutInactive(b, a))
- .collect(Collectors.toList());
-
- // For all flavors with ideal headroom - find which hosts this headroom should be allocated to
- for (Flavor flavor : flavors.getFlavors().stream().filter(f -> f.getIdealHeadroom() > 0).collect(Collectors.toList())) {
- Set<Node> tempHeadroom = new HashSet<>();
- Set<Node> notEnoughCapacity = new HashSet<>();
-
- ResourceCapacity headroomCapacity = ResourceCapacity.of(flavor);
-
- // Select hosts that have available capacity for both headroom and for new allocations
- for (Node host : hostsSortedOnLeastCapacity) {
- if (headroomHosts.containsKey(host)) continue;
- if (capacity.hasCapacityWhenRetiredAndInactiveNodesAreGone(host, headroomCapacity)) {
- headroomHosts.put(host, headroomCapacity);
- tempHeadroom.add(host);
- } else {
- notEnoughCapacity.add(host);
- }
-
- if (tempHeadroom.size() == flavor.getIdealHeadroom()) {
- break;
- }
- }
-
- // Now check if we have enough headroom - if not, choose the nodes that almost have it
- if (tempHeadroom.size() < flavor.getIdealHeadroom()) {
- List<Node> violations = notEnoughCapacity.stream()
- .sorted((a, b) -> capacity.compare(b, a))
- .limit(flavor.getIdealHeadroom() - tempHeadroom.size())
- .collect(Collectors.toList());
-
- for (Node hostViolatingHeadrom : violations) {
- headroomHosts.put(hostViolatingHeadrom, headroomCapacity);
- }
- }
- }
-
- return headroomHosts;
- }
-
- /**
* @return The list of nodes sorted by PrioritizableNode::compare
*/
List<PrioritizableNode> prioritize() {
@@ -257,16 +197,6 @@ public class NodePrioritizer {
if (spareHosts.contains(parent)) {
pri.violatesSpares = true;
}
-
- if (headroomHosts.containsKey(parent) && isPreferredNodeToBeReloacted(allNodes, node, parent)) {
- ResourceCapacity neededCapacity = headroomHosts.get(parent);
-
- // If the node is new then we need to check the headroom requirement after it has been added
- if (isNewNode) {
- neededCapacity = ResourceCapacity.composite(neededCapacity, new ResourceCapacity(node));
- }
- pri.violatesHeadroom = !capacity.hasCapacity(parent, neededCapacity);
- }
}
return pri;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java
index 40f76125064..d05f4e957c3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java
@@ -14,7 +14,7 @@ class PrioritizableNode implements Comparable<PrioritizableNode> {
Node node;
- /** The free capacity excluding headroom, including retired allocations */
+ /** The free capacity, including retired allocations */
ResourceCapacity freeParentCapacity = new ResourceCapacity();
/** The parent host (docker or hypervisor) */
@@ -23,9 +23,6 @@ class PrioritizableNode implements Comparable<PrioritizableNode> {
/** True if the node is allocated to a host that should be dedicated as a spare */
boolean violatesSpares;
- /** True if the node is (or would be) allocated on slots that should be dedicated to headroom */
- boolean violatesHeadroom;
-
/** True if this is a node that has been retired earlier in the allocation process */
boolean isSurplusNode;
@@ -45,8 +42,6 @@ class PrioritizableNode implements Comparable<PrioritizableNode> {
// First always pick nodes without violation above nodes with violations
if (!this.violatesSpares && other.violatesSpares) return -1;
if (!other.violatesSpares && this.violatesSpares) return 1;
- if (!this.violatesHeadroom && other.violatesHeadroom) return -1;
- if (!other.violatesHeadroom && this.violatesHeadroom) return 1;
// Choose active nodes
if (this.node.state().equals(Node.State.active) && !other.node.state().equals(Node.State.active)) return -1;
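
With the two headroom comparisons gone, the visible part of the ordering reduces to spare violations, then node state. A condensed sketch limited to the fragments shown in this hunk — the real compareTo has further tie-breakers not visible here, and the symmetric active-state check is assumed:

@Override
public int compareTo(PrioritizableNode other) {
    // Nodes without spare violations always sort before violating ones
    if (!this.violatesSpares && other.violatesSpares) return -1;
    if (!other.violatesSpares && this.violatesSpares) return 1;

    // Then prefer active nodes
    if (this.node.state().equals(Node.State.active) && !other.node.state().equals(Node.State.active)) return -1;
    if (!this.node.state().equals(Node.State.active) && other.node.state().equals(Node.State.active)) return 1; // assumed by symmetry

    // ... remaining tie-breakers elided (not part of this diff)
    return 0;
}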
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/MetricsReporterTest.java
index 7157f893970..248299e7991 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/MetricsReporterTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/MetricsReporterTest.java
@@ -170,7 +170,6 @@ public class MetricsReporterTest {
assertEquals(4.0, metric.values.get("hostedVespa.docker.freeCapacityCpu"));
assertContext(metric, "hostedVespa.docker.freeCapacityFlavor", 1, 0);
- assertContext(metric, "hostedVespa.docker.idealHeadroomFlavor", 0, 0);
assertContext(metric, "hostedVespa.docker.hostsAvailableFlavor", 1l, 0l);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java
index 62212447c2e..bf93e114c8d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationSimulator.java
@@ -44,8 +44,8 @@ public class AllocationSimulator {
// Setup flavors
//
FlavorConfigBuilder b = new FlavorConfigBuilder();
- b.addFlavor("host-large", 8., 8., 8, Flavor.Type.BARE_METAL).idealHeadroom(1);
- b.addFlavor("host-small", 5., 5., 5, Flavor.Type.BARE_METAL).idealHeadroom(2);
+ b.addFlavor("host-large", 8., 8., 8, Flavor.Type.BARE_METAL);
+ b.addFlavor("host-small", 5., 5., 5, Flavor.Type.BARE_METAL);
b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER);
b.addFlavor("d-2", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER);
b.addFlavor("d-3", 3, 3., 3, Flavor.Type.DOCKER_CONTAINER);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java
index 3be56131a05..223a8bc83b0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisioningTest.java
@@ -45,57 +45,10 @@ import static org.junit.Assert.fail;
public class DynamicDockerProvisioningTest {
/**
- * Test relocation of nodes that violate headroom.
- * <p>
- * Setup 4 docker hosts and allocate one container on each (from two different applications)
- * No spares - only headroom (4xd-2)
- * <p>
- * One application is now violating headroom and need relocation
- * <p>
- * Initial allocation of app 1 and 2 --> final allocation (headroom marked as H):
- * <p>
- * | H | H | H | H | | | | | |
- * | H | H | H1a | H1b | --> | | | | |
- * | | | 2a | 2b | | 1a | 1b | 2a | 2b |
- */
- @Test
- public void relocate_nodes_from_headroom_hosts() {
- ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
- tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
- deployZoneApp(tester);
- List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
- Flavor flavor = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");
-
- // Application 1
- ApplicationId application1 = makeApplicationId("t1", "a1");
- ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
- addAndAssignNode(application1, "1a", dockerHosts.get(2).hostname(), clusterSpec1, flavor, 0, tester);
- addAndAssignNode(application1, "1b", dockerHosts.get(3).hostname(), clusterSpec1, flavor, 1, tester);
-
- // Application 2
- ApplicationId application2 = makeApplicationId("t2", "a2");
- ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2");
- addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), clusterSpec2, flavor, 0, tester);
- addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), clusterSpec2, flavor, 1, tester);
-
- // Redeploy one of the applications
- deployapp(application1, clusterSpec1, flavor, tester, 2);
-
- // Assert that the nodes are spread across all hosts (to allow headroom)
- Set<String> hostsWithChildren = new HashSet<>();
- for (Node node : tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active)) {
- if (!isInactiveOrRetired(node)) {
- hostsWithChildren.add(node.parentHostname().get());
- }
- }
- Assert.assertEquals(4, hostsWithChildren.size());
- }
-
- /**
* Test relocation of nodes from spare hosts.
* <p>
* Setup 4 docker hosts and allocate one container on each (from two different applications)
- * No headroom defined - only getSpareCapacityProd() spares.
+ * Only getSpareCapacityProd() spares.
* <p>
* Check that it relocates containers away from the getSpareCapacityProd() spares
* <p>
@@ -141,132 +94,6 @@ public class DynamicDockerProvisioningTest {
}
/**
- * Test that new docker nodes that will result in headroom violations are
- * correctly marked as such.
- * <p>
- * When redeploying app1 - should not do anything (as moving app1 to hosts 0 and 1 would violate headroom).
- * Then redeploy app 2 - should cause a relocation.
- * <p>
- * | H | H | H2a | H2b | | H | H | H | H |
- * | H | H | H1a | H1b | --> | H | H | H1a | H1b |
- * | | | 1a | 1b | | 2a | 2b | 1a | 1b |
- */
- @Test
- public void new_docker_nodes_are_marked_as_headroom_violations() {
- ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
- tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
- deployZoneApp(tester);
- List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
- Flavor flavorD2 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-2");
- Flavor flavorD1 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");
-
- // Application 1
- ApplicationId application1 = makeApplicationId("t1", "1");
- ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
- String hostParent2 = dockerHosts.get(2).hostname();
- String hostParent3 = dockerHosts.get(3).hostname();
- addAndAssignNode(application1, "1a", hostParent2, clusterSpec1, flavorD2, 0, tester);
- addAndAssignNode(application1, "1b", hostParent3, clusterSpec1, flavorD2, 1, tester);
-
- // Application 2
- ApplicationId application2 = makeApplicationId("t2", "2");
- ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2");
- addAndAssignNode(application2, "2a", hostParent2, clusterSpec2, flavorD1, 0, tester);
- addAndAssignNode(application2, "2b", hostParent3, clusterSpec2, flavorD1, 1, tester);
-
- // Assert allocation placement - prior to re-deployment
- assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
- assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);
-
- // Redeploy application 1
- deployapp(application1, clusterSpec1, flavorD2, tester, 2);
-
- // Re-assert allocation placement
- assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
- assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);
-
- // Redeploy application 2
- deployapp(application2, clusterSpec2, flavorD1, tester, 2);
-
- // Now app2 should have re-located
- assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
- assertApplicationHosts(tester.nodeRepository().getNodes(application2), dockerHosts.get(0).hostname(), dockerHosts.get(1).hostname());
- }
-
- /**
- * Test that we only relocate the smallest nodes from a host to free up headroom.
- * <p>
- * The reason we want to do this is that it is a cheap approximation of the optimal solution, as we
- * pick headroom to be on the hosts where we are closest to fulfilling the headroom requirement.
- *
- * Both applications could be moved here to free up headroom - but we want app2 (which is smallest) to be moved.
- * <p>
- * | H | H | H2a | H2b | | H | H | H | H |
- * | H | H | H1a | H1b | --> | H | H | H | H |
- * | | | 1a | 1b | | 2a | 2b | 1a | 1b |
- * | | | | | | | | 1a | 1b |
- */
- @Test
- public void only_preferred_container_is_moved_from_hosts_with_headroom_violations() {
- ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.perf, RegionName.from("us-east")), flavorsConfig(true));
- tester.makeReadyNodes(4, "host-medium", NodeType.host, 32);
- deployZoneApp(tester);
- List<Node> dockerHosts = tester.nodeRepository().getNodes(NodeType.host, Node.State.active);
- Flavor flavorD2 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-2");
- Flavor flavorD1 = tester.nodeRepository().getAvailableFlavors().getFlavorOrThrow("d-1");
-
- // Application 1
- ApplicationId application1 = makeApplicationId("t1", "1");
- ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
- String hostParent2 = dockerHosts.get(2).hostname();
- String hostParent3 = dockerHosts.get(3).hostname();
- addAndAssignNode(application1, "1a", hostParent2, clusterSpec1, flavorD2, 0, tester);
- addAndAssignNode(application1, "1b", hostParent3, clusterSpec1, flavorD2, 1, tester);
-
- // Application 2
- ApplicationId application2 = makeApplicationId("t2", "2");
- ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2");
- addAndAssignNode(application2, "2a", hostParent2, clusterSpec2, flavorD1, 0, tester);
- addAndAssignNode(application2, "2b", hostParent3, clusterSpec2, flavorD1, 1, tester);
-
- // Assert allocation placement - prior to re-deployment
- assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
- assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);
-
- // Redeploy application 1
- deployapp(application1, clusterSpec1, flavorD2, tester, 2);
-
- // Re-assert allocation placement
- assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
- assertApplicationHosts(tester.nodeRepository().getNodes(application2), hostParent2, hostParent3);
-
- // Redeploy application 2
- deployapp(application2, clusterSpec2, flavorD1, tester, 2);
-
- // Now app2 should have re-located
- assertApplicationHosts(tester.nodeRepository().getNodes(application1), hostParent2, hostParent3);
- assertApplicationHosts(tester.nodeRepository().getNodes(application2), dockerHosts.get(0).hostname(), dockerHosts.get(1).hostname());
- }
-
- private void assertApplicationHosts(List<Node> nodes, String... parents) {
- for (Node node : nodes) {
- // Ignore retired and non-active nodes
- if (!node.state().equals(Node.State.active) ||
- node.allocation().get().membership().retired()) {
- continue;
- }
- boolean found = false;
- for (String parent : parents) {
- if (node.parentHostname().get().equals(parent)) {
- found = true;
- break;
- }
- }
- Assert.assertTrue(found);
- }
- }
-
- /**
* Test an allocation workflow:
* <p>
* 5 Hosts of capacity 3 (2 spares)
@@ -323,8 +150,7 @@ public class DynamicDockerProvisioningTest {
/**
* Test redeployment of nodes that violate spare headroom - but without alternatives
* <p>
- * Setup 2 docker hosts and allocate one app with a container on each
- * No headroom defined - only 2 spares.
+ * Setup 2 docker hosts and allocate one app with a container on each. 2 spares.
* <p>
* Initial allocation of app 1 --> final allocation:
* <p>
@@ -333,7 +159,7 @@ public class DynamicDockerProvisioningTest {
* | 1a | 1b | | 1a | 1b |
*/
@Test
- public void do_not_relocate_nodes_from_spare_if_no_where_to_reloacte_them() {
+ public void do_not_relocate_nodes_from_spare_if_no_where_to_relocate_them() {
ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), flavorsConfig());
tester.makeReadyNodes(2, "host-small", NodeType.host, 32);
deployZoneApp(tester);
@@ -475,16 +301,13 @@ public class DynamicDockerProvisioningTest {
.collect(Collectors.toList());
}
- private FlavorsConfig flavorsConfig(boolean includeHeadroom) {
+ private FlavorsConfig flavorsConfig() {
FlavorConfigBuilder b = new FlavorConfigBuilder();
b.addFlavor("host-large", 6., 6., 6, Flavor.Type.BARE_METAL);
b.addFlavor("host-small", 3., 3., 3, Flavor.Type.BARE_METAL);
b.addFlavor("host-medium", 4., 4., 4, Flavor.Type.BARE_METAL);
b.addFlavor("d-1", 1, 1., 1, Flavor.Type.DOCKER_CONTAINER);
b.addFlavor("d-2", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER);
- if (includeHeadroom) {
- b.addFlavor("d-2-4", 2, 2., 2, Flavor.Type.DOCKER_CONTAINER, 4);
- }
b.addFlavor("d-3", 3, 3., 3, Flavor.Type.DOCKER_CONTAINER);
b.addFlavor("d-3-disk", 3, 3., 5, Flavor.Type.DOCKER_CONTAINER);
b.addFlavor("d-3-mem", 3, 5., 3, Flavor.Type.DOCKER_CONTAINER);
@@ -492,10 +315,6 @@ public class DynamicDockerProvisioningTest {
return b.build();
}
- private FlavorsConfig flavorsConfig() {
- return flavorsConfig(false);
- }
-
private void deployZoneApp(ProvisioningTester tester) {
ApplicationId applicationId = tester.makeApplicationId();
List<HostSpec> list = tester.prepare(applicationId,