From c64e1429d48305c789546a7c1057989a8f772f94 Mon Sep 17 00:00:00 2001
From: Martin Polden
Date: Thu, 24 Jun 2021 10:00:18 +0200
Subject: Less Docker

---
 ...ckerProvisioningCompleteHostCalculatorTest.java | 146 ------
 .../provisioning/DynamicAllocationTest.java        | 545 +++++++++++++++++++++
 .../provisioning/DynamicDockerAllocationTest.java  | 545 ---------------------
 .../provisioning/DynamicDockerProvisionTest.java   | 453 -----------------
 .../provisioning/DynamicProvisioningTest.java      | 453 +++++++++++++++++
 ...NodeProvisioningCompleteHostCalculatorTest.java | 146 ++++++
 .../provisioning/VirtualNodeProvisioningTest.java  |  24 +-
 7 files changed, 1151 insertions(+), 1161 deletions(-)
 delete mode 100644 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
 create mode 100644 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java
 delete mode 100644 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
 delete mode 100644 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
 create mode 100644 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
 create mode 100644 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningCompleteHostCalculatorTest.java

diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
deleted file mode 100644
index 36c08ad37d9..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.provisioning; - -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.Capacity; -import com.yahoo.config.provision.ClusterResources; -import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.config.provision.Environment; -import com.yahoo.config.provision.Flavor; -import com.yahoo.config.provision.NodeResources; -import com.yahoo.config.provision.NodeType; -import com.yahoo.config.provision.RegionName; -import com.yahoo.config.provision.Zone; -import com.yahoo.vespa.hosted.provision.NodeRepository; -import com.yahoo.vespa.hosted.provision.Nodelike; -import org.junit.Test; - -import java.util.List; - -import static org.junit.Assert.assertEquals; - -/** - * @author bratseth - */ -public class DockerProvisioningCompleteHostCalculatorTest { - - @Test - public void changing_to_different_range_preserves_allocation() { - Flavor hostFlavor = new Flavor(new NodeResources(40, 40, 1000, 4)); - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))) - .resourcesCalculator(new CompleteResourcesCalculator(hostFlavor)) - .flavors(List.of(hostFlavor)) - .build(); - tester.makeReadyHosts(9, hostFlavor.resources()).activateTenantHosts(); - - ApplicationId app1 = ProvisioningTester.applicationId("app1"); - ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); - - var initialResources = new NodeResources(20, 16, 50, 1); - tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, initialResources), - new ClusterResources(2, 1, initialResources))); - tester.assertNodes("Initial allocation", - 2, 1, 20, 16, 50, 1.0, - app1, cluster1); - - var newMinResources = new NodeResources( 5, 4, 11, 1); - var newMaxResources = new NodeResources(20, 10, 30, 1); - tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources), - new ClusterResources(7, 1, newMaxResources))); - tester.assertNodes("New allocation preserves total resources", - 7, 1, 7, 4.6, 14.3, 1.0, - app1, cluster1); - - tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources), - new ClusterResources(7, 1, newMaxResources))); - tester.assertNodes("Redeploying the same ranges does not cause changes", - 7, 1, 7, 4.6, 14.3, 1.0, - app1, cluster1); - } - - @Test - public void testResourcesCalculator() { - Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 1000, 4)); - var calculator = new CompleteResourcesCalculator(hostFlavor); - var originalReal = new NodeResources(0.7, 6.0, 12.9, 1.0); - var realToRequest = calculator.realToRequest(originalReal, false); - var requestToReal = calculator.requestToReal(realToRequest, false); - var realResourcesOf = calculator.realResourcesOf(realToRequest); - assertEquals(originalReal, requestToReal); - assertEquals(originalReal, realResourcesOf); - } - - private static class CompleteResourcesCalculator implements HostResourcesCalculator { - - private final Flavor hostFlavor; // Has the real resources - private final double memoryOverhead = 1; - private final double diskOverhead = 100; - - public CompleteResourcesCalculator(Flavor hostFlavor) { - this.hostFlavor = hostFlavor; - } - - @Override - public NodeResources realResourcesOf(Nodelike node, NodeRepository nodeRepository, boolean exclusive) { - if (node.parentHostname().isEmpty()) return node.resources(); // hosts use configured flavors - return 
realResourcesOf(node.resources()); - } - - NodeResources realResourcesOf(NodeResources advertisedResources) { - return advertisedResources.withMemoryGb(advertisedResources.memoryGb() - - memoryOverhead(advertisedResourcesOf(hostFlavor).memoryGb(), advertisedResources, false)) - .withDiskGb(advertisedResources.diskGb() - - diskOverhead(advertisedResourcesOf(hostFlavor).diskGb(), advertisedResources, false)); - } - - @Override - public NodeResources requestToReal(NodeResources advertisedResources, boolean exclusive) { - double memoryOverhead = memoryOverhead(advertisedResourcesOf(hostFlavor).memoryGb(), advertisedResources, false); - double diskOverhead = diskOverhead(advertisedResourcesOf(hostFlavor).diskGb(), advertisedResources, false); - return advertisedResources.withMemoryGb(advertisedResources.memoryGb() - memoryOverhead) - .withDiskGb(advertisedResources.diskGb() - diskOverhead); - } - - @Override - public NodeResources advertisedResourcesOf(Flavor flavor) { - if ( ! flavor.equals(hostFlavor)) return flavor.resources(); // Node 'flavors' just wrap the advertised resources - return hostFlavor.resources().withMemoryGb(hostFlavor.resources().memoryGb() + memoryOverhead) - .withDiskGb(hostFlavor.resources().diskGb() + diskOverhead); - } - - @Override - public NodeResources realToRequest(NodeResources realResources, boolean exclusive) { - double memoryOverhead = memoryOverhead(advertisedResourcesOf(hostFlavor).memoryGb(), realResources, true); - double diskOverhead = diskOverhead(advertisedResourcesOf(hostFlavor).diskGb(), realResources, true); - return realResources.withMemoryGb(realResources.memoryGb() + memoryOverhead) - .withDiskGb(realResources.diskGb() + diskOverhead); - } - - @Override - public long reservedDiskSpaceInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; } - - /** - * Returns the memory overhead resulting if the given advertised resources are placed on the given node - * - * @param real true if the given resources are in real values, false if they are in advertised - */ - private double memoryOverhead(double hostAdvertisedMemoryGb, NodeResources resources, boolean real) { - double memoryShare = resources.memoryGb() / - ( hostAdvertisedMemoryGb - (real ? memoryOverhead : 0)); - return memoryOverhead * memoryShare; - } - - /** - * Returns the disk overhead resulting if the given advertised resources are placed on the given node - * - * @param real true if the resources are in real values, false if they are in advertised - */ - private double diskOverhead(double hostAdvertisedDiskGb, NodeResources resources, boolean real) { - double diskShare = resources.diskGb() / - ( hostAdvertisedDiskGb - (real ? diskOverhead : 0) ); - return diskOverhead * diskShare; - } - - } - -} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java new file mode 100644 index 00000000000..761afed23cc --- /dev/null +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java @@ -0,0 +1,545 @@ +// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.provision.provisioning; + +import com.google.common.collect.ImmutableSet; +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.Capacity; +import com.yahoo.config.provision.ClusterMembership; +import com.yahoo.config.provision.ClusterResources; +import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.Environment; +import com.yahoo.config.provision.Flavor; +import com.yahoo.config.provision.HostSpec; +import com.yahoo.config.provision.NodeResources; +import com.yahoo.config.provision.NodeType; +import com.yahoo.config.provision.OutOfCapacityException; +import com.yahoo.config.provision.RegionName; +import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.Zone; +import com.yahoo.config.provisioning.FlavorsConfig; +import com.yahoo.transaction.NestedTransaction; +import com.yahoo.vespa.curator.transaction.CuratorTransaction; +import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.Node.State; +import com.yahoo.vespa.hosted.provision.NodeList; +import com.yahoo.vespa.hosted.provision.node.Agent; +import com.yahoo.vespa.hosted.provision.node.IP; +import org.junit.Ignore; +import org.junit.Test; + +import java.time.Instant; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * @author mortent + */ +public class DynamicAllocationTest { + + /** + * Test relocation of nodes from spare hosts. + *
+ * Set up 4 hosts and allocate one container on each (from two different applications), with
+ * getSpareCapacityProd() spares.
+ *
+ * Check that it relocates containers away from the getSpareCapacityProd() spares + *
+ * Initial allocation of app 1 and 2 --> final allocation (example using 2 spares): + *
+     * |    |    |    |    |          |    |    |    |    |
+     * |    |    |    |    |   -->    | 2a | 2b |    |    |
+     * | 1a | 1b | 2a | 2b |          | 1a | 1b |    |    |
+     */
+    @Test
+    public void relocate_nodes_from_spare_hosts() {
+        int spareCount = 1;
+        ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
+                                                                    .flavorsConfig(flavorsConfig())
+                                                                    .spareCount(spareCount)
+                                                                    .build();
+        tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
+        tester.activateTenantHosts();
+        List<Node> hosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
+        NodeResources flavor = new NodeResources(1, 4, 100, 1);
+
+        // Application 1
+        ApplicationId application1 = makeApplicationId("t1", "a1");
+        ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
+        addAndAssignNode(application1, "1a", hosts.get(0).hostname(), clusterSpec1, flavor, 0, tester);
+        addAndAssignNode(application1, "1b", hosts.get(1).hostname(), clusterSpec1, flavor, 1, tester);
+
+        // Application 2
+        ApplicationId application2 = makeApplicationId("t2", "a2");
+        ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2");
+        addAndAssignNode(application2, "2a", hosts.get(2).hostname(), clusterSpec2, flavor, 3, tester);
+        addAndAssignNode(application2, "2b", hosts.get(3).hostname(), clusterSpec2, flavor, 4, tester);
+
+        // Redeploy both applications (to be agnostic about which hosts are picked as spares)
+        deployApp(application1, clusterSpec1, flavor, tester, 2);
+        deployApp(application2, clusterSpec2, flavor, tester, 2);
+
+        // Assert that we have two spare nodes (two hosts that don't have allocations)
+        Set<String> hostsWithChildren = new HashSet<>();
+        for (Node node : tester.nodeRepository().nodes().list(State.active).nodeType(NodeType.tenant).not().state(State.inactive).not().retired()) {
+            hostsWithChildren.add(node.parentHostname().get());
+        }
+        assertEquals(4 - spareCount, hostsWithChildren.size());
+
+    }
+
+    /**
+     * Test an allocation workflow:
+     *
+ * 5 Hosts of capacity 3 (2 spares) + * - Allocate app with 3 nodes + * - Allocate app with 2 nodes + * - Fail host and check redistribution + */ + @Test + public void relocate_failed_nodes() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(5, "host-small", NodeType.host, 32); + tester.activateTenantHosts(); + NodeList hosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host); + NodeResources resources = new NodeResources(1, 4, 100, 0.3); + + // Application 1 + ApplicationId application1 = makeApplicationId("t1", "a1"); + ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); + deployApp(application1, clusterSpec1, resources, tester, 3); + + // Application 2 + ApplicationId application2 = makeApplicationId("t2", "a2"); + ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2"); + deployApp(application2, clusterSpec2, resources, tester, 2); + + // Application 3 + ApplicationId application3 = makeApplicationId("t3", "a3"); + ClusterSpec clusterSpec3 = clusterSpec("myContent.t3.a3"); + deployApp(application3, clusterSpec3, resources, tester, 2); + + // App 2 and 3 should have been allocated to the same nodes - fail one of the parent hosts from there + String parent = "host-1.yahoo.com"; + tester.nodeRepository().nodes().failOrMarkRecursively(parent, Agent.system, "Testing"); + + // Redeploy all applications + deployApp(application1, clusterSpec1, resources, tester, 3); + deployApp(application2, clusterSpec2, resources, tester, 2); + deployApp(application3, clusterSpec3, resources, tester, 2); + + Map numberOfChildrenStat = new HashMap<>(); + for (Node host : hosts) { + int nofChildren = tester.nodeRepository().nodes().list().childrenOf(host).size(); + if (!numberOfChildrenStat.containsKey(nofChildren)) { + numberOfChildrenStat.put(nofChildren, 0); + } + numberOfChildrenStat.put(nofChildren, numberOfChildrenStat.get(nofChildren) + 1); + } + + assertEquals(4, numberOfChildrenStat.get(2).intValue()); + assertEquals(1, numberOfChildrenStat.get(1).intValue()); + } + + @Test + public void test_allocation_balancing() { + // Here we test balancing between cpu and memory and ignore disk + + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(3, "flt", NodeType.host, 8); // cpu: 30, mem: 30 + tester.makeReadyNodes(3, "cpu", NodeType.host, 8); // cpu: 40, mem: 20 + tester.makeReadyNodes(3, "mem", NodeType.host, 8); // cpu: 20, mem: 40 + tester.activateTenantHosts(); + NodeResources fltResources = new NodeResources(6, 6, 10, 0.1); + NodeResources cpuResources = new NodeResources(8, 4, 10, 0.1); + NodeResources memResources = new NodeResources(4, 8, 10, 0.1); + + // Cpu heavy application + ApplicationId application1 = makeApplicationId("t1", "a1"); + deployApp(application1, clusterSpec("c"), cpuResources, tester, 2); + tester.assertAllocatedOn("Cpu nodes cause least skew increase", "cpu", application1); + + // Mem heavy application + ApplicationId application2 = makeApplicationId("t2", "a2"); + deployApp(application2, clusterSpec("c"), memResources, tester, 2); + tester.assertAllocatedOn("Mem nodes cause least skew increase", "mem", application2); + + // Flat application + ApplicationId application3 = makeApplicationId("t3", "a3"); + deployApp(application3, clusterSpec("c"), fltResources, tester, 2); 
+        tester.assertAllocatedOn("Flat nodes cause least skew increase", "flt", application3);
+
+        // Mem heavy application which can't all be allocated on mem nodes
+        ApplicationId application4 = makeApplicationId("t4", "a4");
+        deployApp(application4, clusterSpec("c"), memResources, tester, 3);
+        assertEquals(2, tester.hostFlavorCount("mem", application4));
+        assertEquals(1, tester.hostFlavorCount("flt", application4));
+
+    }
+
+    /**
+     * Test redeployment of nodes that violate spare headroom - but without alternatives
+     *
+ * Set up 2 hosts and allocate one app with a container on each, with 2 spares.
+ *
+ * Initial allocation of app 1 --> final allocation: + *
+     * |    |    |          |    |    |
+     * |    |    |   -->    |    |    |
+     * | 1a | 1b |          | 1a | 1b |
+     */
+    @Test
+    public void do_not_relocate_nodes_from_spare_if_no_where_to_relocate_them() {
+        ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
+        tester.makeReadyNodes(2, "host-small", NodeType.host, 32);
+        tester.activateTenantHosts();
+        List<Node> hosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
+        NodeResources flavor = new NodeResources(1, 4, 100, 1);
+
+        // Application 1
+        ApplicationId application1 = makeApplicationId("t1", "a1");
+        ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
+        addAndAssignNode(application1, "1a", hosts.get(0).hostname(), clusterSpec1, flavor, 0, tester);
+        addAndAssignNode(application1, "1b", hosts.get(1).hostname(), clusterSpec1, flavor, 1, tester);
+
+        // Redeploy the application (to be agnostic about which hosts are picked as spares)
+        deployApp(application1, clusterSpec1, flavor, tester, 2);
+
+        // Assert that we have two spare nodes (two hosts that don't have allocations)
+        Set<String> hostsWithChildren = new HashSet<>();
+        for (Node node : tester.nodeRepository().nodes().list(State.active).nodeType(NodeType.tenant).not().state(State.inactive).not().retired()) {
+            hostsWithChildren.add(node.parentHostname().get());
+        }
+        assertEquals(2, hostsWithChildren.size());
+    }
+
+    @Test(expected = OutOfCapacityException.class)
+    public void multiple_groups_are_on_separate_parent_hosts() {
+        ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
+        tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
+        tester.activateTenantHosts();
+
+        // Deploy an application having 6 nodes (3 nodes in 2 groups). We only have 5 hosts available
+        ApplicationId application1 = ProvisioningTester.applicationId();
+        tester.prepare(application1, clusterSpec("myContent.t1.a1"), 6, 2, new NodeResources(1, 4, 100, 1));
+
+        fail("Two groups have been allocated to the same parent host");
+    }
+
+    @Ignore // TODO: Re-enable if we reintroduce spare capacity requirement
+    @Test
+    public void spare_capacity_used_only_when_replacement() {
+        // Use spare capacity only when replacement (i.e. one node has failed)
+        // Test should allocate as much capacity as possible, and verify that it is not possible to allocate one more unit
+        // Verify that there is still capacity (available spare)
+        // Fail one node and redeploy, and verify that one less node is empty.
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + + // Setup test + ApplicationId application1 = ProvisioningTester.applicationId(); + tester.makeReadyNodes(5, "host-small", NodeType.host, 32); + tester.activateTenantHosts(); + NodeResources flavor = new NodeResources(1, 4, 100, 1); + + // Deploy initial state (can max deploy 3 nodes due to redundancy requirements) + ClusterSpec clusterSpec = clusterSpec("myContent.t1.a1"); + List hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor); + tester.activate(application1, ImmutableSet.copyOf(hosts)); + + List initialSpareCapacity = findSpareCapacity(tester); + assertEquals(2, initialSpareCapacity.size()); + + try { + hosts = tester.prepare(application1, clusterSpec, 4, 1, flavor); + fail("Was able to deploy with 4 nodes, should not be able to use spare capacity"); + } catch (OutOfCapacityException ignored) { } + + tester.fail(hosts.get(0)); + hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor); + tester.activate(application1, ImmutableSet.copyOf(hosts)); + + List finalSpareCapacity = findSpareCapacity(tester); + + assertEquals(1, finalSpareCapacity.size()); + } + + @Test + public void non_prod_zones_do_not_have_spares() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.perf, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(3, "host-small", NodeType.host, 32); + tester.activateTenantHosts(); + ApplicationId application1 = ProvisioningTester.applicationId(); + List hosts = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 3, 1, new NodeResources(1, 4, 100, 1)); + tester.activate(application1, ImmutableSet.copyOf(hosts)); + + List initialSpareCapacity = findSpareCapacity(tester); + assertEquals(0, initialSpareCapacity.size()); + } + + @Test + public void cd_uses_slow_disk_hosts() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.cd, Environment.test, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(4, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true); + tester.activateTenantHosts(); + ApplicationId application1 = ProvisioningTester.applicationId(); + List hosts = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 3, 1, new NodeResources(1, 4, 100, 1)); + tester.activate(application1, ImmutableSet.copyOf(hosts)); + } + + @Test(expected = OutOfCapacityException.class) + public void allocation_should_fail_when_host_is_not_in_allocatable_state() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeProvisionedNodes(3, "host-small", NodeType.host, 32).forEach(node -> + tester.nodeRepository().nodes().fail(node.hostname(), Agent.system, getClass().getSimpleName())); + + ApplicationId application = ProvisioningTester.applicationId(); + tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, new NodeResources(1, 40, 100, 1)); + } + + @Test + public void provision_dual_stack_containers() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(2, "host-large", NodeType.host, 10, true); + tester.activateTenantHosts(); + + 
ApplicationId application = ProvisioningTester.applicationId(); + List hosts = tester.prepare(application, clusterSpec("myContent.t1.a1"), 2, 1, new NodeResources(1, 4, 100, 1)); + tester.activate(application, hosts); + + NodeList activeNodes = tester.nodeRepository().nodes().list().owner(application); + assertEquals(ImmutableSet.of("127.0.127.13", "::13"), activeNodes.asList().get(0).ipConfig().primary()); + assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.asList().get(1).ipConfig().primary()); + } + + @Test + public void provisioning_fast_disk_speed_do_not_get_slow_nodes() { + provisionFastAndSlowThenDeploy(NodeResources.DiskSpeed.fast, true); + } + + @Test + public void provisioning_slow_disk_speed_do_not_get_fast_nodes() { + provisionFastAndSlowThenDeploy(NodeResources.DiskSpeed.slow, true); + } + + @Test + public void provisioning_any_disk_speed_gets_slow_and_fast_nodes() { + provisionFastAndSlowThenDeploy(NodeResources.DiskSpeed.any, false); + } + + @Test + public void slow_disk_nodes_are_preferentially_allocated() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.fast)), NodeType.host, 10, true); + tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true); + tester.activateTenantHosts(); + + ApplicationId application = ProvisioningTester.applicationId(); + ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); + NodeResources resources = new NodeResources(1, 4, 100, 1, NodeResources.DiskSpeed.any); + + List hosts = tester.prepare(application, cluster, 2, 1, resources); + assertEquals(2, hosts.size()); + assertEquals(NodeResources.DiskSpeed.slow, hosts.get(0).advertisedResources().diskSpeed()); + assertEquals(NodeResources.DiskSpeed.slow, hosts.get(1).advertisedResources().diskSpeed()); + tester.activate(application, hosts); + } + + private void provisionFastAndSlowThenDeploy(NodeResources.DiskSpeed requestDiskSpeed, boolean expectOutOfCapacity) { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.fast)), NodeType.host, 10, true); + tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true); + tester.activateTenantHosts(); + + ApplicationId application = ProvisioningTester.applicationId(); + ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); + NodeResources resources = new NodeResources(1, 4, 100, 1, requestDiskSpeed); + + try { + List hosts = tester.prepare(application, cluster, 4, 1, resources); + if (expectOutOfCapacity) fail("Expected out of capacity"); + assertEquals(4, hosts.size()); + tester.activate(application, hosts); + } + catch (OutOfCapacityException e) { + if ( ! 
expectOutOfCapacity) throw e; + } + } + + @Test + public void nodeResourcesAreRelaxedInDev() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.fast)), NodeType.host, 10, true); + tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true); + tester.activateTenantHosts(); + + ApplicationId application = ProvisioningTester.applicationId(); + ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); + NodeResources resources = new NodeResources(1, 4, 100, 1, NodeResources.DiskSpeed.fast); + + List hosts = tester.prepare(application, cluster, 4, 1, resources); + assertEquals(1, hosts.size()); + tester.activate(application, hosts); + assertEquals(0.1, hosts.get(0).advertisedResources().vcpu(), 0.000001); + assertEquals(0.1, hosts.get(0).advertisedResources().bandwidthGbps(), 0.000001); + assertEquals("Slow nodes are allowed in dev and preferred because they are cheaper", + NodeResources.DiskSpeed.slow, hosts.get(0).advertisedResources().diskSpeed()); + } + + @Test + public void testSwitchingFromLegacyFlavorSyntaxToResourcesDoesNotCauseReallocation() { + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + tester.makeReadyNodes(2, new Flavor(new NodeResources(5, 20, 1400, 3)), NodeType.host, 10, true); + tester.activateTenantHosts(); + + ApplicationId application = ProvisioningTester.applicationId(); + ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); + + List hosts1 = tester.prepare(application, cluster, Capacity.from(new ClusterResources(2, 1, NodeResources.fromLegacyName("d-2-8-500")), false, true)); + tester.activate(application, hosts1); + + NodeResources resources = new NodeResources(1.5, 8, 500, 0.3); + List hosts2 = tester.prepare(application, cluster, Capacity.from(new ClusterResources(2, 1, resources))); + tester.activate(application, hosts2); + + assertEquals(hosts1, hosts2); + } + + @Test + public void testPreferExclusiveNetworkSwitch() { + // Hosts are provisioned, without switch information + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); + NodeResources hostResources = new NodeResources(32, 128, 2000, 10); + List hosts0 = tester.makeReadyNodes(3, hostResources, NodeType.host, 5); + tester.activateTenantHosts(); + + // Application is deployed + ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); + NodeResources resources = new NodeResources(2, 4, 50, 1, NodeResources.DiskSpeed.any); + ApplicationId app1 = ApplicationId.from("t1", "a1", "i1"); + tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); + tester.assertSwitches(Set.of(), app1, cluster.id()); + + // One host is provisioned on a known switch + String switch0 = "switch0"; + { + List hosts = tester.makeReadyNodes(1, hostResources, NodeType.host, 5); + tester.activateTenantHosts(); + tester.patchNodes(hosts, (host) -> host.withSwitchHostname(switch0)); + } + + // 
Redeploy does not change allocation as a host with switch information is no better or worse than hosts + // without switch information + NodeList allocatedNodes = tester.nodeRepository().nodes().list().owner(app1); + tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); + assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1)); + + // Initial hosts are attached to the same switch + tester.patchNodes(hosts0, (host) -> host.withSwitchHostname(switch0)); + + // Redeploy does not change allocation + tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); + assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1)); + + // One regular host and one slow-disk host are provisioned on the same switch + String switch1 = "switch1"; + Node hostWithSlowDisk; + { + NodeResources slowDisk = hostResources.with(NodeResources.DiskSpeed.slow); + List hosts = tester.makeReadyNodes(1, slowDisk, NodeType.host, 5); + hosts.addAll(tester.makeReadyNodes(1, hostResources, NodeType.host, 5)); + tester.patchNodes(hosts, (host) -> host.withSwitchHostname(switch1)); + tester.activateTenantHosts(); + hostWithSlowDisk = hosts.get(0); + } + + // Redeploy does not change allocation as we prefer to keep our already active nodes + tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); + tester.assertSwitches(Set.of(switch0), app1, cluster.id()); + + // A node is retired + tester.patchNode(tester.nodeRepository().nodes().list().owner(app1).asList().get(0), + (node) -> node.withWantToRetire(true, Agent.system, tester.clock().instant())); + + // Redeploy allocates new node on a distinct switch, and the host with slowest disk (cheapest) on that switch + tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); + tester.assertSwitches(Set.of(switch0, switch1), app1, cluster.id()); + assertTrue("Host with slow disk on " + switch1 + " is chosen", tester.nodeRepository().nodes().list().owner(app1).state(State.active).stream() + .anyMatch(node -> node.hasParent(hostWithSlowDisk.hostname()))); + + // Growing cluster picks new node on exclusive switch + String switch2 = "switch2"; + { + List hosts = tester.makeReadyNodes(1, hostResources, NodeType.host, 5); + tester.activateTenantHosts(); + tester.patchNodes(hosts, (host) -> host.withSwitchHostname(switch2)); + } + tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(3, 1, resources)))); + tester.assertSwitches(Set.of(switch0, switch1, switch2), app1, cluster.id()); + + // Growing cluster further can reuse switches as we're now out of exclusive ones + tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(4, 1, resources)))); + tester.assertSwitches(Set.of(switch0, switch1, switch2), app1, cluster.id()); + + // Additional cluster can reuse switches of existing cluster + ClusterSpec cluster2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content")).vespaVersion("1").build(); + tester.activate(app1, tester.prepare(app1, cluster2, Capacity.from(new ClusterResources(3, 1, resources)))); + tester.assertSwitches(Set.of(switch0, switch1, switch2), app1, cluster2.id()); + + // Another application is deployed on exclusive switches + ApplicationId app2 = ApplicationId.from("t2", "a2", "i2"); + 
tester.activate(app2, tester.prepare(app2, cluster, Capacity.from(new ClusterResources(3, 1, resources)))); + tester.assertSwitches(Set.of(switch0, switch1, switch2), app2, cluster.id()); + } + + private ApplicationId makeApplicationId(String tenant, String appName) { + return ApplicationId.from(tenant, appName, "default"); + } + + private void deployApp(ApplicationId id, ClusterSpec spec, NodeResources flavor, ProvisioningTester tester, int nodeCount) { + List hostSpec = tester.prepare(id, spec, nodeCount, 1, flavor); + tester.activate(id, new HashSet<>(hostSpec)); + } + + private void addAndAssignNode(ApplicationId id, String hostname, String parentHostname, ClusterSpec clusterSpec, NodeResources flavor, int index, ProvisioningTester tester) { + Node node1a = Node.create("open1", new IP.Config(Set.of("127.0.233." + index), Set.of()), hostname, + new Flavor(flavor), NodeType.tenant).parentHostname(parentHostname).build(); + ClusterMembership clusterMembership1 = ClusterMembership.from( + clusterSpec.with(Optional.of(ClusterSpec.Group.from(0))), index); // Need to add group here so that group is serialized in node allocation + Node node1aAllocation = node1a.allocate(id, clusterMembership1, node1a.resources(), Instant.now()); + + tester.nodeRepository().nodes().addNodes(Collections.singletonList(node1aAllocation), Agent.system); + NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(tester.getCurator())); + tester.nodeRepository().nodes().activate(Collections.singletonList(node1aAllocation), transaction); + transaction.commit(); + } + + private List findSpareCapacity(ProvisioningTester tester) { + NodeList nodes = tester.nodeRepository().nodes().list(State.values()); + return nodes.nodeType(NodeType.host) + .matching(host -> nodes.childrenOf(host).size() == 0) // Hosts without children + .asList(); + } + + private FlavorsConfig flavorsConfig() { + FlavorConfigBuilder b = new FlavorConfigBuilder(); + b.addFlavor("host-large", 6, 24, 800, 6, Flavor.Type.BARE_METAL); + b.addFlavor("host-small", 3, 12, 400, 3, Flavor.Type.BARE_METAL); + b.addFlavor("flt", 30, 30, 400, 3, Flavor.Type.BARE_METAL); + b.addFlavor("cpu", 40, 20, 400, 3, Flavor.Type.BARE_METAL); + b.addFlavor("mem", 20, 40, 400, 3, Flavor.Type.BARE_METAL); + return b.build(); + } + + private ClusterSpec clusterSpec(String clusterId) { + return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId)).vespaVersion("6.42").build(); + } + +} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java deleted file mode 100644 index 8d9e79218d4..00000000000 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java +++ /dev/null @@ -1,545 +0,0 @@ -// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.provision.provisioning; - -import com.google.common.collect.ImmutableSet; -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.Capacity; -import com.yahoo.config.provision.ClusterMembership; -import com.yahoo.config.provision.ClusterResources; -import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.config.provision.Environment; -import com.yahoo.config.provision.Flavor; -import com.yahoo.config.provision.HostSpec; -import com.yahoo.config.provision.NodeResources; -import com.yahoo.config.provision.NodeType; -import com.yahoo.config.provision.OutOfCapacityException; -import com.yahoo.config.provision.RegionName; -import com.yahoo.config.provision.SystemName; -import com.yahoo.config.provision.Zone; -import com.yahoo.config.provisioning.FlavorsConfig; -import com.yahoo.transaction.NestedTransaction; -import com.yahoo.vespa.curator.transaction.CuratorTransaction; -import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.Node.State; -import com.yahoo.vespa.hosted.provision.NodeList; -import com.yahoo.vespa.hosted.provision.node.Agent; -import com.yahoo.vespa.hosted.provision.node.IP; -import org.junit.Ignore; -import org.junit.Test; - -import java.time.Instant; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * @author mortent - */ -public class DynamicDockerAllocationTest { - - /** - * Test relocation of nodes from spare hosts. - *
- * Set up 4 docker hosts and allocate one container on each (from two different applications), with
- * getSpareCapacityProd() spares.
- *
- * Check that it relocates containers away from the getSpareCapacityProd() spares - *
- * Initial allocation of app 1 and 2 --> final allocation (example using 2 spares): - *
-     * |    |    |    |    |          |    |    |    |    |
-     * |    |    |    |    |   -->    | 2a | 2b |    |    |
-     * | 1a | 1b | 2a | 2b |          | 1a | 1b |    |    |
-     */
-    @Test
-    public void relocate_nodes_from_spare_hosts() {
-        int spareCount = 1;
-        ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
-                                                                    .flavorsConfig(flavorsConfig())
-                                                                    .spareCount(spareCount)
-                                                                    .build();
-        tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
-        tester.activateTenantHosts();
-        List<Node> dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
-        NodeResources flavor = new NodeResources(1, 4, 100, 1);
-
-        // Application 1
-        ApplicationId application1 = makeApplicationId("t1", "a1");
-        ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
-        addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester);
-        addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester);
-
-        // Application 2
-        ApplicationId application2 = makeApplicationId("t2", "a2");
-        ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2");
-        addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), clusterSpec2, flavor, 3, tester);
-        addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), clusterSpec2, flavor, 4, tester);
-
-        // Redeploy both applications (to be agnostic about which hosts are picked as spares)
-        deployApp(application1, clusterSpec1, flavor, tester, 2);
-        deployApp(application2, clusterSpec2, flavor, tester, 2);
-
-        // Assert that we have two spare nodes (two hosts that don't have allocations)
-        Set<String> hostsWithChildren = new HashSet<>();
-        for (Node node : tester.nodeRepository().nodes().list(State.active).nodeType(NodeType.tenant).not().state(State.inactive).not().retired()) {
-            hostsWithChildren.add(node.parentHostname().get());
-        }
-        assertEquals(4 - spareCount, hostsWithChildren.size());
-
-    }
-
-    /**
-     * Test an allocation workflow:
-     *
- * 5 Hosts of capacity 3 (2 spares) - * - Allocate app with 3 nodes - * - Allocate app with 2 nodes - * - Fail host and check redistribution - */ - @Test - public void relocate_failed_nodes() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(5, "host-small", NodeType.host, 32); - tester.activateTenantHosts(); - NodeList dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host); - NodeResources resources = new NodeResources(1, 4, 100, 0.3); - - // Application 1 - ApplicationId application1 = makeApplicationId("t1", "a1"); - ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1"); - deployApp(application1, clusterSpec1, resources, tester, 3); - - // Application 2 - ApplicationId application2 = makeApplicationId("t2", "a2"); - ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2"); - deployApp(application2, clusterSpec2, resources, tester, 2); - - // Application 3 - ApplicationId application3 = makeApplicationId("t3", "a3"); - ClusterSpec clusterSpec3 = clusterSpec("myContent.t3.a3"); - deployApp(application3, clusterSpec3, resources, tester, 2); - - // App 2 and 3 should have been allocated to the same nodes - fail one of the parent hosts from there - String parent = "host-1.yahoo.com"; - tester.nodeRepository().nodes().failOrMarkRecursively(parent, Agent.system, "Testing"); - - // Redeploy all applications - deployApp(application1, clusterSpec1, resources, tester, 3); - deployApp(application2, clusterSpec2, resources, tester, 2); - deployApp(application3, clusterSpec3, resources, tester, 2); - - Map numberOfChildrenStat = new HashMap<>(); - for (Node host : dockerHosts) { - int nofChildren = tester.nodeRepository().nodes().list().childrenOf(host).size(); - if (!numberOfChildrenStat.containsKey(nofChildren)) { - numberOfChildrenStat.put(nofChildren, 0); - } - numberOfChildrenStat.put(nofChildren, numberOfChildrenStat.get(nofChildren) + 1); - } - - assertEquals(4, numberOfChildrenStat.get(2).intValue()); - assertEquals(1, numberOfChildrenStat.get(1).intValue()); - } - - @Test - public void test_allocation_balancing() { - // Here we test balancing between cpu and memory and ignore disk - - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(3, "flt", NodeType.host, 8); // cpu: 30, mem: 30 - tester.makeReadyNodes(3, "cpu", NodeType.host, 8); // cpu: 40, mem: 20 - tester.makeReadyNodes(3, "mem", NodeType.host, 8); // cpu: 20, mem: 40 - tester.activateTenantHosts(); - NodeResources fltResources = new NodeResources(6, 6, 10, 0.1); - NodeResources cpuResources = new NodeResources(8, 4, 10, 0.1); - NodeResources memResources = new NodeResources(4, 8, 10, 0.1); - - // Cpu heavy application - ApplicationId application1 = makeApplicationId("t1", "a1"); - deployApp(application1, clusterSpec("c"), cpuResources, tester, 2); - tester.assertAllocatedOn("Cpu nodes cause least skew increase", "cpu", application1); - - // Mem heavy application - ApplicationId application2 = makeApplicationId("t2", "a2"); - deployApp(application2, clusterSpec("c"), memResources, tester, 2); - tester.assertAllocatedOn("Mem nodes cause least skew increase", "mem", application2); - - // Flat application - ApplicationId application3 = makeApplicationId("t3", "a3"); - deployApp(application3, clusterSpec("c"), fltResources, 
tester, 2);
-        tester.assertAllocatedOn("Flat nodes cause least skew increase", "flt", application3);
-
-        // Mem heavy application which can't all be allocated on mem nodes
-        ApplicationId application4 = makeApplicationId("t4", "a4");
-        deployApp(application4, clusterSpec("c"), memResources, tester, 3);
-        assertEquals(2, tester.hostFlavorCount("mem", application4));
-        assertEquals(1, tester.hostFlavorCount("flt", application4));
-
-    }
-
-    /**
-     * Test redeployment of nodes that violate spare headroom - but without alternatives
-     *
- * Set up 2 docker hosts and allocate one app with a container on each, with 2 spares.
- *
- * Initial allocation of app 1 --> final allocation: - *
-     * |    |    |          |    |    |
-     * |    |    |   -->    |    |    |
-     * | 1a | 1b |          | 1a | 1b |
-     */
-    @Test
-    public void do_not_relocate_nodes_from_spare_if_no_where_to_relocate_them() {
-        ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
-        tester.makeReadyNodes(2, "host-small", NodeType.host, 32);
-        tester.activateTenantHosts();
-        List<Node> dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
-        NodeResources flavor = new NodeResources(1, 4, 100, 1);
-
-        // Application 1
-        ApplicationId application1 = makeApplicationId("t1", "a1");
-        ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
-        addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester);
-        addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester);
-
-        // Redeploy the application (to be agnostic about which hosts are picked as spares)
-        deployApp(application1, clusterSpec1, flavor, tester, 2);
-
-        // Assert that we have two spare nodes (two hosts that don't have allocations)
-        Set<String> hostsWithChildren = new HashSet<>();
-        for (Node node : tester.nodeRepository().nodes().list(State.active).nodeType(NodeType.tenant).not().state(State.inactive).not().retired()) {
-            hostsWithChildren.add(node.parentHostname().get());
-        }
-        assertEquals(2, hostsWithChildren.size());
-    }
-
-    @Test(expected = OutOfCapacityException.class)
-    public void multiple_groups_are_on_separate_parent_hosts() {
-        ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
-        tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
-        tester.activateTenantHosts();
-
-        // Deploy an application having 6 nodes (3 nodes in 2 groups). We only have 5 docker hosts available
-        ApplicationId application1 = ProvisioningTester.applicationId();
-        tester.prepare(application1, clusterSpec("myContent.t1.a1"), 6, 2, new NodeResources(1, 4, 100, 1));
-
-        fail("Two groups have been allocated to the same parent host");
-    }
-
-    @Ignore // TODO: Re-enable if we reintroduce spare capacity requirement
-    @Test
-    public void spare_capacity_used_only_when_replacement() {
-        // Use spare capacity only when replacement (i.e. one node has failed)
-        // Test should allocate as much capacity as possible, and verify that it is not possible to allocate one more unit
-        // Verify that there is still capacity (available spare)
-        // Fail one node and redeploy, and verify that one less node is empty.
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - - // Setup test - ApplicationId application1 = ProvisioningTester.applicationId(); - tester.makeReadyNodes(5, "host-small", NodeType.host, 32); - tester.activateTenantHosts(); - NodeResources flavor = new NodeResources(1, 4, 100, 1); - - // Deploy initial state (can max deploy 3 nodes due to redundancy requirements) - ClusterSpec clusterSpec = clusterSpec("myContent.t1.a1"); - List hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor); - tester.activate(application1, ImmutableSet.copyOf(hosts)); - - List initialSpareCapacity = findSpareCapacity(tester); - assertEquals(2, initialSpareCapacity.size()); - - try { - hosts = tester.prepare(application1, clusterSpec, 4, 1, flavor); - fail("Was able to deploy with 4 nodes, should not be able to use spare capacity"); - } catch (OutOfCapacityException ignored) { } - - tester.fail(hosts.get(0)); - hosts = tester.prepare(application1, clusterSpec, 3, 1, flavor); - tester.activate(application1, ImmutableSet.copyOf(hosts)); - - List finalSpareCapacity = findSpareCapacity(tester); - - assertEquals(1, finalSpareCapacity.size()); - } - - @Test - public void non_prod_zones_do_not_have_spares() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.perf, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(3, "host-small", NodeType.host, 32); - tester.activateTenantHosts(); - ApplicationId application1 = ProvisioningTester.applicationId(); - List hosts = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 3, 1, new NodeResources(1, 4, 100, 1)); - tester.activate(application1, ImmutableSet.copyOf(hosts)); - - List initialSpareCapacity = findSpareCapacity(tester); - assertEquals(0, initialSpareCapacity.size()); - } - - @Test - public void cd_uses_slow_disk_nodes_for_docker_hosts() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.cd, Environment.test, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(4, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true); - tester.activateTenantHosts(); - ApplicationId application1 = ProvisioningTester.applicationId(); - List hosts = tester.prepare(application1, clusterSpec("myContent.t1.a1"), 3, 1, new NodeResources(1, 4, 100, 1)); - tester.activate(application1, ImmutableSet.copyOf(hosts)); - } - - @Test(expected = OutOfCapacityException.class) - public void allocation_should_fail_when_host_is_not_in_allocatable_state() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeProvisionedNodes(3, "host-small", NodeType.host, 32).forEach(node -> - tester.nodeRepository().nodes().fail(node.hostname(), Agent.system, getClass().getSimpleName())); - - ApplicationId application = ProvisioningTester.applicationId(); - tester.prepare(application, clusterSpec("myContent.t2.a2"), 2, 1, new NodeResources(1, 40, 100, 1)); - } - - @Test - public void provision_dual_stack_containers() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(2, "host-large", NodeType.host, 10, true); - 
tester.activateTenantHosts(); - - ApplicationId application = ProvisioningTester.applicationId(); - List hosts = tester.prepare(application, clusterSpec("myContent.t1.a1"), 2, 1, new NodeResources(1, 4, 100, 1)); - tester.activate(application, hosts); - - NodeList activeNodes = tester.nodeRepository().nodes().list().owner(application); - assertEquals(ImmutableSet.of("127.0.127.13", "::13"), activeNodes.asList().get(0).ipConfig().primary()); - assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.asList().get(1).ipConfig().primary()); - } - - @Test - public void provisioning_fast_disk_speed_do_not_get_slow_nodes() { - provisionFastAndSlowThenDeploy(NodeResources.DiskSpeed.fast, true); - } - - @Test - public void provisioning_slow_disk_speed_do_not_get_fast_nodes() { - provisionFastAndSlowThenDeploy(NodeResources.DiskSpeed.slow, true); - } - - @Test - public void provisioning_any_disk_speed_gets_slow_and_fast_nodes() { - provisionFastAndSlowThenDeploy(NodeResources.DiskSpeed.any, false); - } - - @Test - public void slow_disk_nodes_are_preferentially_allocated() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.fast)), NodeType.host, 10, true); - tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true); - tester.activateTenantHosts(); - - ApplicationId application = ProvisioningTester.applicationId(); - ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); - NodeResources resources = new NodeResources(1, 4, 100, 1, NodeResources.DiskSpeed.any); - - List hosts = tester.prepare(application, cluster, 2, 1, resources); - assertEquals(2, hosts.size()); - assertEquals(NodeResources.DiskSpeed.slow, hosts.get(0).advertisedResources().diskSpeed()); - assertEquals(NodeResources.DiskSpeed.slow, hosts.get(1).advertisedResources().diskSpeed()); - tester.activate(application, hosts); - } - - private void provisionFastAndSlowThenDeploy(NodeResources.DiskSpeed requestDiskSpeed, boolean expectOutOfCapacity) { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.fast)), NodeType.host, 10, true); - tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true); - tester.activateTenantHosts(); - - ApplicationId application = ProvisioningTester.applicationId(); - ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); - NodeResources resources = new NodeResources(1, 4, 100, 1, requestDiskSpeed); - - try { - List hosts = tester.prepare(application, cluster, 4, 1, resources); - if (expectOutOfCapacity) fail("Expected out of capacity"); - assertEquals(4, hosts.size()); - tester.activate(application, hosts); - } - catch (OutOfCapacityException e) { - if ( ! 
expectOutOfCapacity) throw e; - } - } - - @Test - public void nodeResourcesAreRelaxedInDev() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.dev, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.fast)), NodeType.host, 10, true); - tester.makeReadyNodes(2, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true); - tester.activateTenantHosts(); - - ApplicationId application = ProvisioningTester.applicationId(); - ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); - NodeResources resources = new NodeResources(1, 4, 100, 1, NodeResources.DiskSpeed.fast); - - List hosts = tester.prepare(application, cluster, 4, 1, resources); - assertEquals(1, hosts.size()); - tester.activate(application, hosts); - assertEquals(0.1, hosts.get(0).advertisedResources().vcpu(), 0.000001); - assertEquals(0.1, hosts.get(0).advertisedResources().bandwidthGbps(), 0.000001); - assertEquals("Slow nodes are allowed in dev and preferred because they are cheaper", - NodeResources.DiskSpeed.slow, hosts.get(0).advertisedResources().diskSpeed()); - } - - @Test - public void testSwitchingFromLegacyFlavorSyntaxToResourcesDoesNotCauseReallocation() { - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - tester.makeReadyNodes(2, new Flavor(new NodeResources(5, 20, 1400, 3)), NodeType.host, 10, true); - tester.activateTenantHosts(); - - ApplicationId application = ProvisioningTester.applicationId(); - ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); - - List hosts1 = tester.prepare(application, cluster, Capacity.from(new ClusterResources(2, 1, NodeResources.fromLegacyName("d-2-8-500")), false, true)); - tester.activate(application, hosts1); - - NodeResources resources = new NodeResources(1.5, 8, 500, 0.3); - List hosts2 = tester.prepare(application, cluster, Capacity.from(new ClusterResources(2, 1, resources))); - tester.activate(application, hosts2); - - assertEquals(hosts1, hosts2); - } - - @Test - public void testPreferExclusiveNetworkSwitch() { - // Hosts are provisioned, without switch information - ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build(); - NodeResources hostResources = new NodeResources(32, 128, 2000, 10); - List hosts0 = tester.makeReadyNodes(3, hostResources, NodeType.host, 5); - tester.activateTenantHosts(); - - // Application is deployed - ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("1").build(); - NodeResources resources = new NodeResources(2, 4, 50, 1, NodeResources.DiskSpeed.any); - ApplicationId app1 = ApplicationId.from("t1", "a1", "i1"); - tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); - tester.assertSwitches(Set.of(), app1, cluster.id()); - - // One host is provisioned on a known switch - String switch0 = "switch0"; - { - List hosts = tester.makeReadyNodes(1, hostResources, NodeType.host, 5); - tester.activateTenantHosts(); - tester.patchNodes(hosts, (host) -> host.withSwitchHostname(switch0)); - } - - // 
Redeploy does not change allocation as a host with switch information is no better or worse than hosts - // without switch information - NodeList allocatedNodes = tester.nodeRepository().nodes().list().owner(app1); - tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); - assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1)); - - // Initial hosts are attached to the same switch - tester.patchNodes(hosts0, (host) -> host.withSwitchHostname(switch0)); - - // Redeploy does not change allocation - tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); - assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1)); - - // One regular host and one slow-disk host are provisioned on the same switch - String switch1 = "switch1"; - Node hostWithSlowDisk; - { - NodeResources slowDisk = hostResources.with(NodeResources.DiskSpeed.slow); - List hosts = tester.makeReadyNodes(1, slowDisk, NodeType.host, 5); - hosts.addAll(tester.makeReadyNodes(1, hostResources, NodeType.host, 5)); - tester.patchNodes(hosts, (host) -> host.withSwitchHostname(switch1)); - tester.activateTenantHosts(); - hostWithSlowDisk = hosts.get(0); - } - - // Redeploy does not change allocation as we prefer to keep our already active nodes - tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); - tester.assertSwitches(Set.of(switch0), app1, cluster.id()); - - // A node is retired - tester.patchNode(tester.nodeRepository().nodes().list().owner(app1).asList().get(0), - (node) -> node.withWantToRetire(true, Agent.system, tester.clock().instant())); - - // Redeploy allocates new node on a distinct switch, and the host with slowest disk (cheapest) on that switch - tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources)))); - tester.assertSwitches(Set.of(switch0, switch1), app1, cluster.id()); - assertTrue("Host with slow disk on " + switch1 + " is chosen", tester.nodeRepository().nodes().list().owner(app1).state(State.active).stream() - .anyMatch(node -> node.hasParent(hostWithSlowDisk.hostname()))); - - // Growing cluster picks new node on exclusive switch - String switch2 = "switch2"; - { - List hosts = tester.makeReadyNodes(1, hostResources, NodeType.host, 5); - tester.activateTenantHosts(); - tester.patchNodes(hosts, (host) -> host.withSwitchHostname(switch2)); - } - tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(3, 1, resources)))); - tester.assertSwitches(Set.of(switch0, switch1, switch2), app1, cluster.id()); - - // Growing cluster further can reuse switches as we're now out of exclusive ones - tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(4, 1, resources)))); - tester.assertSwitches(Set.of(switch0, switch1, switch2), app1, cluster.id()); - - // Additional cluster can reuse switches of existing cluster - ClusterSpec cluster2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content")).vespaVersion("1").build(); - tester.activate(app1, tester.prepare(app1, cluster2, Capacity.from(new ClusterResources(3, 1, resources)))); - tester.assertSwitches(Set.of(switch0, switch1, switch2), app1, cluster2.id()); - - // Another application is deployed on exclusive switches - ApplicationId app2 = ApplicationId.from("t2", "a2", "i2"); - 
tester.activate(app2, tester.prepare(app2, cluster, Capacity.from(new ClusterResources(3, 1, resources)))); - tester.assertSwitches(Set.of(switch0, switch1, switch2), app2, cluster.id()); - } - - private ApplicationId makeApplicationId(String tenant, String appName) { - return ApplicationId.from(tenant, appName, "default"); - } - - private void deployApp(ApplicationId id, ClusterSpec spec, NodeResources flavor, ProvisioningTester tester, int nodeCount) { - List hostSpec = tester.prepare(id, spec, nodeCount, 1, flavor); - tester.activate(id, new HashSet<>(hostSpec)); - } - - private void addAndAssignNode(ApplicationId id, String hostname, String parentHostname, ClusterSpec clusterSpec, NodeResources flavor, int index, ProvisioningTester tester) { - Node node1a = Node.create("open1", new IP.Config(Set.of("127.0.233." + index), Set.of()), hostname, - new Flavor(flavor), NodeType.tenant).parentHostname(parentHostname).build(); - ClusterMembership clusterMembership1 = ClusterMembership.from( - clusterSpec.with(Optional.of(ClusterSpec.Group.from(0))), index); // Need to add group here so that group is serialized in node allocation - Node node1aAllocation = node1a.allocate(id, clusterMembership1, node1a.resources(), Instant.now()); - - tester.nodeRepository().nodes().addNodes(Collections.singletonList(node1aAllocation), Agent.system); - NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(tester.getCurator())); - tester.nodeRepository().nodes().activate(Collections.singletonList(node1aAllocation), transaction); - transaction.commit(); - } - - private List findSpareCapacity(ProvisioningTester tester) { - NodeList nodes = tester.nodeRepository().nodes().list(State.values()); - return nodes.nodeType(NodeType.host) - .matching(host -> nodes.childrenOf(host).size() == 0) // Hosts without children - .asList(); - } - - private FlavorsConfig flavorsConfig() { - FlavorConfigBuilder b = new FlavorConfigBuilder(); - b.addFlavor("host-large", 6, 24, 800, 6, Flavor.Type.BARE_METAL); - b.addFlavor("host-small", 3, 12, 400, 3, Flavor.Type.BARE_METAL); - b.addFlavor("flt", 30, 30, 400, 3, Flavor.Type.BARE_METAL); - b.addFlavor("cpu", 40, 20, 400, 3, Flavor.Type.BARE_METAL); - b.addFlavor("mem", 20, 40, 400, 3, Flavor.Type.BARE_METAL); - return b.build(); - } - - private ClusterSpec clusterSpec(String clusterId) { - return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId)).vespaVersion("6.42").build(); - } - -} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java deleted file mode 100644 index 029c9ffa559..00000000000 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
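The disk-speed tests above pin down a simple request-side matching rule: a request for fast disk is never placed on a slow host, a request for slow is never placed on a fast host, and a request for 'any' accepts both (and, as the dev test shows, then prefers slow because it is cheaper). Below is a minimal, self-contained sketch of that rule in plain Java; the enum and class names are illustrative only and are not the node-repository types.

    // Illustrative sketch of the disk-speed matching rule exercised above;
    // 'Speed' and 'DiskSpeedRule' are made-up names, not the real API.
    enum Speed { fast, slow, any }

    public class DiskSpeedRule {

        // A request may land on a host when it asks for 'any' or the speeds match.
        static boolean compatible(Speed requested, Speed host) {
            return requested == Speed.any || requested == host;
        }

        public static void main(String[] args) {
            System.out.println(compatible(Speed.fast, Speed.slow)); // false: fast requests do not get slow nodes
            System.out.println(compatible(Speed.slow, Speed.fast)); // false: slow requests do not get fast nodes
            System.out.println(compatible(Speed.any, Speed.slow));  // true: 'any' accepts slow nodes
            System.out.println(compatible(Speed.any, Speed.fast));  // true: 'any' accepts fast nodes as well
        }
    }
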
-package com.yahoo.vespa.hosted.provision.provisioning; - -import com.yahoo.component.Version; -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.Capacity; -import com.yahoo.config.provision.Cloud; -import com.yahoo.config.provision.ClusterResources; -import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.config.provision.Environment; -import com.yahoo.config.provision.Flavor; -import com.yahoo.config.provision.HostSpec; -import com.yahoo.config.provision.NodeResources; -import com.yahoo.config.provision.NodeResources.DiskSpeed; -import com.yahoo.config.provision.NodeResources.StorageType; -import com.yahoo.config.provision.NodeType; -import com.yahoo.config.provision.RegionName; -import com.yahoo.config.provision.SystemName; -import com.yahoo.config.provision.Zone; -import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.NodeList; -import com.yahoo.vespa.hosted.provision.node.Agent; -import com.yahoo.vespa.hosted.provision.node.IP; -import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner.HostSharing; -import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner; -import com.yahoo.vespa.hosted.provision.testutils.MockNameResolver; -import org.junit.Test; - -import java.time.Instant; -import java.util.List; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static com.yahoo.config.provision.NodeResources.DiskSpeed.fast; -import static com.yahoo.config.provision.NodeResources.StorageType.local; -import static com.yahoo.config.provision.NodeResources.StorageType.remote; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.when; - -/** - * @author freva - * @author bratseth - */ -public class DynamicDockerProvisionTest { - - private static final Zone zone = new Zone( - Cloud.builder().dynamicProvisioning(true).build(), - SystemName.main, - Environment.prod, - RegionName.from("us-east")); - private final MockNameResolver nameResolver = new MockNameResolver().mockAnyLookup(); - private final HostProvisioner hostProvisioner = mock(HostProvisioner.class); - private final ProvisioningTester tester = new ProvisioningTester.Builder() - .zone(zone).hostProvisioner(hostProvisioner).nameResolver(nameResolver).build(); - - @Test - public void dynamically_provision_with_empty_node_repo() { - assertEquals(0, tester.nodeRepository().nodes().list().size()); - - ApplicationId application1 = ProvisioningTester.applicationId(); - NodeResources resources = new NodeResources(1, 4, 10, 1); - - mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts - prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources); - verify(hostProvisioner).provisionHosts(List.of(100, 101, 102, 103), NodeType.host, resources, application1, - Version.emptyVersion, HostSharing.any); - - // Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes - assertEquals(8, tester.nodeRepository().nodes().list().size()); - assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size()); - 
assertEquals(Set.of("host-100-1", "host-101-1", "host-102-1", "host-103-1"), - tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).hostnames()); - - // Deploy new application - ApplicationId application2 = ProvisioningTester.applicationId(); - prepareAndActivate(application2, clusterSpec("mycluster"), 4, 1, resources); - - // Total of 12 nodes should now be in node-repo, 4 active hosts and 8 active nodes - assertEquals(12, tester.nodeRepository().nodes().list().size()); - assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size()); - assertEquals(Set.of("host-100-1", "host-100-2", "host-101-1", "host-101-2", "host-102-1", "host-102-2", - "host-103-1", "host-103-2"), - tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).hostnames()); - - // Deploy new exclusive application - ApplicationId application3 = ProvisioningTester.applicationId(); - mockHostProvisioner(hostProvisioner, "large", 3, application3); - prepareAndActivate(application3, clusterSpec("mycluster", true), 4, 1, resources); - verify(hostProvisioner).provisionHosts(List.of(104, 105, 106, 107), NodeType.host, resources, application3, - Version.emptyVersion, HostSharing.exclusive); - - // Total of 20 nodes should now be in node-repo, 8 active hosts and 12 active nodes - assertEquals(20, tester.nodeRepository().nodes().list().size()); - assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size()); - assertEquals(12, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); - - verifyNoMoreInteractions(hostProvisioner); - } - - @Test - public void in_place_resize_not_allowed_on_exclusive_to_hosts() { - NodeResources initialResources = new NodeResources(2, 8, 10, 1); - NodeResources smallResources = new NodeResources(1, 4, 10, 1); - - ApplicationId application1 = ProvisioningTester.applicationId(); - mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts - prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, initialResources); - - ApplicationId application2 = ProvisioningTester.applicationId(); - mockHostProvisioner(hostProvisioner, "large", 3, application2); // Provision exclusive hosts - prepareAndActivate(application2, clusterSpec("mycluster", true), 4, 1, initialResources); - - // Total of 16 nodes should now be in node-repo, 8 active hosts and 8 active nodes - assertEquals(16, tester.nodeRepository().nodes().list().size()); - assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); - - prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, smallResources); - prepareAndActivate(application2, clusterSpec("mycluster", true), 4, 1, smallResources); - - // 24 nodes: 4 shared hosts with 4 app1 nodes + 8 exclusive hosts with 8 nodes of app2, 4 of which are retired - NodeList nodes = tester.nodeRepository().nodes().list(); - assertEquals(24, nodes.size()); - assertEquals(12, nodes.nodeType(NodeType.host).state(Node.State.active).size()); - assertEquals(12, nodes.nodeType(NodeType.tenant).state(Node.State.active).size()); - assertEquals(4, nodes.retired().size()); - } - - @Test - public void avoids_allocating_to_empty_hosts() { - tester.makeReadyHosts(6, new NodeResources(12, 12, 200, 12)); - tester.activateTenantHosts(); - - NodeResources resources = new NodeResources(1, 4, 10, 4); - - ApplicationId application1 = ProvisioningTester.applicationId(); - 
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources); - - ApplicationId application2 = ProvisioningTester.applicationId(); - prepareAndActivate(application2, clusterSpec("mycluster"), 3, 1, resources); - - ApplicationId application3 = ProvisioningTester.applicationId(); - prepareAndActivate(application3, clusterSpec("mycluster"), 3, 1, resources); - assertEquals(4, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count()); - - ApplicationId application4 = ProvisioningTester.applicationId(); - prepareAndActivate(application4, clusterSpec("mycluster"), 3, 1, resources); - assertEquals(5, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count()); - } - - @Test - public void retires_on_exclusivity_violation() { - ApplicationId application1 = ProvisioningTester.applicationId(); - NodeResources resources = new NodeResources(1, 4, 10, 1); - - mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts - prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources); - NodeList initialNodes = tester.nodeRepository().nodes().list().owner(application1); - assertEquals(4, initialNodes.size()); - - // Redeploy same application with exclusive=true - mockHostProvisioner(hostProvisioner, "large", 3, application1); - prepareAndActivate(application1, clusterSpec("mycluster", true), 4, 1, resources); - assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size()); - assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired()); - - // Redeploy without exclusive again is no-op - prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources); - assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size()); - assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired()); - } - - @Test - public void node_indices_are_unique_even_when_a_node_is_left_in_reserved_state() { - NodeResources resources = new NodeResources(10, 10, 10, 10); - ApplicationId app = ProvisioningTester.applicationId(); - - Function retireNode = node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, Instant.now())); - Function getNodeInGroup = group -> tester.nodeRepository().nodes().list().owner(app).stream() - .filter(node -> node.allocation().get().membership().cluster().group().get().index() == group) - .findAny().orElseThrow(); - - // Allocate 10 hosts - tester.makeReadyNodes(10, resources, NodeType.host, 1); - tester.activateTenantHosts(); - - // Prepare & activate an application with 8 nodes and 2 groups - tester.activate(app, tester.prepare(app, clusterSpec("content"), 8, 2, resources)); - - // Retire a node in group 1 and prepare the application - retireNode.apply(getNodeInGroup.apply(1)); - tester.prepare(app, clusterSpec("content"), 8, 2, resources); - // App is not activated, to leave node '8' in reserved state - - // Retire a node in group 0 and prepare the application - retireNode.apply(getNodeInGroup.apply(0)); - tester.prepare(app, clusterSpec("content"), 8, 2, resources); - - // Verify that nodes have unique indices from 0..9 - var indices = tester.nodeRepository().nodes().list().owner(app).stream() - .map(node -> node.allocation().get().membership().index()) - .collect(Collectors.toSet()); - assertTrue(indices.containsAll(IntStream.range(0, 10).boxed().collect(Collectors.toList()))); - } - - @Test - 
public void test_capacity_is_in_advertised_amounts() { - int memoryTax = 3; - List<Flavor> flavors = List.of(new Flavor("2x", - new NodeResources(2, 17, 200, 10, fast, remote))); - - ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone) - .flavors(flavors) - .hostProvisioner(new MockHostProvisioner(flavors, memoryTax)) - .nameResolver(nameResolver) - .resourcesCalculator(memoryTax, 0) - .build(); - - tester.activateTenantHosts(); - - ApplicationId app1 = ProvisioningTester.applicationId("app1"); - ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); - - // Deploy using real memory amount (17) - try { - tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 17, 40), - resources(4, 1, 2, 17, 40))); - fail("Expected exception"); - } - catch (IllegalArgumentException e) { - // Success - String expected = "No allocation possible within limits"; - assertEquals(expected, e.getMessage().substring(0, expected.length())); - } - - // Deploy using advertised memory amount (17 + 3, see MockResourcesCalculator) - tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 20, 40), - resources(4, 1, 2, 20, 40))); - tester.assertNodes("Allocation specifies memory in the advertised amount", - 2, 1, 2, 20, 40, - app1, cluster1); - - // Redeploy the same - tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 20, 40), - resources(4, 1, 2, 20, 40))); - tester.assertNodes("Allocation specifies memory in the advertised amount", - 2, 1, 2, 20, 40, - app1, cluster1); - } - - @Test - public void test_changing_limits() { - int memoryTax = 3; - List<Flavor> flavors = List.of(new Flavor("1x", new NodeResources(1, 10 - memoryTax, 100, 0.1, fast, remote)), - new Flavor("2x", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, remote)), - new Flavor("4x", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote))); - - ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone) - .flavors(flavors) - .hostProvisioner(new MockHostProvisioner(flavors, memoryTax)) - .nameResolver(nameResolver) - .resourcesCalculator(memoryTax, 0) - .build(); - - tester.activateTenantHosts(); - - ApplicationId app1 = ProvisioningTester.applicationId("app1"); - ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); - - // Limits where each number is within flavor limits but which don't contain any flavor lead to an error - try { - tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 3.8, 20, 40), - resources(10, 5, 5, 25, 50))); - fail("Expected exception"); - } - catch (IllegalArgumentException e) { - // success - } - - // Initial deployment - tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 20), - resources(6, 3, 4, 20, 40))); - tester.assertNodes("Initial allocation at first actual flavor above min (except for disk)", - 4, 2, 1, 10, 20, - app1, cluster1); - - - // Move window above current allocation - tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 3.8, 20, 40), - resources(10, 5, 5, 45, 50))); - tester.assertNodes("New allocation at new smallest flavor above limits", - 8, 4, 4, 40, 40, - app1, cluster1); - - // Move window below current allocation - tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20), - resources(6, 3, 3, 25, 25))); - tester.assertNodes("New allocation at new max", - 6, 2, 2, 20, 25, - app1, cluster1); - - // Widening window does not change allocation - 
tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 1, 5, 15), - resources(8, 4, 4, 20, 30))); - tester.assertNodes("No change", - 6, 2, 2, 20, 25, - app1, cluster1); - - // Force 1 more group: reducing to 2 nodes per group to preserve the node count is rejected - // since it would reduce total group memory from 60 to 40. - tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 1, 5, 10), - resources(9, 3, 5, 20, 15))); - tester.assertNodes("Group size is preserved", - 9, 3, 2, 20, 15, - app1, cluster1); - - // Stop specifying node resources - tester.activate(app1, cluster1, Capacity.from(new ClusterResources(6, 3, NodeResources.unspecified()), - new ClusterResources(9, 3, NodeResources.unspecified()))); - tester.assertNodes("Existing allocation is preserved", - 9, 3, 2, 20, 15, - app1, cluster1); - } - - @Test - public void test_changing_storage_type() { - int memoryTax = 3; - List<Flavor> flavors = List.of(new Flavor("2x", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, remote)), - new Flavor("2xl", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, local)), - new Flavor("4x", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote)), - new Flavor("4xl", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, local))); - - ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone) - .flavors(flavors) - .hostProvisioner(new MockHostProvisioner(flavors, memoryTax)) - .nameResolver(nameResolver) - .resourcesCalculator(memoryTax, 0) - .build(); - - tester.activateTenantHosts(); - - ApplicationId app1 = ProvisioningTester.applicationId("app1"); - ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); - - tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200, fast, local), - resources(6, 3, 3, 25, 400, fast, local))); - tester.assertNodes("Initial deployment: Local disk", - 4, 2, 2, 20, 200, fast, local, - app1, cluster1); - - tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200, fast, remote), - resources(6, 3, 3, 25, 400, fast, remote))); - tester.assertNodes("Change from local to remote disk", - 4, 2, 2, 20, 200, fast, remote, - app1, cluster1); - } - - @Test - public void test_any_disk_prefers_remote() { - int memoryTax = 3; - int localDiskTax = 55; - // Disk tax is not included in flavor resources but memory tax is - List<Flavor> flavors = List.of(new Flavor("2x", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, local)), - new Flavor("4x", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, local)), - new Flavor("2xl", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, remote)), - new Flavor("4xl", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote))); - - ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone) - .flavors(flavors) - .hostProvisioner(new MockHostProvisioner(flavors, memoryTax)) - .nameResolver(nameResolver) - .resourcesCalculator(memoryTax, localDiskTax) - .build(); - - tester.activateTenantHosts(); - - ApplicationId app1 = ProvisioningTester.applicationId("app1"); - ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); - - tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200, fast, StorageType.any), - resources(6, 3, 3, 25, 400, fast, StorageType.any))); - tester.assertNodes("'any' selects a flavor with remote storage since it produces higher fulfilment", - 4, 2, 2, 20, 200, fast, remote, - app1, cluster1); - } - - 
private void prepareAndActivate(ApplicationId application, ClusterSpec clusterSpec, int nodes, int groups, NodeResources resources) { - List prepared = tester.prepare(application, clusterSpec, nodes, groups, resources); - NodeList provisionedHosts = tester.nodeRepository().nodes().list(Node.State.provisioned).nodeType(NodeType.host); - if (!provisionedHosts.isEmpty()) { - tester.nodeRepository().nodes().setReady(provisionedHosts.asList(), Agent.system, DynamicDockerProvisionTest.class.getSimpleName()); - tester.activateTenantHosts(); - } - tester.activate(application, prepared); - } - - private static ClusterSpec clusterSpec(String clusterId) { - return clusterSpec(clusterId, false); - } - - private static ClusterSpec clusterSpec(String clusterId, boolean exclusive) { - return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId)).vespaVersion("6.42").exclusive(exclusive).build(); - } - - private static ClusterResources resources(int nodes, int groups, double vcpu, double memory, double disk) { - return new ClusterResources(nodes, groups, new NodeResources(vcpu, memory, disk, 0.1, - DiskSpeed.getDefault(), StorageType.getDefault())); - } - - private static ClusterResources resources(int nodes, int groups, double vcpu, double memory, double disk, - DiskSpeed diskSpeed, StorageType storageType) { - return new ClusterResources(nodes, groups, new NodeResources(vcpu, memory, disk, 0.1, diskSpeed, storageType)); - } - - @SuppressWarnings("unchecked") - private void mockHostProvisioner(HostProvisioner hostProvisioner, String hostFlavorName, int numIps, ApplicationId exclusiveTo) { - doAnswer(invocation -> { - Flavor hostFlavor = tester.nodeRepository().flavors().getFlavorOrThrow(hostFlavorName); - List provisionIndexes = (List) invocation.getArguments()[0]; - NodeResources nodeResources = (NodeResources) invocation.getArguments()[2]; - - return provisionIndexes.stream() - .map(hostIndex -> { - String hostHostname = "host-" + hostIndex; - String hostIp = "::" + hostIndex + ":0"; - nameResolver.addRecord(hostHostname, hostIp); - Set pool = IntStream.range(1, numIps + 1).mapToObj(i -> { - String ip = "::" + hostIndex + ":" + i; - nameResolver.addRecord(hostHostname + "-" + i, ip); - return ip; - }).collect(Collectors.toSet()); - - Node parent = Node.create(hostHostname, new IP.Config(Set.of(hostIp), pool), hostHostname, hostFlavor, NodeType.host) - .exclusiveTo(exclusiveTo).build(); - Node child = Node.reserve(Set.of("::" + hostIndex + ":1"), hostHostname + "-1", hostHostname, nodeResources, NodeType.tenant).build(); - ProvisionedHost provisionedHost = mock(ProvisionedHost.class); - when(provisionedHost.generateHost()).thenReturn(parent); - when(provisionedHost.generateNode()).thenReturn(child); - return provisionedHost; - }) - .collect(Collectors.toList()); - }).when(hostProvisioner).provisionHosts(any(), any(), any(), any(), any(), any()); - } - -} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java new file mode 100644 index 00000000000..6f1e2630434 --- /dev/null +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java @@ -0,0 +1,453 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
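The mockHostProvisioner helper above stubs HostProvisioner.provisionHosts with Mockito's doAnswer so that the returned hosts are computed from the provision indexes passed in each call, rather than being a canned value. Below is a minimal, self-contained sketch of that stubbing pattern; 'NameService' and its method are made-up names for illustration, not the real HostProvisioner interface.

    import java.util.List;
    import java.util.stream.Collectors;

    import static org.mockito.ArgumentMatchers.anyList;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    // Hypothetical interface standing in for HostProvisioner.
    interface NameService {
        List<String> hostnamesFor(List<Integer> indexes);
    }

    public class MockingSketch {
        public static void main(String[] args) {
            NameService service = mock(NameService.class);
            // Compute the answer from the call's arguments, like
            // mockHostProvisioner does for provisionHosts.
            doAnswer(invocation -> {
                @SuppressWarnings("unchecked")
                List<Integer> indexes = (List<Integer>) invocation.getArguments()[0];
                return indexes.stream().map(i -> "host-" + i).collect(Collectors.toList());
            }).when(service).hostnamesFor(anyList());

            System.out.println(service.hostnamesFor(List.of(100, 101))); // [host-100, host-101]
        }
    }
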
+package com.yahoo.vespa.hosted.provision.provisioning; + +import com.yahoo.component.Version; +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.Capacity; +import com.yahoo.config.provision.Cloud; +import com.yahoo.config.provision.ClusterResources; +import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.Environment; +import com.yahoo.config.provision.Flavor; +import com.yahoo.config.provision.HostSpec; +import com.yahoo.config.provision.NodeResources; +import com.yahoo.config.provision.NodeResources.DiskSpeed; +import com.yahoo.config.provision.NodeResources.StorageType; +import com.yahoo.config.provision.NodeType; +import com.yahoo.config.provision.RegionName; +import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.Zone; +import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.NodeList; +import com.yahoo.vespa.hosted.provision.node.Agent; +import com.yahoo.vespa.hosted.provision.node.IP; +import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner.HostSharing; +import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner; +import com.yahoo.vespa.hosted.provision.testutils.MockNameResolver; +import org.junit.Test; + +import java.time.Instant; +import java.util.List; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static com.yahoo.config.provision.NodeResources.DiskSpeed.fast; +import static com.yahoo.config.provision.NodeResources.StorageType.local; +import static com.yahoo.config.provision.NodeResources.StorageType.remote; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +/** + * @author freva + * @author bratseth + */ +public class DynamicProvisioningTest { + + private static final Zone zone = new Zone( + Cloud.builder().dynamicProvisioning(true).build(), + SystemName.main, + Environment.prod, + RegionName.from("us-east")); + private final MockNameResolver nameResolver = new MockNameResolver().mockAnyLookup(); + private final HostProvisioner hostProvisioner = mock(HostProvisioner.class); + private final ProvisioningTester tester = new ProvisioningTester.Builder() + .zone(zone).hostProvisioner(hostProvisioner).nameResolver(nameResolver).build(); + + @Test + public void dynamically_provision_with_empty_node_repo() { + assertEquals(0, tester.nodeRepository().nodes().list().size()); + + ApplicationId application1 = ProvisioningTester.applicationId(); + NodeResources resources = new NodeResources(1, 4, 10, 1); + + mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts + prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources); + verify(hostProvisioner).provisionHosts(List.of(100, 101, 102, 103), NodeType.host, resources, application1, + Version.emptyVersion, HostSharing.any); + + // Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes + assertEquals(8, tester.nodeRepository().nodes().list().size()); + assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size()); + assertEquals(Set.of("host-100-1", 
"host-101-1", "host-102-1", "host-103-1"), + tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).hostnames()); + + // Deploy new application + ApplicationId application2 = ProvisioningTester.applicationId(); + prepareAndActivate(application2, clusterSpec("mycluster"), 4, 1, resources); + + // Total of 12 nodes should now be in node-repo, 4 active hosts and 8 active nodes + assertEquals(12, tester.nodeRepository().nodes().list().size()); + assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size()); + assertEquals(Set.of("host-100-1", "host-100-2", "host-101-1", "host-101-2", "host-102-1", "host-102-2", + "host-103-1", "host-103-2"), + tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).hostnames()); + + // Deploy new exclusive application + ApplicationId application3 = ProvisioningTester.applicationId(); + mockHostProvisioner(hostProvisioner, "large", 3, application3); + prepareAndActivate(application3, clusterSpec("mycluster", true), 4, 1, resources); + verify(hostProvisioner).provisionHosts(List.of(104, 105, 106, 107), NodeType.host, resources, application3, + Version.emptyVersion, HostSharing.exclusive); + + // Total of 20 nodes should now be in node-repo, 8 active hosts and 12 active nodes + assertEquals(20, tester.nodeRepository().nodes().list().size()); + assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size()); + assertEquals(12, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + + verifyNoMoreInteractions(hostProvisioner); + } + + @Test + public void in_place_resize_not_allowed_on_exclusive_to_hosts() { + NodeResources initialResources = new NodeResources(2, 8, 10, 1); + NodeResources smallResources = new NodeResources(1, 4, 10, 1); + + ApplicationId application1 = ProvisioningTester.applicationId(); + mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts + prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, initialResources); + + ApplicationId application2 = ProvisioningTester.applicationId(); + mockHostProvisioner(hostProvisioner, "large", 3, application2); // Provision exclusive hosts + prepareAndActivate(application2, clusterSpec("mycluster", true), 4, 1, initialResources); + + // Total of 16 nodes should now be in node-repo, 8 active hosts and 8 active nodes + assertEquals(16, tester.nodeRepository().nodes().list().size()); + assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + + prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, smallResources); + prepareAndActivate(application2, clusterSpec("mycluster", true), 4, 1, smallResources); + + // 24 nodes: 4 shared hosts with 4 app1 nodes + 8 exclusive hosts with 8 nodes of app2, 4 of which are retired + NodeList nodes = tester.nodeRepository().nodes().list(); + assertEquals(24, nodes.size()); + assertEquals(12, nodes.nodeType(NodeType.host).state(Node.State.active).size()); + assertEquals(12, nodes.nodeType(NodeType.tenant).state(Node.State.active).size()); + assertEquals(4, nodes.retired().size()); + } + + @Test + public void avoids_allocating_to_empty_hosts() { + tester.makeReadyHosts(6, new NodeResources(12, 12, 200, 12)); + tester.activateTenantHosts(); + + NodeResources resources = new NodeResources(1, 4, 10, 4); + + ApplicationId application1 = ProvisioningTester.applicationId(); + prepareAndActivate(application1, 
clusterSpec("mycluster"), 4, 1, resources); + + ApplicationId application2 = ProvisioningTester.applicationId(); + prepareAndActivate(application2, clusterSpec("mycluster"), 3, 1, resources); + + ApplicationId application3 = ProvisioningTester.applicationId(); + prepareAndActivate(application3, clusterSpec("mycluster"), 3, 1, resources); + assertEquals(4, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count()); + + ApplicationId application4 = ProvisioningTester.applicationId(); + prepareAndActivate(application4, clusterSpec("mycluster"), 3, 1, resources); + assertEquals(5, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count()); + } + + @Test + public void retires_on_exclusivity_violation() { + ApplicationId application1 = ProvisioningTester.applicationId(); + NodeResources resources = new NodeResources(1, 4, 10, 1); + + mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts + prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources); + NodeList initialNodes = tester.nodeRepository().nodes().list().owner(application1); + assertEquals(4, initialNodes.size()); + + // Redeploy same application with exclusive=true + mockHostProvisioner(hostProvisioner, "large", 3, application1); + prepareAndActivate(application1, clusterSpec("mycluster", true), 4, 1, resources); + assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size()); + assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired()); + + // Redeploy without exclusive again is no-op + prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources); + assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size()); + assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired()); + } + + @Test + public void node_indices_are_unique_even_when_a_node_is_left_in_reserved_state() { + NodeResources resources = new NodeResources(10, 10, 10, 10); + ApplicationId app = ProvisioningTester.applicationId(); + + Function retireNode = node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, Instant.now())); + Function getNodeInGroup = group -> tester.nodeRepository().nodes().list().owner(app).stream() + .filter(node -> node.allocation().get().membership().cluster().group().get().index() == group) + .findAny().orElseThrow(); + + // Allocate 10 hosts + tester.makeReadyNodes(10, resources, NodeType.host, 1); + tester.activateTenantHosts(); + + // Prepare & activate an application with 8 nodes and 2 groups + tester.activate(app, tester.prepare(app, clusterSpec("content"), 8, 2, resources)); + + // Retire a node in group 1 and prepare the application + retireNode.apply(getNodeInGroup.apply(1)); + tester.prepare(app, clusterSpec("content"), 8, 2, resources); + // App is not activated, to leave node '8' in reserved state + + // Retire a node in group 0 and prepare the application + retireNode.apply(getNodeInGroup.apply(0)); + tester.prepare(app, clusterSpec("content"), 8, 2, resources); + + // Verify that nodes have unique indices from 0..9 + var indices = tester.nodeRepository().nodes().list().owner(app).stream() + .map(node -> node.allocation().get().membership().index()) + .collect(Collectors.toSet()); + assertTrue(indices.containsAll(IntStream.range(0, 10).boxed().collect(Collectors.toList()))); + } + + @Test + public void 
test_capacity_is_in_advertised_amounts() { + int memoryTax = 3; + List<Flavor> flavors = List.of(new Flavor("2x", + new NodeResources(2, 17, 200, 10, fast, remote))); + + ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone) + .flavors(flavors) + .hostProvisioner(new MockHostProvisioner(flavors, memoryTax)) + .nameResolver(nameResolver) + .resourcesCalculator(memoryTax, 0) + .build(); + + tester.activateTenantHosts(); + + ApplicationId app1 = ProvisioningTester.applicationId("app1"); + ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); + + // Deploy using real memory amount (17) + try { + tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 17, 40), + resources(4, 1, 2, 17, 40))); + fail("Expected exception"); + } + catch (IllegalArgumentException e) { + // Success + String expected = "No allocation possible within limits"; + assertEquals(expected, e.getMessage().substring(0, expected.length())); + } + + // Deploy using advertised memory amount (17 + 3, see MockResourcesCalculator) + tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 20, 40), + resources(4, 1, 2, 20, 40))); + tester.assertNodes("Allocation specifies memory in the advertised amount", + 2, 1, 2, 20, 40, + app1, cluster1); + + // Redeploy the same + tester.activate(app1, cluster1, Capacity.from(resources(2, 1, 2, 20, 40), + resources(4, 1, 2, 20, 40))); + tester.assertNodes("Allocation specifies memory in the advertised amount", + 2, 1, 2, 20, 40, + app1, cluster1); + } + + @Test + public void test_changing_limits() { + int memoryTax = 3; + List<Flavor> flavors = List.of(new Flavor("1x", new NodeResources(1, 10 - memoryTax, 100, 0.1, fast, remote)), + new Flavor("2x", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, remote)), + new Flavor("4x", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote))); + + ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone) + .flavors(flavors) + .hostProvisioner(new MockHostProvisioner(flavors, memoryTax)) + .nameResolver(nameResolver) + .resourcesCalculator(memoryTax, 0) + .build(); + + tester.activateTenantHosts(); + + ApplicationId app1 = ProvisioningTester.applicationId("app1"); + ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); + + // Limits where each number is within flavor limits but which don't contain any flavor lead to an error + try { + tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 3.8, 20, 40), + resources(10, 5, 5, 25, 50))); + fail("Expected exception"); + } + catch (IllegalArgumentException e) { + // success + } + + // Initial deployment + tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 20), + resources(6, 3, 4, 20, 40))); + tester.assertNodes("Initial allocation at first actual flavor above min (except for disk)", + 4, 2, 1, 10, 20, + app1, cluster1); + + + // Move window above current allocation + tester.activate(app1, cluster1, Capacity.from(resources(8, 4, 3.8, 20, 40), + resources(10, 5, 5, 45, 50))); + tester.assertNodes("New allocation at new smallest flavor above limits", + 8, 4, 4, 40, 40, + app1, cluster1); + + // Move window below current allocation + tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 20), + resources(6, 3, 3, 25, 25))); + tester.assertNodes("New allocation at new max", + 6, 2, 2, 20, 25, + app1, cluster1); + + // Widening window does not change allocation + tester.activate(app1, 
cluster1, Capacity.from(resources(2, 1, 1, 5, 15), + resources(8, 4, 4, 20, 30))); + tester.assertNodes("No change", + 6, 2, 2, 20, 25, + app1, cluster1); + + // Force 1 more group: reducing to 2 nodes per group to preserve the node count is rejected + // since it would reduce total group memory from 60 to 40. + tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 1, 5, 10), + resources(9, 3, 5, 20, 15))); + tester.assertNodes("Group size is preserved", + 9, 3, 2, 20, 15, + app1, cluster1); + + // Stop specifying node resources + tester.activate(app1, cluster1, Capacity.from(new ClusterResources(6, 3, NodeResources.unspecified()), + new ClusterResources(9, 3, NodeResources.unspecified()))); + tester.assertNodes("Existing allocation is preserved", + 9, 3, 2, 20, 15, + app1, cluster1); + } + + @Test + public void test_changing_storage_type() { + int memoryTax = 3; + List<Flavor> flavors = List.of(new Flavor("2x", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, remote)), + new Flavor("2xl", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, local)), + new Flavor("4x", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote)), + new Flavor("4xl", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, local))); + + ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone) + .flavors(flavors) + .hostProvisioner(new MockHostProvisioner(flavors, memoryTax)) + .nameResolver(nameResolver) + .resourcesCalculator(memoryTax, 0) + .build(); + + tester.activateTenantHosts(); + + ApplicationId app1 = ProvisioningTester.applicationId("app1"); + ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); + + tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200, fast, local), + resources(6, 3, 3, 25, 400, fast, local))); + tester.assertNodes("Initial deployment: Local disk", + 4, 2, 2, 20, 200, fast, local, + app1, cluster1); + + tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200, fast, remote), + resources(6, 3, 3, 25, 400, fast, remote))); + tester.assertNodes("Change from local to remote disk", + 4, 2, 2, 20, 200, fast, remote, + app1, cluster1); + } + + @Test + public void test_any_disk_prefers_remote() { + int memoryTax = 3; + int localDiskTax = 55; + // Disk tax is not included in flavor resources but memory tax is + List<Flavor> flavors = List.of(new Flavor("2x", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, local)), + new Flavor("4x", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, local)), + new Flavor("2xl", new NodeResources(2, 20 - memoryTax, 200, 0.1, fast, remote)), + new Flavor("4xl", new NodeResources(4, 40 - memoryTax, 400, 0.1, fast, remote))); + + ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone) + .flavors(flavors) + .hostProvisioner(new MockHostProvisioner(flavors, memoryTax)) + .nameResolver(nameResolver) + .resourcesCalculator(memoryTax, localDiskTax) + .build(); + + tester.activateTenantHosts(); + + ApplicationId app1 = ProvisioningTester.applicationId("app1"); + ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); + + tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 2, 10, 200, fast, StorageType.any), + resources(6, 3, 3, 25, 400, fast, StorageType.any))); + tester.assertNodes("'any' selects a flavor with remote storage since it produces higher fulfilment", + 4, 2, 2, 20, 200, fast, remote, + app1, cluster1); + } + + private void 
prepareAndActivate(ApplicationId application, ClusterSpec clusterSpec, int nodes, int groups, NodeResources resources) { + List prepared = tester.prepare(application, clusterSpec, nodes, groups, resources); + NodeList provisionedHosts = tester.nodeRepository().nodes().list(Node.State.provisioned).nodeType(NodeType.host); + if (!provisionedHosts.isEmpty()) { + tester.nodeRepository().nodes().setReady(provisionedHosts.asList(), Agent.system, DynamicProvisioningTest.class.getSimpleName()); + tester.activateTenantHosts(); + } + tester.activate(application, prepared); + } + + private static ClusterSpec clusterSpec(String clusterId) { + return clusterSpec(clusterId, false); + } + + private static ClusterSpec clusterSpec(String clusterId, boolean exclusive) { + return ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from(clusterId)).vespaVersion("6.42").exclusive(exclusive).build(); + } + + private static ClusterResources resources(int nodes, int groups, double vcpu, double memory, double disk) { + return new ClusterResources(nodes, groups, new NodeResources(vcpu, memory, disk, 0.1, + DiskSpeed.getDefault(), StorageType.getDefault())); + } + + private static ClusterResources resources(int nodes, int groups, double vcpu, double memory, double disk, + DiskSpeed diskSpeed, StorageType storageType) { + return new ClusterResources(nodes, groups, new NodeResources(vcpu, memory, disk, 0.1, diskSpeed, storageType)); + } + + @SuppressWarnings("unchecked") + private void mockHostProvisioner(HostProvisioner hostProvisioner, String hostFlavorName, int numIps, ApplicationId exclusiveTo) { + doAnswer(invocation -> { + Flavor hostFlavor = tester.nodeRepository().flavors().getFlavorOrThrow(hostFlavorName); + List provisionIndexes = (List) invocation.getArguments()[0]; + NodeResources nodeResources = (NodeResources) invocation.getArguments()[2]; + + return provisionIndexes.stream() + .map(hostIndex -> { + String hostHostname = "host-" + hostIndex; + String hostIp = "::" + hostIndex + ":0"; + nameResolver.addRecord(hostHostname, hostIp); + Set pool = IntStream.range(1, numIps + 1).mapToObj(i -> { + String ip = "::" + hostIndex + ":" + i; + nameResolver.addRecord(hostHostname + "-" + i, ip); + return ip; + }).collect(Collectors.toSet()); + + Node parent = Node.create(hostHostname, new IP.Config(Set.of(hostIp), pool), hostHostname, hostFlavor, NodeType.host) + .exclusiveTo(exclusiveTo).build(); + Node child = Node.reserve(Set.of("::" + hostIndex + ":1"), hostHostname + "-1", hostHostname, nodeResources, NodeType.tenant).build(); + ProvisionedHost provisionedHost = mock(ProvisionedHost.class); + when(provisionedHost.generateHost()).thenReturn(parent); + when(provisionedHost.generateNode()).thenReturn(child); + return provisionedHost; + }) + .collect(Collectors.toList()); + }).when(hostProvisioner).provisionHosts(any(), any(), any(), any(), any(), any()); + } + +} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningCompleteHostCalculatorTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningCompleteHostCalculatorTest.java new file mode 100644 index 00000000000..5f2d567bd24 --- /dev/null +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningCompleteHostCalculatorTest.java @@ -0,0 +1,146 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
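test_capacity_is_in_advertised_amounts above relies on the split between real and advertised resources: with a memory tax of 3 GB, a flavor with 17 GB of real memory is advertised as 20 GB, so capacity requests must specify 20, not 17. A worked sketch of that arithmetic follows; the class and method names are hypothetical, chosen only to make the example self-contained.

    // Sketch of the advertised-vs-real memory accounting in the test above,
    // assuming a flat 3 GB memory tax as in the test. Illustration only.
    public class AdvertisedMemorySketch {
        static final double MEMORY_TAX_GB = 3; // matches 'memoryTax' in the tests

        static double advertised(double realGb) { return realGb + MEMORY_TAX_GB; }
        static double real(double advertisedGb) { return advertisedGb - MEMORY_TAX_GB; }

        public static void main(String[] args) {
            // The "2x" flavor has 17 GB of real memory, so it is advertised as 20 GB:
            System.out.println(advertised(17)); // 20.0 -> the amount capacity requests must use
            System.out.println(real(20));       // 17.0 -> the amount the node actually gets
        }
    }
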
+package com.yahoo.vespa.hosted.provision.provisioning; + +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.Capacity; +import com.yahoo.config.provision.ClusterResources; +import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.Environment; +import com.yahoo.config.provision.Flavor; +import com.yahoo.config.provision.NodeResources; +import com.yahoo.config.provision.NodeType; +import com.yahoo.config.provision.RegionName; +import com.yahoo.config.provision.Zone; +import com.yahoo.vespa.hosted.provision.NodeRepository; +import com.yahoo.vespa.hosted.provision.Nodelike; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; + +/** + * @author bratseth + */ +public class VirtualNodeProvisioningCompleteHostCalculatorTest { + + @Test + public void changing_to_different_range_preserves_allocation() { + Flavor hostFlavor = new Flavor(new NodeResources(40, 40, 1000, 4)); + ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))) + .resourcesCalculator(new CompleteResourcesCalculator(hostFlavor)) + .flavors(List.of(hostFlavor)) + .build(); + tester.makeReadyHosts(9, hostFlavor.resources()).activateTenantHosts(); + + ApplicationId app1 = ProvisioningTester.applicationId("app1"); + ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build(); + + var initialResources = new NodeResources(20, 16, 50, 1); + tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, initialResources), + new ClusterResources(2, 1, initialResources))); + tester.assertNodes("Initial allocation", + 2, 1, 20, 16, 50, 1.0, + app1, cluster1); + + var newMinResources = new NodeResources( 5, 4, 11, 1); + var newMaxResources = new NodeResources(20, 10, 30, 1); + tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources), + new ClusterResources(7, 1, newMaxResources))); + tester.assertNodes("New allocation preserves total resources", + 7, 1, 7, 4.6, 14.3, 1.0, + app1, cluster1); + + tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources), + new ClusterResources(7, 1, newMaxResources))); + tester.assertNodes("Redeploying the same ranges does not cause changes", + 7, 1, 7, 4.6, 14.3, 1.0, + app1, cluster1); + } + + @Test + public void testResourcesCalculator() { + Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 1000, 4)); + var calculator = new CompleteResourcesCalculator(hostFlavor); + var originalReal = new NodeResources(0.7, 6.0, 12.9, 1.0); + var realToRequest = calculator.realToRequest(originalReal, false); + var requestToReal = calculator.requestToReal(realToRequest, false); + var realResourcesOf = calculator.realResourcesOf(realToRequest); + assertEquals(originalReal, requestToReal); + assertEquals(originalReal, realResourcesOf); + } + + private static class CompleteResourcesCalculator implements HostResourcesCalculator { + + private final Flavor hostFlavor; // Has the real resources + private final double memoryOverhead = 1; + private final double diskOverhead = 100; + + public CompleteResourcesCalculator(Flavor hostFlavor) { + this.hostFlavor = hostFlavor; + } + + @Override + public NodeResources realResourcesOf(Nodelike node, NodeRepository nodeRepository, boolean exclusive) { + if (node.parentHostname().isEmpty()) return node.resources(); // hosts use configured flavors + return 
realResourcesOf(node.resources()); + } + + NodeResources realResourcesOf(NodeResources advertisedResources) { + return advertisedResources.withMemoryGb(advertisedResources.memoryGb() - + memoryOverhead(advertisedResourcesOf(hostFlavor).memoryGb(), advertisedResources, false)) + .withDiskGb(advertisedResources.diskGb() - + diskOverhead(advertisedResourcesOf(hostFlavor).diskGb(), advertisedResources, false)); + } + + @Override + public NodeResources requestToReal(NodeResources advertisedResources, boolean exclusive) { + double memoryOverhead = memoryOverhead(advertisedResourcesOf(hostFlavor).memoryGb(), advertisedResources, false); + double diskOverhead = diskOverhead(advertisedResourcesOf(hostFlavor).diskGb(), advertisedResources, false); + return advertisedResources.withMemoryGb(advertisedResources.memoryGb() - memoryOverhead) + .withDiskGb(advertisedResources.diskGb() - diskOverhead); + } + + @Override + public NodeResources advertisedResourcesOf(Flavor flavor) { + if ( ! flavor.equals(hostFlavor)) return flavor.resources(); // Node 'flavors' just wrap the advertised resources + return hostFlavor.resources().withMemoryGb(hostFlavor.resources().memoryGb() + memoryOverhead) + .withDiskGb(hostFlavor.resources().diskGb() + diskOverhead); + } + + @Override + public NodeResources realToRequest(NodeResources realResources, boolean exclusive) { + double memoryOverhead = memoryOverhead(advertisedResourcesOf(hostFlavor).memoryGb(), realResources, true); + double diskOverhead = diskOverhead(advertisedResourcesOf(hostFlavor).diskGb(), realResources, true); + return realResources.withMemoryGb(realResources.memoryGb() + memoryOverhead) + .withDiskGb(realResources.diskGb() + diskOverhead); + } + + @Override + public long reservedDiskSpaceInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; } + + /** + * Returns the memory overhead resulting if the given advertised resources are placed on the given node + * + * @param real true if the given resources are in real values, false if they are in advertised + */ + private double memoryOverhead(double hostAdvertisedMemoryGb, NodeResources resources, boolean real) { + double memoryShare = resources.memoryGb() / + ( hostAdvertisedMemoryGb - (real ? memoryOverhead : 0)); + return memoryOverhead * memoryShare; + } + + /** + * Returns the disk overhead resulting if the given advertised resources are placed on the given node + * + * @param real true if the resources are in real values, false if they are in advertised + */ + private double diskOverhead(double hostAdvertisedDiskGb, NodeResources resources, boolean real) { + double diskShare = resources.diskGb() / + ( hostAdvertisedDiskGb - (real ? 
diskOverhead : 0) ); + return diskOverhead * diskShare; + } + + } + +} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java index 01b7930ffa6..18fcb56d87f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java @@ -235,7 +235,7 @@ public class VirtualNodeProvisioningTest { } @Test - public void docker_application_deployment() { + public void application_deployment() { ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build(); tester.makeReadyHosts(10, resources2).activateTenantHosts(); ApplicationId application1 = ProvisioningTester.applicationId("app1"); @@ -335,7 +335,7 @@ public class VirtualNodeProvisioningTest { /** Exclusive app first, then non-exclusive: Should give the same result as below */ @Test - public void docker_application_deployment_with_exclusive_app_first() { + public void application_deployment_with_exclusive_app_first() { NodeResources hostResources = new NodeResources(10, 40, 1000, 10); NodeResources nodeResources = new NodeResources(1, 4, 100, 1); ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build(); @@ -354,7 +354,7 @@ public class VirtualNodeProvisioningTest { /** Non-exclusive app first, then an exclusive: Should give the same result as above */ @Test - public void docker_application_deployment_with_exclusive_app_last() { + public void application_deployment_with_exclusive_app_last() { NodeResources hostResources = new NodeResources(10, 40, 1000, 10); NodeResources nodeResources = new NodeResources(1, 4, 100, 1); ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build(); @@ -373,21 +373,11 @@ public class VirtualNodeProvisioningTest { /** Test making an application exclusive */ @Test - public void docker_application_deployment_change_to_exclusive_and_back() { + public void application_deployment_change_to_exclusive_and_back() { NodeResources hostResources = new NodeResources(10, 40, 1000, 10); NodeResources nodeResources = new NodeResources(1, 4, 100, 1); ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build(); tester.makeReadyHosts(4, hostResources).activateTenantHosts(); - /* - for (int i = 1; i <= 4; i++) - tester.makeReadyVirtualDockerNode(i, dockerResources, "host1"); - for (int i = 5; i <= 8; i++) - tester.makeReadyVirtualDockerNode(i, dockerResources, "host2"); - for (int i = 9; i <= 12; i++) - tester.makeReadyVirtualDockerNode(i, dockerResources, "host3"); - for (int i = 13; i <= 16; i++) - tester.makeReadyVirtualDockerNode(i, dockerResources, "host4"); - */ ApplicationId application1 = ProvisioningTester.applicationId(); prepareAndActivate(application1, 2, false, nodeResources, tester); @@ -407,7 +397,7 @@ public class VirtualNodeProvisioningTest { /** Non-exclusive app first, then an exclusive: Should give the same result as above */ @Test - public void docker_application_deployment_with_exclusive_app_causing_allocation_failure() { + public void 
application_deployment_with_exclusive_app_causing_allocation_failure() { ApplicationId application1 = ApplicationId.from("tenant1", "app1", "default"); ApplicationId application2 = ApplicationId.from("tenant2", "app2", "default"); ApplicationId application3 = ApplicationId.from("tenant1", "app3", "default"); @@ -444,8 +434,8 @@ public class VirtualNodeProvisioningTest { ProvisioningTester tester = new ProvisioningTester.Builder() .zone(new Zone(Environment.prod, RegionName.from("us-east-1"))).build(); ApplicationId application1 = ProvisioningTester.applicationId("app1"); - tester.makeReadyChildren(1, resources2, "dockerHost1"); - tester.makeReadyChildren(1, resources2, "dockerHost2"); + tester.makeReadyChildren(1, resources2, "host1"); + tester.makeReadyChildren(1, resources2, "host2"); tester.prepare(application1, ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion("6.42").build(),
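CompleteResourcesCalculator above charges each node a slice of a fixed per-host overhead (1 GB of memory, 100 GB of disk) proportional to the node's share of the host's advertised resources. A worked sketch of the memory side of that formula, using the same 1 GB constant; the class and method names are hypothetical, and this mirrors only the advertised (real == false) branch of memoryOverhead above.

    // Sketch of the proportional overhead model in CompleteResourcesCalculator.
    public class OverheadShareSketch {
        static final double HOST_MEMORY_OVERHEAD_GB = 1; // matches 'memoryOverhead' above

        // Overhead charged to a node, from its share of the host's advertised memory.
        static double memoryOverhead(double hostAdvertisedGb, double nodeAdvertisedGb) {
            return HOST_MEMORY_OVERHEAD_GB * (nodeAdvertisedGb / hostAdvertisedGb);
        }

        public static void main(String[] args) {
            // A host flavor with 40 GB of real memory is advertised as 41 GB (real + overhead).
            double hostAdvertised = 40 + HOST_MEMORY_OVERHEAD_GB;
            // A node advertised at 20.5 GB takes half the host, so it carries
            // half of the 1 GB overhead and ends up with 20.0 GB of real memory.
            double overhead = memoryOverhead(hostAdvertised, 20.5);
            System.out.println(overhead);        // 0.5
            System.out.println(20.5 - overhead); // 20.0
        }
    }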