author    Valerij Fredriksen <freva@users.noreply.github.com>  2021-06-24 11:49:10 +0200
committer GitHub <noreply@github.com>  2021-06-24 11:49:10 +0200
commit    d25995ae103887c9509cb8622429a50b9138a7f7 (patch)
tree      2b895fa9dfced8bb367a86429df042da477fe4a1 /node-repository
parent    64885b882fc37a326e7d844ea9a02cc6e3efbc17 (diff)
parent    c64e1429d48305c789546a7c1057989a8f772f94 (diff)
Merge pull request #18391 from vespa-engine/mpolden/cleanup
Less Docker and thin pool
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/EmptyProvisionServiceProvider.java  2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostResourcesCalculator.java  10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java  6
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java  475
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java (renamed from node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java)  30
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java (renamed from node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java)  4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningCompleteHostCalculatorTest.java (renamed from node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java)  4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java  600
11 files changed, 541 insertions, 596 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/EmptyProvisionServiceProvider.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/EmptyProvisionServiceProvider.java
index 49e1dfa8c8f..599f6b8bcb0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/EmptyProvisionServiceProvider.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/EmptyProvisionServiceProvider.java
@@ -47,7 +47,7 @@ public class EmptyProvisionServiceProvider implements ProvisionServiceProvider {
public NodeResources realToRequest(NodeResources resources, boolean exclusive) { return resources; }
@Override
- public long thinPoolSizeInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
+ public long reservedDiskSpaceInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostResourcesCalculator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostResourcesCalculator.java
index 70c4170f0f4..fc7023be743 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostResourcesCalculator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostResourcesCalculator.java
@@ -9,8 +9,9 @@ import com.yahoo.vespa.hosted.provision.Nodelike;
/**
* Some cloud providers advertise that a certain amount of resources are available in a flavor
- * but then actually provide somewhat less. This service provides the mapping between real and advertised
- * resources for all clouds.
+ * but then actually provide less.
+ *
+ * This class converts between real and advertised resources for all clouds.
*
* @author freva
* @author bratseth
@@ -36,8 +37,9 @@ public interface HostResourcesCalculator {
NodeResources realToRequest(NodeResources realResources, boolean exclusive);
/**
- * Returns the needed thin pool size in base2 Gb.
+ * Returns the disk space to reserve in base2 GB. This space is reserved for use by the host, e.g. for storing
+ * container images.
*/
- long thinPoolSizeInBase2Gb(NodeType nodeType, boolean sharedHost);
+ long reservedDiskSpaceInBase2Gb(NodeType nodeType, boolean sharedHost);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index d23b3c782c8..59556418fb5 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -76,13 +76,13 @@ public class NodeResourceLimits {
}
private double minAdvertisedDiskGb(NodeResources requested, boolean exclusive) {
- return minRealDiskGb() + getThinPoolSize(requested.storageType(), exclusive);
+ return minRealDiskGb() + reservedDiskSpaceGb(requested.storageType(), exclusive);
}
// Note: Assumes node type 'host'
- private long getThinPoolSize(NodeResources.StorageType storageType, boolean exclusive) {
+ private long reservedDiskSpaceGb(NodeResources.StorageType storageType, boolean exclusive) {
if (storageType == NodeResources.StorageType.local && zone().getCloud().dynamicProvisioning())
- return nodeRepository.resourcesCalculator().thinPoolSizeInBase2Gb(NodeType.host, ! exclusive);
+ return nodeRepository.resourcesCalculator().reservedDiskSpaceInBase2Gb(NodeType.host, ! exclusive);
else
return 4;
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 41a399c5e2f..240422df8b6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -732,7 +732,7 @@ public class AutoscalingTest {
}
@Override
- public long thinPoolSizeInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
+ public long reservedDiskSpaceInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index f96679b7195..7a1c6152d03 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -357,7 +357,7 @@ class AutoscalingTester {
}
@Override
- public long thinPoolSizeInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
+ public long reservedDiskSpaceInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
deleted file mode 100644
index fd8cf9ea00f..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ /dev/null
@@ -1,475 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.provisioning;
-
-import com.yahoo.component.Version;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ApplicationName;
-import com.yahoo.config.provision.ApplicationTransaction;
-import com.yahoo.config.provision.Capacity;
-import com.yahoo.config.provision.ClusterResources;
-import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.HostSpec;
-import com.yahoo.config.provision.InstanceName;
-import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.OutOfCapacityException;
-import com.yahoo.config.provision.ProvisionLock;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.Zone;
-import com.yahoo.transaction.NestedTransaction;
-import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.NodeList;
-import com.yahoo.vespa.hosted.provision.node.Agent;
-import org.junit.Test;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Tests deployment to docker images which share the same physical host.
- *
- * @author bratseth
- */
-public class DockerProvisioningTest {
-
- private static final NodeResources dockerResources = new NodeResources(1, 4, 100, 1,
- NodeResources.DiskSpeed.fast, NodeResources.StorageType.local);
-
- @Test
- public void docker_application_deployment() {
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- tester.makeReadyHosts(10, dockerResources).activateTenantHosts();
- ApplicationId application1 = ProvisioningTester.applicationId("app1");
-
- Version wantedVespaVersion = Version.fromString("6.39");
- int nodeCount = 7;
- List<HostSpec> hosts = tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
- nodeCount, 1, dockerResources);
- tester.activate(application1, new HashSet<>(hosts));
-
- NodeList nodes = tester.getNodes(application1, Node.State.active);
- assertEquals(nodeCount, nodes.size());
- assertEquals(dockerResources, nodes.asList().get(0).resources());
-
- // Upgrade Vespa version on nodes
- Version upgradedWantedVespaVersion = Version.fromString("6.40");
- List<HostSpec> upgradedHosts = tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion(upgradedWantedVespaVersion).build(),
- nodeCount, 1, dockerResources);
- tester.activate(application1, new HashSet<>(upgradedHosts));
- NodeList upgradedNodes = tester.getNodes(application1, Node.State.active);
- assertEquals(nodeCount, upgradedNodes.size());
- assertEquals(dockerResources, upgradedNodes.asList().get(0).resources());
- assertEquals(hosts, upgradedHosts);
- }
-
- @Test
- public void refuses_to_activate_on_non_active_host() {
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
-
- List<Node> parents = tester.makeReadyNodes(10, new NodeResources(2, 4, 20, 2), NodeType.host, 1);
- for (Node parent : parents)
- tester.makeReadyChildren(1, dockerResources, parent.hostname());
-
- ApplicationId application1 = ProvisioningTester.applicationId();
- Version wantedVespaVersion = Version.fromString("6.39");
- int nodeCount = 7;
- try {
- List<HostSpec> nodes = tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
- nodeCount, 1, dockerResources);
- fail("Expected the allocation to fail due to parent hosts not being active yet");
- } catch (OutOfCapacityException expected) { }
-
- // Activate the hosts, thereby allocating the parents
- tester.activateTenantHosts();
-
- // Try allocating tenants again
- List<HostSpec> nodes = tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
- nodeCount, 1, dockerResources);
- tester.activate(application1, new HashSet<>(nodes));
-
- NodeList activeNodes = tester.getNodes(application1, Node.State.active);
- assertEquals(nodeCount, activeNodes.size());
- }
-
- @Test
- public void reservations_are_respected() {
- NodeResources resources = new NodeResources(10, 10, 100, 10);
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- TenantName tenant1 = TenantName.from("tenant1");
- TenantName tenant2 = TenantName.from("tenant2");
- ApplicationId application1_1 = ApplicationId.from(tenant1, ApplicationName.from("application1"), InstanceName.defaultName());
- ApplicationId application2_1 = ApplicationId.from(tenant2, ApplicationName.from("application1"), InstanceName.defaultName());
- ApplicationId application2_2 = ApplicationId.from(tenant2, ApplicationName.from("application2"), InstanceName.defaultName());
-
- tester.makeReadyNodes(10, resources, Optional.of(tenant1), NodeType.host, 1);
- tester.makeReadyNodes(10, resources, Optional.empty(), NodeType.host, 1);
- tester.activateTenantHosts();
-
- Version wantedVespaVersion = Version.fromString("6.39");
- List<HostSpec> nodes = tester.prepare(application2_1,
- ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
- 6, 1, resources);
- assertHostSpecParentReservation(nodes, Optional.empty(), tester); // We do not get nodes on hosts reserved to tenant1
- tester.activate(application2_1, nodes);
-
- try {
- tester.prepare(application2_2,
- ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
- 5, 1, resources);
- fail("Expected exception");
- }
- catch (OutOfCapacityException e) {
- // Success: Not enough nonreserved hosts left
- }
-
- nodes = tester.prepare(application1_1,
- ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
- 10, 1, resources);
- assertHostSpecParentReservation(nodes, Optional.of(tenant1), tester);
- tester.activate(application1_1, nodes);
- assertNodeParentReservation(tester.getNodes(application1_1).asList(), Optional.empty(), tester); // Reservation is cleared after activation
- }
-
- /** Exclusive app first, then non-exclusive: Should give the same result as below */
- @Test
- public void docker_application_deployment_with_exclusive_app_first() {
- NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
- NodeResources nodeResources = new NodeResources(1, 4, 100, 1);
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- tester.makeReadyHosts(4, hostResources).activateTenantHosts();
- ApplicationId application1 = ProvisioningTester.applicationId("app1");
- prepareAndActivate(application1, 2, true, nodeResources, tester);
- assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"),
- hostsOf(tester.getNodes(application1, Node.State.active)));
-
- ApplicationId application2 = ProvisioningTester.applicationId("app2");
- prepareAndActivate(application2, 2, false, nodeResources, tester);
- assertEquals("Application is assigned to separate hosts",
- Set.of("host-3.yahoo.com", "host-4.yahoo.com"),
- hostsOf(tester.getNodes(application2, Node.State.active)));
- }
-
- /** Non-exclusive app first, then an exclusive: Should give the same result as above */
- @Test
- public void docker_application_deployment_with_exclusive_app_last() {
- NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
- NodeResources nodeResources = new NodeResources(1, 4, 100, 1);
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- tester.makeReadyHosts(4, hostResources).activateTenantHosts();
- ApplicationId application1 = ProvisioningTester.applicationId("app1");
- prepareAndActivate(application1, 2, false, nodeResources, tester);
- assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"),
- hostsOf(tester.getNodes(application1, Node.State.active)));
-
- ApplicationId application2 = ProvisioningTester.applicationId("app2");
- prepareAndActivate(application2, 2, true, nodeResources, tester);
- assertEquals("Application is assigned to separate hosts",
- Set.of("host-3.yahoo.com", "host-4.yahoo.com"),
- hostsOf(tester.getNodes(application2, Node.State.active)));
- }
-
- /** Test making an application exclusive */
- @Test
- public void docker_application_deployment_change_to_exclusive_and_back() {
- NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
- NodeResources nodeResources = new NodeResources(1, 4, 100, 1);
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- tester.makeReadyHosts(4, hostResources).activateTenantHosts();
- /*
- for (int i = 1; i <= 4; i++)
- tester.makeReadyVirtualDockerNode(i, dockerResources, "host1");
- for (int i = 5; i <= 8; i++)
- tester.makeReadyVirtualDockerNode(i, dockerResources, "host2");
- for (int i = 9; i <= 12; i++)
- tester.makeReadyVirtualDockerNode(i, dockerResources, "host3");
- for (int i = 13; i <= 16; i++)
- tester.makeReadyVirtualDockerNode(i, dockerResources, "host4");
- */
-
- ApplicationId application1 = ProvisioningTester.applicationId();
- prepareAndActivate(application1, 2, false, nodeResources, tester);
- for (Node node : tester.getNodes(application1, Node.State.active))
- assertFalse(node.allocation().get().membership().cluster().isExclusive());
-
- prepareAndActivate(application1, 2, true, nodeResources, tester);
- assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"), hostsOf(tester.getNodes(application1, Node.State.active)));
- for (Node node : tester.getNodes(application1, Node.State.active))
- assertTrue(node.allocation().get().membership().cluster().isExclusive());
-
- prepareAndActivate(application1, 2, false, nodeResources, tester);
- assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"), hostsOf(tester.getNodes(application1, Node.State.active)));
- for (Node node : tester.getNodes(application1, Node.State.active))
- assertFalse(node.allocation().get().membership().cluster().isExclusive());
- }
-
- /** Non-exclusive app first, then an exclusive: Should give the same result as above */
- @Test
- public void docker_application_deployment_with_exclusive_app_causing_allocation_failure() {
- ApplicationId application1 = ApplicationId.from("tenant1", "app1", "default");
- ApplicationId application2 = ApplicationId.from("tenant2", "app2", "default");
- ApplicationId application3 = ApplicationId.from("tenant1", "app3", "default");
- NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
- NodeResources nodeResources = new NodeResources(1, 4, 100, 1);
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- tester.makeReadyHosts(4, hostResources).activateTenantHosts();
-
- prepareAndActivate(application1, 2, true, nodeResources, tester);
- assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"),
- hostsOf(tester.getNodes(application1, Node.State.active)));
-
- try {
- prepareAndActivate(application2, 3, false, nodeResources, tester);
- fail("Expected allocation failure");
- }
- catch (Exception e) {
- assertEquals("No room for 3 nodes as 2 of 4 hosts are exclusive",
- "Could not satisfy request for 3 nodes with " +
- "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps] " +
- "in tenant2.app2 container cluster 'myContainer' 6.39: " +
- "Out of capacity on group 0: " +
- "Not enough nodes available due to host exclusivity constraints",
- e.getMessage());
- }
-
- // Adding 3 nodes of another application for the same tenant works
- prepareAndActivate(application3, 2, true, nodeResources, tester);
- }
-
- @Test
- public void storage_type_must_match() {
- try {
- ProvisioningTester tester = new ProvisioningTester.Builder()
- .zone(new Zone(Environment.prod, RegionName.from("us-east-1"))).build();
- ApplicationId application1 = ProvisioningTester.applicationId("app1");
- tester.makeReadyChildren(1, dockerResources, "dockerHost1");
- tester.makeReadyChildren(1, dockerResources, "dockerHost2");
-
- tester.prepare(application1,
- ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion("6.42").build(),
- 2, 1,
- dockerResources.with(NodeResources.StorageType.remote));
- }
- catch (OutOfCapacityException e) {
- assertEquals("Could not satisfy request for 2 nodes with " +
- "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote] " +
- "in tenant.app1 content cluster 'myContent'" +
- " 6.42: Out of capacity on group 0",
- e.getMessage());
- }
- }
-
- @Test
- public void initial_allocation_is_within_limits() {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
- .resourcesCalculator(3, 0)
- .flavors(List.of(hostFlavor))
- .build();
- tester.makeReadyHosts(2, hostFlavor.resources()).activateTenantHosts();
-
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.container, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
-
- var resources = new NodeResources(1, 8, 10, 1);
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, resources),
- new ClusterResources(4, 1, resources)));
- tester.assertNodes("Initial allocation at min with default resources",
- 2, 1, 1, 8, 10, 1.0,
- app1, cluster1);
- }
-
- @Test
- public void changing_to_different_range_preserves_allocation() {
- Flavor hostFlavor = new Flavor(new NodeResources(40, 40, 100, 4));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
- .resourcesCalculator(3, 0)
- .flavors(List.of(hostFlavor))
- .build();
- tester.makeReadyHosts(9, hostFlavor.resources()).activateTenantHosts();
-
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
-
- var initialResources = new NodeResources(20, 16, 50, 1);
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, initialResources),
- new ClusterResources(2, 1, initialResources)));
- tester.assertNodes("Initial allocation",
- 2, 1, 20, 16, 50, 1.0,
- app1, cluster1);
-
- var newMinResources = new NodeResources( 5, 6, 11, 1);
- var newMaxResources = new NodeResources(20, 10, 30, 1);
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
- new ClusterResources(7, 1, newMaxResources)));
- tester.assertNodes("New allocation preserves total resources",
- 7, 1, 7, 6.7, 14.3, 1.0,
- app1, cluster1);
-
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
- new ClusterResources(7, 1, newMaxResources)));
- tester.assertNodes("Redeploying does not cause changes",
- 7, 1, 7, 6.7, 14.3, 1.0,
- app1, cluster1);
- }
-
- @Test
- public void too_few_real_resources_causes_failure() {
- try {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
- .resourcesCalculator(3, 0)
- .flavors(List.of(hostFlavor))
- .build();
- tester.makeReadyHosts(2, hostFlavor.resources()).activateTenantHosts();
-
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content,
- new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
-
- // 5 Gb requested memory becomes 5-3=2 Gb real memory, which is an illegally small amount
- var resources = new NodeResources(1, 5, 10, 1);
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, resources),
- new ClusterResources(4, 1, resources)));
- }
- catch (IllegalArgumentException e) {
- assertEquals("No allocation possible within limits: " +
- "from 2 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk 10.0 Gb, bandwidth: 1.0 Gbps] " +
- "to 4 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk 10.0 Gb, bandwidth: 1.0 Gbps]",
- e.getMessage());
- }
- }
-
- @Test
- public void exclusive_resources_not_matching_host_causes_failure() {
- try {
- Flavor hostFlavor1 = new Flavor(new NodeResources(20, 40, 100, 4));
- Flavor hostFlavor2 = new Flavor(new NodeResources(30, 40, 100, 4));
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
- .flavors(List.of(hostFlavor1, hostFlavor2))
- .build();
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content,
- new ClusterSpec.Id("cluster1")).exclusive(true).vespaVersion("7").build();
-
- var resources = new NodeResources(20, 37, 100, 1);
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, resources),
- new ClusterResources(4, 1, resources)));
- }
- catch (IllegalArgumentException e) {
- assertEquals("No allocation possible within limits: " +
- "from 2 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps] " +
- "to 4 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps]. " +
- "Nearest allowed node resources: [vcpu: 20.0, memory: 40.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote]",
- e.getMessage());
- }
- }
-
- @Test
- public void test_startup_redeployment_with_inactive_nodes() {
- NodeResources r = new NodeResources(20, 40, 100, 4);
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
- .flavors(List.of(new Flavor(r)))
- .build();
- tester.makeReadyHosts(5, r).activateTenantHosts();
-
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
-
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(5, 1, r)));
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
-
- var tx = new ApplicationTransaction(new ProvisionLock(app1, tester.nodeRepository().nodes().lock(app1)), new NestedTransaction());
- tester.nodeRepository().nodes().deactivate(tester.nodeRepository().nodes().list(Node.State.active).owner(app1).retired().asList(), tx);
- tx.nested().commit();
-
- assertEquals(2, tester.getNodes(app1, Node.State.active).size());
- assertEquals(3, tester.getNodes(app1, Node.State.inactive).size());
-
- // Startup deployment: Not failable
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r), false, false));
- // ... causes no change
- assertEquals(2, tester.getNodes(app1, Node.State.active).size());
- assertEquals(3, tester.getNodes(app1, Node.State.inactive).size());
- }
-
- @Test
- public void inactive_container_nodes_are_not_reused() {
- assertInactiveReuse(ClusterSpec.Type.container, false);
- }
-
- @Test
- public void inactive_content_nodes_are_reused() {
- assertInactiveReuse(ClusterSpec.Type.content, true);
- }
-
- private void assertInactiveReuse(ClusterSpec.Type clusterType, boolean expectedReuse) {
- NodeResources r = new NodeResources(20, 40, 100, 4);
- ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
- .flavors(List.of(new Flavor(r)))
- .build();
- tester.makeReadyHosts(4, r).activateTenantHosts();
-
- ApplicationId app1 = ProvisioningTester.applicationId("app1");
- ClusterSpec cluster1 = ClusterSpec.request(clusterType, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
-
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(4, 1, r)));
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
-
- // Deactivate any retired nodes - usually done by the RetiredExpirer
- tester.nodeRepository().nodes().setRemovable(app1, tester.getNodes(app1).retired().asList());
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
-
- if (expectedReuse) {
- assertEquals(2, tester.getNodes(app1, Node.State.inactive).size());
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(4, 1, r)));
- assertEquals(0, tester.getNodes(app1, Node.State.inactive).size());
- }
- else {
- assertEquals(0, tester.getNodes(app1, Node.State.inactive).size());
- assertEquals(2, tester.nodeRepository().nodes().list(Node.State.dirty).size());
- tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().list(Node.State.dirty).asList(), Agent.system, "test");
- tester.activate(app1, cluster1, Capacity.from(new ClusterResources(4, 1, r)));
- }
-
- }
-
-
- private Set<String> hostsOf(NodeList nodes) {
- return nodes.asList().stream().map(Node::parentHostname).map(Optional::get).collect(Collectors.toSet());
- }
-
- private void prepareAndActivate(ApplicationId application, int nodeCount, boolean exclusive, NodeResources resources, ProvisioningTester tester) {
- Set<HostSpec> hosts = new HashSet<>(tester.prepare(application,
- ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer")).vespaVersion("6.39").exclusive(exclusive).build(),
- Capacity.from(new ClusterResources(nodeCount, 1, resources), false, true)));
- tester.activate(application, hosts);
- }
-
- private void assertNodeParentReservation(List<Node> nodes, Optional<TenantName> reservation, ProvisioningTester tester) {
- for (Node node : nodes)
- assertEquals(reservation, tester.nodeRepository().nodes().node(node.parentHostname().get()).get().reservedTo());
- }
-
- private void assertHostSpecParentReservation(List<HostSpec> hostSpecs, Optional<TenantName> reservation, ProvisioningTester tester) {
- for (HostSpec hostSpec : hostSpecs) {
- Node node = tester.nodeRepository().nodes().node(hostSpec.hostname()).get();
- assertEquals(reservation, tester.nodeRepository().nodes().node(node.parentHostname().get()).get().reservedTo());
- }
- }
-
-}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java
index 8d9e79218d4..761afed23cc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicAllocationTest.java
@@ -43,12 +43,12 @@ import static org.junit.Assert.fail;
/**
* @author mortent
*/
-public class DynamicDockerAllocationTest {
+public class DynamicAllocationTest {
/**
* Test relocation of nodes from spare hosts.
* <p>
- * Setup 4 docker hosts and allocate one container on each (from two different applications)
+ * Setup 4 hosts and allocate one container on each (from two different applications)
* getSpareCapacityProd() spares.
* <p>
* Check that it relocates containers away from the getSpareCapacityProd() spares
@@ -68,20 +68,20 @@ public class DynamicDockerAllocationTest {
.build();
tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
tester.activateTenantHosts();
- List<Node> dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
+ List<Node> hosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
NodeResources flavor = new NodeResources(1, 4, 100, 1);
// Application 1
ApplicationId application1 = makeApplicationId("t1", "a1");
ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
- addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester);
- addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester);
+ addAndAssignNode(application1, "1a", hosts.get(0).hostname(), clusterSpec1, flavor, 0, tester);
+ addAndAssignNode(application1, "1b", hosts.get(1).hostname(), clusterSpec1, flavor, 1, tester);
// Application 2
ApplicationId application2 = makeApplicationId("t2", "a2");
ClusterSpec clusterSpec2 = clusterSpec("myContent.t2.a2");
- addAndAssignNode(application2, "2a", dockerHosts.get(2).hostname(), clusterSpec2, flavor, 3, tester);
- addAndAssignNode(application2, "2b", dockerHosts.get(3).hostname(), clusterSpec2, flavor, 4, tester);
+ addAndAssignNode(application2, "2a", hosts.get(2).hostname(), clusterSpec2, flavor, 3, tester);
+ addAndAssignNode(application2, "2b", hosts.get(3).hostname(), clusterSpec2, flavor, 4, tester);
// Redeploy both applications (to be agnostic on which hosts are picked as spares)
deployApp(application1, clusterSpec1, flavor, tester, 2);
@@ -109,7 +109,7 @@ public class DynamicDockerAllocationTest {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
tester.activateTenantHosts();
- NodeList dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host);
+ NodeList hosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host);
NodeResources resources = new NodeResources(1, 4, 100, 0.3);
// Application 1
@@ -137,7 +137,7 @@ public class DynamicDockerAllocationTest {
deployApp(application3, clusterSpec3, resources, tester, 2);
Map<Integer, Integer> numberOfChildrenStat = new HashMap<>();
- for (Node host : dockerHosts) {
+ for (Node host : hosts) {
int nofChildren = tester.nodeRepository().nodes().list().childrenOf(host).size();
if (!numberOfChildrenStat.containsKey(nofChildren)) {
numberOfChildrenStat.put(nofChildren, 0);
@@ -188,7 +188,7 @@ public class DynamicDockerAllocationTest {
/**
* Test redeployment of nodes that violates spare headroom - but without alternatives
* <p>
- * Setup 2 docker hosts and allocate one app with a container on each. 2 spares
+ * Setup 2 hosts and allocate one app with a container on each. 2 spares
* <p>
* Initial allocation of app 1 --> final allocation:
* <p>
@@ -201,14 +201,14 @@ public class DynamicDockerAllocationTest {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
tester.makeReadyNodes(2, "host-small", NodeType.host, 32);
tester.activateTenantHosts();
- List<Node> dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
+ List<Node> hosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
NodeResources flavor = new NodeResources(1, 4, 100, 1);
// Application 1
ApplicationId application1 = makeApplicationId("t1", "a1");
ClusterSpec clusterSpec1 = clusterSpec("myContent.t1.a1");
- addAndAssignNode(application1, "1a", dockerHosts.get(0).hostname(), clusterSpec1, flavor, 0, tester);
- addAndAssignNode(application1, "1b", dockerHosts.get(1).hostname(), clusterSpec1, flavor, 1, tester);
+ addAndAssignNode(application1, "1a", hosts.get(0).hostname(), clusterSpec1, flavor, 0, tester);
+ addAndAssignNode(application1, "1b", hosts.get(1).hostname(), clusterSpec1, flavor, 1, tester);
// Redeploy both applications (to be agnostic on which hosts are picked as spares)
deployApp(application1, clusterSpec1, flavor, tester, 2);
@@ -227,7 +227,7 @@ public class DynamicDockerAllocationTest {
tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
tester.activateTenantHosts();
- //Deploy an application having 6 nodes (3 nodes in 2 groups). We only have 5 docker hosts available
+ //Deploy an application having 6 nodes (3 nodes in 2 groups). We only have 5 hosts available
ApplicationId application1 = ProvisioningTester.applicationId();
tester.prepare(application1, clusterSpec("myContent.t1.a1"), 6, 2, new NodeResources(1, 4, 100, 1));
@@ -285,7 +285,7 @@ public class DynamicDockerAllocationTest {
}
@Test
- public void cd_uses_slow_disk_nodes_for_docker_hosts() {
+ public void cd_uses_slow_disk_hosts() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.cd, Environment.test, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
tester.makeReadyNodes(4, new Flavor(new NodeResources(1, 8, 120, 1, NodeResources.DiskSpeed.slow)), NodeType.host, 10, true);
tester.activateTenantHosts();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
index 029c9ffa559..6f1e2630434 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java
@@ -50,7 +50,7 @@ import static org.mockito.Mockito.when;
* @author freva
* @author bratseth
*/
-public class DynamicDockerProvisionTest {
+public class DynamicProvisioningTest {
private static final Zone zone = new Zone(
Cloud.builder().dynamicProvisioning(true).build(),
@@ -396,7 +396,7 @@ public class DynamicDockerProvisionTest {
List<HostSpec> prepared = tester.prepare(application, clusterSpec, nodes, groups, resources);
NodeList provisionedHosts = tester.nodeRepository().nodes().list(Node.State.provisioned).nodeType(NodeType.host);
if (!provisionedHosts.isEmpty()) {
- tester.nodeRepository().nodes().setReady(provisionedHosts.asList(), Agent.system, DynamicDockerProvisionTest.class.getSimpleName());
+ tester.nodeRepository().nodes().setReady(provisionedHosts.asList(), Agent.system, DynamicProvisioningTest.class.getSimpleName());
tester.activateTenantHosts();
}
tester.activate(application, prepared);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index a53c7469a25..21e9058cd7c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -771,7 +771,7 @@ public class ProvisioningTester {
}
@Override
- public long thinPoolSizeInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
+ public long reservedDiskSpaceInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningCompleteHostCalculatorTest.java
index afbd44a346f..5f2d567bd24 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningCompleteHostCalculatorTest.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.assertEquals;
/**
* @author bratseth
*/
-public class DockerProvisioningCompleteHostCalculatorTest {
+public class VirtualNodeProvisioningCompleteHostCalculatorTest {
@Test
public void changing_to_different_range_preserves_allocation() {
@@ -117,7 +117,7 @@ public class DockerProvisioningCompleteHostCalculatorTest {
}
@Override
- public long thinPoolSizeInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
+ public long reservedDiskSpaceInBase2Gb(NodeType nodeType, boolean sharedHost) { return 0; }
/**
* Returns the memory overhead resulting if the given advertised resources are placed on the given node
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
index 9ad5161ed59..18fcb56d87f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java
@@ -1,28 +1,43 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;
+import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.ApplicationTransaction;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.OutOfCapacityException;
+import com.yahoo.config.provision.ProvisionLock;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.node.Agent;
import org.junit.Test;
-import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
/**
* Tests provisioning of virtual nodes
@@ -30,37 +45,38 @@ import static org.junit.Assert.assertNotNull;
* @author hmusum
* @author mpolden
*/
-// Note: Some of the tests here should be moved to DockerProvisioningTest if we stop using VMs and want
-// to remove these tests
public class VirtualNodeProvisioningTest {
- private static final NodeResources resources = new NodeResources(4, 8, 100, 1);
+ private static final NodeResources resources1 = new NodeResources(4, 8, 100, 1);
+ private static final NodeResources resources2 = new NodeResources(1, 4, 100, 1,
+ NodeResources.DiskSpeed.fast, NodeResources.StorageType.local);
private static final ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion("6.42").build();
private static final ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer")).vespaVersion("6.42").build();
- private ProvisioningTester tester = new ProvisioningTester.Builder().build();
private final ApplicationId applicationId = ProvisioningTester.applicationId("test");
@Test
public void distinct_parent_host_for_each_node_in_a_cluster() {
+ ProvisioningTester tester = new ProvisioningTester.Builder().build();
+
tester.makeReadyHosts(4, new NodeResources(8, 16, 200, 2))
.activateTenantHosts();
int containerNodeCount = 4;
int contentNodeCount = 3;
int groups = 1;
- List<HostSpec> containerHosts = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, resources);
- List<HostSpec> contentHosts = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources);
- activate(containerHosts, contentHosts);
+ List<HostSpec> containerHosts = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, resources1);
+ List<HostSpec> contentHosts = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1);
+ tester.activate(applicationId, concat(containerHosts, contentHosts));
- List<Node> nodes = getNodes(applicationId);
+ NodeList nodes = tester.getNodes(applicationId, Node.State.active);
assertEquals(contentNodeCount + containerNodeCount, nodes.size());
assertDistinctParentHosts(nodes, ClusterSpec.Type.container, containerNodeCount);
assertDistinctParentHosts(nodes, ClusterSpec.Type.content, contentNodeCount);
// Go down to 3 nodes in container cluster
- List<HostSpec> containerHosts2 = tester.prepare(applicationId, containerClusterSpec, containerNodeCount - 1, groups, resources);
- activate(containerHosts2, contentHosts);
- List<Node> nodes2 = getNodes(applicationId);
+ List<HostSpec> containerHosts2 = tester.prepare(applicationId, containerClusterSpec, containerNodeCount - 1, groups, resources1);
+ tester.activate(applicationId, containerHosts2);
+ NodeList nodes2 = tester.getNodes(applicationId, Node.State.active);
assertDistinctParentHosts(nodes2, ClusterSpec.Type.container, containerNodeCount - 1);
// The surplus node is dirtied and then readied for new allocations
@@ -69,14 +85,16 @@ public class VirtualNodeProvisioningTest {
tester.nodeRepository().nodes().setReady(dirtyNode, Agent.system, getClass().getSimpleName());
// Go up to 4 nodes again in container cluster
- List<HostSpec> containerHosts3 = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, resources);
- activate(containerHosts3, contentHosts);
- List<Node> nodes3 = getNodes(applicationId);
+ List<HostSpec> containerHosts3 = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, resources1);
+ tester.activate(applicationId, containerHosts3);
+ NodeList nodes3 = tester.getNodes(applicationId, Node.State.active);
assertDistinctParentHosts(nodes3, ClusterSpec.Type.container, containerNodeCount);
}
@Test
public void allow_same_parent_host_for_nodes_in_a_cluster_in_cd_and_non_prod() {
+ ProvisioningTester tester = new ProvisioningTester.Builder().build();
+
final int containerNodeCount = 2;
final int contentNodeCount = 2;
final int groups = 1;
@@ -88,91 +106,95 @@ public class VirtualNodeProvisioningTest {
tester.makeReadyNodes(4, flavor, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(ProvisioningTester.applicationId(), NodeType.host);
- List<HostSpec> containerHosts = prepare(containerClusterSpec, containerNodeCount, groups, flavor);
- List<HostSpec> contentHosts = prepare(contentClusterSpec, contentNodeCount, groups, flavor);
- activate(containerHosts, contentHosts);
+ List<HostSpec> containerHosts = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, flavor);
+ List<HostSpec> contentHosts = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, flavor);
+ tester.activate(applicationId, concat(containerHosts, contentHosts));
// downscaled to 1 node per cluster in dev, so 2 in total
- assertEquals(2, getNodes(applicationId).size());
+ assertEquals(2, tester.getNodes(applicationId, Node.State.active).size());
}
// Allowed to use same parent host for several nodes in same cluster in CD (even if prod env)
{
tester = new ProvisioningTester.Builder().zone(new Zone(SystemName.cd, Environment.prod, RegionName.from("us-east"))).build();
- tester.makeReadyNodes(4, resources, NodeType.host, 1);
+ tester.makeReadyNodes(4, resources1, NodeType.host, 1);
tester.prepareAndActivateInfraApplication(ProvisioningTester.applicationId(), NodeType.host);
- List<HostSpec> containerHosts = prepare(containerClusterSpec, containerNodeCount, groups);
- List<HostSpec> contentHosts = prepare(contentClusterSpec, contentNodeCount, groups);
- activate(containerHosts, contentHosts);
+ List<HostSpec> containerHosts = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, resources1);
+ List<HostSpec> contentHosts = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1);
+ tester.activate(applicationId, concat(containerHosts, contentHosts));
- assertEquals(4, getNodes(applicationId).size());
+ assertEquals(4, tester.getNodes(applicationId, Node.State.active).size());
}
}
@Test
public void will_retire_clashing_active() {
- tester.makeReadyHosts(4, resources).activateTenantHosts();
+ ProvisioningTester tester = new ProvisioningTester.Builder().build();
+
+ tester.makeReadyHosts(4, resources1).activateTenantHosts();
int containerNodeCount = 2;
int contentNodeCount = 2;
int groups = 1;
- List<HostSpec> containerNodes = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, resources);
- List<HostSpec> contentNodes = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources);
- activate(containerNodes, contentNodes);
+ List<HostSpec> containerNodes = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, resources1);
+ List<HostSpec> contentNodes = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1);
+ tester.activate(applicationId, concat(containerNodes, contentNodes));
- List<Node> nodes = getNodes(applicationId);
+ NodeList nodes = tester.getNodes(applicationId, Node.State.active);
assertEquals(4, nodes.size());
assertDistinctParentHosts(nodes, ClusterSpec.Type.container, containerNodeCount);
assertDistinctParentHosts(nodes, ClusterSpec.Type.content, contentNodeCount);
- tester.patchNodes(nodes, (n) -> n.withParentHostname("clashing"));
- containerNodes = prepare(containerClusterSpec, containerNodeCount, groups);
- contentNodes = prepare(contentClusterSpec, contentNodeCount, groups);
- activate(containerNodes, contentNodes);
+ tester.patchNodes(nodes.asList(), (n) -> n.withParentHostname("clashing"));
+ containerNodes = tester.prepare(applicationId, containerClusterSpec, containerNodeCount, groups, resources1);
+ contentNodes = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1);
+ tester.activate(applicationId, concat(containerNodes, contentNodes));
- nodes = getNodes(applicationId);
+ nodes = tester.getNodes(applicationId, Node.State.active);
assertEquals(6, nodes.size());
assertEquals(2, nodes.stream().filter(n -> n.allocation().get().membership().retired()).count());
}
@Test(expected = OutOfCapacityException.class)
public void fail_when_too_few_distinct_parent_hosts() {
- tester.makeReadyChildren(2, resources, "parentHost1");
- tester.makeReadyChildren(1, resources, "parentHost2");
+ ProvisioningTester tester = new ProvisioningTester.Builder().build();
+ tester.makeReadyChildren(2, resources1, "parentHost1");
+ tester.makeReadyChildren(1, resources1, "parentHost2");
int contentNodeCount = 3;
- List<HostSpec> hosts = prepare(contentClusterSpec, contentNodeCount, 1);
- activate(hosts);
+ List<HostSpec> hosts = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, 1, resources1);
+ tester.activate(applicationId, hosts);
- List<Node> nodes = getNodes(applicationId);
+ NodeList nodes = tester.getNodes(applicationId, Node.State.active);
assertDistinctParentHosts(nodes, ClusterSpec.Type.content, contentNodeCount);
}
@Test
public void indistinct_distribution_with_known_ready_nodes() {
- tester.makeReadyChildren(3, resources);
+ ProvisioningTester tester = new ProvisioningTester.Builder().build();
+ tester.makeReadyChildren(3, resources1);
- final int contentNodeCount = 3;
- final int groups = 1;
- final List<HostSpec> contentHosts = prepare(contentClusterSpec, contentNodeCount, groups);
- activate(contentHosts);
+ int contentNodeCount = 3;
+ int groups = 1;
+ List<HostSpec> contentHosts = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1);
+ tester.activate(applicationId, contentHosts);
- List<Node> nodes = getNodes(applicationId);
+ NodeList nodes = tester.getNodes(applicationId, Node.State.active);
assertEquals(3, nodes.size());
// Set indistinct parents
- tester.patchNode(nodes.get(0), (n) -> n.withParentHostname("parentHost1"));
- tester.patchNode(nodes.get(1), (n) -> n.withParentHostname("parentHost1"));
- tester.patchNode(nodes.get(2), (n) -> n.withParentHostname("parentHost2"));
- nodes = getNodes(applicationId);
+ tester.patchNode(nodes.asList().get(0), (n) -> n.withParentHostname("parentHost1"));
+ tester.patchNode(nodes.asList().get(1), (n) -> n.withParentHostname("parentHost1"));
+ tester.patchNode(nodes.asList().get(2), (n) -> n.withParentHostname("parentHost2"));
+ nodes = tester.getNodes(applicationId, Node.State.active);
assertEquals(3, nodes.stream().filter(n -> n.parentHostname().isPresent()).count());
- tester.makeReadyChildren(1, resources, "parentHost1");
- tester.makeReadyChildren(2, resources, "parentHost2");
+ tester.makeReadyChildren(1, resources1, "parentHost1");
+ tester.makeReadyChildren(2, resources1, "parentHost2");
OutOfCapacityException expectedException = null;
try {
- prepare(contentClusterSpec, contentNodeCount, groups);
+ tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1);
} catch (OutOfCapacityException e) {
expectedException = e;
}
@@ -181,71 +203,467 @@ public class VirtualNodeProvisioningTest {
@Test
public void unknown_distribution_with_known_ready_nodes() {
- tester.makeReadyChildren(3, resources);
+ ProvisioningTester tester = new ProvisioningTester.Builder().build();
+ tester.makeReadyChildren(3, resources1);
final int contentNodeCount = 3;
final int groups = 1;
- final List<HostSpec> contentHosts = prepare(contentClusterSpec, contentNodeCount, groups);
- activate(contentHosts);
- assertEquals(3, getNodes(applicationId).size());
-
- tester.makeReadyChildren(1, resources, "parentHost1");
- tester.makeReadyChildren(1, resources, "parentHost2");
- tester.makeReadyChildren(1, resources, "parentHost3");
- assertEquals(contentHosts, prepare(contentClusterSpec, contentNodeCount, groups));
+ final List<HostSpec> contentHosts = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1);
+ tester.activate(applicationId, contentHosts);
+ assertEquals(3, tester.getNodes(applicationId, Node.State.active).size());
+
+ tester.makeReadyChildren(1, resources1, "parentHost1");
+ tester.makeReadyChildren(1, resources1, "parentHost2");
+ tester.makeReadyChildren(1, resources1, "parentHost3");
+ assertEquals(contentHosts, tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1));
}
@Test
public void unknown_distribution_with_known_and_unknown_ready_nodes() {
- tester.makeReadyChildren(3, resources);
+ ProvisioningTester tester = new ProvisioningTester.Builder().build();
+ tester.makeReadyChildren(3, resources1);
int contentNodeCount = 3;
int groups = 1;
- List<HostSpec> contentHosts = prepare(contentClusterSpec, contentNodeCount, groups);
- activate(contentHosts);
- assertEquals(3, getNodes(applicationId).size());
+ List<HostSpec> contentHosts = tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1);
+ tester.activate(applicationId, contentHosts);
+ assertEquals(3, tester.getNodes(applicationId, Node.State.active).size());
+
+ tester.makeReadyChildren(1, resources1, "parentHost1");
+ tester.makeReadyChildren(1, resources1);
+ assertEquals(contentHosts, tester.prepare(applicationId, contentClusterSpec, contentNodeCount, groups, resources1));
+ }
- tester.makeReadyChildren(1, resources, "parentHost1");
- tester.makeReadyChildren(1, resources);
- assertEquals(contentHosts, prepare(contentClusterSpec, contentNodeCount, groups));
+ @Test
+ public void application_deployment() {
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
+ tester.makeReadyHosts(10, resources2).activateTenantHosts();
+ ApplicationId application1 = ProvisioningTester.applicationId("app1");
+
+ Version wantedVespaVersion = Version.fromString("6.39");
+ int nodeCount = 7;
+ List<HostSpec> hosts = tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
+ nodeCount, 1, resources2);
+ tester.activate(application1, new HashSet<>(hosts));
+
+ NodeList nodes = tester.getNodes(application1, Node.State.active);
+ assertEquals(nodeCount, nodes.size());
+ assertEquals(resources2, nodes.asList().get(0).resources());
+
+ // Upgrade Vespa version on nodes
+ Version upgradedWantedVespaVersion = Version.fromString("6.40");
+ List<HostSpec> upgradedHosts = tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion(upgradedWantedVespaVersion).build(),
+ nodeCount, 1, resources2);
+ tester.activate(application1, new HashSet<>(upgradedHosts));
+ NodeList upgradedNodes = tester.getNodes(application1, Node.State.active);
+ assertEquals(nodeCount, upgradedNodes.size());
+ assertEquals(resources2, upgradedNodes.asList().get(0).resources());
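+ // Only the Vespa version changed, so the same nodes and hosts are reused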
+ assertEquals(hosts, upgradedHosts);
}
- private void assertDistinctParentHosts(List<Node> nodes, ClusterSpec.Type clusterType, int expectedCount) {
- List<String> parentHosts = getParentHostsFromNodes(nodes, Optional.of(clusterType));
+ @Test
+ public void refuses_to_activate_on_non_active_host() {
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- assertEquals(expectedCount, parentHosts.size());
- assertEquals(expectedCount, Set.copyOf(parentHosts).size());
+ List<Node> parents = tester.makeReadyNodes(10, new NodeResources(2, 4, 20, 2), NodeType.host, 1);
+ for (Node parent : parents)
+ tester.makeReadyChildren(1, resources2, parent.hostname());
+
+ ApplicationId application1 = ProvisioningTester.applicationId();
+ Version wantedVespaVersion = Version.fromString("6.39");
+ int nodeCount = 7;
+ try {
+ List<HostSpec> nodes = tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
+ nodeCount, 1, resources2);
+ fail("Expected the allocation to fail due to parent hosts not being active yet");
+ } catch (OutOfCapacityException expected) { }
+
+ // Activate the hosts, thereby allocating the parents
+ tester.activateTenantHosts();
+
+ // Try allocating tenants again
+ List<HostSpec> nodes = tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
+ nodeCount, 1, resources2);
+ tester.activate(application1, new HashSet<>(nodes));
+
+ NodeList activeNodes = tester.getNodes(application1, Node.State.active);
+ assertEquals(nodeCount, activeNodes.size());
}
- private List<String> getParentHostsFromNodes(List<Node> nodes, Optional<ClusterSpec.Type> clusterType) {
- List<String> parentHosts = new ArrayList<>();
- for (Node node : nodes) {
- if (node.parentHostname().isPresent() && (clusterType.isPresent() && clusterType.get() == node.allocation().get().membership().cluster().type())) {
- parentHosts.add(node.parentHostname().get());
- }
+ @Test
+ public void reservations_are_respected() {
+ NodeResources resources = new NodeResources(10, 10, 100, 10);
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
+ TenantName tenant1 = TenantName.from("tenant1");
+ TenantName tenant2 = TenantName.from("tenant2");
+ ApplicationId application1_1 = ApplicationId.from(tenant1, ApplicationName.from("application1"), InstanceName.defaultName());
+ ApplicationId application2_1 = ApplicationId.from(tenant2, ApplicationName.from("application1"), InstanceName.defaultName());
+ ApplicationId application2_2 = ApplicationId.from(tenant2, ApplicationName.from("application2"), InstanceName.defaultName());
+
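+ // Make 10 hosts reserved to tenant1 and 10 hosts open to any tenant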
+ tester.makeReadyNodes(10, resources, Optional.of(tenant1), NodeType.host, 1);
+ tester.makeReadyNodes(10, resources, Optional.empty(), NodeType.host, 1);
+ tester.activateTenantHosts();
+
+ Version wantedVespaVersion = Version.fromString("6.39");
+ List<HostSpec> nodes = tester.prepare(application2_1,
+ ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
+ 6, 1, resources);
+ assertHostSpecParentReservation(nodes, Optional.empty(), tester); // We do not get nodes on hosts reserved to tenant1
+ tester.activate(application2_1, nodes);
+
+ try {
+ tester.prepare(application2_2,
+ ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
+ 5, 1, resources);
+ fail("Expected exception");
+ }
+ catch (OutOfCapacityException e) {
+ // Success: Not enough nonreserved hosts left
}
- return parentHosts;
+
+ nodes = tester.prepare(application1_1,
+ ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContent")).vespaVersion(wantedVespaVersion).build(),
+ 10, 1, resources);
+ assertHostSpecParentReservation(nodes, Optional.of(tenant1), tester);
+ tester.activate(application1_1, nodes);
+ assertNodeParentReservation(tester.getNodes(application1_1).asList(), Optional.empty(), tester); // Reservation is cleared after activation
+ }
+
+ /** Exclusive app first, then non-exclusive: Should give the same result as below */
+ @Test
+ public void application_deployment_with_exclusive_app_first() {
+ NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
+ NodeResources nodeResources = new NodeResources(1, 4, 100, 1);
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
+ tester.makeReadyHosts(4, hostResources).activateTenantHosts();
+ ApplicationId application1 = ProvisioningTester.applicationId("app1");
+ prepareAndActivate(application1, 2, true, nodeResources, tester);
+ assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"),
+ hostsOf(tester.getNodes(application1, Node.State.active)));
+
+ ApplicationId application2 = ProvisioningTester.applicationId("app2");
+ prepareAndActivate(application2, 2, false, nodeResources, tester);
+ assertEquals("Application is assigned to separate hosts",
+ Set.of("host-3.yahoo.com", "host-4.yahoo.com"),
+ hostsOf(tester.getNodes(application2, Node.State.active)));
}
- private List<Node> getNodes(ApplicationId applicationId) {
- return tester.getNodes(applicationId, Node.State.active).asList();
+ /** Non-exclusive app first, then an exclusive: Should give the same result as above */
+ @Test
+ public void application_deployment_with_exclusive_app_last() {
+ NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
+ NodeResources nodeResources = new NodeResources(1, 4, 100, 1);
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
+ tester.makeReadyHosts(4, hostResources).activateTenantHosts();
+ ApplicationId application1 = ProvisioningTester.applicationId("app1");
+ prepareAndActivate(application1, 2, false, nodeResources, tester);
+ assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"),
+ hostsOf(tester.getNodes(application1, Node.State.active)));
+
+ ApplicationId application2 = ProvisioningTester.applicationId("app2");
+ prepareAndActivate(application2, 2, true, nodeResources, tester);
+ assertEquals("Application is assigned to separate hosts",
+ Set.of("host-3.yahoo.com", "host-4.yahoo.com"),
+ hostsOf(tester.getNodes(application2, Node.State.active)));
}
- private List<HostSpec> prepare(ClusterSpec clusterSpec, int nodeCount, int groups) {
- return tester.prepare(applicationId, clusterSpec, nodeCount, groups, resources);
+ /** Test making an application exclusive and then non-exclusive again */
+ @Test
+ public void application_deployment_change_to_exclusive_and_back() {
+ NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
+ NodeResources nodeResources = new NodeResources(1, 4, 100, 1);
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
+ tester.makeReadyHosts(4, hostResources).activateTenantHosts();
+
+ ApplicationId application1 = ProvisioningTester.applicationId();
+ prepareAndActivate(application1, 2, false, nodeResources, tester);
+ for (Node node : tester.getNodes(application1, Node.State.active))
+ assertFalse(node.allocation().get().membership().cluster().isExclusive());
+
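+ // Make the application exclusive; it should keep its allocation, now marked exclusive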
+ prepareAndActivate(application1, 2, true, nodeResources, tester);
+ assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"), hostsOf(tester.getNodes(application1, Node.State.active)));
+ for (Node node : tester.getNodes(application1, Node.State.active))
+ assertTrue(node.allocation().get().membership().cluster().isExclusive());
+
+ prepareAndActivate(application1, 2, false, nodeResources, tester);
+ assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"), hostsOf(tester.getNodes(application1, Node.State.active)));
+ for (Node node : tester.getNodes(application1, Node.State.active))
+ assertFalse(node.allocation().get().membership().cluster().isExclusive());
}
- private List<HostSpec> prepare(ClusterSpec clusterSpec, int nodeCount, int groups, NodeResources flavor) {
- return tester.prepare(applicationId, clusterSpec, nodeCount, groups, flavor);
+ /** An exclusive application makes its hosts unavailable to other applications, which can cause allocation failures */
+ @Test
+ public void application_deployment_with_exclusive_app_causing_allocation_failure() {
+ ApplicationId application1 = ApplicationId.from("tenant1", "app1", "default");
+ ApplicationId application2 = ApplicationId.from("tenant2", "app2", "default");
+ ApplicationId application3 = ApplicationId.from("tenant1", "app3", "default");
+ NodeResources hostResources = new NodeResources(10, 40, 1000, 10);
+ NodeResources nodeResources = new NodeResources(1, 4, 100, 1);
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
+ tester.makeReadyHosts(4, hostResources).activateTenantHosts();
+
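+ // application1 is exclusive and occupies 2 of the 4 hosts, leaving only 2 hosts for other applications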
+ prepareAndActivate(application1, 2, true, nodeResources, tester);
+ assertEquals(Set.of("host-1.yahoo.com", "host-2.yahoo.com"),
+ hostsOf(tester.getNodes(application1, Node.State.active)));
+
+ try {
+ prepareAndActivate(application2, 3, false, nodeResources, tester);
+ fail("Expected allocation failure");
+ }
+ catch (Exception e) {
+ assertEquals("No room for 3 nodes as 2 of 4 hosts are exclusive",
+ "Could not satisfy request for 3 nodes with " +
+ "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps] " +
+ "in tenant2.app2 container cluster 'myContainer' 6.39: " +
+ "Out of capacity on group 0: " +
+ "Not enough nodes available due to host exclusivity constraints",
+ e.getMessage());
+ }
+
+ // Adding 2 nodes of another application for the same tenant works
+ prepareAndActivate(application3, 2, true, nodeResources, tester);
}
- @SafeVarargs
- private void activate(List<HostSpec>... hostLists) {
- HashSet<HostSpec> hosts = new HashSet<>();
- for (List<HostSpec> h : hostLists) {
- hosts.addAll(h);
+ @Test
+ public void storage_type_must_match() {
+ try {
+ ProvisioningTester tester = new ProvisioningTester.Builder()
+ .zone(new Zone(Environment.prod, RegionName.from("us-east-1"))).build();
+ ApplicationId application1 = ProvisioningTester.applicationId("app1");
+ tester.makeReadyChildren(1, resources2, "host1");
+ tester.makeReadyChildren(1, resources2, "host2");
+
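+ // Request remote storage, which the ready nodes cannot provide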
+ tester.prepare(application1,
+ ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent")).vespaVersion("6.42").build(),
+ 2, 1,
+ resources2.with(NodeResources.StorageType.remote));
+ fail("Expected OutOfCapacityException");
}
- tester.activate(applicationId, hosts);
+ catch (OutOfCapacityException e) {
+ assertEquals("Could not satisfy request for 2 nodes with " +
+ "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote] " +
+ "in tenant.app1 content cluster 'myContent'" +
+ " 6.42: Out of capacity on group 0",
+ e.getMessage());
+ }
+ }
+
+ @Test
+ public void initial_allocation_is_within_limits() {
+ Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
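+ // Resources calculator with 3 Gb memory overhead per node (second argument presumably reserved disk space)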
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
+ .resourcesCalculator(3, 0)
+ .flavors(List.of(hostFlavor))
+ .build();
+ tester.makeReadyHosts(2, hostFlavor.resources()).activateTenantHosts();
+
+ ApplicationId app1 = ProvisioningTester.applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.container, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+
+ var resources = new NodeResources(1, 8, 10, 1);
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, resources),
+ new ClusterResources(4, 1, resources)));
+ tester.assertNodes("Initial allocation at min with default resources",
+ 2, 1, 1, 8, 10, 1.0,
+ app1, cluster1);
+ }
+
+ @Test
+ public void changing_to_different_range_preserves_allocation() {
+ Flavor hostFlavor = new Flavor(new NodeResources(40, 40, 100, 4));
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
+ .resourcesCalculator(3, 0)
+ .flavors(List.of(hostFlavor))
+ .build();
+ tester.makeReadyHosts(9, hostFlavor.resources()).activateTenantHosts();
+
+ ApplicationId app1 = ProvisioningTester.applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+
+ var initialResources = new NodeResources(20, 16, 50, 1);
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, initialResources),
+ new ClusterResources(2, 1, initialResources)));
+ tester.assertNodes("Initial allocation",
+ 2, 1, 20, 16, 50, 1.0,
+ app1, cluster1);
+
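+ // Switch to a range requiring 7 smaller nodes; total real resources are roughly preserved (e.g. 2 x 50 Gb = ~7 x 14.3 Gb disk)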
+ var newMinResources = new NodeResources( 5, 6, 11, 1);
+ var newMaxResources = new NodeResources(20, 10, 30, 1);
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
+ new ClusterResources(7, 1, newMaxResources)));
+ tester.assertNodes("New allocation preserves total resources",
+ 7, 1, 7, 6.7, 14.3, 1.0,
+ app1, cluster1);
+
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
+ new ClusterResources(7, 1, newMaxResources)));
+ tester.assertNodes("Redeploying does not cause changes",
+ 7, 1, 7, 6.7, 14.3, 1.0,
+ app1, cluster1);
+ }
+
+ @Test
+ public void too_few_real_resources_causes_failure() {
+ try {
+ Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
+ .resourcesCalculator(3, 0)
+ .flavors(List.of(hostFlavor))
+ .build();
+ tester.makeReadyHosts(2, hostFlavor.resources()).activateTenantHosts();
+
+ ApplicationId app1 = ProvisioningTester.applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content,
+ new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+
+ // 5 Gb requested memory becomes 5-3=2 Gb real memory, which is an illegally small amount
+ var resources = new NodeResources(1, 5, 10, 1);
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, resources),
+ new ClusterResources(4, 1, resources)));
+ fail("Expected IllegalArgumentException");
+ }
+ catch (IllegalArgumentException e) {
+ assertEquals("No allocation possible within limits: " +
+ "from 2 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk 10.0 Gb, bandwidth: 1.0 Gbps] " +
+ "to 4 nodes with [vcpu: 1.0, memory: 5.0 Gb, disk 10.0 Gb, bandwidth: 1.0 Gbps]",
+ e.getMessage());
+ }
+ }
+
+ @Test
+ public void exclusive_resources_not_matching_host_causes_failure() {
+ try {
+ Flavor hostFlavor1 = new Flavor(new NodeResources(20, 40, 100, 4));
+ Flavor hostFlavor2 = new Flavor(new NodeResources(30, 40, 100, 4));
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
+ .flavors(List.of(hostFlavor1, hostFlavor2))
+ .build();
+ ApplicationId app1 = ProvisioningTester.applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content,
+ new ClusterSpec.Id("cluster1")).exclusive(true).vespaVersion("7").build();
+
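+ // 37 Gb memory matches neither host flavor, which is required when the cluster is exclusive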
+ var resources = new NodeResources(20, 37, 100, 1);
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, resources),
+ new ClusterResources(4, 1, resources)));
+ fail("Expected IllegalArgumentException");
+ }
+ catch (IllegalArgumentException e) {
+ assertEquals("No allocation possible within limits: " +
+ "from 2 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps] " +
+ "to 4 nodes with [vcpu: 20.0, memory: 37.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps]. " +
+ "Nearest allowed node resources: [vcpu: 20.0, memory: 40.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote]",
+ e.getMessage());
+ }
+ }
+
+ @Test
+ public void test_startup_redeployment_with_inactive_nodes() {
+ NodeResources r = new NodeResources(20, 40, 100, 4);
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
+ .flavors(List.of(new Flavor(r)))
+ .build();
+ tester.makeReadyHosts(5, r).activateTenantHosts();
+
+ ApplicationId app1 = ProvisioningTester.applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(5, 1, r)));
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
+
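+ // Deactivate the retired nodes - usually done by the RetiredExpirer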
+ var tx = new ApplicationTransaction(new ProvisionLock(app1, tester.nodeRepository().nodes().lock(app1)), new NestedTransaction());
+ tester.nodeRepository().nodes().deactivate(tester.nodeRepository().nodes().list(Node.State.active).owner(app1).retired().asList(), tx);
+ tx.nested().commit();
+
+ assertEquals(2, tester.getNodes(app1, Node.State.active).size());
+ assertEquals(3, tester.getNodes(app1, Node.State.inactive).size());
+
+ // Startup deployment: Not failable
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r), false, false));
+ // ... causes no change
+ assertEquals(2, tester.getNodes(app1, Node.State.active).size());
+ assertEquals(3, tester.getNodes(app1, Node.State.inactive).size());
+ }
+
+ @Test
+ public void inactive_container_nodes_are_not_reused() {
+ assertInactiveReuse(ClusterSpec.Type.container, false);
+ }
+
+ @Test
+ public void inactive_content_nodes_are_reused() {
+ assertInactiveReuse(ClusterSpec.Type.content, true);
+ }
+
+ private void assertInactiveReuse(ClusterSpec.Type clusterType, boolean expectedReuse) {
+ NodeResources r = new NodeResources(20, 40, 100, 4);
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
+ .flavors(List.of(new Flavor(r)))
+ .build();
+ tester.makeReadyHosts(4, r).activateTenantHosts();
+
+ ApplicationId app1 = ProvisioningTester.applicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(clusterType, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(4, 1, r)));
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
+
+ // Deactivate any retired nodes - usually done by the RetiredExpirer
+ tester.nodeRepository().nodes().setRemovable(app1, tester.getNodes(app1).retired().asList());
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
+
+ if (expectedReuse) {
+ assertEquals(2, tester.getNodes(app1, Node.State.inactive).size());
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(4, 1, r)));
+ assertEquals(0, tester.getNodes(app1, Node.State.inactive).size());
+ }
+ else {
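+ // Container nodes end up dirty rather than inactive; make them ready again before reallocating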
+ assertEquals(0, tester.getNodes(app1, Node.State.inactive).size());
+ assertEquals(2, tester.nodeRepository().nodes().list(Node.State.dirty).size());
+ tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().list(Node.State.dirty).asList(), Agent.system, "test");
+ tester.activate(app1, cluster1, Capacity.from(new ClusterResources(4, 1, r)));
+ }
+
+ }
+
+ private Set<String> hostsOf(NodeList nodes) {
+ return hostsOf(nodes, Optional.empty());
+ }
+
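+ /** Returns the parent hostnames of the given nodes, optionally filtered by cluster type */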
+ private Set<String> hostsOf(NodeList nodes, Optional<ClusterSpec.Type> clusterType) {
+ return nodes.asList().stream()
+ .filter(node -> clusterType.isEmpty() ||
+ clusterType.get() == node.allocation().get().membership().cluster().type())
+ .flatMap(node -> node.parentHostname().stream())
+ .collect(Collectors.toSet());
+ }
+
+ private void assertDistinctParentHosts(NodeList nodes, ClusterSpec.Type clusterType, int expectedCount) {
+ Set<String> parentHosts = hostsOf(nodes, Optional.of(clusterType));
+ assertEquals(expectedCount, parentHosts.size());
+ }
+
+ private void prepareAndActivate(ApplicationId application, int nodeCount, boolean exclusive, NodeResources resources, ProvisioningTester tester) {
+ Set<HostSpec> hosts = new HashSet<>(tester.prepare(application,
+ ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer")).vespaVersion("6.39").exclusive(exclusive).build(),
+ Capacity.from(new ClusterResources(nodeCount, 1, resources), false, true)));
+ tester.activate(application, hosts);
+ }
+
+ private void assertNodeParentReservation(List<Node> nodes, Optional<TenantName> reservation, ProvisioningTester tester) {
+ for (Node node : nodes)
+ assertEquals(reservation, tester.nodeRepository().nodes().node(node.parentHostname().get()).get().reservedTo());
+ }
+
+ private void assertHostSpecParentReservation(List<HostSpec> hostSpecs, Optional<TenantName> reservation, ProvisioningTester tester) {
+ for (HostSpec hostSpec : hostSpecs) {
+ Node node = tester.nodeRepository().nodes().node(hostSpec.hostname()).get();
+ assertEquals(reservation, tester.nodeRepository().nodes().node(node.parentHostname().get()).get().reservedTo());
+ }
+ }
+
+ private static <T> List<T> concat(List<T> list1, List<T> list2) {
+ return Stream.concat(list1.stream(), list2.stream()).collect(Collectors.toList());
}
}