author     Valerij Fredriksen <freva@users.noreply.github.com>  2020-05-19 23:45:33 +0200
committer  GitHub <noreply@github.com>                          2020-05-19 23:45:33 +0200
commit     25dd6a302f8c06996801e811398292a1639bdb8f (patch)
tree       98c4a5b59cbd5a95957e8b2315176e675be526e2 /node-repository
parent     0e8f55a17106adaf6d7d134fd9752c170deb1e4f (diff)
parent     47371c7bfc2e17ff167558069b9067231f827550 (diff)
Merge pull request #13312 from vespa-engine/bratseth/retire-when-leaving-group
Bratseth/retire when leaving group
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java                               |  5
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java                         | 12
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java        | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java             | 26
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java            |  4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java           | 37
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java |  1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java                  | 17
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java         |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java    | 11
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java| 19
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java          | 34
12 files changed, 107 insertions, 71 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
index ce696903a53..6097688df72 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
@@ -142,6 +142,11 @@ public class NodeList extends AbstractFilteringList<Node, NodeList> {
.collect(collectingAndThen(Collectors.toList(), NodeList::copyOf));
}
+ public NodeList group(int index) {
+ return matching(n -> ( n.allocation().isPresent() &&
+ n.allocation().get().membership().cluster().group().equals(Optional.of(ClusterSpec.Group.from(index)))));
+ }
+
/** Returns the parent node of the given child node */
public Optional<Node> parentOf(Node child) {
return child.parentHostname()
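[The new NodeList.group(int) helper filters a node list down to the members of one cluster group. A minimal usage sketch, mirroring how the new test_change_group_layout test at the bottom of this diff uses it (tester and app1 are that test's fixtures):

    // Inspect group membership after a redeployment
    NodeList active = tester.getNodes(app1, Node.State.active);
    int inGroup0      = active.group(0).size();             // nodes placed in group 0
    int retiredGroup0 = active.group(0).retired().size();   // of those, how many are retired
]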
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index bb06db3e78b..490fed681f9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -792,13 +792,13 @@ public class NodeRepository extends AbstractComponent {
public boolean canAllocateTenantNodeTo(Node host) {
if ( ! host.type().canRun(NodeType.tenant)) return false;
+ if (host.status().wantToRetire()) return false;
+ if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
- // Do not allocate to hosts we want to retire or are currently retiring
- if (host.status().wantToRetire() || host.allocation().map(alloc -> alloc.membership().retired()).orElse(false))
- return false;
-
- if ( ! zone.getCloud().dynamicProvisioning()) return host.state() == State.active;
- else return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
+ if ( zone.getCloud().dynamicProvisioning())
+ return EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state());
+ else
+ return host.state() == State.active;
}
/** Returns the time keeper of this system */
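[The reshuffled canAllocateTenantNodeTo now reads as a chain of early returns. A condensed restatement of the method as it stands after this change (same names as in the diff; only the layout differs, with the if/else collapsed to a ternary):

    public boolean canAllocateTenantNodeTo(Node host) {
        if ( ! host.type().canRun(NodeType.tenant)) return false;
        if (host.status().wantToRetire()) return false;        // host is marked for retirement
        if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; // already retiring
        return zone.getCloud().dynamicProvisioning()
               ? EnumSet.of(State.active, State.ready, State.provisioned).contains(host.state())
               : host.state() == State.active;                  // static zones: active hosts only
    }
]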
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
index 672be25c5be..b508198db3a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/DockerHostCapacity.java
@@ -11,8 +11,8 @@ import java.util.Objects;
/**
* Capacity calculation for docker hosts.
* <p>
- * The calculations is based on an immutable copy of nodes that represents
- * all capacities in the system - i.e. all nodes in the node repo give or take.
+ * The calculations are based on an immutable copy of nodes that represents
+ * all capacities in the system - i.e. all nodes in the node repo.
*
* @author smorgrav
*/
@@ -30,7 +30,7 @@ public class DockerHostCapacity {
int result = compare(freeCapacityOf(hostB, true), freeCapacityOf(hostA, true));
if (result != 0) return result;
- // If resources are equal we want to assign to the one with the most IPaddresses free
+ // If resources are equal we want to assign to the one with the most IP addresses free
return freeIPs(hostB) - freeIPs(hostA);
}
@@ -65,9 +65,9 @@ public class DockerHostCapacity {
NodeResources freeCapacityOf(Node host, boolean excludeInactive) {
// Only hosts have free capacity
- if (!host.type().canRun(NodeType.tenant)) return new NodeResources(0, 0, 0, 0);
- NodeResources hostResources = hostResourcesCalculator.advertisedResourcesOf(host.flavor());
+ if ( ! host.type().canRun(NodeType.tenant)) return new NodeResources(0, 0, 0, 0);
+ NodeResources hostResources = hostResourcesCalculator.advertisedResourcesOf(host.flavor());
return allNodes.childrenOf(host).asList().stream()
.filter(node -> !(excludeInactive && isInactiveOrRetired(node)))
.map(node -> node.flavor().resources().justNumbers())
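[The freeCapacityOf hunk above only shows the start of the calculation; a plausible reading of the whole method, assuming a NodeResources.subtract step for the reduction that falls outside this hunk:

    // Free capacity = advertised host resources minus resources of its (relevant) children
    NodeResources free = hostResourcesCalculator.advertisedResourcesOf(host.flavor());
    for (Node child : allNodes.childrenOf(host).asList()) {
        if (excludeInactive && isInactiveOrRetired(child)) continue;     // capacity about to be freed
        free = free.subtract(child.flavor().resources().justNumbers());  // assumed reduction step
    }
    return free;
]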
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 30f9001093d..a1d8ffb03d3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -67,24 +67,22 @@ public class GroupPreparer {
try (Mutex allocationLock = nodeRepository.lockUnallocated()) {
// Create a prioritized set of nodes
- LockedNodeList nodeList = nodeRepository.list(allocationLock);
- NodePrioritizer prioritizer = new NodePrioritizer(nodeList,
+ LockedNodeList allNodes = nodeRepository.list(allocationLock);
+ NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requestedNodes,
+ highestIndex, nodeRepository);
+
+ NodePrioritizer prioritizer = new NodePrioritizer(allNodes,
application,
cluster,
requestedNodes,
spareCount,
wantedGroups,
- nodeRepository.nameResolver(),
- nodeRepository.resourcesCalculator(),
- allocateFully);
-
+ allocateFully,
+ nodeRepository);
prioritizer.addApplicationNodes();
prioritizer.addSurplusNodes(surplusActiveNodes);
prioritizer.addReadyNodes();
- prioritizer.addNewDockerNodes(nodeRepository::canAllocateTenantNodeTo);
- // Allocate from the prioritized list
- NodeAllocation allocation = new NodeAllocation(nodeList, application, cluster, requestedNodes,
- highestIndex, nodeRepository);
+ prioritizer.addNewDockerNodes();
allocation.offer(prioritizer.prioritize());
if (dynamicProvisioningEnabled) {
@@ -114,15 +112,15 @@ public class GroupPreparer {
}
if (! allocation.fulfilled() && requestedNodes.canFail())
- throw new OutOfCapacityException("Could not satisfy " + requestedNodes + " for " + cluster +
- " in " + application.toShortString() +
+ throw new OutOfCapacityException((cluster.group().isPresent() ? "Out of capacity on " + cluster.group().get() :"") +
allocation.outOfCapacityDetails());
// Carry out and return allocation
nodeRepository.reserve(allocation.reservableNodes());
nodeRepository.addDockerNodes(new LockedNodeList(allocation.newNodes(), allocationLock));
- surplusActiveNodes.removeAll(allocation.surplusNodes());
- return allocation.finalNodes(surplusActiveNodes);
+ List<Node> acceptedNodes = allocation.finalNodes();
+ surplusActiveNodes.removeAll(acceptedNodes);
+ return acceptedNodes;
}
}
}
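[Note the inverted surplus bookkeeping here: finalNodes() no longer appends newly retired nodes to a caller-supplied list for other groups to absorb; instead the accepted nodes are simply removed from surplusActiveNodes, so nodes retired when leaving a group stay put, retired, in their group. Schematically:

    // Before: allocation.finalNodes(surplusActiveNodes)  (retired nodes offered to other groups)
    // After:
    List<Node> acceptedNodes = allocation.finalNodes();
    surplusActiveNodes.removeAll(acceptedNodes);   // whatever was accepted is no longer surplus
    return acceptedNodes;
]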
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index 83c68c91fc5..47d1b30a8e7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -316,10 +316,9 @@ class NodeAllocation {
* Prefer to retire nodes of the wrong flavor.
* Make as few changes to the retired set as possible.
*
- * @param surplusNodes this will add nodes not any longer needed by this group to this list
* @return the final list of nodes
*/
- List<Node> finalNodes(List<Node> surplusNodes) {
+ List<Node> finalNodes() {
int currentRetiredCount = (int) nodes.stream().filter(node -> node.node.allocation().get().membership().retired()).count();
int deltaRetiredCount = requestedNodes.idealRetiredCount(nodes.size(), currentRetiredCount) - currentRetiredCount;
@@ -327,7 +326,6 @@ class NodeAllocation {
for (PrioritizableNode node : byDecreasingIndex(nodes)) {
if ( ! node.node.allocation().get().membership().retired() && node.node.state() == Node.State.active) {
node.node = node.node.retire(Agent.application, nodeRepository.clock().instant());
- surplusNodes.add(node.node); // offer this node to other groups
if (--deltaRetiredCount == 0) break;
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index a7d83bbfad9..8a15c058ff4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -11,6 +11,7 @@ import java.util.logging.Level;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
@@ -42,7 +43,7 @@ public class NodePrioritizer {
private final NodeSpec requestedNodes;
private final ApplicationId application;
private final ClusterSpec clusterSpec;
- private final NameResolver nameResolver;
+ private final NodeRepository nodeRepository;
private final boolean isDocker;
private final boolean isAllocatingForReplacement;
private final boolean isTopologyChange;
@@ -52,16 +53,15 @@ public class NodePrioritizer {
private final Set<Node> spareHosts;
NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
- int spares, int wantedGroups, NameResolver nameResolver, HostResourcesCalculator hostResourcesCalculator,
- boolean allocateFully) {
+ int spares, int wantedGroups, boolean allocateFully, NodeRepository nodeRepository) {
this.allNodes = allNodes;
- this.capacity = new DockerHostCapacity(allNodes, hostResourcesCalculator);
+ this.capacity = new DockerHostCapacity(allNodes, nodeRepository.resourcesCalculator());
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
this.application = application;
- this.nameResolver = nameResolver;
this.spareHosts = findSpareHosts(allNodes, capacity, spares);
this.allocateFully = allocateFully;
+ this.nodeRepository = nodeRepository;
NodeList nodesInCluster = allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
@@ -78,9 +78,8 @@ public class NodePrioritizer {
.filter(clusterSpec.group()::equals)
.count();
- this.isAllocatingForReplacement = isReplacement(
- nodesInCluster.size(),
- nodesInCluster.state(Node.State.failed).size());
+ this.isAllocatingForReplacement = isReplacement(nodesInCluster.size(),
+ nodesInCluster.state(Node.State.failed).size());
this.isDocker = resources(requestedNodes) != null;
}
@@ -119,11 +118,11 @@ public class NodePrioritizer {
}
/** Add a node on each docker host with enough capacity for the requested flavor */
- void addNewDockerNodes(Predicate<Node> canAllocateTenantNodeTo) {
+ void addNewDockerNodes() {
if ( ! isDocker) return;
LockedNodeList candidates = allNodes
- .filter(node -> node.type() != NodeType.host || canAllocateTenantNodeTo.test(node))
+ .filter(node -> node.type() != NodeType.host || nodeRepository.canAllocateTenantNodeTo(node))
.filter(node -> node.reservedTo().isEmpty() || node.reservedTo().get().equals(application.tenant()));
if (allocateFully) {
@@ -142,25 +141,20 @@ public class NodePrioritizer {
}
private void addNewDockerNodesOn(LockedNodeList candidates) {
- NodeResources wantedResources = resources(requestedNodes);
-
for (Node host : candidates) {
- boolean hostHasCapacityForWantedFlavor = capacity.hasCapacity(host, wantedResources);
- boolean conflictingCluster = allNodes.childrenOf(host).owner(application).asList().stream()
- .anyMatch(child -> child.allocation().get().membership().cluster().id().equals(clusterSpec.id()));
-
- if (!hostHasCapacityForWantedFlavor || conflictingCluster) continue;
+ if ( ! capacity.hasCapacity(host, resources(requestedNodes))) continue;
+ if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
- log.log(Level.FINE, "Trying to add new Docker node on " + host);
Optional<IP.Allocation> allocation;
try {
- allocation = host.ipConfig().pool().findAllocation(allNodes, nameResolver);
+ allocation = host.ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver());
if (allocation.isEmpty()) continue; // No free addresses in this pool
} catch (Exception e) {
log.log(Level.WARNING, "Failed allocating IP address on " + host.hostname(), e);
continue;
}
+ log.log(Level.FINE, "Creating new docker node on " + host);
Node newNode = Node.createDockerNode(allocation.get().addresses(),
allocation.get().hostname(),
host.hostname(),
@@ -204,9 +198,8 @@ public class NodePrioritizer {
* parameters to the priority sorting procedure.
*/
private PrioritizableNode toPrioritizable(Node node, boolean isSurplusNode, boolean isNewNode) {
- PrioritizableNode.Builder builder = new PrioritizableNode.Builder(node)
- .surplusNode(isSurplusNode)
- .newNode(isNewNode);
+ PrioritizableNode.Builder builder = new PrioritizableNode.Builder(node).surplusNode(isSurplusNode)
+ .newNode(isNewNode);
allNodes.parentOf(node).ifPresent(parent -> {
NodeResources parentCapacity = capacity.freeCapacityOf(parent, false);
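[After the simplification, the per-host loop for creating new docker nodes boils down to three guards; consolidated here from the +/- lines above, with the try/catch around IP allocation elided:

    for (Node host : candidates) {
        if ( ! capacity.hasCapacity(host, resources(requestedNodes))) continue;   // not enough free capacity
        if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue; // cluster already has a node here
        Optional<IP.Allocation> allocation = host.ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver());
        if (allocation.isEmpty()) continue;                                       // no free IP addresses in the pool
        // ... create the docker node from allocation.get(), as shown above
    }
]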
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 913357b16ca..bd92357ea79 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -74,7 +74,6 @@ public class NodeRepositoryProvisioner implements Provisioner {
this.preparer = new Preparer(nodeRepository,
zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD,
provisionServiceProvider.getHostProvisioner(),
- provisionServiceProvider.getHostResourcesCalculator(),
flagSource,
loadBalancerProvisioner);
this.activator = new Activator(nodeRepository, loadBalancerProvisioner);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index f88caffa6c6..4452fc21c52 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.lang.MutableInteger;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.hosted.provision.Node;
@@ -28,7 +29,7 @@ class Preparer {
private final int spareCount;
public Preparer(NodeRepository nodeRepository, int spareCount, Optional<HostProvisioner> hostProvisioner,
- HostResourcesCalculator hostResourcesCalculator, FlagSource flagSource,
+ FlagSource flagSource,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.spareCount = spareCount;
@@ -38,9 +39,17 @@ class Preparer {
/** Prepare all required resources for the given application and cluster */
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
- var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
- prepareLoadBalancer(application, cluster, requestedNodes);
- return nodes;
+ try {
+ var nodes = prepareNodes(application, cluster, requestedNodes, wantedGroups);
+ prepareLoadBalancer(application, cluster, requestedNodes);
+ return nodes;
+ }
+ catch (OutOfCapacityException e) {
+ throw new OutOfCapacityException("Could not satisfy " + requestedNodes +
+ ( wantedGroups > 1 ? " (in " + wantedGroups + " groups)" : "") +
+ " in " + application + " " + cluster +
+ ": " + e.getMessage());
+ }
}
/**
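[With prepare(...) now wrapping OutOfCapacityException, the bare group-level message thrown by GroupPreparer ("Out of capacity on group N" plus allocation details) gets nested inside request-level context. The updated DockerProvisioningTest assertions below pin the resulting shape; schematically:

    // Composed message, as asserted by the updated tests:
    //   "Could not satisfy request for 3 nodes with [vcpu: 1.0, ...] " +
    //   "in tenant1.app1 container cluster 'myContainer' 6.39: " +
    //   "Out of capacity on group 0: <details>"
]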
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java
index 8cfcbcb3797..a60dee6bb0c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/PrioritizableNode.java
@@ -33,7 +33,7 @@ class PrioritizableNode implements Comparable<PrioritizableNode> {
/** True if the node is allocated to a host that should be dedicated as a spare */
final boolean violatesSpares;
- /** True if this is a node that has been retired earlier in the allocation process */
+ /** True if this node belongs to a group which will not be needed after this deployment */
final boolean isSurplusNode;
/** This node does not exist in the node repository yet */
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
index 441538fc305..e4d464840ea 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
@@ -249,7 +249,8 @@ public class DockerProvisioningTest {
assertEquals("No room for 3 nodes as 2 of 4 hosts are exclusive",
"Could not satisfy request for 3 nodes with " +
"[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: local] " +
- "for container cluster 'myContainer' group 0 6.39 in tenant1.app1: " +
+ "in tenant1.app1 container cluster 'myContainer' 6.39: " +
+ "Out of capacity on group 0: " +
"Not enough nodes available due to host exclusivity constraints, " +
"insufficient nodes available on separate physical hosts",
e.getMessage());
@@ -282,7 +283,7 @@ public class DockerProvisioningTest {
try {
ProvisioningTester tester = new ProvisioningTester.Builder()
.zone(new Zone(Environment.prod, RegionName.from("us-east-1"))).build();
- ApplicationId application1 = tester.makeApplicationId();
+ ApplicationId application1 = tester.makeApplicationId("app1");
tester.makeReadyVirtualDockerNodes(1, dockerFlavor, "dockerHost1");
tester.makeReadyVirtualDockerNodes(1, dockerFlavor, "dockerHost2");
@@ -292,7 +293,11 @@ public class DockerProvisioningTest {
dockerFlavor.with(NodeResources.StorageType.remote));
}
catch (OutOfCapacityException e) {
- assertTrue(e.getMessage().startsWith("Could not satisfy request for 2 nodes with [vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote]"));
+ assertEquals("Could not satisfy request for 2 nodes with " +
+ "[vcpu: 1.0, memory: 4.0 Gb, disk 100.0 Gb, bandwidth: 1.0 Gbps, storage type: remote] " +
+ "in tenant.app1 content cluster 'myContent'" +
+ " 6.42: Out of capacity on group 0",
+ e.getMessage());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
index 050f3b7e865..3b03e4e9d91 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
@@ -40,9 +40,9 @@ public class MultigroupProvisioningTest {
public void test_provisioning_of_multiple_groups() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- ApplicationId application1 = tester.makeApplicationId();
+ ApplicationId application1 = tester.makeApplicationId("app1");
- tester.makeReadyNodes(21, small);
+ tester.makeReadyNodes(31, small);
deploy(application1, 6, 1, small, tester);
deploy(application1, 6, 2, small, tester);
@@ -86,10 +86,10 @@ public class MultigroupProvisioningTest {
public void test_provisioning_of_multiple_groups_after_flavor_migration() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- ApplicationId application1 = tester.makeApplicationId();
+ ApplicationId application1 = tester.makeApplicationId("app1");
tester.makeReadyNodes(10, small);
- tester.makeReadyNodes(10, large);
+ tester.makeReadyNodes(16, large);
deploy(application1, 8, 1, small, tester);
deploy(application1, 8, 1, large, tester);
@@ -125,10 +125,10 @@ public class MultigroupProvisioningTest {
public void test_provisioning_of_multiple_groups_after_flavor_migration_and_exiration() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();
- ApplicationId application1 = tester.makeApplicationId();
+ ApplicationId application1 = tester.makeApplicationId("app1");
tester.makeReadyNodes(10, small);
- tester.makeReadyNodes(10, large);
+ tester.makeReadyNodes(16, large);
deploy(application1, 8, 1, small, tester);
deploy(application1, 8, 1, large, tester);
@@ -164,13 +164,8 @@ public class MultigroupProvisioningTest {
int nodeCount = capacity.minResources().nodes();
NodeResources nodeResources = capacity.minResources().nodeResources();
- int previousActiveNodeCount = tester.getNodes(application, Node.State.active).resources(nodeResources).size();
-
tester.activate(application, prepare(application, capacity, tester));
- assertEquals("Superfluous nodes are retired, but no others - went from " + previousActiveNodeCount + " to " + nodeCount + " nodes",
- Math.max(0, previousActiveNodeCount - capacity.minResources().nodes()),
- tester.getNodes(application, Node.State.active).retired().resources(nodeResources).size());
- assertEquals("Other flavors are retired",
+ assertEquals("Nodes of wrong size are retired",
0, tester.getNodes(application, Node.State.active).not().retired().not().resources(nodeResources).size());
// Check invariants for all nodes
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 03c07515cd5..44688c61b34 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -486,6 +486,40 @@ public class ProvisioningTest {
app1, cluster1);
}
+ /**
+ * When increasing the number of groups without changing node count, we need to provision new nodes for
+ * the new groups since, although we can remove nodes from existing groups without losing data, we
+ * cannot do so without losing coverage.
+ */
+ @Test
+ public void test_change_group_layout() {
+ Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
+ .flavors(List.of(hostFlavor))
+ .build();
+ tester.makeReadyHosts(6, hostFlavor.resources()).deployZoneApp();
+
+ ApplicationId app1 = tester.makeApplicationId("app1");
+ ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
+
+ // Deploy with 1 group
+ System.out.println("--- Deploying 1 group");
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 1, 10, 30, 10)));
+ assertEquals(4, tester.getNodes(app1, Node.State.active).size());
+ assertEquals(4, tester.getNodes(app1, Node.State.active).group(0).size());
+ assertEquals(0, tester.getNodes(app1, Node.State.active).group(0).retired().size());
+
+ // Split into 2 groups
+ System.out.println("--- Deploying 2 groups");
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 10, 30, 10)));
+ assertEquals(6, tester.getNodes(app1, Node.State.active).size());
+ assertEquals(4, tester.getNodes(app1, Node.State.active).group(0).size());
+ assertEquals(2, tester.getNodes(app1, Node.State.active).group(0).retired().size());
+ assertEquals(2, tester.getNodes(app1, Node.State.active).group(1).size());
+ assertEquals(0, tester.getNodes(app1, Node.State.active).group(1).retired().size());
+ }
+
+
@Test(expected = IllegalArgumentException.class)
public void prod_deployment_requires_redundancy() {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).build();