author    Martin Polden <mpolden@mpolden.no>  2024-01-22 11:15:09 +0100
committer Martin Polden <mpolden@mpolden.no>  2024-01-22 14:20:12 +0100
commit    8a79800d3b8f683cd1689e036ef7f8766d4189ff (patch)
tree      5cd6ed8cb7a341692d1831b7f01244084a82719b /node-repository
parent    cb41a669ba594a65084e996f94ed89f6dfa968ea (diff)
Consider group membership when retiring host for OS upgrade
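The core of this change is a per-cluster check: a host may only be deprovisioned if the stateful groups it hosts are exactly the groups that already have retiring nodes in their clusters, or if no nodes in those clusters are retiring yet. Below is a minimal, self-contained sketch of that check; the String cluster IDs and Integer group indexes are simplified stand-ins for illustration, not the real ClusterId and ClusterSpec.Group types used in the diff.

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class GroupLimitSketch {

        // A host can be deprovisioned only if, for every cluster it hosts, either no
        // groups are retiring yet, or the host's groups equal the retiring groups
        static boolean canDeprovision(Map<String, Set<Integer>> groupsOnHost,
                                      Map<String, Set<Integer>> retiringGroupsByCluster) {
            for (var clusterAndGroups : groupsOnHost.entrySet()) {
                Set<Integer> retiring = retiringGroupsByCluster.get(clusterAndGroups.getKey());
                if (retiring != null && !clusterAndGroups.getValue().equals(retiring)) return false;
            }
            return true;
        }

        public static void main(String[] args) {
            Map<String, Set<Integer>> retiring = new HashMap<>();
            retiring.put("content1", new HashSet<>(Set.of(0))); // group 0 already has retiring nodes

            System.out.println(canDeprovision(Map.of("content1", Set.of(1)), retiring)); // false: group 1 is deferred
            System.out.println(canDeprovision(Map.of("content1", Set.of(0)), retiring)); // true: same group
            System.out.println(canDeprovision(Map.of("content2", Set.of(1)), retiring)); // true: other cluster
        }
    }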
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java  76
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java      73
2 files changed, 133 insertions(+), 16 deletions(-)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
index cb6c7683f23..a5ff7b82551 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/os/RetiringOsUpgrader.java
@@ -2,21 +2,32 @@
package com.yahoo.vespa.hosted.provision.os;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.node.ClusterId;
import com.yahoo.vespa.hosted.provision.node.filter.NodeListFilter;
import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
import java.util.Optional;
+import java.util.Set;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
/**
- * An upgrader that retires and deprovisions hosts on stale OS versions.
+ * An upgrader that retires and deprovisions hosts on stale OS versions. For hosts containing stateful clusters, this
+ * upgrader limits node retirement so that at most one group per cluster is affected at a time.
*
- * Used in clouds where hosts must be re-provisioned to upgrade their OS.
+ * Used in clouds where the host configuration (e.g. local disk) requires re-provisioning to upgrade the OS.
*
* @author mpolden
*/
@@ -35,8 +46,8 @@ public class RetiringOsUpgrader extends OsUpgrader {
public void upgradeTo(OsVersionTarget target) {
NodeList allNodes = nodeRepository.nodes().list();
Instant now = nodeRepository.clock().instant();
- for (var candidate : candidates(now, target, allNodes)) {
- deprovision(candidate, target.version(), now);
+ for (Node host : deprovisionable(now, target, allNodes)) {
+ deprovision(host, target.version(), now);
}
}
@@ -45,18 +56,46 @@ public class RetiringOsUpgrader extends OsUpgrader {
// No action needed in this implementation.
}
- /** Returns nodes that are candidates for upgrade */
- private NodeList candidates(Instant instant, OsVersionTarget target, NodeList allNodes) {
+ /** Returns the nodes that can be deprovisioned at the given instant */
+ private List<Node> deprovisionable(Instant instant, OsVersionTarget target, NodeList allNodes) {
NodeList nodes = allNodes.state(Node.State.active, Node.State.provisioned).nodeType(target.nodeType());
if (softRebuild) {
- // Retire only hosts which do not have a replaceable root disk
+ // Consider only hosts which do not have a replaceable root disk
nodes = nodes.not().replaceableRootDisk();
}
- return nodes.not().deprovisioning()
- .not().onOsVersion(target.version())
- .matching(node -> canUpgradeTo(target.version(), instant, node))
- .byIncreasingOsVersion()
- .first(upgradeSlots(target, nodes.deprovisioning()));
+ // Retire hosts up to the slot limit, while ensuring that only one group per cluster is retired at a time
+ NodeList activeNodes = allNodes.state(Node.State.active);
+ Map<ClusterId, Set<ClusterSpec.Group>> retiringGroupsByCluster = groupsOf(activeNodes.retiring());
+ int limit = upgradeSlots(target, nodes.deprovisioning());
+ List<Node> result = new ArrayList<>();
+ NodeList candidates = nodes.not().deprovisioning()
+ .not().onOsVersion(target.version())
+ .matching(node -> canUpgradeTo(target.version(), instant, node))
+ .byIncreasingOsVersion();
+ for (Node host : candidates) {
+ if (result.size() == limit) break;
+ // For all clusters residing on this host: determine whether deprovisioning the host would retire nodes in
+ // groups beyond those that already have retiring nodes. If so, defer deprovisioning the host
+ boolean canDeprovision = true;
+ Map<ClusterId, Set<ClusterSpec.Group>> groupsOnHost = groupsOf(activeNodes.childrenOf(host));
+ for (var clusterAndGroups : groupsOnHost.entrySet()) {
+ Set<ClusterSpec.Group> groups = clusterAndGroups.getValue();
+ Set<ClusterSpec.Group> retiringGroups = retiringGroupsByCluster.get(clusterAndGroups.getKey());
+ if (retiringGroups != null && !groups.equals(retiringGroups)) {
+ canDeprovision = false;
+ break;
+ }
+ }
+ // Deprovision the host and count all cluster groups on it as retiring
+ if (canDeprovision) {
+ result.add(host);
+ groupsOnHost.forEach((cluster, groups) -> retiringGroupsByCluster.merge(cluster, groups, (oldVal, newVal) -> {
+ oldVal.addAll(newVal);
+ return oldVal;
+ }));
+ }
+ }
+ return Collections.unmodifiableList(result);
}
/** Upgrade given host by retiring and deprovisioning it */
@@ -68,4 +107,17 @@ public class RetiringOsUpgrader extends OsUpgrader {
nodeRepository.nodes().upgradeOs(NodeListFilter.from(host), Optional.of(target));
}
+ /** Returns the stateful groups present on the given nodes, grouped by their cluster ID */
+ private static Map<ClusterId, Set<ClusterSpec.Group>> groupsOf(NodeList nodes) {
+ return nodes.stream()
+ .filter(node -> node.allocation().isPresent() &&
+ node.allocation().get().membership().cluster().isStateful() &&
+ node.allocation().get().membership().cluster().group().isPresent())
+ .collect(Collectors.groupingBy(node -> new ClusterId(node.allocation().get().owner(),
+ node.allocation().get().membership().cluster().id()),
+ HashMap::new,
+ Collectors.mapping(n -> n.allocation().get().membership().cluster().group().get(),
+ Collectors.toCollection(HashSet::new))));
+ }
+
}
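The selection loop in deprovisionable also updates its bookkeeping after each pick: the groups on a host chosen for deprovisioning are merged into the per-cluster retiring set, so later candidates in the same pass are checked against the updated picture. A self-contained sketch of that merge step, using the same simplified String/Integer stand-ins as above (the real code merges ClusterSpec.Group sets keyed by ClusterId):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class RetiringBookkeepingSketch {

        // Fold the selected host's groups into the per-cluster retiring set, so that
        // later candidates in the same pass see them as already retiring
        static void markRetiring(Map<String, Set<Integer>> retiringGroupsByCluster,
                                 Map<String, Set<Integer>> groupsOnHost) {
            groupsOnHost.forEach((cluster, groups) ->
                    retiringGroupsByCluster.merge(cluster, new HashSet<>(groups), (oldVal, newVal) -> {
                        oldVal.addAll(newVal);
                        return oldVal;
                    }));
        }

        public static void main(String[] args) {
            Map<String, Set<Integer>> retiring = new HashMap<>();
            markRetiring(retiring, Map.of("content1", Set.of(0)));
            markRetiring(retiring, Map.of("content1", Set.of(0), "content2", Set.of(1)));
            System.out.println(retiring); // content1 -> [0], content2 -> [1]
        }
    }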
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index dcbac44a37f..3f2d7112224 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -253,6 +253,63 @@ public class OsVersionsTest {
}
@Test
+ public void upgrade_by_retiring_is_limited_by_group_membership() {
+ var versions = new OsVersions(tester.nodeRepository(), Cloud.builder().dynamicProvisioning(true).build(),
+ Optional.ofNullable(tester.hostProvisioner()));
+ int hostCount = 7;
+ int app1GroupCount = 2;
+ setMaxActiveUpgrades(hostCount);
+ ApplicationId app1 = ApplicationId.from("t1", "a1", "i1");
+ ApplicationId app2 = ApplicationId.from("t2", "a2", "i2");
+ provisionInfraApplication(hostCount, NodeType.host);
+ deployApplication(app1, app1GroupCount);
+ deployApplication(app2);
+ Supplier<NodeList> hosts = () -> tester.nodeRepository().nodes().list()
+ .nodeType(NodeType.host)
+ .not().state(Node.State.deprovisioned);
+
+ // All hosts are on initial version
+ var version0 = Version.fromString("8.0");
+ versions.setTarget(NodeType.host, version0, false);
+ setCurrentVersion(hosts.get().asList(), version0);
+
+ // New version is triggered
+ var version1 = Version.fromString("8.5");
+ versions.setTarget(NodeType.host, version1, false);
+ versions.resumeUpgradeOf(NodeType.host, true);
+ {
+ // At most one group per cluster has retiring nodes
+ NodeList allNodes = tester.nodeRepository().nodes().list().not().state(Node.State.deprovisioned);
+ assertEquals(hostCount - 1, allNodes.nodeType(NodeType.host).deprovisioning().size());
+ assertEquals(1, allNodes.owner(app1).retiring().group(0).size());
+ assertEquals(0, allNodes.owner(app1).retiring().group(1).size());
+ assertEquals(2, allNodes.owner(app2).retiring().size());
+
+ // Hosts complete reprovisioning
+ NodeList emptyHosts = allNodes.deprovisioning().nodeType(NodeType.host)
+ .matching(h -> allNodes.childrenOf(h).isEmpty());
+ completeReprovisionOf(emptyHosts.asList(), NodeType.host);
+ replaceNodes(app1, app1GroupCount);
+ replaceNodes(app2);
+ completeReprovisionOf(hosts.get().deprovisioning().asList(), NodeType.host);
+ }
+ {
+ // Last host/group is retired
+ versions.resumeUpgradeOf(NodeType.host, true);
+ NodeList allNodes = tester.nodeRepository().nodes().list().not().state(Node.State.deprovisioned);
+ assertEquals(1, allNodes.nodeType(NodeType.host).deprovisioning().size());
+ assertEquals(0, allNodes.owner(app1).retiring().group(0).size());
+ assertEquals(1, allNodes.owner(app1).retiring().group(1).size());
+ assertEquals(0, allNodes.owner(app2).retiring().size());
+ replaceNodes(app1, app1GroupCount);
+ completeReprovisionOf(hosts.get().deprovisioning().asList(), NodeType.host);
+ }
+ NodeList allHosts = hosts.get();
+ assertEquals(0, allHosts.deprovisioning().size());
+ assertEquals(allHosts.size(), allHosts.onOsVersion(version1).size());
+ }
+
+ @Test
public void upgrade_by_rebuilding() {
var versions = new OsVersions(tester.nodeRepository(), Cloud.defaultCloud(), Optional.ofNullable(tester.hostProvisioner()));
setMaxActiveUpgrades(1);
@@ -547,24 +604,32 @@ public class OsVersionsTest {
}
private void deployApplication(ApplicationId application) {
+ deployApplication(application, 1);
+ }
+
+ private void deployApplication(ApplicationId application, int groups) {
ClusterSpec contentSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1")).vespaVersion("7").build();
- List<HostSpec> hostSpecs = tester.prepare(application, contentSpec, 2, 1, new NodeResources(4, 8, 100, 0.3));
+ List<HostSpec> hostSpecs = tester.prepare(application, contentSpec, 2, groups, new NodeResources(4, 8, 100, 0.3));
tester.activate(application, hostSpecs);
}
- private void replaceNodes(ApplicationId application) {
+ private void replaceNodes(ApplicationId application, int groups) {
// Deploy to retire nodes
- deployApplication(application);
+ deployApplication(application, groups);
NodeList retired = tester.nodeRepository().nodes().list().owner(application).retired();
assertFalse("At least one node is retired", retired.isEmpty());
tester.nodeRepository().nodes().setRemovable(retired, false);
// Redeploy to deactivate removable nodes and allocate new ones
- deployApplication(application);
+ deployApplication(application, groups);
tester.nodeRepository().nodes().list(Node.State.inactive).owner(application)
.forEach(node -> tester.nodeRepository().nodes().removeRecursively(node, true));
}
+ private void replaceNodes(ApplicationId application) {
+ replaceNodes(application, 1);
+ }
+
private NodeList deprovisioningChildrenOf(Node parent) {
return tester.nodeRepository().nodes().list()
.childrenOf(parent)