summaryrefslogtreecommitdiffstats
path: root/node-repository/src/main
diff options
context:
space:
mode:
Diffstat (limited to 'node-repository/src/main')
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java9
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeMutex.java1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirer.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java77
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java63
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostResumeProvisioner.java24
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java435
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java43
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java23
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNameResolver.java1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockProvisioner.java1
19 files changed, 464 insertions, 265 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java
index dc86daf2c67..9bc18533ddf 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/LockedNodeList.java
@@ -17,9 +17,16 @@ import java.util.Objects;
*/
public final class LockedNodeList extends NodeList {
+ private final Mutex lock;
+
public LockedNodeList(List<Node> nodes, Mutex lock) {
super(nodes, false);
- Objects.requireNonNull(lock, "lock must be non-null");
+ this.lock = Objects.requireNonNull(lock, "lock must be non-null");
+ }
+
+ /** Returns a new LockedNodeList with the given nodes, for the same lock. */
+ public LockedNodeList childList(List<Node> nodes) {
+ return new LockedNodeList(nodes, lock);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeMutex.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeMutex.java
index 60fd07951c6..20c246b3ebd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeMutex.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeMutex.java
@@ -28,4 +28,5 @@ public class NodeMutex implements Mutex {
return new NodeMutex(updatedNode, mutex);
}
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index d6671d41cbd..9da66413b9c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -229,7 +229,7 @@ public class NodeRepository extends AbstractComponent {
applicationNodes.asList(),
Agent.system,
Optional.of("Application is removed"),
- transaction.nested());
+ transaction);
applications.remove(transaction);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirer.java
index 8766dea3d61..e300591fbb2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DirtyExpirer.java
@@ -3,6 +3,8 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.Node.State;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
@@ -33,8 +35,12 @@ public class DirtyExpirer extends Expirer {
@Override
protected void expire(List<Node> expired) {
- for (Node expiredNode : expired)
- nodeRepository().nodes().fail(expiredNode.hostname(), wantToDeprovisionOnExpiry, Agent.DirtyExpirer, "Node is stuck in dirty");
+ nodeRepository().nodes().performOn(NodeList.copyOf(expired),
+ node -> node.state() == State.dirty && isExpired(node),
+ (node, lock) -> nodeRepository().nodes().fail(node.hostname(),
+ wantToDeprovisionOnExpiry,
+ Agent.DirtyExpirer,
+ "Node is stuck in dirty"));
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
index fa3f9435c70..cb0a8005e87 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
@@ -6,13 +6,14 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.Zone;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.Node.State;
import com.yahoo.vespa.hosted.provision.NodeList;
+import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
-import com.yahoo.vespa.hosted.provision.node.History;
+import com.yahoo.vespa.hosted.provision.node.History.Event.Type;
import java.time.Duration;
-import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;
@@ -67,55 +68,47 @@ public class FailedExpirer extends NodeRepositoryMaintainer {
@Override
protected double maintain() {
- NodeList allNodes = nodeRepository.nodes().list();
- List<Node> remainingNodes = new ArrayList<>(allNodes.state(Node.State.failed)
- .nodeType(NodeType.tenant, NodeType.host)
- .asList());
+ Predicate<Node> isExpired = node -> node.state() == State.failed
+ && node.history().hasEventBefore(Type.failed, clock().instant().minus(expiryFor(node)));
+ NodeList allNodes = nodeRepository.nodes().list(); // Stale snapshot, not critical.
- recycleIf(node -> node.allocation().isEmpty(), remainingNodes, allNodes);
- recycleIf(node -> !node.allocation().get().membership().cluster().isStateful() &&
- node.history().hasEventBefore(History.Event.Type.failed, clock().instant().minus(statelessExpiry)),
- remainingNodes,
- allNodes);
- recycleIf(node -> node.allocation().get().membership().cluster().isStateful() &&
- node.history().hasEventBefore(History.Event.Type.failed, clock().instant().minus(statefulExpiry)),
- remainingNodes,
- allNodes);
+ nodeRepository.nodes().performOn(allNodes.nodeType(NodeType.tenant),
+ isExpired,
+ (node, lock) -> recycle(node, List.of(), allNodes).get());
+
+ nodeRepository.nodes().performOnRecursively(allNodes.nodeType(NodeType.host),
+ nodes -> isExpired.test(nodes.parent().node()),
+ nodes -> recycle(nodes.parent().node(),
+ nodes.children().stream().map(NodeMutex::node).toList(),
+ allNodes)
+ .map(List::of).orElse(List.of()));
return 1.0;
}
- /** Recycle the nodes matching condition, and remove those nodes from the nodes list. */
- private void recycleIf(Predicate<Node> condition, List<Node> failedNodes, NodeList allNodes) {
- List<Node> nodesToRecycle = failedNodes.stream().filter(condition).toList();
- failedNodes.removeAll(nodesToRecycle);
- recycle(nodesToRecycle, allNodes);
+ private Duration expiryFor(Node node) {
+ return node.allocation().isEmpty() ? Duration.ZERO
+ : node.allocation().get().membership().cluster().isStateful() ? statefulExpiry
+ : statelessExpiry;
}
- /** Move eligible nodes to dirty or parked. This may be a subset of the given nodes */
- private void recycle(List<Node> nodes, NodeList allNodes) {
- List<Node> nodesToRecycle = new ArrayList<>();
- for (Node candidate : nodes) {
- Optional<String> reason = shouldPark(candidate, allNodes);
- if (reason.isPresent()) {
- List<String> unparkedChildren = candidate.type().isHost() ?
- allNodes.childrenOf(candidate)
- .not()
- .state(Node.State.parked)
- .mapToList(Node::hostname) :
- List.of();
-
- if (unparkedChildren.isEmpty()) {
- nodeRepository.nodes().park(candidate.hostname(), true, Agent.FailedExpirer,
- "Parked by FailedExpirer due to " + reason.get());
- } else {
- log.info(String.format("Expired failed node %s was not parked because of unparked children: %s",
- candidate.hostname(), String.join(", ", unparkedChildren)));
- }
+ private Optional<Node> recycle(Node node, List<Node> children, NodeList allNodes) {
+ Optional<String> reason = shouldPark(node, allNodes);
+ if (reason.isPresent()) {
+ List<String> unparkedChildren = children.stream()
+ .filter(child -> child.state() != Node.State.parked)
+ .map(Node::hostname)
+ .toList();
+ if (unparkedChildren.isEmpty()) {
+ return Optional.of(nodeRepository.nodes().park(node.hostname(), true, Agent.FailedExpirer,
+ "Parked by FailedExpirer due to " + reason.get()));
} else {
- nodesToRecycle.add(candidate);
+ log.info(String.format("Expired failed node %s was not parked because of unparked children: %s",
+ node.hostname(), String.join(", ", unparkedChildren)));
+ return Optional.empty();
}
+ } else {
+ return Optional.of(nodeRepository.nodes().deallocate(node, Agent.FailedExpirer, "Expired by FailedExpirer"));
}
- nodeRepository.nodes().deallocate(nodesToRecycle, Agent.FailedExpirer, "Expired by FailedExpirer");
}
/** Returns whether the node should be parked instead of recycled */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index e9e3fd5179a..d70ee825860 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -72,7 +72,14 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
protected double maintain() {
List<Node> provisionedSnapshot;
try {
- provisionedSnapshot = provision(nodeRepository().nodes().list());
+ NodeList nodes;
+ // Host and child nodes are written in separate transactions, but both are written while holding the
+ // unallocated lock. Hold the unallocated lock while reading nodes to ensure we get all the children
+ // of newly provisioned hosts.
+ try (Mutex ignored = nodeRepository().nodes().lockUnallocated()) {
+ nodes = nodeRepository().nodes().list();
+ }
+ provisionedSnapshot = provision(nodes);
} catch (NodeAllocationException | IllegalStateException e) {
log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts: " + e.getMessage());
return 0; // avoid removing excess hosts
@@ -85,16 +92,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
}
private double markForRemoval(List<Node> provisionedSnapshot) {
- // Group nodes by parent; no parent means it's a host.
- Map<Optional<String>, List<Node>> nodesByParent = provisionedSnapshot.stream().collect(groupingBy(Node::parentHostname));
-
- // Find all hosts that we once thought were empty (first clause), or whose children are now all removable (second clause).
- List<Node> emptyHosts = nodesByParent.get(Optional.<String>empty()).stream()
- .filter(host -> host.hostEmptyAt().isPresent()
- || nodesByParent.getOrDefault(Optional.of(host.hostname()), List.of())
- .stream().allMatch(HostCapacityMaintainer::canDeprovision))
- .toList();
-
+ List<Node> emptyHosts = findEmptyOrRemovableHosts(provisionedSnapshot);
if (emptyHosts.isEmpty()) return 1;
int attempts = 0, success = 0;
@@ -108,18 +106,16 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
// Re-read all nodes under lock and compute the candidates for removal. The actual nodes we want
// to mark for removal is the intersection with typeEmptyHosts, which excludes the preprovisioned hosts.
Map<Optional<String>, List<Node>> currentNodesByParent = nodeRepository().nodes().list().stream().collect(groupingBy(Node::parentHostname));
- List<Node> candidateHosts = new ArrayList<>(currentNodesByParent.get(Optional.<String>empty()));
+ List<Node> candidateHosts = new ArrayList<>(getHosts(currentNodesByParent));
candidateHosts.retainAll(typeEmptyHosts);
for (Node host : candidateHosts) {
attempts++;
// Any hosts that are no longer empty should be marked as such, and excluded from removal.
- if (currentNodesByParent.getOrDefault(Optional.of(host.hostname()), List.of())
- .stream().anyMatch(n -> ! canDeprovision(n))) {
- if (host.hostEmptyAt().isPresent()) {
- nodeRepository().nodes().write(host.withHostEmptyAt(null), lock);
- }
+ if (currentNodesByParent.getOrDefault(Optional.of(host.hostname()), List.of()).stream().anyMatch(n -> ! canDeprovision(n))
+ && host.hostEmptyAt().isPresent()) {
+ nodeRepository().nodes().write(host.withHostEmptyAt(null), lock);
}
// If the host is still empty, we can mark it as empty now, or mark it for removal if it has already expired.
else {
@@ -282,11 +278,38 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
nodeResources,
nodeRepository().clock().instant()))
.toList();
-
}
private static NodeResources toNodeResources(ClusterCapacity clusterCapacity) {
- return new NodeResources(clusterCapacity.vcpu(), clusterCapacity.memoryGb(), clusterCapacity.diskGb(),
- clusterCapacity.bandwidthGbps());
+ return new NodeResources(clusterCapacity.vcpu(),
+ clusterCapacity.memoryGb(),
+ clusterCapacity.diskGb(),
+ clusterCapacity.bandwidthGbps(),
+ NodeResources.DiskSpeed.valueOf(clusterCapacity.diskSpeed()),
+ NodeResources.StorageType.valueOf(clusterCapacity.storageType()),
+ NodeResources.Architecture.valueOf(clusterCapacity.architecture()));
+ }
+
+ private static List<Node> findEmptyOrRemovableHosts(List<Node> provisionedSnapshot) {
+ // Group nodes by parent; no parent means it's a host.
+ var nodesByParent = provisionedSnapshot.stream().collect(groupingBy(Node::parentHostname));
+
+ // Find all hosts that we once thought were empty (first clause), or whose children are now all removable (second clause).
+ return getHosts(nodesByParent).stream()
+ .filter(host -> host.hostEmptyAt().isPresent() || allChildrenCanBeDeprovisioned(nodesByParent, host))
+ .toList();
+ }
+
+ private static List<Node> getHosts(Map<Optional<String>, List<Node>> nodesByParent) {
+ return nodesByParent.get(Optional.<String>empty());
}
+
+ private static List<Node> getChildren(Map<Optional<String>, List<Node>> nodesByParent, Node host) {
+ return nodesByParent.getOrDefault(Optional.of(host.hostname()), List.of());
+ }
+
+ private static boolean allChildrenCanBeDeprovisioned(Map<Optional<String>, List<Node>> nodesByParent, Node host) {
+ return getChildren(nodesByParent, host).stream().allMatch(HostCapacityMaintainer::canDeprovision);
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostResumeProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostResumeProvisioner.java
index e1624183607..fe89ba17469 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostResumeProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostResumeProvisioner.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
+import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.NodeType;
import com.yahoo.jdisc.Metric;
import com.yahoo.transaction.Mutex;
@@ -49,18 +50,15 @@ public class HostResumeProvisioner extends NodeRepositoryMaintainer {
NodeList hosts = allNodes.state(Node.State.provisioned).nodeType(NodeType.host, NodeType.confighost, NodeType.controllerhost);
int failures = 0;
for (Node host : hosts) {
- NodeList children = allNodes.childrenOf(host);
try {
- log.log(Level.INFO, "Provisioning " + host.hostname() + " with " + children.size() + " children");
- HostIpConfig hostIpConfig = hostProvisioner.provision(host, children.asSet());
- setIpConfig(host, children, hostIpConfig);
+ HostIpConfig hostIpConfig = hostProvisioner.provision(host);
+ setIpConfig(host, hostIpConfig);
} catch (IllegalArgumentException | IllegalStateException e) {
- log.log(Level.INFO, "Could not provision " + host.hostname() + " with " + children.size() + " children, will retry in " +
+ log.log(Level.INFO, "Could not provision " + host.hostname() + ", will retry in " +
interval() + ": " + Exceptions.toMessageString(e));
} catch (FatalProvisioningException e) {
failures++;
- log.log(Level.SEVERE, "Failed to provision " + host.hostname() + " with " + children.size() +
- " children, failing out the host recursively", e);
+ log.log(Level.SEVERE, "Failed to provision " + host.hostname() + ", failing out the host recursively", e);
nodeRepository().nodes().failOrMarkRecursively(
host.hostname(), Agent.HostResumeProvisioner, "Failed by HostResumeProvisioner due to provisioning failure");
} catch (RuntimeException e) {
@@ -75,19 +73,17 @@ public class HostResumeProvisioner extends NodeRepositoryMaintainer {
return asSuccessFactorDeviation(hosts.size(), failures);
}
- private void setIpConfig(Node host, NodeList children, HostIpConfig hostIpConfig) {
+ private void setIpConfig(Node host, HostIpConfig hostIpConfig) {
if (hostIpConfig.isEmpty()) return;
- NodeList nodes = NodeList.of(host).and(children);
- for (var node : nodes) {
- verifyDns(node, hostIpConfig.require(node.hostname()));
- }
+ hostIpConfig.asMap().forEach((hostname, ipConfig) ->
+ verifyDns(hostname, host.type(), host.cloudAccount(), ipConfig));
nodeRepository().nodes().setIpConfig(hostIpConfig);
}
/** Verify DNS configuration of given node */
- private void verifyDns(Node node, IP.Config ipConfig) {
+ private void verifyDns(String hostname, NodeType hostType, CloudAccount cloudAccount, IP.Config ipConfig) {
for (String ipAddress : ipConfig.primary()) {
- IP.verifyDns(node.hostname(), ipAddress, node.type(), nodeRepository().nameResolver(), node.cloudAccount(), nodeRepository().zone());
+ IP.verifyDns(hostname, ipAddress, hostType, nodeRepository().nameResolver(), cloudAccount, nodeRepository().zone());
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java
index aa7aac34389..503ac4be86c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java
@@ -3,6 +3,8 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.Node.State;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
@@ -40,9 +42,9 @@ public class InactiveExpirer extends Expirer {
@Override
protected void expire(List<Node> expired) {
- expired.forEach(node -> {
- nodeRepository.nodes().deallocate(node, Agent.InactiveExpirer, "Expired by InactiveExpirer");
- });
+ nodeRepository.nodes().performOn(NodeList.copyOf(expired),
+ node -> node.state() == State.inactive && isExpired(node),
+ (node, lock) -> nodeRepository.nodes().deallocate(node, Agent.InactiveExpirer, "Expired by InactiveExpirer"));
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirer.java
index 6f06a2ac22e..2484f496ece 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirer.java
@@ -25,6 +25,8 @@ public class ReservationExpirer extends Expirer {
}
@Override
- protected void expire(List<Node> expired) { nodeRepository().nodes().deallocate(expired, Agent.ReservationExpirer, "Expired by ReservationExpirer"); }
+ protected void expire(List<Node> expired) {
+ nodeRepository().nodes().deallocate(expired, Agent.ReservationExpirer, "Expired by ReservationExpirer");
+ }
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java
index cc7db3c138a..1ff6d2b300d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java
@@ -113,7 +113,7 @@ public record IP() {
*
* @throws IllegalArgumentException if there are IP conflicts with existing nodes
*/
- public static List<Node> verify(List<Node> nodes, LockedNodeList allNodes) {
+ public static LockedNodeList verify(List<Node> nodes, LockedNodeList allNodes) {
NodeList sortedNodes = allNodes.sortedBy(Comparator.comparing(Node::hostname));
for (var node : nodes) {
for (var other : sortedNodes) {
@@ -135,7 +135,7 @@ public record IP() {
other.hostname());
}
}
- return nodes;
+ return allNodes.childList(nodes);
}
/** Returns whether IP address of existing node can be assigned to node */
@@ -152,7 +152,7 @@ public record IP() {
}
public static Node verify(Node node, LockedNodeList allNodes) {
- return verify(List.of(node), allNodes).get(0);
+ return verify(List.of(node), allNodes).asList().get(0);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index b10a371e8bd..490e7b9ac33 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -1,7 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.node;
-import com.yahoo.collections.ListMap;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationTransaction;
@@ -10,6 +9,7 @@ import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.Zone;
+import com.yahoo.time.TimeBudget;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.applicationmodel.HostName;
@@ -17,6 +17,7 @@ import com.yahoo.vespa.applicationmodel.InfrastructureApplication;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.NoSuchNodeException;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.Node.State;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.applications.Applications;
@@ -31,20 +32,26 @@ import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Iterator;
import java.util.List;
-import java.util.Map;
-import java.util.Objects;
+import java.util.NavigableSet;
import java.util.Optional;
import java.util.Set;
+import java.util.TreeSet;
import java.util.function.BiFunction;
+import java.util.function.Function;
import java.util.function.Predicate;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
import static com.yahoo.vespa.hosted.provision.restapi.NodePatcher.DROP_DOCUMENTS_REPORT;
+import static java.util.Comparator.comparing;
+import static java.util.stream.Collectors.groupingBy;
+import static java.util.stream.Collectors.joining;
/**
* The nodes in the node repo and their state transitions
@@ -148,7 +155,7 @@ public class Nodes {
if (existing.isPresent())
throw new IllegalStateException("Cannot add " + node + ": A node with this name already exists");
}
- return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
+ return db.addNodesInState(nodes, Node.State.reserved, Agent.system);
}
/**
@@ -157,7 +164,8 @@ public class Nodes {
* with the history of that node.
*/
public List<Node> addNodes(List<Node> nodes, Agent agent) {
- try (Mutex lock = lockUnallocated()) {
+ try (NodeMutexes existingNodesLocks = lockAndGetAll(nodes, Optional.empty()); // Locks for any existing nodes we may remove.
+ Mutex allocationLock = lockUnallocated()) {
List<Node> nodesToAdd = new ArrayList<>();
List<Node> nodesToRemove = new ArrayList<>();
for (int i = 0; i < nodes.size(); i++) {
@@ -194,7 +202,7 @@ public class Nodes {
}
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(nodesToRemove, transaction);
- List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
+ List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(allocationLock)), Node.State.provisioned, agent, transaction);
transaction.commit();
return resultingNodes;
}
@@ -218,7 +226,7 @@ public class Nodes {
}
/** Activate nodes. This method does <b>not</b> lock the node repository. */
- public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
+ public List<Node> activate(List<Node> nodes, ApplicationTransaction transaction) {
return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
}
@@ -229,8 +237,7 @@ public class Nodes {
* @param reusable move the node directly to {@link Node.State#dirty} after removal
*/
public void setRemovable(NodeList nodes, boolean reusable) {
- performOn(nodes, (node, mutex) -> write(node.with(node.allocation().get().removable(true, reusable)),
- mutex));
+ performOn(nodes, (node, mutex) -> write(node.with(node.allocation().get().removable(true, reusable)), mutex));
}
/**
@@ -239,7 +246,7 @@ public class Nodes {
*/
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
if ( ! zone.environment().isProduction() || zone.system().isCd())
- return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());
+ return deallocate(nodes, Agent.application, "Deactivated by application", transaction);
NodeList nodeList = NodeList.copyOf(nodes);
NodeList stateless = nodeList.stateless();
@@ -247,9 +254,9 @@ public class Nodes {
NodeList statefulToInactive = stateful.not().reusable();
NodeList statefulToDirty = stateful.reusable();
List<Node> written = new ArrayList<>();
- written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
- written.addAll(deallocate(statefulToDirty.asList(), Agent.application, "Deactivated by application (recycled)", transaction.nested()));
- written.addAll(db.writeTo(Node.State.inactive, statefulToInactive.asList(), Agent.application, Optional.empty(), transaction.nested()));
+ written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction));
+ written.addAll(deallocate(statefulToDirty.asList(), Agent.application, "Deactivated by application (recycled)", transaction));
+ written.addAll(db.writeTo(Node.State.inactive, statefulToInactive.asList(), Agent.application, Optional.empty(), transaction));
return written;
}
@@ -258,21 +265,9 @@ public class Nodes {
* transaction commits.
*/
public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
- return fail(nodes, Agent.application, "Failed by application", transaction.nested());
- }
-
- public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
- NestedTransaction transaction = new NestedTransaction();
- nodes = fail(nodes, agent, reason, transaction);
- transaction.commit();
- return nodes;
- }
-
- private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
- nodes = nodes.stream()
- .map(n -> n.withWantToFail(false, agent, clock.instant()))
- .toList();
- return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
+ return db.writeTo(Node.State.failed,
+ nodes.stream().map(n -> n.withWantToFail(false, Agent.application, clock.instant())).toList(),
+ Agent.application, Optional.of("Failed by application"), transaction);
}
/** Move nodes to the dirty state */
@@ -282,40 +277,48 @@ public class Nodes {
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
Node nodeToDirty = node(hostname).orElseThrow(() -> new NoSuchNodeException("Could not deallocate " + hostname + ": Node not found"));
-
- List<Node> nodesToDirty =
- (nodeToDirty.type().isHost() ?
- Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
- Stream.of(nodeToDirty)).filter(node -> node.state() != Node.State.dirty).toList();
- List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
- .filter(node -> node.state() != Node.State.provisioned)
- .filter(node -> node.state() != Node.State.failed)
- .filter(node -> node.state() != Node.State.parked)
- .filter(node -> node.state() != Node.State.breakfixed)
- .map(Node::hostname).toList();
- if ( ! hostnamesNotAllowedToDirty.isEmpty())
- illegal("Could not deallocate " + nodeToDirty + ": " +
- hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
-
- return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).toList();
+ List<Node> nodesToDirty = new ArrayList<>();
+ try (RecursiveNodeMutexes locked = lockAndGetRecursively(hostname, Optional.empty())) {
+ for (NodeMutex child : locked.children())
+ if (child.node().state() != Node.State.dirty)
+ nodesToDirty.add(child.node());
+
+ if (locked.parent().node().state() != State.dirty)
+ nodesToDirty.add(locked.parent().node());
+
+ List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
+ .filter(node -> node.state() != Node.State.provisioned)
+ .filter(node -> node.state() != Node.State.failed)
+ .filter(node -> node.state() != Node.State.parked)
+ .filter(node -> node.state() != Node.State.breakfixed)
+ .map(Node::hostname).toList();
+ if ( ! hostnamesNotAllowedToDirty.isEmpty())
+ illegal("Could not deallocate " + nodeToDirty + ": " +
+ hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");
+
+ return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).toList();
+ }
}
/**
- * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
+ * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
* Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
*/
public Node deallocate(Node node, Agent agent, String reason) {
- NestedTransaction transaction = new NestedTransaction();
- Node deallocated = deallocate(node, agent, reason, transaction);
- transaction.commit();
- return deallocated;
+ try (NodeMutex locked = lockAndGetRequired(node)) {
+ NestedTransaction transaction = new NestedTransaction();
+ Node deallocated = deallocate(locked.node(), agent, reason, transaction);
+ transaction.commit();
+ return deallocated;
+ }
}
- public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
- return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).toList();
+ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, ApplicationTransaction transaction) {
+ return nodes.stream().map(node -> deallocate(node, agent, reason, transaction.nested())).toList();
}
- public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
+ // Be sure to hold the right lock!
+ private Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
if (parkOnDeallocationOf(node, agent)) {
return park(node.hostname(), false, agent, reason, transaction);
} else {
@@ -339,7 +342,9 @@ public class Nodes {
}
public Node fail(String hostname, boolean forceDeprovision, Agent agent, String reason) {
- return move(hostname, Node.State.failed, agent, forceDeprovision, Optional.of(reason));
+ try (NodeMutex lock = lockAndGetRequired(hostname)) {
+ return move(hostname, Node.State.failed, agent, forceDeprovision, Optional.of(reason), lock);
+ }
}
/**
@@ -350,14 +355,16 @@ public class Nodes {
* @return all the nodes that were changed by this request
*/
public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
- NodeList children = list().childrenOf(hostname);
- List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));
-
- if (children.state(Node.State.active).isEmpty())
- changed.add(move(hostname, Node.State.failed, agent, false, Optional.of(reason)));
- else
- changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));
+ List<Node> changed = new ArrayList<>();
+ try (RecursiveNodeMutexes nodes = lockAndGetRecursively(hostname, Optional.empty())) {
+ for (NodeMutex child : nodes.children())
+ changed.add(failOrMark(child.node(), agent, reason, child));
+ if (changed.stream().noneMatch(child -> child.state() == Node.State.active))
+ changed.add(move(hostname, Node.State.failed, agent, false, Optional.of(reason), nodes.parent()));
+ else
+ changed.add(failOrMark(nodes.parent().node(), agent, reason, nodes.parent()));
+ }
return changed;
}
@@ -367,12 +374,14 @@ public class Nodes {
write(node, lock);
return node;
} else {
- return move(node.hostname(), Node.State.failed, agent, false, Optional.of(reason));
+ return move(node.hostname(), Node.State.failed, agent, false, Optional.of(reason), lock);
}
}
/** Update IP config for nodes in given config */
public void setIpConfig(HostIpConfig hostIpConfig) {
+ // Ideally this should hold the unallocated lock over the entire method, but the unallocated lock must be
+ // taken after the application lock, making this impossible.
Predicate<Node> nodeInConfig = (node) -> hostIpConfig.contains(node.hostname());
performOn(nodeInConfig, (node, lock) -> {
IP.Config ipConfig = hostIpConfig.require(node.hostname());
@@ -387,10 +396,12 @@ public class Nodes {
* @throws NoSuchNodeException if the node is not found
*/
public Node park(String hostname, boolean forceDeprovision, Agent agent, String reason) {
- NestedTransaction transaction = new NestedTransaction();
- Node parked = park(hostname, forceDeprovision, agent, reason, transaction);
- transaction.commit();
- return parked;
+ try (NodeMutex locked = lockAndGetRequired(hostname)) {
+ NestedTransaction transaction = new NestedTransaction();
+ Node parked = park(hostname, forceDeprovision, agent, reason, transaction);
+ transaction.commit();
+ return parked;
+ }
}
private Node park(String hostname, boolean forceDeprovision, Agent agent, String reason, NestedTransaction transaction) {
@@ -413,36 +424,38 @@ public class Nodes {
* @throws NoSuchNodeException if the node is not found
*/
public Node reactivate(String hostname, Agent agent, String reason) {
- return move(hostname, Node.State.active, agent, false, Optional.of(reason));
+ try (NodeMutex lock = lockAndGetRequired(hostname)) {
+ return move(hostname, Node.State.active, agent, false, Optional.of(reason), lock);
+ }
}
/**
* Moves a host to breakfixed state, removing any children.
*/
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
- Node node = requireNode(hostname);
- try (Mutex lock = lockUnallocated()) {
- requireBreakfixable(node);
+ try (RecursiveNodeMutexes locked = lockAndGetRecursively(hostname, Optional.empty())) {
+ requireBreakfixable(locked.parent().node());
NestedTransaction transaction = new NestedTransaction();
- List<Node> removed = removeChildren(node, false, transaction);
- removed.add(move(node.hostname(), Node.State.breakfixed, agent, false, Optional.of(reason), transaction));
+ removeChildren(locked, false, transaction);
+ move(hostname, Node.State.breakfixed, agent, false, Optional.of(reason), transaction);
transaction.commit();
- return removed;
+ return locked.nodes().nodes().stream().map(NodeMutex::node).toList();
}
}
private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
- NestedTransaction transaction = new NestedTransaction();
- List<Node> moved = list().childrenOf(hostname).asList().stream()
- .map(child -> move(child.hostname(), toState, agent, false, reason, transaction))
- .collect(Collectors.toCollection(ArrayList::new));
- moved.add(move(hostname, toState, agent, false, reason, transaction));
- transaction.commit();
- return moved;
+ try (RecursiveNodeMutexes locked = lockAndGetRecursively(hostname, Optional.empty())) {
+ List<Node> moved = new ArrayList<>();
+ NestedTransaction transaction = new NestedTransaction();
+ for (NodeMutex node : locked.nodes().nodes())
+ moved.add(move(node.node().hostname(), toState, agent, false, reason, transaction));
+ transaction.commit();
+ return moved;
+ }
}
/** Move a node to given state */
- private Node move(String hostname, Node.State toState, Agent agent, boolean forceDeprovision, Optional<String> reason) {
+ private Node move(String hostname, Node.State toState, Agent agent, boolean forceDeprovision, Optional<String> reason, Mutex lock) {
NestedTransaction transaction = new NestedTransaction();
Node moved = move(hostname, toState, agent, forceDeprovision, reason, transaction);
transaction.commit();
@@ -451,8 +464,7 @@ public class Nodes {
/** Move a node to given state as part of a transaction */
private Node move(String hostname, Node.State toState, Agent agent, boolean forceDeprovision, Optional<String> reason, NestedTransaction transaction) {
- // TODO: Work out a safe lock acquisition strategy for moves. Lock is only held while adding operations to
- // transaction, but lock must also be held while committing
+ // TODO: This lock is already held here, but we still need to read the node. Perhaps change to requireNode(hostname) later.
try (NodeMutex lock = lockAndGetRequired(hostname)) {
Node node = lock.node();
if (toState == Node.State.active) {
@@ -521,17 +533,18 @@ public class Nodes {
}
public List<Node> removeRecursively(Node node, boolean force) {
- try (Mutex lock = lockUnallocated()) {
- requireRemovable(node, false, force);
+ try (RecursiveNodeMutexes locked = lockAndGetRecursively(node.hostname(), Optional.empty())) {
+ requireRemovable(locked.parent().node(), false, force);
NestedTransaction transaction = new NestedTransaction();
List<Node> removed;
- if (!node.type().isHost()) {
+ if ( ! node.type().isHost()) {
removed = List.of(node);
db.removeNodes(removed, transaction);
- } else {
- removed = removeChildren(node, force, transaction);
+ }
+ else {
+ removeChildren(locked, force, transaction);
move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
- removed.add(node);
+ removed = locked.nodes().nodes().stream().map(NodeMutex::node).toList();
}
transaction.commit();
return removed;
@@ -540,20 +553,22 @@ public class Nodes {
/** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
public void forget(Node node) {
- if (node.state() != Node.State.deprovisioned)
- throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
- if (node.status().wantToRebuild())
- throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
- NestedTransaction transaction = new NestedTransaction();
- db.removeNodes(List.of(node), transaction);
- transaction.commit();
+ try (NodeMutex locked = lockAndGetRequired(node.hostname())) {
+ if (node.state() != Node.State.deprovisioned)
+ throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
+ if (node.status().wantToRebuild())
+ throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
+ NestedTransaction transaction = new NestedTransaction();
+ db.removeNodes(List.of(node), transaction);
+ transaction.commit();
+ }
}
- private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
- List<Node> children = list().childrenOf(node).asList();
+ private void removeChildren(RecursiveNodeMutexes nodes, boolean force, NestedTransaction transaction) {
+ if (nodes.children().isEmpty()) return;
+ List<Node> children = nodes.children().stream().map(NodeMutex::node).toList();
children.forEach(child -> requireRemovable(child, true, force));
db.removeNodes(children, transaction);
- return new ArrayList<>(children);
}
/**
@@ -715,8 +730,8 @@ public class Nodes {
return db.writeTo(nodes, Agent.system, Optional.empty());
}
- private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
- return performOn(list().matching(filter), action);
+ public List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
+ return performOn(list(), filter, action);
}
/**
@@ -725,35 +740,33 @@ public class Nodes {
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
- private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
- List<Node> unallocatedNodes = new ArrayList<>();
- ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
+ public List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
+ return performOn(nodes, __ -> true, action);
+ }
- // Group matching nodes by the lock needed
- for (Node node : nodes) {
- Optional<ApplicationId> applicationId = applicationIdForLock(node);
- if (applicationId.isPresent())
- allocatedNodes.put(applicationId.get(), node);
- else
- unallocatedNodes.add(node);
- }
+ public List<Node> performOn(NodeList nodes, Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
+ List<Node> resultingNodes = new ArrayList<>();
+ nodes.stream().collect(groupingBy(Nodes::applicationIdForLock))
+ .forEach((applicationId, nodeList) -> { // Grouped only to reduce number of lock acquire/release cycles.
+ try (NodeMutexes locked = lockAndGetAll(nodeList, Optional.empty())) {
+ for (NodeMutex node : locked.nodes())
+ if (filter.test(node.node()))
+ resultingNodes.add(action.apply(node.node(), node));
+ }
+ });
+ return resultingNodes;
+ }
+
+ public List<Node> performOnRecursively(NodeList parents, Predicate<RecursiveNodeMutexes> filter, Function<RecursiveNodeMutexes, List<Node>> action) {
+ for (Node node : parents)
+ if (node.parentHostname().isPresent())
+ throw new IllegalArgumentException(node + " is not a parent host");
- // Perform operation while holding appropriate lock
List<Node> resultingNodes = new ArrayList<>();
- try (Mutex lock = lockUnallocated()) {
- for (Node node : unallocatedNodes) {
- Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding lock
- if (currentNode.isEmpty()) continue;
- resultingNodes.add(action.apply(currentNode.get(), lock));
- }
- }
- for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
- try (Mutex lock = applications.lock(applicationNodes.getKey())) {
- for (Node node : applicationNodes.getValue()) {
- Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding lock
- if (currentNode.isEmpty()) continue;
- resultingNodes.add(action.apply(currentNode.get(), lock));
- }
+ for (Node parent : parents) {
+ try (RecursiveNodeMutexes locked = lockAndGetRecursively(parent.hostname(), Optional.empty())) {
+ if (filter.test(locked))
+ resultingNodes.addAll(action.apply(locked));
}
}
return resultingNodes;
@@ -816,9 +829,7 @@ public class Nodes {
return Optional.empty();
}
- if (node.type() != NodeType.tenant ||
- Objects.equals(freshNode.get().allocation().map(Allocation::owner),
- staleNode.allocation().map(Allocation::owner))) {
+ if (applicationIdForLock(freshNode.get()).equals(applicationIdForLock(staleNode))) {
NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
lockToClose = null;
return Optional.of(nodeMutex);
@@ -879,6 +890,168 @@ public class Nodes {
return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}
+ /**
+ * Locks the children of the given node, the node itself, and finally takes the unallocated lock.
+ * <br>
+ * When taking multiple locks, it's crucial that we always take them in the same order, to avoid deadlocks.
+ * We want to take the most contended locks last, so that we don't block other operations for longer than necessary.
+ * This method does that, by first taking the locks for any children the given node may have, and then the node itself.
+ * (This is enforced by taking host locks after tenant node locks, in {@link #lockAndGetAll(Collection, Optional)}.)
+ * Finally, the allocation lock is taken, to ensure no new children are added while we hold this snapshot.
+ * Unfortunately, since that lock is taken last, we may detect new nodes after taking it, and then we have to retry.
+ * Closing the returned {@link RecursiveNodeMutexes} will release all the locks, and the locks should not be closed elsewhere.
+ */
+ public RecursiveNodeMutexes lockAndGetRecursively(String hostname, Optional<Duration> timeout) {
+ TimeBudget budget = TimeBudget.fromNow(clock, timeout.orElse(Duration.ofMinutes(2)));
+ Set<Node> children = new HashSet<>(list().childrenOf(hostname).asList());
+ Optional<Node> node = node(hostname);
+
+ int attempts = 5; // We'll retry locking the whole list of children this many times, in case new children appear.
+ for (int attempt = 0; attempt < attempts; attempt++) {
+ NodeMutexes mutexes = null;
+ Mutex unallocatedLock = null;
+ try {
+ // First, we lock all the children, and the host; then we take the allocation lock to ensure our snapshot is valid.
+ List<Node> nodes = new ArrayList<>(children.size() + 1);
+ nodes.addAll(children);
+ node.ifPresent(nodes::add);
+ mutexes = lockAndGetAll(nodes, budget.timeLeftOrThrow());
+ unallocatedLock = db.lockInactive(budget.timeLeftOrThrow().get());
+ RecursiveNodeMutexes recursive = new RecursiveNodeMutexes(hostname, mutexes, unallocatedLock);
+ Set<Node> freshChildren = list().childrenOf(hostname).asSet();
+ Optional<Node> freshNode = recursive.parent.map(NodeMutex::node);
+ if (children.equals(freshChildren) && node.equals(freshNode)) {
+ // No new nodes have appeared, and none will now, so we have a consistent snapshot.
+ if (node.isEmpty() && ! children.isEmpty())
+ throw new IllegalStateException("node '" + hostname + "' was not found, but it has children: " + children);
+
+ mutexes = null;
+ unallocatedLock = null;
+ return recursive;
+ }
+ else {
+ // New nodes have appeared, so we need to let go of the locks and try again with the new set of nodes.
+ children = freshChildren;
+ node = freshNode;
+ }
+ }
+ finally {
+ if (unallocatedLock != null) unallocatedLock.close();
+ if (mutexes != null) mutexes.close();
+ }
+ }
+ throw new IllegalStateException("giving up (after " + attempts + " attempts) fetching an up to " +
+ "date recursive node set under lock for node " + hostname);
+ }
+
+ /** Locks all nodes in the given list, in a universal order, and returns the locks and nodes required. */
+ public NodeMutexes lockAndRequireAll(Collection<Node> nodes, Optional<Duration> timeout) {
+ return lockAndGetAll(nodes, timeout, true);
+ }
+
+ /** Locks all nodes in the given list, in a universal order, and returns the locks and nodes acquired. */
+ public NodeMutexes lockAndGetAll(Collection<Node> nodes, Optional<Duration> timeout) {
+ return lockAndGetAll(nodes, timeout, false);
+ }
+
+ /** Locks all nodes in the given list, in a universal order, and returns the locks and nodes. */
+ private NodeMutexes lockAndGetAll(Collection<Node> nodes, Optional<Duration> timeout, boolean required) {
+ TimeBudget budget = TimeBudget.fromNow(clock, timeout.orElse(Duration.ofMinutes(2)));
+ Comparator<Node> universalOrder = (a, b) -> {
+ Optional<ApplicationId> idA = applicationIdForLock(a);
+ Optional<ApplicationId> idB = applicationIdForLock(b);
+ if (idA.isPresent() != idB.isPresent()) return idA.isPresent() ? -1 : 1; // Allocated nodes first.
+ if (a.type() != b.type()) return a.type().compareTo(b.type()); // Tenant nodes first among those.
+ if ( ! idA.equals(idB)) return idA.get().compareTo(idB.get()); // Sort primarily by tenant owner id.
+ return a.hostname().compareTo(b.hostname()); // Sort secondarily by hostname.
+ };
+ NavigableSet<NodeMutex> locked = new TreeSet<>(comparing(NodeMutex::node, universalOrder));
+ NavigableSet<Node> unlocked = new TreeSet<>(universalOrder);
+ unlocked.addAll(nodes);
+ try {
+ int attempts = 10; // We'll accept getting the wrong lock at most this many times before giving up.
+ for (int attempt = 0; attempt < attempts; ) {
+ if (unlocked.isEmpty()) {
+ NodeMutexes mutexes = new NodeMutexes(List.copyOf(locked));
+ locked.clear();
+ return mutexes;
+ }
+
+ // If the first node is now earlier in lock order than some other locks we have, we need to close those and re-acquire them.
+ Node next = unlocked.pollFirst();
+ Set<NodeMutex> outOfOrder = locked.tailSet(new NodeMutex(next, () -> { }), false);
+ NodeMutexes.close(outOfOrder.iterator());
+ for (NodeMutex node : outOfOrder) unlocked.add(node.node());
+ outOfOrder.clear();
+
+ Mutex lock = lock(next, budget.timeLeftOrThrow());
+ try {
+ Optional<Node> fresh = node(next.hostname());
+ if (fresh.isEmpty()) {
+ if (required) throw new NoSuchNodeException("No node with hostname '" + next.hostname() + "'");
+ continue; // Node is gone; skip to close lock.
+ }
+
+ if (applicationIdForLock(fresh.get()).equals(applicationIdForLock(next))) {
+ // We held the right lock, so this node is ours now.
+ locked.add(new NodeMutex(fresh.get(), lock));
+ lock = null;
+ }
+ else {
+ // We held the wrong lock, and need to try again.
+ ++attempt;
+ unlocked.add(fresh.get());
+ }
+ }
+ finally {
+ // If we didn't hold the right lock, we must close the wrong one before we continue.
+ if (lock != null) lock.close();
+ }
+ }
+ throw new IllegalStateException("giving up (after " + attempts + " extra attempts) to lock nodes: " +
+ nodes.stream().map(Node::hostname).collect(joining(", ")));
+ }
+ finally {
+ // If we didn't manage to lock all nodes, we must close the ones we did lock before we throw.
+ NodeMutexes.close(locked.iterator());
+ }
+ }
+
+ /** A set of nodes with their locks, acquired in a universal order. */
+ public record NodeMutexes(List<NodeMutex> nodes) implements AutoCloseable {
+ @Override public void close() { close(nodes.iterator()); }
+ private static void close(Iterator<NodeMutex> nodes) {
+ if (nodes.hasNext()) try (NodeMutex node = nodes.next()) { close(nodes); }
+ }
+ }
+
+ /** A parent node, all its children, their locks acquired in a universal order, and then the unallocated lock. */
+ public static class RecursiveNodeMutexes implements AutoCloseable {
+
+ private final String hostname;
+ private final NodeMutexes nodes;
+ private final Mutex unallocatedLock;
+ private final List<NodeMutex> children;
+ private final Optional<NodeMutex> parent;
+
+ public RecursiveNodeMutexes(String hostname, NodeMutexes nodes, Mutex unallocatedLock) {
+ this.hostname = hostname;
+ this.nodes = nodes;
+ this.unallocatedLock = unallocatedLock;
+ this.children = nodes.nodes().stream().filter(node -> ! node.node().hostname().equals(hostname)).toList();
+ this.parent = nodes.nodes().stream().filter(node -> node.node().hostname().equals(hostname)).findFirst();
+ }
+
+ /** Any children of the node. */
+ public List<NodeMutex> children() { return children; }
+ /** The node itself; throws NoSuchNodeException if the node was not found. */
+ public NodeMutex parent() { return parent.orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); }
+ /** The node and any children, or empty if the node was not found. */
+ public NodeMutexes nodes() { return nodes; }
+ /** Closes the unallocated lock, and all the node locks. */
+ @Override public void close() { try (nodes; unallocatedLock) { } }
+ }
+
/** Returns the application ID that should be used for locking when modifying this node */
private static Optional<ApplicationId> applicationIdForLock(Node node) {
return switch (node.type()) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
index fc008b7b9dc..037338cb2ed 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
@@ -18,6 +18,7 @@ import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.curator.recipes.CuratorCounter;
import com.yahoo.vespa.curator.transaction.CuratorOperations;
import com.yahoo.vespa.curator.transaction.CuratorTransaction;
+import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.archive.ArchiveUris;
@@ -105,7 +106,7 @@ public class CuratorDb {
}
/** Adds a set of nodes. Rollbacks/fails transaction if any node is not in the expected state. */
- public List<Node> addNodesInState(List<Node> nodes, Node.State expectedState, Agent agent, NestedTransaction transaction) {
+ public List<Node> addNodesInState(LockedNodeList nodes, Node.State expectedState, Agent agent, NestedTransaction transaction) {
CuratorTransaction curatorTransaction = db.newCuratorTransactionIn(transaction);
for (Node node : nodes) {
if (node.state() != expectedState)
@@ -116,10 +117,10 @@ public class CuratorDb {
curatorTransaction.add(CuratorOperations.create(nodePath(node).getAbsolute(), serialized));
}
transaction.onCommitted(() -> nodes.forEach(node -> log.log(Level.INFO, "Added " + node)));
- return nodes;
+ return nodes.asList();
}
- public List<Node> addNodesInState(List<Node> nodes, Node.State expectedState, Agent agent) {
+ public List<Node> addNodesInState(LockedNodeList nodes, Node.State expectedState, Agent agent) {
NestedTransaction transaction = new NestedTransaction();
List<Node> writtenNodes = addNodesInState(nodes, expectedState, agent, transaction);
transaction.commit();
@@ -175,6 +176,7 @@ public class CuratorDb {
return writtenNodes;
}
}
+
public Node writeTo(Node.State toState, Node node, Agent agent, Optional<String> reason) {
return writeTo(toState, Collections.singletonList(node), agent, reason).get(0);
}
@@ -192,6 +194,12 @@ public class CuratorDb {
*/
public List<Node> writeTo(Node.State toState, List<Node> nodes,
Agent agent, Optional<String> reason,
+ ApplicationTransaction transaction) {
+ return writeTo(toState, nodes, agent, reason, transaction.nested());
+ }
+
+ public List<Node> writeTo(Node.State toState, List<Node> nodes,
+ Agent agent, Optional<String> reason,
NestedTransaction transaction) {
if (nodes.isEmpty()) return nodes;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index caf936e8aeb..c25f33bc8c2 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -88,7 +88,7 @@ class Activator {
NodeList activeToRemove = oldActive.matching(node -> ! hostnames.contains(node.hostname()));
remove(activeToRemove, transaction); // TODO: Pass activation time in this call and next line
- nodeRepository.nodes().activate(newActive.asList(), transaction.nested()); // activate also continued active to update node state
+ nodeRepository.nodes().activate(newActive.asList(), transaction); // activate also continued active to update node state
rememberResourceChange(transaction, generation, activationTime,
oldActive.not().retired(),
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java
index 2bc5a0719d9..2e9cca21052 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java
@@ -5,12 +5,18 @@ import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provisioning.FlavorsConfig;
+import static com.yahoo.config.provision.Flavor.Type.BARE_METAL;
+import static com.yahoo.config.provision.Flavor.Type.DOCKER_CONTAINER;
import static com.yahoo.config.provision.NodeResources.Architecture;
+import static com.yahoo.config.provision.NodeResources.Architecture.arm64;
+import static com.yahoo.config.provision.NodeResources.Architecture.x86_64;
/**
* Simplifies creation of a node-repository config containing flavors.
* This is needed because the config builder API is inconvenient.
*
+ * Note: Flavors added will have fast disk and remote storage unless explicitly specified.
+ *
* @author bratseth
*/
public class FlavorConfigBuilder {
@@ -27,7 +33,7 @@ public class FlavorConfigBuilder {
double disk,
double bandwidth,
Flavor.Type type) {
- return addFlavor(flavorName, cpu, mem, disk, bandwidth, true, true, type, Architecture.x86_64, 0, 0);
+ return addFlavor(flavorName, cpu, mem, disk, bandwidth, true, true, type, x86_64, 0, 0);
}
public FlavorsConfig.Flavor.Builder addFlavor(String flavorName,
@@ -69,31 +75,20 @@ public class FlavorConfigBuilder {
/** Convenience method which creates a node flavors instance from a list of flavor names */
public static NodeFlavors createDummies(String... flavors) {
-
- FlavorConfigBuilder flavorConfigBuilder = new FlavorConfigBuilder();
+ FlavorConfigBuilder builder = new FlavorConfigBuilder();
for (String flavorName : flavors) {
- if (flavorName.equals("docker"))
- flavorConfigBuilder.addFlavor(flavorName, 1., 30., 20., 1.5, Flavor.Type.DOCKER_CONTAINER);
- else if (flavorName.equals("docker2"))
- flavorConfigBuilder.addFlavor(flavorName, 2., 40., 40., 0.5, Flavor.Type.DOCKER_CONTAINER);
- else if (flavorName.equals("host"))
- flavorConfigBuilder.addFlavor(flavorName, 7., 100., 120., 5, Flavor.Type.BARE_METAL);
- else if (flavorName.equals("host2"))
- flavorConfigBuilder.addFlavor(flavorName, 16, 24, 100, 1, Flavor.Type.BARE_METAL);
- else if (flavorName.equals("host3"))
- flavorConfigBuilder.addFlavor(flavorName, 24, 64, 100, 10, Flavor.Type.BARE_METAL);
- else if (flavorName.equals("host4"))
- flavorConfigBuilder.addFlavor(flavorName, 48, 128, 1000, 10, Flavor.Type.BARE_METAL);
- else if (flavorName.equals("devhost"))
- flavorConfigBuilder.addFlavor(flavorName, 4., 80., 100, 10, Flavor.Type.BARE_METAL);
- else if (flavorName.equals("arm64"))
- flavorConfigBuilder.addFlavor(flavorName,2., 30., 20., 3, Flavor.Type.BARE_METAL, Architecture.arm64);
- else if (flavorName.equals("gpu"))
- flavorConfigBuilder.addFlavor(flavorName,4, 16, 125, 10, true, false, Flavor.Type.BARE_METAL, Architecture.x86_64, 1, 16);
- else
- flavorConfigBuilder.addFlavor(flavorName, 1., 30., 20., 3, Flavor.Type.BARE_METAL);
+ switch (flavorName) {
+ case "docker" -> builder.addFlavor(flavorName, 1., 30., 20., 1.5, DOCKER_CONTAINER);
+ case "host" -> builder.addFlavor(flavorName, 7., 100., 120., 5, BARE_METAL);
+ case "host2" -> builder.addFlavor(flavorName, 16, 24, 100, 1, BARE_METAL);
+ case "host3" -> builder.addFlavor(flavorName, 24, 64, 100, 10, BARE_METAL);
+ case "host4" -> builder.addFlavor(flavorName, 48, 128, 1000, 10, BARE_METAL);
+ case "arm64" -> builder.addFlavor(flavorName, 2., 30., 20., 3, BARE_METAL, arm64);
+ case "gpu" -> builder.addFlavor(flavorName, 4, 16, 125, 10, true, false, BARE_METAL, x86_64, 1, 16);
+ default -> builder.addFlavor(flavorName, 1., 30., 20., 3, BARE_METAL);
+ }
}
- return new NodeFlavors(flavorConfigBuilder.build());
+ return new NodeFlavors(builder.build());
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
index 397eb4d7af9..dd838375a59 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
@@ -7,7 +7,6 @@ import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.vespa.hosted.provision.Node;
import java.util.List;
-import java.util.Set;
import java.util.function.Consumer;
/**
@@ -46,12 +45,11 @@ public interface HostProvisioner {
* Continue provisioning of given list of Nodes.
*
* @param host the host to provision
- * @param children list of all the nodes that run on the given host
* @return IP config for the provisioned host and its children
* @throws FatalProvisioningException if the provisioning has irrecoverably failed and the input nodes
* should be deleted from node-repo.
*/
- HostIpConfig provision(Node host, Set<Node> children) throws FatalProvisioningException;
+ HostIpConfig provision(Node host) throws FatalProvisioningException;
/**
* Deprovisions a given host and resources associated with it and its children (such as DNS entries).
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
index 3d5987cd04d..bc10a97068e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
@@ -97,15 +97,13 @@ public class MockHostProvisioner implements HostProvisioner {
}
@Override
- public HostIpConfig provision(Node host, Set<Node> children) throws FatalProvisioningException {
+ public HostIpConfig provision(Node host) throws FatalProvisioningException {
if (behaviour(Behaviour.failProvisioning)) throw new FatalProvisioningException("Failed to provision node(s)");
if (host.state() != Node.State.provisioned) throw new IllegalStateException("Host to provision must be in " + Node.State.provisioned);
Map<String, IP.Config> result = new HashMap<>();
result.put(host.hostname(), createIpConfig(host));
- for (var child : children) {
- if (child.state() != Node.State.reserved) throw new IllegalStateException("Child to provisioned must be in " + Node.State.reserved);
- result.put(child.hostname(), createIpConfig(child));
- }
+ host.ipConfig().pool().hostnames().forEach(hostname ->
+ result.put(hostname.value(), IP.Config.ofEmptyPool(nameResolver.resolveAll(hostname.value()))));
return new HostIpConfig(result);
}
@@ -199,8 +197,6 @@ public class MockHostProvisioner implements HostProvisioner {
return this;
}
- public Optional<Flavor> getHostFlavor(ClusterSpec.Type type) { return Optional.ofNullable(hostFlavors.get(type)); }
-
public MockHostProvisioner addEvent(HostEvent event) {
hostEvents.add(event);
return this;
@@ -230,18 +226,17 @@ public class MockHostProvisioner implements HostProvisioner {
}
public IP.Config createIpConfig(Node node) {
- if (!node.type().isHost()) {
- return node.ipConfig().withPrimary(nameResolver.resolveAll(node.hostname()));
- }
+ if (!node.type().isHost()) throw new IllegalArgumentException("Node " + node + " is not a host");
int hostIndex = Integer.parseInt(node.hostname().replaceAll("^[a-z]+|-\\d+$", ""));
Set<String> addresses = Set.of("::" + hostIndex + ":0");
Set<String> ipAddressPool = new HashSet<>();
if (!behaviour(Behaviour.failDnsUpdate)) {
nameResolver.addRecord(node.hostname(), addresses.iterator().next());
- for (int i = 1; i <= 2; i++) {
- String ip = "::" + hostIndex + ":" + i;
+ int i = 1;
+ for (HostName hostName : node.ipConfig().pool().hostnames()) {
+ String ip = "::" + hostIndex + ":" + i++;
ipAddressPool.add(ip);
- nameResolver.addRecord(node.hostname() + "-" + i, ip);
+ nameResolver.addRecord(hostName.value(), ip);
}
}
IP.Pool pool = node.ipConfig().pool().withIpAddresses(ipAddressPool);
@@ -250,7 +245,7 @@ public class MockHostProvisioner implements HostProvisioner {
public enum Behaviour {
- /** Fail call to {@link MockHostProvisioner#provision(com.yahoo.vespa.hosted.provision.Node, java.util.Set)} */
+ /** Fail call to {@link MockHostProvisioner#provision(com.yahoo.vespa.hosted.provision.Node)} */
failProvisioning,
/** Fail call to {@link MockHostProvisioner#provisionHosts(HostProvisionRequest, Consumer)} */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNameResolver.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNameResolver.java
index 94cb05d20cc..722dc5ef96c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNameResolver.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNameResolver.java
@@ -6,7 +6,6 @@ import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
import java.net.UnknownHostException;
import java.util.Arrays;
-import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
index b7d6e0a9dd9..714374ccb8a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
@@ -176,7 +176,7 @@ public class MockNodeRepository extends NodeRepository {
.build());
// Ready all nodes, except 7 and 55
- nodes = nodes().addNodes(nodes, Agent.system);
+ nodes = new ArrayList<>(nodes().addNodes(nodes, Agent.system));
nodes.remove(node7);
nodes.remove(node55);
nodes = nodes().deallocate(nodes, Agent.system, getClass().getSimpleName());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockProvisioner.java
index 5a9da1e1c3f..d8ad892e210 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockProvisioner.java
@@ -18,6 +18,7 @@ import java.util.List;
/**
* @author freva
*/
+@SuppressWarnings("unused") // Injected in container from test code (services.xml)
public class MockProvisioner implements Provisioner {
@Override