path: root/node-repository/src/main/java
author     Håkon Hallingstad <hakon@yahooinc.com>   2022-07-11 11:29:09 +0200
committer  Håkon Hallingstad <hakon@yahooinc.com>   2022-07-11 11:29:09 +0200
commit     dac838f1fa390d59c000fdc91b54fc7218599c08 (patch)
tree       f189888d08401ed24734efe4caab8e0bab34d6ce /node-repository/src/main/java
parent     c6c08db4fc7579fed53f7920e7966ede47c97e7c (diff)
Revert "Avoid the host lock while failing the children"
This reverts commit 2cdaef56e18ace2ee2269d28f959f5a534bd68ee.
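
Context for the revert: commit 2cdaef56 ("Avoid the host lock while failing the children") made failActive() collect a host's active children under the lock and fail them only after releasing it, because failing a child in a dynamically provisioned zone may require provisioning new hosts, which needs the host application lock. This revert restores the earlier flow, where the children are failed while the application lock is still held and the host is only marked wantToFail and redeployed if all of them succeed. Below is a minimal sketch of that restored flow; the AutoCloseable Mutex and the helper methods are simplified stand-in assumptions, not the real NodeRepository API.

import java.util.List;

/** Hypothetical stand-in for the real lock type; close() releases the lock. */
interface Mutex extends AutoCloseable {
    @Override void close();
}

class HostFailerSketch {

    interface Nodes {
        Mutex lockApplicationOf(String host);           // assumed: lock on the host's owner application
        List<String> activeChildrenOf(String host);     // assumed: active tenant nodes running on the host
        boolean failChild(String child, String reason); // assumed: fail one child, true on success
        void markWantToFail(String host);               // assumed: flag the host so it gets no new allocations
        void activate(String host);                     // assumed: redeploy so the failure takes effect
    }

    /** Restored behaviour: children are failed while the application lock is still held. */
    static boolean failHost(Nodes nodes, String host, String reason) {
        try (Mutex lock = nodes.lockApplicationOf(host)) {
            boolean allChildrenFailed = true;
            for (String child : nodes.activeChildrenOf(host)) {
                allChildrenFailed &= nodes.failChild(
                        child, "Failing due to parent host " + host + " failure: " + reason);
            }
            if (!allChildrenFailed) return false;       // give up for now; a later maintenance run retries
            nodes.markWantToFail(host);
            nodes.activate(host);
            return true;
        }
    }
}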
Diffstat (limited to 'node-repository/src/main/java')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java  100
1 file changed, 46 insertions, 54 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index f9b3dae72c5..3c5b20da4d0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -12,7 +12,6 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
-import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.orchestrator.ApplicationIdNotFoundException;
import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
@@ -20,11 +19,9 @@ import com.yahoo.yolean.Exceptions;
import java.time.Duration;
import java.time.Instant;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
-import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.logging.Level;
@@ -109,6 +106,26 @@ public class NodeFailer extends NodeRepositoryMaintainer {
failActive(failing);
}
+ // Active hosts
+ NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
+ for (Node host : activeNodes.hosts().failing()) {
+ if ( ! activeNodes.childrenOf(host).isEmpty()) continue;
+ Optional<NodeMutex> locked = Optional.empty();
+ try {
+ attempts++;
+ locked = nodeRepository().nodes().lockAndGet(host);
+ if (locked.isEmpty()) continue;
+ nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer,
+ "Host should be failed and have no tenant nodes");
+ }
+ catch (Exception e) {
+ failures++;
+ }
+ finally {
+ locked.ifPresent(NodeMutex::close);
+ }
+ }
+
int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
@@ -136,9 +153,6 @@ public class NodeFailer extends NodeRepositoryMaintainer {
Set<FailingNode> failingNodes = new HashSet<>();
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
- for (Node host : activeNodes.hosts().failing())
- failingNodes.add(new FailingNode(host, "Host should be failed and have no tenant nodes"));
-
for (Node node : activeNodes) {
Instant graceTimeStart = clock().instant().minus(nodeRepository().nodes().suspended(node) ? suspendedDownTimeLimit : downTimeLimit);
if (node.isDown() && node.history().hasEventBefore(History.Event.Type.down, graceTimeStart) && !applicationSuspended(node)) {
@@ -227,63 +241,42 @@ public class NodeFailer extends NodeRepositoryMaintainer {
deployer.deployFromLocalActive(failing.node().allocation().get().owner(), Duration.ofMinutes(30));
if (deployment.isEmpty()) return false;
- // If the active node that we are trying to fail is of type host, we need to successfully fail all
- // the children nodes running on it before we fail the host. Failing a child node in a dynamically
- // provisioned zone may require provisioning new hosts that require the host application lock to be held,
- // so we must release ours before failing the children.
- List<FailingNode> activeChildrenToFail = new ArrayList<>();
- try (NodeMutex lock = nodeRepository().nodes().lockAndGetRequired(failing.node())) {
- // Now that we have gotten the node object under the proper lock, sanity-check it still makes sense to fail
- if (!Objects.equals(failing.node().allocation().map(Allocation::owner), lock.node().allocation().map(Allocation::owner)))
- return false;
- if (lock.node().state() == Node.State.failed)
- return true;
- if (!Objects.equals(failing.node().state(), lock.node().state()))
- return false;
- failing = new FailingNode(lock.node(), failing.reason);
-
+ try (Mutex lock = nodeRepository().nodes().lock(failing.node().allocation().get().owner())) {
+ // If the active node that we are trying to fail is of type host, we need to successfully fail all
+ // the children nodes running on it before we fail the host
+ boolean allTenantNodesFailedOutSuccessfully = true;
String reasonForChildFailure = "Failing due to parent host " + failing.node().hostname() + " failure: " + failing.reason();
for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(failing.node())) {
if (failingTenantNode.state() == Node.State.active) {
- activeChildrenToFail.add(new FailingNode(failingTenantNode, reasonForChildFailure));
- } else if (failingTenantNode.state() != Node.State.failed) {
+ allTenantNodesFailedOutSuccessfully &= failActive(new FailingNode(failingTenantNode, reasonForChildFailure));
+ } else {
nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
}
}
- // A parent with children gets wantToFail to avoid getting more nodes allocated to it.
+ if (! allTenantNodesFailedOutSuccessfully) return false;
wantToFail(failing.node(), true, lock);
-
- if (activeChildrenToFail.isEmpty()) {
- try {
- deployment.get().activate();
- return true;
- } catch (TransientException e) {
- log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() +
- " with a transient error, will be retried by application maintainer: " +
- Exceptions.toMessageString(e));
- return true;
- } catch (RuntimeException e) {
- // Reset want to fail: We'll retry failing unless it heals in the meantime
- nodeRepository().nodes().node(failing.node().hostname())
- .ifPresent(n -> wantToFail(n, false, lock));
- log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() +
- " for " + failing.reason() + ": " + Exceptions.toMessageString(e));
- return false;
- }
+ try {
+ deployment.get().activate();
+ return true;
+ } catch (TransientException e) {
+ log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() +
+ " with a transient error, will be retried by application maintainer: " +
+ Exceptions.toMessageString(e));
+ return true;
+ } catch (RuntimeException e) {
+ // Reset want to fail: We'll retry failing unless it heals in the meantime
+ nodeRepository().nodes().node(failing.node().hostname())
+ .ifPresent(n -> wantToFail(n, false, lock));
+ log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() +
+ " for " + failing.reason() + ": " + Exceptions.toMessageString(e));
+ return false;
}
}
-
- // In a dynamically provisioned zone the failing of an active child node takes ~10 minutes,
- // so perhaps this should be done in parallel.
- activeChildrenToFail.forEach(this::failActive);
-
- return false;
}
private void wantToFail(Node node, boolean wantToFail, Mutex lock) {
- if (!node.status().wantToFail())
- nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
+ nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock);
}
/** Returns true if node failing should be throttled */
@@ -291,10 +284,9 @@ public class NodeFailer extends NodeRepositoryMaintainer {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
NodeList allNodes = nodeRepository().nodes().list();
- NodeList recentlyFailedNodes = allNodes
- .matching(n -> n.status().wantToFail() ||
- (n.state() == Node.State.failed &&
- n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)));
+ NodeList recentlyFailedNodes = allNodes.state(Node.State.failed)
+ .matching(n -> n.history().hasEventAfter(History.Event.Type.failed,
+ startOfThrottleWindow));
// Allow failing any node within policy
if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false;