path: root/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;

import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.Deployment;
import com.yahoo.config.provision.HostLivenessTracker;
import com.yahoo.config.provision.NodeType;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.applicationmodel.ApplicationInstance;
import com.yahoo.vespa.applicationmodel.ServiceCluster;
import com.yahoo.vespa.applicationmodel.ServiceInstance;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Flavor;
import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.orchestrator.ApplicationIdNotFoundException;
import com.yahoo.vespa.orchestrator.Orchestrator;
import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
import com.yahoo.vespa.service.monitor.ServiceMonitor;
import com.yahoo.vespa.service.monitor.ServiceMonitorStatus;

import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;

/**
 * Maintains information in the node repo about when each node last responded to ping,
 * and fails nodes which have not responded within the given time limit.
 *
 * @author bratseth
 */
public class NodeFailer extends Maintainer {

    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;
    
    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    
    private final Duration nodeRequestInterval = Duration.ofMinutes(10);
    private final Instant constructionTime;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker, 
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Clock clock, Orchestrator orchestrator) {
        // check node status every five minutes, but at least twice within each down time limit period
        super(nodeRepository, min(downTimeLimit.dividedBy(2), Duration.ofMinutes(5)));
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        constructionTime = clock.instant();
    }

    private static Duration min(Duration d1, Duration d2) {
        return d1.toMillis() < d2.toMillis() ? d1 : d2;
    }

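    /**
     * Fails ready nodes which appear dead or have a hardware failure, and active nodes which have been
     * down longer than the down time limit, unless their application is suspended or failing is not
     * allowed for their node type.
     */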
    @Override
    protected void maintain() {
        // Ready nodes
        updateNodeLivenessEventsForReadyNodes();
        for (Node node : readyNodesWhichAreDead()) {
            // ready docker containers do not run Vespa, so skip those
            if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) continue;
            nodeRepository().fail(node.hostname());
        }
        for (Node node : readyNodesWithHardwareFailure())
            nodeRepository().fail(node.hostname());

        // Active nodes
        for (Node node : determineActiveNodeDownStatus()) {
            Instant graceTimeEnd = node.history().event(History.Event.Type.down).get().at().plus(downTimeLimit);
            if (graceTimeEnd.isBefore(clock.instant()) && ! applicationSuspended(node) && failAllowedFor(node.type()))
                failActive(node);
        }
    }
    
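    /** Records in the node repo the last config request seen from each ready node, as reported by the local liveness tracker */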
    private void updateNodeLivenessEventsForReadyNodes() {
        // Update node last request events through ZooKeeper to collect requests made to all config servers.
        // We do this here ("lazily") to avoid writing to ZooKeeper for each config request.
        try (Mutex lock = nodeRepository().lockUnallocated()) {
            for (Node node : nodeRepository().getNodes(Node.State.ready)) {
                Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
                if ( ! lastLocalRequest.isPresent()) continue;

                Optional<History.Event> recordedRequest = node.history().event(History.Event.Type.requested);
                if ( ! recordedRequest.isPresent() || recordedRequest.get().at().isBefore(lastLocalRequest.get())) {
                    History updatedHistory = node.history().with(new History.Event(History.Event.Type.requested,
                                                                                   lastLocalRequest.get()));
                    nodeRepository().write(node.with(updatedHistory));
                }
            }
        }
    }

    private List<Node> readyNodesWhichAreDead() {    
        // Allow requests some time (twice the node request interval) to be registered after construction,
        // in case all config servers have been down
        if (constructionTime.isAfter(clock.instant().minus(nodeRequestInterval).minus(nodeRequestInterval)))
            return Collections.emptyList();
        
        // Nodes are taken as dead if they have not made a config request since this instant.
        // Add the node request interval to the down time limit to allow nodes which request config that infrequently.
        Instant oldestAcceptableRequestTime = clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);
        
        return nodeRepository().getNodes(Node.State.ready).stream()
                .filter(node -> wasMadeReadyBefore(oldestAcceptableRequestTime, node))
                .filter(node -> ! hasRecordedRequestAfter(oldestAcceptableRequestTime, node))
                .collect(Collectors.toList());
    }

    private boolean wasMadeReadyBefore(Instant instant, Node node) {
        Optional<History.Event> readiedEvent = node.history().event(History.Event.Type.readied);
        if ( ! readiedEvent.isPresent()) return false;
        return readiedEvent.get().at().isBefore(instant);
    }

    private boolean hasRecordedRequestAfter(Instant instant, Node node) {
        Optional<History.Event> lastRequest = node.history().event(History.Event.Type.requested);
        if ( ! lastRequest.isPresent()) return false;
        return lastRequest.get().at().isAfter(instant);
    }

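    /** Returns the ready nodes which have a hardware failure recorded in their status */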
    private List<Node> readyNodesWithHardwareFailure() {
        return nodeRepository().getNodes(Node.State.ready).stream()
                .filter(node -> node.status().hardwareFailure().isPresent())
                .collect(Collectors.toList());
    }

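    /** Returns whether the application owning this node is currently suspended (allowed to be down) in the Orchestrator */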
    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                   == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // Treat it as not suspended and allow the node to be failed anyway
            return false;
        }
    }

    /**
     * We can attempt to fail any number of *tenant* nodes because the operation will not take effect
     * unless the node can be replaced.
     * However, nodes of other types are not replaced (because all nodes of the type are used by a single
     * application), so we only allow one node of each such type to be in the failed state at any point
     * in time, to protect against runaway failing.
     */
    private boolean failAllowedFor(NodeType nodeType) {
        if (nodeType == NodeType.tenant) return true;
        return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
    }
    
    /**
     * If the node is positively DOWN, and there is no "down" history record, we add it.
     * If the node is positively UP we remove any "down" history record.
     *
     * @return a list of all nodes which are currently known to be in the down state
     */
    private List<Node> determineActiveNodeDownStatus() {
        List<Node> downNodes = new ArrayList<>();
        for (ApplicationInstance<ServiceMonitorStatus> application : serviceMonitor.queryStatusOfAllApplicationInstances().values()) {
            for (ServiceCluster<ServiceMonitorStatus> cluster : application.serviceClusters()) {
                for (ServiceInstance<ServiceMonitorStatus> service : cluster.serviceInstances()) {
                    Optional<Node> node = nodeRepository().getNode(service.hostName().s(), Node.State.active);
                    if ( ! node.isPresent()) continue; // we also get status from infrastructure nodes, which are not in the repo. TODO: remove when proxy nodes are in node repo everywhere

                    if (service.serviceStatus().equals(ServiceMonitorStatus.DOWN))
                        downNodes.add(recordAsDown(node.get()));
                    else if (service.serviceStatus().equals(ServiceMonitorStatus.UP))
                        clearDownRecord(node.get());
                    // else: we don't know current status; don't take any action until we have positive information
                }
            }
        }
        return downNodes;
    }

    /**
     * Records a node as down if not already recorded, and returns the node in the new state.
     * This assumes the node is found in the node repo and that the node is allocated.
     * If we get here otherwise, something is truly odd.
     */
    private Node recordAsDown(Node node) {
        if (node.history().event(History.Event.Type.down).isPresent()) return node; // already down: Don't change down timestamp

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); // re-get inside lock
            return nodeRepository().write(node.downAt(clock.instant()));
        }
    }

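    /** Removes any "down" history record from this node, re-reading it under the application lock first */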
    private void clearDownRecord(Node node) {
        if ( ! node.history().event(History.Event.Type.down).isPresent()) return;

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().getNode(node.hostname(), Node.State.active).get(); // re-get inside lock
            nodeRepository().write(node.up());
        }
    }

    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node.
     * Otherwise, leaving the node active ensures (through the Orchestrator check) that no further action is taken.
     */
    private void failActive(Node node) {
        Optional<Deployment> deployment =
            deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if ( ! deployment.isPresent()) return; // this will be done at another config server

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            node = nodeRepository().fail(node.hostname());
            try {
                deployment.get().prepare();
                deployment.get().activate();
            }
            catch (RuntimeException e) {
                // The expected reason for deployment to fail here is that there is no capacity available to redeploy.
                // In that case we should leave the node in the active state to avoid failing additional nodes.
                nodeRepository().reactivate(node.hostname());
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                                       ", but redeploying without the node failed", e);
            }
        }
    }

    @Override
    public String toString() { return "Node failer"; }

}