node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;

import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodesAndHosts;
import com.yahoo.vespa.hosted.provision.node.Nodes;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;

import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Builds up the data structures necessary for node prioritization. It wraps each node
 * in a {@link NodeCandidate} object with the attributes used in sorting.
 *
 * The prioritization logic is implemented by {@link NodeCandidate}.
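 *
 * A minimal usage sketch (variable names here are illustrative, not part of this class):
 * <pre>{@code
 * NodePrioritizer prioritizer = new NodePrioritizer(allLockedNodes, application, clusterSpec, nodeSpec,
 *                                                   wantedGroups, dynamicProvisioning, nameResolver,
 *                                                   hostResourcesCalculator, spareCount);
 * List<NodeCandidate> candidates = prioritizer.collect(surplusActiveNodes);
 * }</pre>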
 *
 * @author smorgrav
 */
public class NodePrioritizer {

    private final List<NodeCandidate> nodes = new ArrayList<>();
    private final NodesAndHosts<LockedNodeList> allNodes;
    private final HostCapacity capacity;
    private final NodeSpec requestedNodes;
    private final ApplicationId application;
    private final ClusterSpec clusterSpec;
    private final NameResolver nameResolver;
    private final boolean dynamicProvisioning;
    /** Whether the node specification allows new nodes to be allocated. */
    private final boolean canAllocateNew;
    private final boolean canAllocateToSpareHosts;
    private final boolean topologyChange;
    private final int currentClusterSize;
    private final Set<Node> spareHosts;

    public NodePrioritizer(LockedNodeList allLockedNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
                           int wantedGroups, boolean dynamicProvisioning, NameResolver nameResolver,
                           HostResourcesCalculator hostResourcesCalculator, int spareCount) {
        this.allNodes = NodesAndHosts.create(allLockedNodes);
        this.capacity = new HostCapacity(allNodes, hostResourcesCalculator);
        this.requestedNodes = nodeSpec;
        this.clusterSpec = clusterSpec;
        this.application = application;
        this.dynamicProvisioning = dynamicProvisioning;
        this.spareHosts = dynamicProvisioning ?
                capacity.findSpareHostsInDynamicallyProvisionedZones(allNodes.nodes().asList()) :
                capacity.findSpareHosts(allNodes.nodes().asList(), spareCount);
        this.nameResolver = nameResolver;

        NodeList nodesInCluster = allNodes.nodes().owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
        NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
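        // Count the distinct group indexes among the currently active, non-retired nodes; if this differs
        // from the requested number of groups, this deployment changes the group topology of the cluster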
        long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream()
                .flatMap(node -> node.allocation()
                        .flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index))
                        .stream())
                .distinct()
                .count();
        this.topologyChange = currentGroups != wantedGroups;

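        // The number of non-retired, active nodes currently in the requested group, used when deciding
        // whether existing nodes can be resized in place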
        this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream()
                .map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group()))
                .filter(clusterSpec.group()::equals)
                .count();

        // In dynamically provisioned zones we can always use spare hosts, since new hosts can be provisioned
        // on demand; NodeCandidate::compareTo ensures they are not used until there is no room elsewhere.
        // In non-dynamically provisioned zones we only allow allocating to spare hosts to replace failed nodes.
        this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
        // Do not allocate new nodes for exclusive deployments in dynamically provisioned zones: provision new host instead.
        this.canAllocateNew = requestedNodes instanceof NodeSpec.CountNodeSpec
                              && (!dynamicProvisioning || !requestedNodes.isExclusive());
    }

    /** Collects all node candidates for this application and returns them in the most-to-least preferred order */
    public List<NodeCandidate> collect(List<Node> surplusActiveNodes) {
        addApplicationNodes();
        addSurplusNodes(surplusActiveNodes);
        addReadyNodes();
        addCandidatesOnExistingHosts();
        return prioritize();
    }

    /** Returns the list of nodes sorted by {@link NodeCandidate#compareTo(NodeCandidate)} */
    private List<NodeCandidate> prioritize() {
        // Group candidates by their switch hostname
        Map<String, List<NodeCandidate>> candidatesBySwitch = this.nodes.stream()
                .collect(Collectors.groupingBy(candidate -> candidate.parent.orElseGet(candidate::toNode)
                                                                            .switchHostname()
                                                                            .orElse("")));
        // Mark lower priority nodes on shared switch as non-exclusive
        List<NodeCandidate> nodes = new ArrayList<>(this.nodes.size());
        for (var clusterSwitch : candidatesBySwitch.keySet()) {
            List<NodeCandidate> switchCandidates = candidatesBySwitch.get(clusterSwitch);
            if (clusterSwitch.isEmpty()) {
                nodes.addAll(switchCandidates); // Nodes are on exclusive switch by default
            } else {
                Collections.sort(switchCandidates);
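                // Only the best candidate on a shared switch keeps switch exclusivity; the rest are demoted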
                NodeCandidate bestNode = switchCandidates.get(0);
                nodes.add(bestNode);
                for (var node : switchCandidates.subList(1, switchCandidates.size())) {
                    nodes.add(node.withExclusiveSwitch(false));
                }
            }
        }
        Collections.sort(nodes);
        return nodes;
    }

    /**
     * Add nodes that have previously been reserved for the same application and were freed
     * by an earlier downsizing of the cluster
     */
    private void addSurplusNodes(List<Node> surplusNodes) {
        for (Node node : surplusNodes) {
            NodeCandidate candidate = candidateFrom(node, true);
            if (!candidate.violatesSpares || canAllocateToSpareHosts) {
                nodes.add(candidate);
            }
        }
    }

    /** Add a node on each host with enough capacity for the requested flavor  */
    private void addCandidatesOnExistingHosts() {
        if ( ! canAllocateNew) return;

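        // Consider each host that can take a tenant node, is not reserved for another tenant, is not exclusive
        // to an application or another cluster type, has enough free capacity, is not an unusable spare host,
        // and does not already host a node of this cluster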
        for (Node host : allNodes.nodes()) {
            if ( ! Nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
            if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
            if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
            if (host.exclusiveToApplicationId().isPresent()) continue; // Never allocate new nodes to exclusive hosts
            if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
            if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
            if ( ! capacity.hasCapacity(host, requestedNodes.resources().get())) continue;
            if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
            nodes.add(NodeCandidate.createNewChild(requestedNodes.resources().get(),
                                                   capacity.availableCapacityOf(host),
                                                   host,
                                                   spareHosts.contains(host),
                                                   allNodes.nodes(),
                                                   nameResolver));
        }
    }

    /** Add existing nodes allocated to the application */
    private void addApplicationNodes() {
        EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved);
        allNodes.nodes().stream()
                .filter(node -> node.type() == requestedNodes.type())
                .filter(node -> legalStates.contains(node.state()))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(application))
                .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
                .filter(node -> node.state() == Node.State.active || canStillAllocate(node))
                .map(node -> candidateFrom(node, false))
                .forEach(nodes::add);
    }

    /** Add nodes already provisioned, but not allocated to any application */
    private void addReadyNodes() {
        allNodes.nodes().stream()
                .filter(node -> node.type() == requestedNodes.type())
                .filter(node -> node.state() == Node.State.ready)
                .map(node -> candidateFrom(node, false))
                .filter(n -> !n.violatesSpares || canAllocateToSpareHosts)
                .forEach(nodes::add);
    }

    /** Create a candidate from given pre-existing node */
    private NodeCandidate candidateFrom(Node node, boolean isSurplus) {
        Optional<Node> optionalParent = allNodes.parentOf(node);
        if (optionalParent.isPresent()) {
            Node parent = optionalParent.get();
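            // A child candidate may be resized in place only when its parent is not exclusive to an application
            // and the requested node spec allows resizing given the current cluster size and topology change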
            return NodeCandidate.createChild(node,
                                             capacity.availableCapacityOf(parent),
                                             parent,
                                             spareHosts.contains(parent),
                                             isSurplus,
                                             false,
                                             parent.exclusiveToApplicationId().isEmpty()
                                             && requestedNodes.canResize(node.resources(),
                                                                         capacity.availableCapacityOf(parent),
                                                                         topologyChange,
                                                                         currentClusterSize));
        } else {
            return NodeCandidate.createStandalone(node, isSurplus, false);
        }
    }

    /** Returns whether we are allocating to replace a failed node */
    private boolean isReplacement(NodeList nodesInCluster, Optional<ClusterSpec.Group> group) {
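        // We are replacing when the target group (or the whole cluster, if no group is given) contains
        // failing or failed nodes, and the remaining nodes alone cannot fulfill the requested node count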
        NodeList nodesInGroup = group.map(ClusterSpec.Group::index)
                                     .map(nodesInCluster::group)
                                     .orElse(nodesInCluster);
        int failedNodesInGroup = nodesInGroup.failing().size() + nodesInGroup.state(Node.State.failed).size();
        if (failedNodesInGroup == 0) return false;
        return ! requestedNodes.fulfilledBy(nodesInGroup.size() - failedNodesInGroup);
    }

    /**
     * We may regret that a non-active node is allocated to a host, and choose not to offer it to the
     * application now, e.g. if we want to retire the host.
     *
     * @return true if we still want to allocate the given node to its parent
     */
    private boolean canStillAllocate(Node node) {
        if (node.type() != NodeType.tenant || node.parentHostname().isEmpty()) return true;
        Optional<Node> parent = allNodes.parentOf(node);
        return parent.isPresent() && Nodes.canAllocateTenantNodeTo(parent.get(), dynamicProvisioning);
    }

}