path: root/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;

import com.google.inject.Inject;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostFilter;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.ProvisionLogger;
import com.yahoo.config.provision.Provisioner;
import com.yahoo.config.provision.Zone;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.node.filter.ApplicationFilter;
import com.yahoo.vespa.hosted.provision.node.filter.NodeHostFilter;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Implementation of the host provisioner API for hosted Vespa, using the node repository to allocate nodes.
 * Does not allocate hosts for the routing application, see VespaModelFactory.createHostProvisioner
 *
 * @author bratseth
 */
public class NodeRepositoryProvisioner implements Provisioner {

    private static final Logger log = Logger.getLogger(NodeRepositoryProvisioner.class.getName());
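    // Spare capacity to hold back when preparing nodes, presumably to allow replacement of failed nodes;
    // currently 0 in both prod and non-prod environments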
    private static final int SPARE_CAPACITY_PROD = 0;
    private static final int SPARE_CAPACITY_NONPROD = 0;

    private final NodeRepository nodeRepository;
    private final CapacityPolicies capacityPolicies;
    private final Zone zone;
    private final Preparer preparer;
    private final Activator activator;
    private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;

    int getSpareCapacityProd() {
        return SPARE_CAPACITY_PROD;
    }

    @Inject
    public NodeRepositoryProvisioner(NodeRepository nodeRepository, Zone zone,
                                     ProvisionServiceProvider provisionServiceProvider, FlagSource flagSource) {
        this.nodeRepository = nodeRepository;
        this.capacityPolicies = new CapacityPolicies(zone);
        this.zone = zone;
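        // A load balancer provisioner is only created in zones which provide a load balancer service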
        this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService));
        this.preparer = new Preparer(nodeRepository,
                                     zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD,
                                     provisionServiceProvider.getHostProvisioner(),
                                     provisionServiceProvider.getHostResourcesCalculator(),
                                     flagSource,
                                     loadBalancerProvisioner);
        this.activator = new Activator(nodeRepository, loadBalancerProvisioner);
    }

    /**
     * Returns a list of nodes in the prepared or active state, matching the given constraints.
     * The nodes are ordered by increasing index number.
     */
    @Override
    public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested,
                                  ProvisionLogger logger) {
        log.log(zone.system().isCd() ? Level.INFO : Level.FINE,
                () -> "Received deploy prepare request for " + requested +
                      " for application " + application + ", cluster " + cluster);

        if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");

        if ( ! hasQuota(application, requested.maxResources().nodes()))
            throw new IllegalArgumentException(requested + " requested for " + cluster +
                                               ". Max value exceeds your quota. Resolve this at https://cloud.vespa.ai/quota");

        int groups;
        NodeResources resources;
        NodeSpec nodeSpec;
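        // Tenant node requests are sized by the capacity policies within the requested min/max limits;
        // other node types use the requested (minimum) resources directly and always form a single group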
        if ( requested.type() == NodeType.tenant) {
            ClusterResources target = decideTargetResources(application, cluster.id(), requested);
            int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application);
            resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster);
            boolean exclusive = capacityPolicies.decideExclusivity(cluster.isExclusive());
            groups = Math.min(target.groups(), nodeCount); // cannot have more groups than nodes
            nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail());
            logIfDownscaled(target.nodes(), nodeCount, cluster, logger);
        }
        else {
            groups = 1; // type request with multiple groups is not supported
            resources = requested.minResources().nodeResources();
            nodeSpec = NodeSpec.from(requested.type());
        }
        return asSortedHosts(preparer.prepare(application, cluster, nodeSpec, groups), resources);
    }

    @Override
    public void activate(NestedTransaction transaction, ApplicationId application, Collection<HostSpec> hosts) {
        validate(hosts);
        activator.activate(application, hosts, transaction);
    }

    @Override
    public void restart(ApplicationId application, HostFilter filter) {
        nodeRepository.restart(ApplicationFilter.from(application, NodeHostFilter.from(filter)));
    }

    @Override
    public void remove(NestedTransaction transaction, ApplicationId application) {
        nodeRepository.deactivate(application, transaction);
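        // Deactivate any load balancers of this application in the same transaction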
        loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.deactivate(application, transaction));
    }

    /**
     * Returns the target cluster resources, a value between the min and max in the requested capacity,
     * and updates the application store with the received min and max.
     */
    private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec.Id clusterId, Capacity requested) {
        try (Mutex lock = nodeRepository.lock(applicationId)) {
            Application application = nodeRepository.applications().get(applicationId).orElse(new Application(applicationId));
            application = application.withClusterLimits(clusterId, requested.minResources(), requested.maxResources());
            nodeRepository.applications().put(application, lock);
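            // Prefer a previously stored target; otherwise use the cluster's current resources if they are
            // within the requested limits; otherwise fall back to the requested minimum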
            return application.clusters().get(clusterId).targetResources()
                    .orElseGet(() -> currentResources(applicationId, clusterId, requested)
                    .orElse(requested.minResources()));
        }
    }

    /** Returns the current resources of this cluster, if it's already deployed and inside the requested limits */
    private Optional<ClusterResources> currentResources(ApplicationId applicationId,
                                                        ClusterSpec.Id clusterId,
                                                        Capacity requested) {
        List<Node> nodes = NodeList.copyOf(nodeRepository.getNodes(applicationId, Node.State.active))
                                   .cluster(clusterId)
                                   .not().retired()
                                   .not().removable()
                                   .asList();
        if (nodes.isEmpty()) return Optional.empty();
        long groups = nodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();

        // To allow non-numeric settings (disk speed, storage type) to be updated without resetting to the min target,
        // combine the current numeric settings with the non-numeric settings of the requested min limit
        NodeResources nodeResources = nodes.get(0).allocation().get().requestedResources()
                                           .with(requested.minResources().nodeResources().diskSpeed())
                                           .with(requested.minResources().nodeResources().storageType());
        var currentResources = new ClusterResources(nodes.size(), (int)groups, nodeResources);
        if ( ! currentResources.isWithin(requested.minResources(), requested.maxResources())) return Optional.empty();

        return Optional.of(currentResources);
    }

    private void logIfDownscaled(int targetNodes, int actualNodes, ClusterSpec cluster, ProvisionLogger logger) {
        if (zone.environment().isManuallyDeployed() && actualNodes < targetNodes)
            logger.log(Level.INFO, "Requested " + targetNodes + " nodes for " + cluster +
                                   ", downscaling to " + actualNodes + " nodes in " + zone.environment());
    }

    private boolean hasQuota(ApplicationId application, int requestedNodes) {
        if ( ! this.zone.system().isPublic()) return true; // no quota management

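        // Specific tenants, identified by the hash code of their tenant name, have been granted larger quotas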
        if (application.tenant().value().hashCode() == 3857)        return requestedNodes <= 60;
        if (application.tenant().value().hashCode() == -1271827001) return requestedNodes <= 75;
        return requestedNodes <= 5;
    }

    private List<HostSpec> asSortedHosts(List<Node> nodes, NodeResources requestedResources) {
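        // Sort by cluster membership index so hosts are returned in increasing index order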
        nodes.sort(Comparator.comparingInt(node -> node.allocation().get().membership().index()));
        List<HostSpec> hosts = new ArrayList<>(nodes.size());
        for (Node node : nodes) {
            log.log(LogLevel.DEBUG, () -> "Prepared node " + node.hostname() + " - " + node.flavor());
            Allocation nodeAllocation = node.allocation().orElseThrow(IllegalStateException::new);
            hosts.add(new HostSpec(node.hostname(),
                                   List.of(),
                                   Optional.of(node.flavor()),
                                   Optional.of(nodeAllocation.membership()),
                                   node.status().vespaVersion(),
                                   nodeAllocation.networkPorts(),
                                   requestedResources == NodeResources.unspecified ? Optional.empty() : Optional.of(requestedResources),
                                   node.status().dockerImage()));
            if (nodeAllocation.networkPorts().isPresent()) {
                log.log(LogLevel.DEBUG, () -> "Prepared node " + node.hostname() + " has port allocations");
            }
        }
        return hosts;
    }

    private void validate(Collection<HostSpec> hosts) {
        for (HostSpec host : hosts) {
            if (host.membership().isEmpty())
                throw new IllegalArgumentException("Hosts must be assigned a cluster when activating, but got " + host);
            if (host.membership().get().cluster().group().isEmpty())
                throw new IllegalArgumentException("Hosts must be assigned a group when activating, but got " + host);
        }
    }

}