// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;

import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.Environment;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.autoscale.AllocatableClusterResources;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.NodeMetricSnapshot;
import com.yahoo.vespa.hosted.provision.node.History;
import java.time.Duration;
import java.time.Instant;
import java.util.Map;
import java.util.Optional;

/**
 * Maintainer making automatic scaling decisions for the clusters of deployed applications
 *
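 * <p>
 * A minimal construction sketch (the scheduling interval below is an assumed example, not a value defined in this class):
 * <pre>{@code
 * var maintainer = new AutoscalingMaintainer(nodeRepository, deployer, metric, Duration.ofMinutes(5));
 * }</pre>
 *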
 * @author bratseth
 */
public class AutoscalingMaintainer extends NodeRepositoryMaintainer {

    private final Autoscaler autoscaler;
    private final Deployer deployer;
    private final Metric metric;

    public AutoscalingMaintainer(NodeRepository nodeRepository,
                                 Deployer deployer,
                                 Metric metric,
                                 Duration interval) {
        super(nodeRepository, interval, metric);
        this.autoscaler = new Autoscaler(nodeRepository);
        this.deployer = deployer;
        this.metric = metric;
    }

    @Override
    protected double maintain() {
        if ( ! nodeRepository().nodes().isWorking()) return 0.0;
        if (nodeRepository().zone().environment().isTest()) return 1.0;

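        // Autoscale each cluster of each application which has active nodes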
        int attempts = 0;
        int failures = 0;
        for (var applicationNodes : activeNodesByApplication().entrySet()) {
            for (var clusterNodes : nodesByCluster(applicationNodes.getValue()).entrySet()) {
                attempts++;
                if ( ! autoscale(applicationNodes.getKey(), clusterNodes.getKey()))
                    failures++;
            }
        }
        return asSuccessFactor(attempts, failures);
    }

    /**
     * Autoscales the given cluster.
     *
     * @return true if an autoscaling decision was made or nothing should be done, false if there was an error
     */
    private boolean autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId) {
        try (var lock = nodeRepository().applications().lock(applicationId)) {
            Optional<Application> application = nodeRepository().applications().get(applicationId);
            if (application.isEmpty()) return true;
            if (application.get().cluster(clusterId).isEmpty()) return true;
            Cluster cluster = application.get().cluster(clusterId).get();

            NodeList clusterNodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId);
            cluster = updateCompletion(cluster, clusterNodes);

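            // The advertised resources currently allocated to the non-retired nodes of this cluster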
            var current = new AllocatableClusterResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources();

            // Autoscale unless a previous autoscaling decision is still being applied
            Autoscaling autoscaling = null;
            if (cluster.target().resources().isEmpty() || current.equals(cluster.target().resources().get())) {
                autoscaling = autoscaler.autoscale(application.get(), cluster, clusterNodes);
                if ( autoscaling.isPresent() || cluster.target().isEmpty()) // Don't replace an existing target with an empty decision (which recently started servers may produce)
                    cluster = cluster.withTarget(autoscaling);
            }

            // Always store updates
            applications().put(application.get().with(cluster), lock);

            // Attempt to perform the autoscaling immediately, and log it regardless
            if (autoscaling != null && autoscaling.resources().isPresent() && !current.equals(autoscaling.resources().get())) {
                try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
                    if (deployment.isValid())
                        deployment.activate();
                    logAutoscaling(current, autoscaling.resources().get(), applicationId, clusterNodes.not().retired());
                }
            }
            return true;
        }
        catch (ApplicationLockException e) {
            return false;
        }
        catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Illegal arguments for " + applicationId + " cluster " + clusterId, e);
        }
    }

    private Applications applications() {
        return nodeRepository().applications();
    }

    /** Checks whether the last scaling event for this cluster has completed and, if so, records its completion in the returned instance */
    private Cluster updateCompletion(Cluster cluster, NodeList clusterNodes) {
        if (cluster.lastScalingEvent().isEmpty()) return cluster;
        var event = cluster.lastScalingEvent().get();
        if (event.completion().isPresent()) return cluster;

        // Scaling event is complete if:
        // - 1. no nodes which were retired by this event are still present (which also implies data distribution is complete)
        if (clusterNodes.retired().stream()
                        .anyMatch(node -> node.history().hasEventAt(History.Event.Type.retired, event.at())))
            return cluster;
        // - 2. all nodes have switched to the right config generation (currently only measured on containers)
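        //        i.e., every node must have at least one metrics snapshot taken on the new config generation or a later one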
        for (var nodeTimeseries : nodeRepository().metricsDb().getNodeTimeseries(Duration.between(event.at(), clock().instant()),
                                                                                 clusterNodes)) {
            Optional<NodeMetricSnapshot> onNewGeneration =
                    nodeTimeseries.asList().stream()
                                  .filter(snapshot -> snapshot.generation() >= event.generation()).findAny();
            if (onNewGeneration.isEmpty()) return cluster; // Not completed
        }

        // Set the completion time to the instant we notice completion.
        Instant completionTime = nodeRepository().clock().instant();
        return cluster.with(event.withCompletion(completionTime));
    }

    private void logAutoscaling(ClusterResources from, ClusterResources to, ApplicationId application, NodeList clusterNodes) {
        log.info("Autoscaling " + application + " " + clusterNodes.clusterSpec() + ":" +
                 "\nfrom " + toString(from) + "\nto   " + toString(to));
    }

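    /** Returns a string describing the given resources, including their total across all nodes */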
    static String toString(ClusterResources r) {
        return r + " (total: " + r.totalResources() + ")";
    }

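    /** Returns the given application nodes grouped by the id of the cluster they belong to */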
    private Map<ClusterSpec.Id, NodeList> nodesByCluster(NodeList applicationNodes) {
        return applicationNodes.groupingBy(n -> n.allocation().get().membership().cluster().id());
    }

}