summaryrefslogtreecommitdiffstats
path: root/node-repository/src/main/java
diff options
context:
space:
mode:
authorHarald Musum <musum@verizonmedia.com>2019-06-20 13:45:20 +0200
committerGitHub <noreply@github.com>2019-06-20 13:45:20 +0200
commitb03f40a06e872c8646407c9476e12537c1b2bd65 (patch)
tree3e69805096d4adddc8fbc15e9dbffc4ddc8797ea /node-repository/src/main/java
parent9b58a9b1aed4edcca152ef4c28d4d378f252e67b (diff)
parentf7e3e48d19723494a3c9fffe0693d4caa3e8c47a (diff)
Merge pull request #9854 from vespa-engine/mpolden/prepare-lbs
Prepare and activate load balancers
Diffstat (limited to 'node-repository/src/main/java')
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java58
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java61
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java88
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java97
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java20
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java18
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java8
10 files changed, 249 insertions, 110 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index bedfbc5bdc1..9b78f558a7a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -399,10 +399,7 @@ public class NodeRepository extends AbstractComponent {
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex lock = lock(application)) {
- db.writeTo(Node.State.inactive,
- db.getNodes(application, Node.State.reserved, Node.State.active),
- Agent.application, Optional.empty(), transaction
- );
+ deactivate(db.getNodes(application, Node.State.reserved, Node.State.active), transaction);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
index 58c576d3f44..369366a1f08 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.lb;
import com.yahoo.vespa.hosted.provision.maintenance.LoadBalancerExpirer;
+import java.time.Instant;
import java.util.Objects;
/**
@@ -14,12 +15,14 @@ public class LoadBalancer {
private final LoadBalancerId id;
private final LoadBalancerInstance instance;
- private final boolean inactive;
+ private final State state;
+ private final Instant changedAt;
- public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, boolean inactive) {
+ public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, State state, Instant changedAt) {
this.id = Objects.requireNonNull(id, "id must be non-null");
this.instance = Objects.requireNonNull(instance, "instance must be non-null");
- this.inactive = inactive;
+ this.state = Objects.requireNonNull(state, "state must be non-null");
+ this.changedAt = Objects.requireNonNull(changedAt, "changedAt must be non-null");
}
/** An identifier for this load balancer. The ID is unique inside the zone */
@@ -32,17 +35,48 @@ public class LoadBalancer {
return instance;
}
- /**
- * Returns whether this load balancer is inactive. Inactive load balancers are eventually removed by
- * {@link LoadBalancerExpirer}. Inactive load balancers may be reactivated if a deleted cluster is redeployed.
- */
- public boolean inactive() {
- return inactive;
+ /** The current state of this */
+ public State state() {
+ return state;
}
- /** Return a copy of this that is set inactive */
- public LoadBalancer deactivate() {
- return new LoadBalancer(id, instance, true);
+ /** Returns when this was last changed */
+ public Instant changedAt() {
+ return changedAt;
+ }
+
+ /** Returns a copy of this with state set to given state */
+ public LoadBalancer with(State state, Instant changedAt) {
+ if (changedAt.isBefore(this.changedAt)) {
+ throw new IllegalArgumentException("Invalid changeAt: '" + changedAt + "' is before existing value '" +
+ this.changedAt + "'");
+ }
+ if (this.state == State.active && state == State.reserved) {
+ throw new IllegalArgumentException("Invalid state transition: " + this.state + " -> " + state);
+ }
+ return new LoadBalancer(id, instance, state, changedAt);
+ }
+
+ /** Returns a copy of this with instance set to given instance */
+ public LoadBalancer with(LoadBalancerInstance instance) {
+ return new LoadBalancer(id, instance, state, changedAt);
+ }
+
+ public enum State {
+
+ /** This load balancer has been provisioned and reserved for an application */
+ reserved,
+
+ /**
+ * The load balancer has been deactivated and is ready to be removed. Inactive load balancers are eventually
+ * removed by {@link LoadBalancerExpirer}. Inactive load balancers may be reactivated if a deleted cluster is
+ * redeployed.
+ */
+ inactive,
+
+ /** The load balancer is in active use by an application */
+ active,
+
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java
index ba7a83169ad..c0bb53ddfe4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java
@@ -29,7 +29,7 @@ public class LoadBalancerList {
/** Returns the subset of load balancers that are inactive */
public LoadBalancerList inactive() {
- return of(loadBalancers.stream().filter(LoadBalancer::inactive));
+ return of(loadBalancers.stream().filter(lb -> lb.state() == LoadBalancer.State.inactive));
}
public List<LoadBalancer> asList() {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
index 371ed4d2496..61ca19a4cb9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
@@ -484,7 +484,7 @@ public class CuratorDatabaseClient {
}
private Optional<LoadBalancer> readLoadBalancer(LoadBalancerId id) {
- return read(loadBalancerPath(id), LoadBalancerSerializer::fromJson);
+ return read(loadBalancerPath(id), (data) -> LoadBalancerSerializer.fromJson(data, clock.instant()));
}
public void writeLoadBalancer(LoadBalancer loadBalancer) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
index fd2294c1b5d..d04dd2b5c18 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java
@@ -15,9 +15,9 @@ import com.yahoo.vespa.hosted.provision.lb.Real;
import java.io.IOException;
import java.io.UncheckedIOException;
+import java.time.Instant;
import java.util.LinkedHashSet;
import java.util.Optional;
-import java.util.Set;
import java.util.function.Function;
/**
@@ -36,12 +36,13 @@ public class LoadBalancerSerializer {
private static final String idField = "id";
private static final String hostnameField = "hostname";
+ private static final String stateField = "state";
+ private static final String changedAtField = "changedAt";
private static final String dnsZoneField = "dnsZone";
private static final String inactiveField = "inactive";
private static final String portsField = "ports";
private static final String networksField = "networks";
private static final String realsField = "reals";
- private static final String nameField = "name";
private static final String ipAddressField = "ipAddress";
private static final String portField = "port";
@@ -51,6 +52,8 @@ public class LoadBalancerSerializer {
root.setString(idField, loadBalancer.id().serializedForm());
root.setString(hostnameField, loadBalancer.instance().hostname().toString());
+ root.setString(stateField, asString(loadBalancer.state()));
+ root.setLong(changedAtField, loadBalancer.changedAt().toEpochMilli());
loadBalancer.instance().dnsZone().ifPresent(dnsZone -> root.setString(dnsZoneField, dnsZone.id()));
Cursor portArray = root.setArray(portsField);
loadBalancer.instance().ports().forEach(portArray::addLong);
@@ -63,8 +66,6 @@ public class LoadBalancerSerializer {
realObject.setString(ipAddressField, real.ipAddress());
realObject.setLong(portField, real.port());
});
- root.setBool(inactiveField, loadBalancer.inactive());
-
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
@@ -72,10 +73,10 @@ public class LoadBalancerSerializer {
}
}
- public static LoadBalancer fromJson(byte[] data) {
+ public static LoadBalancer fromJson(byte[] data, Instant defaultChangedAt) {
Cursor object = SlimeUtils.jsonToSlime(data).get();
- Set<Real> reals = new LinkedHashSet<>();
+ var reals = new LinkedHashSet<Real>();
object.field(realsField).traverse((ArrayTraverser) (i, realObject) -> {
reals.add(new Real(HostName.from(realObject.field(hostnameField).asString()),
realObject.field(ipAddressField).asString(),
@@ -83,25 +84,61 @@ public class LoadBalancerSerializer {
});
- Set<Integer> ports = new LinkedHashSet<>();
+ var ports = new LinkedHashSet<Integer>();
object.field(portsField).traverse((ArrayTraverser) (i, port) -> ports.add((int) port.asLong()));
- Set<String> networks = new LinkedHashSet<>();
+ var networks = new LinkedHashSet<String>();
object.field(networksField).traverse((ArrayTraverser) (i, network) -> networks.add(network.asString()));
return new LoadBalancer(LoadBalancerId.fromSerializedForm(object.field(idField).asString()),
new LoadBalancerInstance(
HostName.from(object.field(hostnameField).asString()),
- optionalField(object.field(dnsZoneField), DnsZone::new),
+ optionalString(object.field(dnsZoneField), DnsZone::new),
ports,
networks,
reals
),
- object.field(inactiveField).asBool());
+ stateFromSlime(object),
+ instantFromSlime(object.field(changedAtField), defaultChangedAt));
+ }
+
+ private static Instant instantFromSlime(Cursor field, Instant defaultValue) {
+ return optionalValue(field, (value) -> Instant.ofEpochMilli(value.asLong())).orElse(defaultValue);
+ }
+
+ private static LoadBalancer.State stateFromSlime(Inspector object) {
+ var inactiveValue = optionalValue(object.field(inactiveField), Inspector::asBool);
+ if (inactiveValue.isPresent()) { // TODO(mpolden): Remove reading of "inactive" field after June 2019
+ return inactiveValue.get() ? LoadBalancer.State.inactive : LoadBalancer.State.active;
+ } else {
+ return stateFromString(object.field(stateField).asString());
+ }
+ }
+
+ private static <T> Optional<T> optionalValue(Inspector field, Function<Inspector, T> fieldMapper) {
+ return Optional.of(field).filter(Inspector::valid).map(fieldMapper);
+ }
+
+ private static <T> Optional<T> optionalString(Inspector field, Function<String, T> fieldMapper) {
+ return optionalValue(field, Inspector::asString).map(fieldMapper);
}
- private static <T> Optional<T> optionalField(Inspector field, Function<String, T> fieldMapper) {
- return Optional.of(field).filter(Inspector::valid).map(Inspector::asString).map(fieldMapper);
+ private static String asString(LoadBalancer.State state) {
+ switch (state) {
+ case active: return "active";
+ case inactive: return "inactive";
+ case reserved: return "reserved";
+ default: throw new IllegalArgumentException("No serialization defined for state enum '" + state + "'");
+ }
+ }
+
+ private static LoadBalancer.State stateFromString(String state) {
+ switch (state) {
+ case "active": return LoadBalancer.State.active;
+ case "inactive": return LoadBalancer.State.inactive;
+ case "reserved": return LoadBalancer.State.reserved;
+ default: throw new IllegalArgumentException("No serialization defined for state string '" + state + "'");
+ }
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index 4626a600d2c..1e83c2c9176 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -2,6 +2,8 @@
package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterMembership;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.ParentHostUnavailableException;
import com.yahoo.transaction.Mutex;
@@ -22,16 +24,26 @@ import java.util.function.Function;
import java.util.stream.Collectors;
/**
- * Performs activation of nodes for an application
+ * Performs activation of resources for an application. E.g. nodes or load balancers.
*
* @author bratseth
*/
class Activator {
private final NodeRepository nodeRepository;
+ private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
- public Activator(NodeRepository nodeRepository) {
+ public Activator(NodeRepository nodeRepository, Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
+ this.loadBalancerProvisioner = loadBalancerProvisioner;
+ }
+
+ /** Activate required resources for given application */
+ public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) {
+ try (Mutex lock = nodeRepository.lock(application)) {
+ activateNodes(application, hosts, transaction, lock);
+ activateLoadBalancers(application, hosts, lock);
+ }
}
/**
@@ -46,36 +58,50 @@ class Activator {
* @param transaction Transaction with operations to commit together with any operations done within the repository.
* @param application the application to allocate nodes for
* @param hosts the hosts to make the set of active nodes of this
+ * @param applicationLock application lock that must be held when calling this
*/
- public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) {
- try (Mutex lock = nodeRepository.lock(application)) {
- Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
- NodeList allNodes = nodeRepository.list();
- NodeList applicationNodes = allNodes.owner(application);
-
- List<Node> reserved = applicationNodes.state(Node.State.reserved).asList();
- List<Node> reservedToActivate = retainHostsInList(hostnames, reserved);
- List<Node> active = applicationNodes.state(Node.State.active).asList();
- List<Node> continuedActive = retainHostsInList(hostnames, active);
- List<Node> allActive = new ArrayList<>(continuedActive);
- allActive.addAll(reservedToActivate);
- if ( ! containsAll(hostnames, allActive))
- throw new IllegalArgumentException("Activation of " + application + " failed. " +
- "Could not find all requested hosts." +
- "\nRequested: " + hosts +
- "\nReserved: " + toHostNames(reserved) +
- "\nActive: " + toHostNames(active) +
- "\nThis might happen if the time from reserving host to activation takes " +
- "longer time than reservation expiry (the hosts will then no longer be reserved)");
-
- validateParentHosts(application, allNodes, reservedToActivate);
-
- List<Node> activeToRemove = removeHostsFromList(hostnames, active);
- activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired
- nodeRepository.deactivate(activeToRemove, transaction);
- nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active with any changes
- nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction);
- }
+ private void activateNodes(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction,
+ @SuppressWarnings("unused") Mutex applicationLock) {
+ Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
+ NodeList allNodes = nodeRepository.list();
+ NodeList applicationNodes = allNodes.owner(application);
+
+ List<Node> reserved = applicationNodes.state(Node.State.reserved).asList();
+ List<Node> reservedToActivate = retainHostsInList(hostnames, reserved);
+ List<Node> active = applicationNodes.state(Node.State.active).asList();
+ List<Node> continuedActive = retainHostsInList(hostnames, active);
+ List<Node> allActive = new ArrayList<>(continuedActive);
+ allActive.addAll(reservedToActivate);
+ if (!containsAll(hostnames, allActive))
+ throw new IllegalArgumentException("Activation of " + application + " failed. " +
+ "Could not find all requested hosts." +
+ "\nRequested: " + hosts +
+ "\nReserved: " + toHostNames(reserved) +
+ "\nActive: " + toHostNames(active) +
+ "\nThis might happen if the time from reserving host to activation takes " +
+ "longer time than reservation expiry (the hosts will then no longer be reserved)");
+
+ validateParentHosts(application, allNodes, reservedToActivate);
+
+ List<Node> activeToRemove = removeHostsFromList(hostnames, active);
+ activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired
+ nodeRepository.deactivate(activeToRemove, transaction);
+ nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active with any changes
+ nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction);
+ }
+
+ /** Activate load balancers */
+ private void activateLoadBalancers(ApplicationId application, Collection<HostSpec> hosts,
+ @SuppressWarnings("unused") Mutex applicationLock) {
+ loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, clustersOf(hosts)));
+ }
+
+ private static List<ClusterSpec> clustersOf(Collection<HostSpec> hosts) {
+ return hosts.stream()
+ .map(HostSpec::membership)
+ .flatMap(Optional::stream)
+ .map(ClusterMembership::cluster)
+ .collect(Collectors.toUnmodifiableList());
}
private static void validateParentHosts(ApplicationId application, NodeList nodes, List<Node> potentialChildren) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index 372dca84a53..6e688a08c84 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -18,8 +18,6 @@ import com.yahoo.vespa.hosted.provision.lb.Real;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
-import java.util.Collections;
-import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
@@ -27,10 +25,14 @@ import java.util.Set;
import java.util.stream.Collectors;
/**
- * Provides provisioning of load balancers for applications.
+ * Provisions and configures application load balancers.
*
* @author mpolden
*/
+// Load balancer state transitions:
+// 1) (new) -> reserved -> active
+// 2) active | reserved -> inactive
+// 3) inactive -> active | (removed)
public class LoadBalancerProvisioner {
private final NodeRepository nodeRepository;
@@ -44,43 +46,72 @@ public class LoadBalancerProvisioner {
}
/**
- * Provision load balancer(s) for given application.
+ * Prepare a load balancer for given application and cluster.
*
- * If the application has multiple container clusters, one load balancer will be provisioned for each cluster.
+ * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated
+ * nodes. Its state will remain unchanged.
+ *
+ * If no load balancer exists, a new one will be provisioned in {@link LoadBalancer.State#reserved}.
+ *
+ * Calling this for irrelevant node or cluster types is a no-op.
*/
- public Map<LoadBalancerId, LoadBalancer> provision(ApplicationId application) {
- try (Mutex applicationLock = nodeRepository.lock(application)) {
- try (Mutex loadBalancersLock = db.lockLoadBalancers()) {
- Map<LoadBalancerId, LoadBalancer> loadBalancers = new LinkedHashMap<>();
- for (Map.Entry<ClusterSpec, List<Node>> kv : activeContainers(application).entrySet()) {
- LoadBalancerId id = new LoadBalancerId(application, kv.getKey().id());
- LoadBalancerInstance instance = create(application, kv.getKey().id(), kv.getValue());
- // Load balancer is always re-activated here to avoid reallocation if an application/cluster is
- // deleted and then redeployed.
- LoadBalancer loadBalancer = new LoadBalancer(id, instance, false);
- loadBalancers.put(loadBalancer.id(), loadBalancer);
- db.writeLoadBalancer(loadBalancer);
- }
- return Collections.unmodifiableMap(loadBalancers);
- }
+ public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
+ if (requestedNodes.type() != NodeType.tenant) return; // Nothing to provision for this node type
+ if (cluster.type() != ClusterSpec.Type.container) return; // Nothing to provision for this cluster type
+ provision(application, cluster.id(), false);
+ }
+
+ /**
+ * Activate load balancer for given application and cluster.
+ *
+ * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated
+ * nodes and the load balancer itself will be moved to {@link LoadBalancer.State#active}.
+ *
+ * Calling this when no load balancer has been prepared for given cluster is a no-op.
+ */
+ public void activate(ApplicationId application, List<ClusterSpec> clusters) {
+ for (var clusterId : containerClusterIdsOf(clusters)) {
+ // Provision again to ensure that the load balancer instance is re-configured with the correct nodes
+ provision(application, clusterId, true);
}
}
/**
* Deactivate all load balancers assigned to given application. This is a no-op if an application does not have any
- * load balancer(s)
+ * load balancer(s).
*/
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex applicationLock = nodeRepository.lock(application)) {
try (Mutex loadBalancersLock = db.lockLoadBalancers()) {
- List<LoadBalancer> deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream()
- .map(LoadBalancer::deactivate)
- .collect(Collectors.toList());
+ var now = nodeRepository.clock().instant();
+ var deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream()
+ .map(lb -> lb.with(LoadBalancer.State.inactive, now))
+ .collect(Collectors.toList());
db.writeLoadBalancers(deactivatedLoadBalancers, transaction);
}
}
}
+ /** Idempotently provision a load balancer for given application and cluster */
+ private void provision(ApplicationId application, ClusterSpec.Id clusterId, boolean activate) {
+ try (var applicationLock = nodeRepository.lock(application)) {
+ try (var loadBalancersLock = db.lockLoadBalancers()) {
+ var id = new LoadBalancerId(application, clusterId);
+ var now = nodeRepository.clock().instant();
+ var instance = create(application, clusterId, allocatedContainers(application, clusterId));
+ var loadBalancer = db.readLoadBalancers().get(id);
+ if (loadBalancer == null) {
+ if (activate) return; // Nothing to activate as this load balancer was never prepared
+ loadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
+ } else {
+ var newState = activate ? LoadBalancer.State.active : loadBalancer.state();
+ loadBalancer = loadBalancer.with(instance).with(newState, now);
+ }
+ db.writeLoadBalancer(loadBalancer);
+ }
+ }
+ }
+
private LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, List<Node> nodes) {
Map<HostName, Set<String>> hostnameToIpAdresses = nodes.stream()
.collect(Collectors.toMap(node -> HostName.from(node.hostname()),
@@ -92,15 +123,14 @@ public class LoadBalancerProvisioner {
return service.create(application, cluster, reals);
}
- /** Returns a list of active containers for given application, grouped by cluster spec */
- private Map<ClusterSpec, List<Node>> activeContainers(ApplicationId application) {
- return new NodeList(nodeRepository.getNodes(NodeType.tenant, Node.State.active))
+ /** Returns a list of active and reserved nodes of type container in given cluster */
+ private List<Node> allocatedContainers(ApplicationId application, ClusterSpec.Id clusterId) {
+ return new NodeList(nodeRepository.getNodes(NodeType.tenant, Node.State.reserved, Node.State.active))
.owner(application)
.filter(node -> node.state().isAllocated())
.type(ClusterSpec.Type.container)
- .asList()
- .stream()
- .collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster()));
+ .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterId))
+ .asList();
}
/** Find IP addresses reachable by the load balancer service */
@@ -118,4 +148,11 @@ public class LoadBalancerProvisioner {
return reachable;
}
+ private static List<ClusterSpec.Id> containerClusterIdsOf(List<ClusterSpec> clusters) {
+ return clusters.stream()
+ .filter(c -> c.type() == ClusterSpec.Type.container)
+ .map(ClusterSpec::id)
+ .collect(Collectors.toUnmodifiableList());
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 21bfc1b6886..90ca8ef4d33 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -62,14 +62,14 @@ public class NodeRepositoryProvisioner implements Provisioner {
this.nodeRepository = nodeRepository;
this.capacityPolicies = new CapacityPolicies(zone, flavors);
this.zone = zone;
+ this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService));
this.preparer = new Preparer(nodeRepository,
zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD,
- provisionServiceProvider.getHostProvisioner(),
- provisionServiceProvider.getHostResourcesCalculator(),
- Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource));
- this.activator = new Activator(nodeRepository);
- this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService ->
- new LoadBalancerProvisioner(nodeRepository, lbService));
+ provisionServiceProvider.getHostProvisioner(),
+ provisionServiceProvider.getHostResourcesCalculator(),
+ Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource),
+ loadBalancerProvisioner);
+ this.activator = new Activator(nodeRepository, loadBalancerProvisioner);
}
/**
@@ -112,14 +112,6 @@ public class NodeRepositoryProvisioner implements Provisioner {
public void activate(NestedTransaction transaction, ApplicationId application, Collection<HostSpec> hosts) {
validate(hosts);
activator.activate(application, hosts, transaction);
- transaction.onCommitted(() -> {
- try {
- loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.provision(application));
- } catch (Exception e) {
- log.log(LogLevel.ERROR, "Failed to provision load balancer for application " +
- application.toShortString(), e);
- }
- });
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index ca958f15c69..31ec964dceb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -24,15 +24,24 @@ class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
+ private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
private final int spareCount;
public Preparer(NodeRepository nodeRepository, int spareCount, Optional<HostProvisioner> hostProvisioner,
- HostResourcesCalculator hostResourcesCalculator, BooleanFlag dynamicProvisioningEnabled) {
+ HostResourcesCalculator hostResourcesCalculator, BooleanFlag dynamicProvisioningEnabled,
+ Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.spareCount = spareCount;
+ this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, hostResourcesCalculator, dynamicProvisioningEnabled);
}
+ /** Prepare all required resources for the given application and cluster */
+ public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
+ prepareLoadBalancer(application, cluster, requestedNodes);
+ return prepareNodes(application, cluster, requestedNodes, wantedGroups);
+ }
+
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
@@ -41,7 +50,7 @@ class Preparer {
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
+ public List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
MutableInteger highestIndex = new MutableInteger(findHighestIndex(application, cluster));
@@ -58,6 +67,11 @@ class Preparer {
return acceptedNodes;
}
+ /** Prepare a load balancer for given application and cluster */
+ public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
+ loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
+ }
+
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
index d31834567ab..bfbf7775031 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java
@@ -55,6 +55,8 @@ public class LoadBalancersResponse extends HttpResponse {
loadBalancers().forEach(lb -> {
Cursor lbObject = loadBalancerArray.addObject();
lbObject.setString("id", lb.id().serializedForm());
+ lbObject.setString("state", lb.state().name());
+ lbObject.setLong("changedAt", lb.changedAt().toEpochMilli());
lbObject.setString("application", lb.id().application().application().value());
lbObject.setString("tenant", lb.id().application().tenant().value());
lbObject.setString("instance", lb.id().application().instance().value());
@@ -76,9 +78,9 @@ public class LoadBalancersResponse extends HttpResponse {
realObject.setLong("port", real.port());
});
- lbObject.setArray("rotations"); // To avoid changing the API. This can be removed when clients stop expecting this
-
- lbObject.setBool("inactive", lb.inactive());
+ // TODO(mpolden): The following fields preserves API compatibility. These can be removed once clients stop expecting them
+ lbObject.setArray("rotations");
+ lbObject.setBool("inactive", lb.state() == LoadBalancer.State.inactive);
});
new JsonFormat(true).encode(stream, slime);