author     Martin Polden <mpolden@mpolden.no>  2019-06-19 15:56:31 +0200
committer  Martin Polden <mpolden@mpolden.no>  2019-06-20 12:17:30 +0200
commit     f7e3e48d19723494a3c9fffe0693d4caa3e8c47a (patch)
tree       194ff721f0baf9b4e80ec025b4b42b3467786510 /node-repository
parent     88ebc1587a98733ced7547cb4d93784939adf82e (diff)
Prepare and activate load balancers
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java | 88
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java | 96
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java | 20
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java | 18
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java | 79
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java | 13
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json | 36
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json | 21
9 files changed, 252 insertions(+), 121 deletions(-)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index 4626a600d2c..1e83c2c9176 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -2,6 +2,8 @@
package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterMembership;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.ParentHostUnavailableException;
import com.yahoo.transaction.Mutex;
@@ -22,16 +24,26 @@ import java.util.function.Function;
import java.util.stream.Collectors;
/**
- * Performs activation of nodes for an application
+ * Performs activation of resources for an application, e.g. nodes or load balancers.
*
* @author bratseth
*/
class Activator {
private final NodeRepository nodeRepository;
+ private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
- public Activator(NodeRepository nodeRepository) {
+ public Activator(NodeRepository nodeRepository, Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
+ this.loadBalancerProvisioner = loadBalancerProvisioner;
+ }
+
+ /** Activate required resources for given application */
+ public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) {
+ try (Mutex lock = nodeRepository.lock(application)) {
+ activateNodes(application, hosts, transaction, lock);
+ activateLoadBalancers(application, hosts, lock);
+ }
}
/**
@@ -46,36 +58,50 @@ class Activator {
* @param transaction Transaction with operations to commit together with any operations done within the repository.
* @param application the application to allocate nodes for
* @param hosts the hosts to make the set of active nodes of this
+ * @param applicationLock application lock that must be held when calling this
*/
- public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) {
- try (Mutex lock = nodeRepository.lock(application)) {
- Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
- NodeList allNodes = nodeRepository.list();
- NodeList applicationNodes = allNodes.owner(application);
-
- List<Node> reserved = applicationNodes.state(Node.State.reserved).asList();
- List<Node> reservedToActivate = retainHostsInList(hostnames, reserved);
- List<Node> active = applicationNodes.state(Node.State.active).asList();
- List<Node> continuedActive = retainHostsInList(hostnames, active);
- List<Node> allActive = new ArrayList<>(continuedActive);
- allActive.addAll(reservedToActivate);
- if ( ! containsAll(hostnames, allActive))
- throw new IllegalArgumentException("Activation of " + application + " failed. " +
- "Could not find all requested hosts." +
- "\nRequested: " + hosts +
- "\nReserved: " + toHostNames(reserved) +
- "\nActive: " + toHostNames(active) +
- "\nThis might happen if the time from reserving host to activation takes " +
- "longer time than reservation expiry (the hosts will then no longer be reserved)");
-
- validateParentHosts(application, allNodes, reservedToActivate);
-
- List<Node> activeToRemove = removeHostsFromList(hostnames, active);
- activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired
- nodeRepository.deactivate(activeToRemove, transaction);
- nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active with any changes
- nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction);
- }
+ private void activateNodes(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction,
+ @SuppressWarnings("unused") Mutex applicationLock) {
+ Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
+ NodeList allNodes = nodeRepository.list();
+ NodeList applicationNodes = allNodes.owner(application);
+
+ List<Node> reserved = applicationNodes.state(Node.State.reserved).asList();
+ List<Node> reservedToActivate = retainHostsInList(hostnames, reserved);
+ List<Node> active = applicationNodes.state(Node.State.active).asList();
+ List<Node> continuedActive = retainHostsInList(hostnames, active);
+ List<Node> allActive = new ArrayList<>(continuedActive);
+ allActive.addAll(reservedToActivate);
+ if (!containsAll(hostnames, allActive))
+ throw new IllegalArgumentException("Activation of " + application + " failed. " +
+ "Could not find all requested hosts." +
+ "\nRequested: " + hosts +
+ "\nReserved: " + toHostNames(reserved) +
+ "\nActive: " + toHostNames(active) +
+ "\nThis might happen if the time from reserving host to activation takes " +
+ "longer time than reservation expiry (the hosts will then no longer be reserved)");
+
+ validateParentHosts(application, allNodes, reservedToActivate);
+
+ List<Node> activeToRemove = removeHostsFromList(hostnames, active);
+ activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired
+ nodeRepository.deactivate(activeToRemove, transaction);
+ nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active with any changes
+ nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction);
+ }
+
+ /** Activate load balancers */
+ private void activateLoadBalancers(ApplicationId application, Collection<HostSpec> hosts,
+ @SuppressWarnings("unused") Mutex applicationLock) {
+ loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, clustersOf(hosts)));
+ }
+
+ private static List<ClusterSpec> clustersOf(Collection<HostSpec> hosts) {
+ return hosts.stream()
+ .map(HostSpec::membership)
+ .flatMap(Optional::stream)
+ .map(ClusterMembership::cluster)
+ .collect(Collectors.toUnmodifiableList());
}
private static void validateParentHosts(ApplicationId application, NodeList nodes, List<Node> potentialChildren) {
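Both new private methods take the application lock as an otherwise-unused parameter, so the signature itself records what the javadoc states: the lock must be held when calling them. A minimal sketch of this lock-as-parameter pattern, using hypothetical placeholder types rather than the real com.yahoo.transaction.Mutex and NodeRepository:

    import java.util.Collection;

    class ActivatorSketch {

        /** Simplified stand-in for a held lock; closing it releases the lock. */
        interface Mutex extends AutoCloseable {
            @Override void close();
        }

        interface Repository {
            Mutex lock(String application);
        }

        private final Repository repository;

        ActivatorSketch(Repository repository) {
            this.repository = repository;
        }

        /** Takes the lock once; both activation steps run under it and see a consistent view. */
        void activate(String application, Collection<String> hosts) {
            try (Mutex lock = repository.lock(application)) {
                activateNodes(application, hosts, lock);
                activateLoadBalancers(application, hosts, lock);
            }
        }

        // The Mutex parameter is never read; it exists so these methods cannot be
        // called without a lock in hand.
        private void activateNodes(String application, Collection<String> hosts,
                                   @SuppressWarnings("unused") Mutex applicationLock) { /* ... */ }

        private void activateLoadBalancers(String application, Collection<String> hosts,
                                           @SuppressWarnings("unused") Mutex applicationLock) { /* ... */ }
    }

Java cannot check that the caller holds the right lock; the parameter is a convention that turns a forgotten lock into a compile error rather than a race.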
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index a37bdf6804b..6e688a08c84 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -18,8 +18,6 @@ import com.yahoo.vespa.hosted.provision.lb.Real;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
-import java.util.Collections;
-import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
@@ -27,10 +25,14 @@ import java.util.Set;
import java.util.stream.Collectors;
/**
- * Provides provisioning of load balancers for applications.
+ * Provisions and configures application load balancers.
*
* @author mpolden
*/
+// Load balancer state transitions:
+// 1) (new) -> reserved -> active
+// 2) active | reserved -> inactive
+// 3) inactive -> active | (removed)
public class LoadBalancerProvisioner {
private final NodeRepository nodeRepository;
@@ -44,44 +46,72 @@ public class LoadBalancerProvisioner {
}
/**
- * Provision load balancer(s) for given application.
+ * Prepare a load balancer for given application and cluster.
*
- * If the application has multiple container clusters, one load balancer will be provisioned for each cluster.
+ * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated
+ * nodes. Its state will remain unchanged.
+ *
+ * If no load balancer exists, a new one will be provisioned in {@link LoadBalancer.State#reserved}.
+ *
+ * Calling this for irrelevant node or cluster types is a no-op.
*/
- public Map<LoadBalancerId, LoadBalancer> provision(ApplicationId application) {
- try (Mutex applicationLock = nodeRepository.lock(application)) {
- try (Mutex loadBalancersLock = db.lockLoadBalancers()) {
- Map<LoadBalancerId, LoadBalancer> loadBalancers = new LinkedHashMap<>();
- for (Map.Entry<ClusterSpec, List<Node>> kv : activeContainers(application).entrySet()) {
- LoadBalancerId id = new LoadBalancerId(application, kv.getKey().id());
- LoadBalancerInstance instance = create(application, kv.getKey().id(), kv.getValue());
- // Load balancer is always re-activated here to avoid reallocation if an application/cluster is
- // deleted and then redeployed.
- LoadBalancer loadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.active, nodeRepository.clock().instant());
- loadBalancers.put(loadBalancer.id(), loadBalancer);
- db.writeLoadBalancer(loadBalancer);
- }
- return Collections.unmodifiableMap(loadBalancers);
- }
+ public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
+ if (requestedNodes.type() != NodeType.tenant) return; // Nothing to provision for this node type
+ if (cluster.type() != ClusterSpec.Type.container) return; // Nothing to provision for this cluster type
+ provision(application, cluster.id(), false);
+ }
+
+ /**
+ * Activate load balancer for given application and cluster.
+ *
+ * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated
+ * nodes and the load balancer itself will be moved to {@link LoadBalancer.State#active}.
+ *
+ * Calling this when no load balancer has been prepared for given cluster is a no-op.
+ */
+ public void activate(ApplicationId application, List<ClusterSpec> clusters) {
+ for (var clusterId : containerClusterIdsOf(clusters)) {
+ // Provision again to ensure that the load balancer instance is re-configured with the correct nodes
+ provision(application, clusterId, true);
}
}
/**
* Deactivate all load balancers assigned to given application. This is a no-op if an application does not have any
- * load balancer(s)
+ * load balancer(s).
*/
public void deactivate(ApplicationId application, NestedTransaction transaction) {
try (Mutex applicationLock = nodeRepository.lock(application)) {
try (Mutex loadBalancersLock = db.lockLoadBalancers()) {
var now = nodeRepository.clock().instant();
- List<LoadBalancer> deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream()
- .map(lb -> lb.with(LoadBalancer.State.inactive, now))
- .collect(Collectors.toList());
+ var deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream()
+ .map(lb -> lb.with(LoadBalancer.State.inactive, now))
+ .collect(Collectors.toList());
db.writeLoadBalancers(deactivatedLoadBalancers, transaction);
}
}
}
+ /** Idempotently provision a load balancer for given application and cluster */
+ private void provision(ApplicationId application, ClusterSpec.Id clusterId, boolean activate) {
+ try (var applicationLock = nodeRepository.lock(application)) {
+ try (var loadBalancersLock = db.lockLoadBalancers()) {
+ var id = new LoadBalancerId(application, clusterId);
+ var now = nodeRepository.clock().instant();
+ var instance = create(application, clusterId, allocatedContainers(application, clusterId));
+ var loadBalancer = db.readLoadBalancers().get(id);
+ if (loadBalancer == null) {
+ if (activate) return; // Nothing to activate as this load balancer was never prepared
+ loadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
+ } else {
+ var newState = activate ? LoadBalancer.State.active : loadBalancer.state();
+ loadBalancer = loadBalancer.with(instance).with(newState, now);
+ }
+ db.writeLoadBalancer(loadBalancer);
+ }
+ }
+ }
+
private LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, List<Node> nodes) {
Map<HostName, Set<String>> hostnameToIpAdresses = nodes.stream()
.collect(Collectors.toMap(node -> HostName.from(node.hostname()),
@@ -93,15 +123,14 @@ public class LoadBalancerProvisioner {
return service.create(application, cluster, reals);
}
- /** Returns a list of active containers for given application, grouped by cluster spec */
- private Map<ClusterSpec, List<Node>> activeContainers(ApplicationId application) {
- return new NodeList(nodeRepository.getNodes(NodeType.tenant, Node.State.active))
+ /** Returns a list of active and reserved nodes of type container in given cluster */
+ private List<Node> allocatedContainers(ApplicationId application, ClusterSpec.Id clusterId) {
+ return new NodeList(nodeRepository.getNodes(NodeType.tenant, Node.State.reserved, Node.State.active))
.owner(application)
.filter(node -> node.state().isAllocated())
.type(ClusterSpec.Type.container)
- .asList()
- .stream()
- .collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster()));
+ .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterId))
+ .asList();
}
/** Find IP addresses reachable by the load balancer service */
@@ -119,4 +148,11 @@ public class LoadBalancerProvisioner {
return reachable;
}
+ private static List<ClusterSpec.Id> containerClusterIdsOf(List<ClusterSpec> clusters) {
+ return clusters.stream()
+ .filter(c -> c.type() == ClusterSpec.Type.container)
+ .map(ClusterSpec::id)
+ .collect(Collectors.toUnmodifiableList());
+ }
+
}
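The state-transition comment at the top of the class describes a small state machine. A sketch of the same rules, assuming a simplified stand-in enum rather than the real LoadBalancer.State:

    import java.util.EnumSet;
    import java.util.Set;

    /** Hypothetical mirror of the documented transitions; (removed) is deletion, not a state. */
    enum LbState {
        reserved, active, inactive;

        /** States reachable from the given state, per the class comment. */
        static Set<LbState> successors(LbState from) {
            switch (from) {
                case reserved:  return EnumSet.of(active, inactive); // transitions 1) and 2)
                case active:    return EnumSet.of(inactive);         // transition 2)
                case inactive:  return EnumSet.of(active);           // transition 3), or removal
                default:        throw new AssertionError("unknown state: " + from);
            }
        }

        static boolean isValidTransition(LbState from, LbState to) {
            return successors(from).contains(to);
        }
    }

provision() above realizes transitions 1) and 3): a load balancer missing from the database is written in reserved, and activation moves whatever already exists to active.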
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 21bfc1b6886..90ca8ef4d33 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -62,14 +62,14 @@ public class NodeRepositoryProvisioner implements Provisioner {
this.nodeRepository = nodeRepository;
this.capacityPolicies = new CapacityPolicies(zone, flavors);
this.zone = zone;
+ this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService));
this.preparer = new Preparer(nodeRepository,
zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD,
- provisionServiceProvider.getHostProvisioner(),
- provisionServiceProvider.getHostResourcesCalculator(),
- Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource));
- this.activator = new Activator(nodeRepository);
- this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService ->
- new LoadBalancerProvisioner(nodeRepository, lbService));
+ provisionServiceProvider.getHostProvisioner(),
+ provisionServiceProvider.getHostResourcesCalculator(),
+ Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource),
+ loadBalancerProvisioner);
+ this.activator = new Activator(nodeRepository, loadBalancerProvisioner);
}
/**
@@ -112,14 +112,6 @@ public class NodeRepositoryProvisioner implements Provisioner {
public void activate(NestedTransaction transaction, ApplicationId application, Collection<HostSpec> hosts) {
validate(hosts);
activator.activate(application, hosts, transaction);
- transaction.onCommitted(() -> {
- try {
- loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.provision(application));
- } catch (Exception e) {
- log.log(LogLevel.ERROR, "Failed to provision load balancer for application " +
- application.toShortString(), e);
- }
- });
}
@Override
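The block deleted from activate() provisioned load balancers in an onCommitted hook, where a failure could only be logged because the transaction had already committed. With provisioning moved into prepare and activate, a failure can instead propagate and fail the deployment. A toy illustration of why post-commit hooks must swallow exceptions (a hypothetical stand-in, not Vespa's NestedTransaction):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.logging.Level;
    import java.util.logging.Logger;

    /** Minimal transaction with post-commit hooks. */
    class MiniTransaction {

        private static final Logger log = Logger.getLogger(MiniTransaction.class.getName());
        private final List<Runnable> hooks = new ArrayList<>();

        void onCommitted(Runnable hook) {
            hooks.add(hook);
        }

        void commit() {
            // ... the real operations commit here and cannot be rolled back below ...
            for (Runnable hook : hooks) {
                try {
                    hook.run();
                } catch (Exception e) {
                    // Too late to fail the commit; logging is the only option left.
                    log.log(Level.SEVERE, "post-commit hook failed", e);
                }
            }
        }
    }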
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index ca958f15c69..31ec964dceb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -24,15 +24,24 @@ class Preparer {
private final NodeRepository nodeRepository;
private final GroupPreparer groupPreparer;
+ private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
private final int spareCount;
public Preparer(NodeRepository nodeRepository, int spareCount, Optional<HostProvisioner> hostProvisioner,
- HostResourcesCalculator hostResourcesCalculator, BooleanFlag dynamicProvisioningEnabled) {
+ HostResourcesCalculator hostResourcesCalculator, BooleanFlag dynamicProvisioningEnabled,
+ Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.spareCount = spareCount;
+ this.loadBalancerProvisioner = loadBalancerProvisioner;
this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, hostResourcesCalculator, dynamicProvisioningEnabled);
}
+ /** Prepare all required resources for the given application and cluster */
+ public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
+ prepareLoadBalancer(application, cluster, requestedNodes);
+ return prepareNodes(application, cluster, requestedNodes, wantedGroups);
+ }
+
/**
* Ensure sufficient nodes are reserved or active for the given application and cluster
*
@@ -41,7 +50,7 @@ class Preparer {
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
+ public List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) {
List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups);
MutableInteger highestIndex = new MutableInteger(findHighestIndex(application, cluster));
@@ -58,6 +67,11 @@ class Preparer {
return acceptedNodes;
}
+ /** Prepare a load balancer for given application and cluster */
+ public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
+ loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes));
+ }
+
/**
* Returns a list of the nodes which are
* in groups with index number above or equal the group count
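Preparer now prepares the load balancer before the nodes, and holds the provisioner as an Optional so that zones without a load balancer service skip the step entirely. A compact sketch of that shape, with placeholder names:

    import java.util.List;
    import java.util.Optional;

    class PreparerSketch {

        /** Placeholder for LoadBalancerProvisioner. */
        interface LbProvisioner {
            void prepare(String application, String cluster);
        }

        private final Optional<LbProvisioner> lbProvisioner;

        PreparerSketch(Optional<LbProvisioner> lbProvisioner) {
            this.lbProvisioner = lbProvisioner;
        }

        List<String> prepare(String application, String cluster) {
            // No-op in zones without a load balancer service.
            lbProvisioner.ifPresent(p -> p.prepare(application, cluster));
            return prepareNodes(application, cluster);
        }

        private List<String> prepareNodes(String application, String cluster) {
            return List.of(); // reserve and return nodes in the real implementation
        }
    }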
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index 07cc3aa634c..6d94e4ab992 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -4,11 +4,11 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.google.common.collect.Iterators;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.RotationName;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
@@ -26,6 +26,7 @@ import java.util.function.Supplier;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
/**
@@ -40,26 +41,29 @@ public class LoadBalancerProvisionerTest {
@Test
public void provision_load_balancer() {
+ Supplier<List<LoadBalancer>> lbApp1 = () -> tester.nodeRepository().loadBalancers().owner(app1).asList();
+ Supplier<List<LoadBalancer>> lbApp2 = () -> tester.nodeRepository().loadBalancers().owner(app2).asList();
ClusterSpec.Id containerCluster1 = ClusterSpec.Id.from("qrs1");
ClusterSpec.Id contentCluster = ClusterSpec.Id.from("content");
- Set<RotationName> rotationsCluster1 = Set.of(RotationName.from("r1-1"), RotationName.from("r1-2"));
- tester.activate(app1, prepare(app1,
- clusterRequest(ClusterSpec.Type.container, containerCluster1, rotationsCluster1),
- clusterRequest(ClusterSpec.Type.content, contentCluster)));
- tester.activate(app2, prepare(app2,
- clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs"))));
// Provision a load balancer for each application
- Supplier<List<LoadBalancer>> loadBalancers = () -> tester.nodeRepository().loadBalancers().owner(app1).asList();
- assertEquals(1, loadBalancers.get().size());
-
- assertEquals(app1, loadBalancers.get().get(0).id().application());
- assertEquals(containerCluster1, loadBalancers.get().get(0).id().cluster());
- assertEquals(Collections.singleton(4443), loadBalancers.get().get(0).instance().ports());
- assertEquals("127.0.0.1", get(loadBalancers.get().get(0).instance().reals(), 0).ipAddress());
- assertEquals(4080, get(loadBalancers.get().get(0).instance().reals(), 0).port());
- assertEquals("127.0.0.2", get(loadBalancers.get().get(0).instance().reals(), 1).ipAddress());
- assertEquals(4080, get(loadBalancers.get().get(0).instance().reals(), 1).port());
+ var nodes = prepare(app1,
+ clusterRequest(ClusterSpec.Type.container, containerCluster1),
+ clusterRequest(ClusterSpec.Type.content, contentCluster));
+ assertEquals(1, lbApp1.get().size());
+ assertEquals("Prepare provisions load balancer with 0 reals", Set.of(), lbApp1.get().get(0).instance().reals());
+ tester.activate(app1, nodes);
+ tester.activate(app2, prepare(app2, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs"))));
+ assertEquals(1, lbApp2.get().size());
+
+ // Reals are configured after activation
+ assertEquals(app1, lbApp1.get().get(0).id().application());
+ assertEquals(containerCluster1, lbApp1.get().get(0).id().cluster());
+ assertEquals(Collections.singleton(4443), lbApp1.get().get(0).instance().ports());
+ assertEquals("127.0.0.1", get(lbApp1.get().get(0).instance().reals(), 0).ipAddress());
+ assertEquals(4080, get(lbApp1.get().get(0).instance().reals(), 0).port());
+ assertEquals("127.0.0.2", get(lbApp1.get().get(0).instance().reals(), 1).ipAddress());
+ assertEquals(4080, get(lbApp1.get().get(0).instance().reals(), 1).port());
// A container is failed
Supplier<List<Node>> containers = () -> tester.getNodes(app1).type(ClusterSpec.Type.container).asList();
@@ -78,17 +82,17 @@ public class LoadBalancerProvisionerTest {
.noneMatch(hostname -> hostname.equals(toFail.hostname())));
assertEquals(containers.get().get(0).hostname(), get(loadBalancer.instance().reals(), 0).hostname().value());
assertEquals(containers.get().get(1).hostname(), get(loadBalancer.instance().reals(), 1).hostname().value());
+ assertSame("State is unchanged", LoadBalancer.State.active, loadBalancer.state());
// Add another container cluster
- Set<RotationName> rotationsCluster2 = Set.of(RotationName.from("r2-1"), RotationName.from("r2-2"));
ClusterSpec.Id containerCluster2 = ClusterSpec.Id.from("qrs2");
tester.activate(app1, prepare(app1,
- clusterRequest(ClusterSpec.Type.container, containerCluster1, rotationsCluster1),
- clusterRequest(ClusterSpec.Type.container, containerCluster2, rotationsCluster2),
+ clusterRequest(ClusterSpec.Type.container, containerCluster1),
+ clusterRequest(ClusterSpec.Type.container, containerCluster2),
clusterRequest(ClusterSpec.Type.content, contentCluster)));
// Load balancer is provisioned for second container cluster
- assertEquals(2, loadBalancers.get().size());
+ assertEquals(2, lbApp1.get().size());
List<HostName> activeContainers = tester.getNodes(app1, Node.State.active)
.type(ClusterSpec.Type.container).asList()
.stream()
@@ -96,7 +100,7 @@ public class LoadBalancerProvisionerTest {
.map(HostName::from)
.sorted()
.collect(Collectors.toList());
- List<HostName> reals = loadBalancers.get().stream()
+ List<HostName> reals = lbApp1.get().stream()
.map(LoadBalancer::instance)
.map(LoadBalancerInstance::reals)
.flatMap(Collection::stream)
@@ -110,38 +114,35 @@ public class LoadBalancerProvisionerTest {
tester.provisioner().remove(removeTransaction, app1);
removeTransaction.commit();
- assertEquals(2, loadBalancers.get().size());
- assertTrue("Deactivated load balancers", loadBalancers.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.inactive));
+ assertEquals(2, lbApp1.get().size());
+ assertTrue("Deactivated load balancers", lbApp1.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.inactive));
+ assertTrue("Load balancers for " + app2 + " remain active", lbApp2.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.active));
// Application is redeployed with one cluster and load balancer is re-activated
tester.activate(app1, prepare(app1,
clusterRequest(ClusterSpec.Type.container, containerCluster1),
clusterRequest(ClusterSpec.Type.content, contentCluster)));
- assertEquals("Re-activated load balancer for " + containerCluster1, LoadBalancer.State.active,
- loadBalancers.get().stream()
- .filter(lb -> lb.id().cluster().equals(containerCluster1))
- .map(LoadBalancer::state)
- .findFirst()
- .orElseThrow());
- }
-
- private ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id) {
- return clusterRequest(type, id, Collections.emptySet());
- }
-
- private ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id, Set<RotationName> rotations) {
- return ClusterSpec.request(type, id, Version.fromString("6.42"), false, rotations);
+ assertSame("Re-activated load balancer for " + containerCluster1, LoadBalancer.State.active,
+ lbApp1.get().stream()
+ .filter(lb -> lb.id().cluster().equals(containerCluster1))
+ .map(LoadBalancer::state)
+ .findFirst()
+ .orElseThrow());
}
private Set<HostSpec> prepare(ApplicationId application, ClusterSpec... specs) {
tester.makeReadyNodes(specs.length * 2, "d-1-1-1");
Set<HostSpec> allNodes = new LinkedHashSet<>();
for (ClusterSpec spec : specs) {
- allNodes.addAll(tester.prepare(application, spec, 2, 1, new NodeResources(1, 1, 1)));
+ allNodes.addAll(tester.prepare(application, spec, Capacity.fromCount(2, new NodeResources(1, 1, 1), false, true), 1, false));
}
return allNodes;
}
+ private static ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id) {
+ return ClusterSpec.request(type, id, Version.fromString("6.42"), false);
+ }
+
private static <T> T get(Set<T> set, int position) {
return Iterators.get(set.iterator(), position, null);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index c8051c3bdee..294c153f86f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -139,16 +139,21 @@ public class ProvisioningTester {
}
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity, int groups) {
+ return prepare(application, cluster, capacity, groups, true);
+ }
+
+ public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity, int groups, boolean idempotentPrepare) {
Set<String> reservedBefore = toHostNames(nodeRepository.getNodes(application, Node.State.reserved));
Set<String> inactiveBefore = toHostNames(nodeRepository.getNodes(application, Node.State.inactive));
- // prepare twice to ensure idempotence
List<HostSpec> hosts1 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger);
- List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger);
- assertEquals(hosts1, hosts2);
+ if (idempotentPrepare) { // prepare twice to ensure idempotence
+ List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger);
+ assertEquals(hosts1, hosts2);
+ }
Set<String> newlyActivated = toHostNames(nodeRepository.getNodes(application, Node.State.reserved));
newlyActivated.removeAll(reservedBefore);
newlyActivated.removeAll(inactiveBefore);
- return hosts2;
+ return hosts1;
}
public void activate(ApplicationId application, Collection<HostSpec> hosts) {
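The new overload lets a test opt out of the prepare-twice idempotence check while the default behaviour stays unchanged. Usage, assuming an application, cluster, capacity and tester set up as in the surrounding tests (a fragment, not a complete test):

    // Default overload: prepares twice and asserts both results are equal.
    List<HostSpec> hosts = tester.prepare(application, cluster, capacity, 1);

    // Prepare only once, skipping the idempotence check.
    List<HostSpec> hostsOnce = tester.prepare(application, cluster, capacity, 1, false);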
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
index 6524292f48c..bfb24d30284 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
@@ -831,7 +831,7 @@ public class RestApiTest {
@Test
public void test_load_balancers() throws Exception {
assertFile(new Request("http://localhost:8080/loadbalancers/v1/"), "load-balancers.json");
- assertFile(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant4.application4.instance4"), "load-balancers.json");
+ assertFile(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant4.application4.instance4"), "load-balancers-single.json");
assertResponse(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant.nonexistent.default"), "{\"loadBalancers\":[]}");
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json
new file mode 100644
index 00000000000..67d2c3bfa4b
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json
@@ -0,0 +1,36 @@
+{
+ "loadBalancers": [
+ {
+ "id": "tenant4:application4:instance4:id4",
+ "state": "active",
+ "changedAt": 123,
+ "application": "application4",
+ "tenant": "tenant4",
+ "instance": "instance4",
+ "cluster": "id4",
+ "hostname": "lb-tenant4.application4.instance4-id4",
+ "dnsZone": "zone-id-1",
+ "networks": [
+ "10.2.3.0/24",
+ "10.4.5.0/24"
+ ],
+ "ports": [
+ 4443
+ ],
+ "reals": [
+ {
+ "hostname": "host13.yahoo.com",
+ "ipAddress": "127.0.13.1",
+ "port": 4080
+ },
+ {
+ "hostname": "host14.yahoo.com",
+ "ipAddress": "127.0.14.1",
+ "port": 4080
+ }
+ ],
+ "rotations": [],
+ "inactive": false
+ }
+ ]
+}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json
index 67d2c3bfa4b..36d4de598e2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json
@@ -1,6 +1,27 @@
{
"loadBalancers": [
{
+ "id": "tenant1:application1:instance1:id1",
+ "state": "reserved",
+ "changedAt": 123,
+ "application": "application1",
+ "tenant": "tenant1",
+ "instance": "instance1",
+ "cluster": "id1",
+ "hostname": "lb-tenant1.application1.instance1-id1",
+ "dnsZone": "zone-id-1",
+ "networks": [
+ "10.2.3.0/24",
+ "10.4.5.0/24"
+ ],
+ "ports": [
+ 4443
+ ],
+ "reals": [],
+ "rotations": [],
+ "inactive": false
+ },
+ {
"id": "tenant4:application4:instance4:id4",
"state": "active",
"changedAt": 123,