author     Martin Polden <mpolden@mpolden.no>  2020-06-10 13:57:23 +0200
committer  Martin Polden <mpolden@mpolden.no>  2020-06-11 09:20:34 +0200
commit     b2719588db5896062bfb9f29a6d49948fdf44ecd (patch)
tree       9bc55c6102beaa23e2796024c62429c13ff4941f /node-repository
parent     b83eb4c7cbaa8b736333db9a5e07dc8db810f6d5 (diff)
Provision load balancer for config server cluster
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java                   |  7
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java             |  6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java                   |  4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java     | 71
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java   |  5
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java | 20
6 files changed, 80 insertions, 33 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
index a38f115b0bf..edf2932ad6e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.lb;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeType;
/**
* A managed load balancer service.
@@ -27,6 +28,12 @@ public interface LoadBalancerService {
/** Returns the protocol supported by this load balancer service */
Protocol protocol();
+ /** Returns whether load balancers created by this service can forward traffic to given node and cluster type */
+ default boolean canForwardTo(NodeType nodeType, ClusterSpec.Type clusterType) {
+ return (nodeType == NodeType.tenant && clusterType.isContainer()) ||
+ (nodeType == NodeType.config && clusterType == ClusterSpec.Type.admin);
+ }
+
/** Load balancer protocols */
enum Protocol {
ipv4,
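The new default method above is the heart of this change: load balancers may now front tenant container clusters and, additionally, the config server's admin cluster. A minimal standalone sketch of that decision rule follows; ForwardingRuleSketch is a hypothetical stand-in, written as a plain static method so it compiles without the rest of the interface, and it only assumes the NodeType and ClusterSpec.Type values that appear in this diff.

    import com.yahoo.config.provision.ClusterSpec;
    import com.yahoo.config.provision.NodeType;

    // Hypothetical stand-in for an implementation that keeps the interface default.
    final class ForwardingRuleSketch {

        // Mirrors the default canForwardTo added above.
        static boolean canForwardTo(NodeType nodeType, ClusterSpec.Type clusterType) {
            return (nodeType == NodeType.tenant && clusterType.isContainer()) ||
                   (nodeType == NodeType.config && clusterType == ClusterSpec.Type.admin);
        }

        public static void main(String[] args) {
            System.out.println(canForwardTo(NodeType.tenant, ClusterSpec.Type.container)); // true
            System.out.println(canForwardTo(NodeType.config, ClusterSpec.Type.admin));     // true
            System.out.println(canForwardTo(NodeType.config, ClusterSpec.Type.container)); // false
        }
    }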
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
index 658d2313ab0..bc4381573c6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
@@ -66,6 +66,12 @@ public class SharedLoadBalancerService implements LoadBalancerService {
return Protocol.dualstack;
}
+ @Override
+ public boolean canForwardTo(NodeType nodeType, ClusterSpec.Type clusterType) {
+ // Shared routing layer only supports routing to tenant nodes
+ return nodeType == NodeType.tenant && clusterType.isContainer();
+ }
+
private static String withPrefixLength(String address) {
if (IP.isV6(address)) {
return address + "/128";
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index ebe9327967e..a61032af276 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -111,10 +111,10 @@ class Activator {
/** Activate load balancers */
private void activateLoadBalancers(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction,
@SuppressWarnings("unused") Mutex applicationLock) {
- loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, clustersOf(hosts), applicationLock, transaction));
+ loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, allClustersOf(hosts), applicationLock, transaction));
}
- private static Set<ClusterSpec> clustersOf(Collection<HostSpec> hosts) {
+ private static Set<ClusterSpec> allClustersOf(Collection<HostSpec> hosts) {
return hosts.stream()
.map(HostSpec::membership)
.flatMap(Optional::stream)
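The rename to allClustersOf makes explicit that the activator passes every cluster, not just container clusters, to LoadBalancerProvisioner.activate; deciding which clusters actually get a load balancer now happens inside the provisioner. The standalone sketch below illustrates the Optional::stream flattening idiom this method relies on, using hypothetical Host and Membership records in place of HostSpec and ClusterMembership.

    import java.util.Collection;
    import java.util.List;
    import java.util.Optional;
    import java.util.Set;
    import java.util.stream.Collectors;

    final class AllClustersSketch {

        // Hypothetical stand-ins for ClusterMembership and HostSpec.
        record Membership(String clusterId) {}
        record Host(Optional<Membership> membership) {}

        static Set<String> allClustersOf(Collection<Host> hosts) {
            return hosts.stream()
                        .map(Host::membership)
                        .flatMap(Optional::stream) // drops hosts that have no membership
                        .map(Membership::clusterId)
                        .collect(Collectors.toUnmodifiableSet());
        }

        public static void main(String[] args) {
            var hosts = List.of(new Host(Optional.of(new Membership("zone-config-servers"))),
                                new Host(Optional.empty()));
            System.out.println(allClustersOf(hosts)); // [zone-config-servers]
        }
    }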
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index 144959ca2f9..460e1e71e65 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -8,6 +8,9 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.exception.LoadBalancerServiceException;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
+import com.yahoo.vespa.flags.BooleanFlag;
+import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
@@ -15,6 +18,7 @@ import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerService;
+import com.yahoo.vespa.hosted.provision.lb.LoadBalancerSpec;
import com.yahoo.vespa.hosted.provision.lb.Real;
import com.yahoo.vespa.hosted.provision.node.IP;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
@@ -23,6 +27,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.logging.Level;
@@ -45,11 +50,13 @@ public class LoadBalancerProvisioner {
private final NodeRepository nodeRepository;
private final CuratorDatabaseClient db;
private final LoadBalancerService service;
+ private final BooleanFlag provisionConfigServerLoadBalancer;
- public LoadBalancerProvisioner(NodeRepository nodeRepository, LoadBalancerService service) {
+ public LoadBalancerProvisioner(NodeRepository nodeRepository, LoadBalancerService service, FlagSource flagSource) {
this.nodeRepository = nodeRepository;
this.db = nodeRepository.database();
this.service = service;
+ this.provisionConfigServerLoadBalancer = Flags.CONFIGSERVER_PROVISION_LB.bindTo(flagSource);
// Read and write all load balancers to make sure they are stored in the latest version of the serialization format
for (var id : db.readLoadBalancerIds()) {
try (var lock = db.lock(id.application())) {
@@ -70,11 +77,12 @@ public class LoadBalancerProvisioner {
* Calling this for irrelevant node or cluster types is a no-op.
*/
public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
- if (requestedNodes.type() != NodeType.tenant) return; // Nothing to provision for this node type
- if (!cluster.type().isContainer()) return; // Nothing to provision for this cluster type
+ if (!canForwardTo(requestedNodes.type(), cluster)) return; // Nothing to provision for this node and cluster type
if (application.instance().isTester()) return; // Do not provision for tester instances
try (var lock = db.lock(application)) {
- provision(application, effectiveId(cluster), false, lock);
+ ClusterSpec.Id clusterId = effectiveId(cluster);
+ List<Node> nodes = nodesOf(clusterId, application);
+ provision(application, clusterId, nodes, false, lock);
}
}
@@ -91,13 +99,14 @@ public class LoadBalancerProvisioner {
public void activate(ApplicationId application, Set<ClusterSpec> clusters,
@SuppressWarnings("unused") Mutex applicationLock, NestedTransaction transaction) {
try (var lock = db.lock(application)) {
- var containerClusters = containerClustersOf(clusters);
- for (var clusterId : containerClusters) {
+ for (var cluster : loadBalancedClustersOf(application).entrySet()) {
// Provision again to ensure that load balancer instance is re-configured with correct nodes
- provision(application, clusterId, true, lock);
+ provision(application, cluster.getKey(), cluster.getValue(), true, lock);
}
// Deactivate any surplus load balancers, i.e. load balancers for clusters that have been removed
- var surplusLoadBalancers = surplusLoadBalancersOf(application, containerClusters);
+ var surplusLoadBalancers = surplusLoadBalancersOf(application, clusters.stream()
+ .map(LoadBalancerProvisioner::effectiveId)
+ .collect(Collectors.toSet()));
deactivate(surplusLoadBalancers, transaction);
}
}
@@ -138,9 +147,17 @@ public class LoadBalancerProvisioner {
db.writeLoadBalancers(deactivatedLoadBalancers, transaction);
}
+ // TODO(mpolden): Inline when feature flag is removed
+ private boolean canForwardTo(NodeType type, ClusterSpec cluster) {
+ boolean canForwardTo = service.canForwardTo(type, cluster.type());
+ if (canForwardTo && type == NodeType.config) {
+ return provisionConfigServerLoadBalancer.value();
+ }
+ return canForwardTo;
+ }
/** Idempotently provision a load balancer for given application and cluster */
- private void provision(ApplicationId application, ClusterSpec.Id clusterId, boolean activate,
+ private void provision(ApplicationId application, ClusterSpec.Id clusterId, List<Node> nodes, boolean activate,
@SuppressWarnings("unused") Mutex loadBalancersLock) {
var id = new LoadBalancerId(application, clusterId);
var now = nodeRepository.clock().instant();
@@ -148,7 +165,7 @@ public class LoadBalancerProvisioner {
if (loadBalancer.isEmpty() && activate) return; // Nothing to activate as this load balancer was never prepared
var force = loadBalancer.isPresent() && loadBalancer.get().state() != LoadBalancer.State.active;
- var instance = create(application, clusterId, allocatedContainers(application, clusterId), force);
+ var instance = provisionInstance(application, clusterId, nodes, force);
LoadBalancer newLoadBalancer;
if (loadBalancer.isEmpty()) {
newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
@@ -159,7 +176,8 @@ public class LoadBalancerProvisioner {
db.writeLoadBalancer(newLoadBalancer);
}
- private LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, List<Node> nodes, boolean force) {
+ private LoadBalancerInstance provisionInstance(ApplicationId application, ClusterSpec.Id cluster, List<Node> nodes,
+ boolean force) {
var reals = new LinkedHashSet<Real>();
for (var node : nodes) {
for (var ip : reachableIpAddresses(node)) {
@@ -177,14 +195,21 @@ public class LoadBalancerProvisioner {
}
}
- /** Returns a list of active and reserved nodes of type container in given cluster */
- private List<Node> allocatedContainers(ApplicationId application, ClusterSpec.Id clusterId) {
- return NodeList.copyOf(nodeRepository.getNodes(NodeType.tenant, Node.State.reserved, Node.State.active))
- .owner(application)
- .matching(node -> node.state().isAllocated())
- .container()
- .matching(node -> effectiveId(node.allocation().get().membership().cluster()).equals(clusterId))
- .asList();
+ /** Returns the nodes allocated to the given load balanced cluster */
+ private List<Node> nodesOf(ClusterSpec.Id loadBalancedCluster, ApplicationId application) {
+ return loadBalancedClustersOf(application).getOrDefault(loadBalancedCluster, List.of());
+ }
+
+ /** Returns the load balanced clusters of given application and their nodes */
+ private Map<ClusterSpec.Id, List<Node>> loadBalancedClustersOf(ApplicationId application) {
+ NodeList nodes = NodeList.copyOf(nodeRepository.getNodes(Node.State.reserved, Node.State.active))
+ .owner(application);
+ if (nodes.stream().anyMatch(node -> node.type() == NodeType.config)) {
+ nodes = nodes.nodeType(NodeType.config).type(ClusterSpec.Type.admin);
+ } else {
+ nodes = nodes.nodeType(NodeType.tenant).container();
+ }
+ return nodes.stream().collect(Collectors.groupingBy(node -> effectiveId(node.allocation().get().membership().cluster())));
}
/** Find IP addresses reachable by the load balancer service */
@@ -202,14 +227,6 @@ public class LoadBalancerProvisioner {
return reachable;
}
- /** Returns the container cluster IDs of the given clusters */
- private static Set<ClusterSpec.Id> containerClustersOf(Set<ClusterSpec> clusters) {
- return clusters.stream()
- .filter(c -> c.type().isContainer())
- .map(LoadBalancerProvisioner::effectiveId)
- .collect(Collectors.toUnmodifiableSet());
- }
-
private static ClusterSpec.Id effectiveId(ClusterSpec cluster) {
return cluster.combinedId().orElse(cluster.id());
}
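The main structural change in LoadBalancerProvisioner is that prepare and activate now share one view of which clusters are load balanced: loadBalancedClustersOf picks either the config server's admin cluster or the tenant container clusters, then groups nodes by their effective cluster id (a combined cluster reports the id of the cluster it is combined into). The sketch below shows just that grouping step, with hypothetical stand-in records for Node and ClusterSpec.

    import java.util.List;
    import java.util.Map;
    import java.util.Optional;
    import java.util.stream.Collectors;

    final class GroupByEffectiveClusterSketch {

        // Hypothetical stand-ins for ClusterSpec and Node.
        record Cluster(String id, Optional<String> combinedId) {
            String effectiveId() { return combinedId.orElse(id); } // mirrors effectiveId(cluster)
        }
        record Node(String hostname, Cluster cluster) {}

        static Map<String, List<Node>> loadBalancedClustersOf(List<Node> nodes) {
            return nodes.stream()
                        .collect(Collectors.groupingBy(node -> node.cluster().effectiveId()));
        }

        public static void main(String[] args) {
            var nodes = List.of(new Node("cfg1", new Cluster("zone-config-servers", Optional.empty())),
                                new Node("cfg2", new Cluster("zone-config-servers", Optional.empty())));
            // One load balanced cluster with two real servers behind it.
            System.out.println(loadBalancedClustersOf(nodes)); // {zone-config-servers=[...]}
        }
    }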
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 59fca955a68..fe46070da1d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -7,7 +7,6 @@ import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.HostFilter;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
@@ -15,7 +14,6 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.ProvisionLogger;
import com.yahoo.config.provision.Provisioner;
import com.yahoo.config.provision.Zone;
-import java.util.logging.Level;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.flags.FlagSource;
@@ -36,6 +34,7 @@ import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
@@ -66,7 +65,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
this.allocationOptimizer = new AllocationOptimizer(nodeRepository);
this.capacityPolicies = new CapacityPolicies(nodeRepository);
this.zone = zone;
- this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService));
+ this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService, flagSource));
this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
this.preparer = new Preparer(nodeRepository,
zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD,
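The only functional change in NodeRepositoryProvisioner is threading the FlagSource into LoadBalancerProvisioner so the new config server behaviour stays behind a feature flag. A minimal sketch of that flag-gating pattern, using only the flag classes that appear elsewhere in this diff (the GatedSketch class itself is hypothetical):

    import com.yahoo.vespa.flags.BooleanFlag;
    import com.yahoo.vespa.flags.FlagSource;
    import com.yahoo.vespa.flags.Flags;
    import com.yahoo.vespa.flags.InMemoryFlagSource;

    final class GatedSketch {

        private final BooleanFlag provisionConfigServerLoadBalancer;

        GatedSketch(FlagSource flagSource) {
            // Bind once at construction, read per call, as LoadBalancerProvisioner does.
            this.provisionConfigServerLoadBalancer = Flags.CONFIGSERVER_PROVISION_LB.bindTo(flagSource);
        }

        boolean configServerLoadBalancersEnabled() {
            return provisionConfigServerLoadBalancer.value();
        }

        public static void main(String[] args) {
            InMemoryFlagSource flagSource = new InMemoryFlagSource();
            flagSource.withBooleanFlag(Flags.CONFIGSERVER_PROVISION_LB.id(), true);
            System.out.println(new GatedSketch(flagSource).configServerLoadBalancersEnabled()); // true
        }
    }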
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index 26039c29ae8..f48127f650d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -11,6 +11,8 @@ import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.transaction.NestedTransaction;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
@@ -43,7 +45,8 @@ public class LoadBalancerProvisionerTest {
private final ApplicationId app2 = ApplicationId.from("tenant2", "application2", "default");
private final ApplicationId infraApp1 = ApplicationId.from("vespa", "tenant-host", "default");
- private final ProvisioningTester tester = new ProvisioningTester.Builder().build();
+ private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
+ private final ProvisioningTester tester = new ProvisioningTester.Builder().flagSource(flagSource).build();
@Test
public void provision_load_balancer() {
@@ -212,6 +215,21 @@ public class LoadBalancerProvisionerTest {
assertEquals(combinedId, lbs.get().get(0).id().cluster());
}
+ @Test
+ public void provision_load_balancer_config_server_cluster() {
+ flagSource.withBooleanFlag(Flags.CONFIGSERVER_PROVISION_LB.id(), true);
+ ApplicationId configServerApp = ApplicationId.from("hosted-vespa", "zone-config-servers", "default");
+ Supplier<List<LoadBalancer>> lbs = () -> tester.nodeRepository().loadBalancers(configServerApp).asList();
+ var cluster = ClusterSpec.Id.from("zone-config-servers");
+ var nodes = prepare(configServerApp, Capacity.fromRequiredNodeType(NodeType.config), false,
+ clusterRequest(ClusterSpec.Type.admin, cluster));
+ assertEquals(1, lbs.get().size());
+ assertEquals("Prepare provisions load balancer with reserved nodes", 2, lbs.get().get(0).instance().reals().size());
+ tester.activate(configServerApp, nodes);
+ assertSame(LoadBalancer.State.active, lbs.get().get(0).state());
+ assertEquals(cluster, lbs.get().get(0).id().cluster());
+ }
+
private void dirtyNodesOf(ApplicationId application) {
tester.nodeRepository().setDirty(tester.nodeRepository().getNodes(application), Agent.system, this.getClass().getSimpleName());
}
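For completeness, a hypothetical companion test (not part of this commit) could pin down the flag-off path, assuming CONFIGSERVER_PROVISION_LB defaults to false and reusing the prepare and clusterRequest helpers of this test class:

    @Test
    public void does_not_provision_config_server_load_balancer_when_flag_is_off() {
        ApplicationId configServerApp = ApplicationId.from("hosted-vespa", "zone-config-servers", "default");
        prepare(configServerApp, Capacity.fromRequiredNodeType(NodeType.config), false,
                clusterRequest(ClusterSpec.Type.admin, ClusterSpec.Id.from("zone-config-servers")));
        // No load balancer should be written when the flag is left at its default.
        assertEquals(0, tester.nodeRepository().loadBalancers(configServerApp).asList().size());
    }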